/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/operator.h"

#include <glog/logging.h>
#include <sstream>
#include <string>

#include "gflags/gflags.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/framework/op_call_stack.h"
#include "paddle/fluid/framework/phi_utils.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/transfer_scope_cache.h"
#include "paddle/fluid/framework/unused_var_check.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/device/device_wrapper.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/core/kernel_factory.h"
#include "paddle/phi/ops/compat/signatures.h"

namespace phi {
class DenseTensor;
}  // namespace phi

#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_info.h"
#include "paddle/fluid/platform/device/xpu/xpu_op_list.h"
#endif

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

#ifdef PADDLE_WITH_MLU
#include "paddle/fluid/platform/device/mlu/mlu_info.h"
#endif

DECLARE_bool(benchmark);
DECLARE_bool(check_nan_inf);
DECLARE_bool(enable_unused_var_check);
DECLARE_bool(run_kp_kernel);

namespace paddle {
namespace framework {

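// kKernelPriority lists (Place, Library) pairs in descending order of
// preference when more than one kernel is registered for an op: CUDNN on
// GPU first, then plain CUDA, then MKLDNN on CPU, then plain CPU.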
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

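// The static helpers below (GetDimsDebug, VarInited, GetDtype, GetPlace,
// GetRowSize, GetLoDDebug) only gather per-variable metadata for the
// human-readable operator dump produced by OperatorBase::DebugStringEx().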
static DDim GetDimsDebug(const ScopeBase& scope, const std::string& name,
                         bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    return tensor.dims();
  } else if (var->IsType<phi::SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<phi::SelectedRows>().value().dims();
    } else {
      return var->Get<phi::SelectedRows>().GetCompleteDims();
    }
  } else if (var->IsType<Strings>()) {
    return DDim({static_cast<int64_t>(var->Get<Strings>().size())});
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const ScopeBase& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const ScopeBase& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(framework::TransToProtoVarType(tensor.dtype()));
  } else if (var->IsType<phi::SelectedRows>()) {
    auto tensor = var->Get<phi::SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(framework::TransToProtoVarType(tensor.dtype()));
    }
  } else if (var->IsType<Strings>()) {
    return "strings";
  } else {
    return "";
  }
}

static std::string GetPlace(const ScopeBase& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }
  auto to_string = [](const platform::Place& p) {
    std::stringstream sstream;
    sstream << p;
    return sstream.str();
  };

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return to_string(tensor.place());
  } else if (var->IsType<phi::SelectedRows>()) {
    auto tensor = var->Get<phi::SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return to_string(tensor.place());
    }
  } else {
    return "";
  }
}

static int GetRowSize(const ScopeBase& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<phi::SelectedRows>()) {
    return var->Get<phi::SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoDDebug(const ScopeBase& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    return tensor.lod();
  } else {
    return default_lod;
  }
}

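// RuntimeContext resolves every input/output variable name to a Variable*
// once, up front, so the kernel launch path does not have to repeat scope
// lookups for each access.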
RuntimeContext::RuntimeContext(const VariableNameMap& innames,
                               const VariableNameMap& outnames,
                               const Scope& scope) {
  for (auto& var_name_item : innames) {
    std::vector<Variable*>& input_vars = inputs[var_name_item.first];
    input_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      input_vars.push_back(scope.FindVar(var_name));
    }
  }
  for (auto& var_name_item : outnames) {
    std::vector<Variable*>& output_vars = outputs[var_name_item.first];
    output_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      output_vars.push_back(scope.FindVar(var_name));
    }
  }
}

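// Usage sketch (an illustration, not code from this file): an operator
// instance is typically created from an OpDesc and then executed with a
// scope and a place, e.g.
//   auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
//   op->Run(scope, platform::CPUPlace());
// Run() below validates the place against the devices this build supports,
// records profiler events, and delegates the actual work to RunImpl().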
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  try {
    VLOG(4) << place << " " << DebugStringEx(&scope);
    if (platform::is_gpu_place(place)) {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with CUDA support.",
          place));
#else
      auto dev_id = place.device;
      platform::SetDeviceId(dev_id);
#endif
    } else if (platform::is_xpu_place(place)) {
#ifndef PADDLE_WITH_XPU
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with XPU support.",
          place));
#else
      auto dev_id = place.device;
      platform::SetXPUDeviceId(dev_id);
#endif
    } else if (platform::is_npu_place(place)) {
#ifndef PADDLE_WITH_ASCEND_CL
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with NPU support.",
          place));
#else
      auto dev_id = place.device;
      platform::SetNPUDeviceId(dev_id);
#endif
    } else if (platform::is_mlu_place(place)) {
#ifndef PADDLE_WITH_MLU
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with MLU support.",
          place));
#else
      auto dev_id = place.device;
      platform::SetMLUDeviceId(dev_id);
#endif
    } else if (platform::is_custom_place(place)) {
#ifndef PADDLE_WITH_CUSTOM_DEVICE
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with CustomDevice support.",
          place));
#else
      phi::DeviceManager::SetDevice(place);
#endif
    }

    {
      // TODO(wangchaochaohu): refine code to use only one RecordEvent.
      // To record both the op type cost time and the op name cost time,
      // we set two events.
      platform::RecordEvent op_type_record_event(
          Type(), platform::TracerEventType::Operator, 1);
      auto op_name = platform::OpName(outputs_, Type());
      platform::RecordEvent op_name_record_event(
          op_name, platform::TracerEventType::Operator, 10,
          platform::EventRole::kUniqueOp);
      RunImpl(scope, place);
    }

    VLOG(3) << GetExecutionPlace(place) << " " << DebugStringEx(&scope);
  } catch (platform::EnforceNotMet& exception) {
    framework::InsertCallStackInfo(Type(), Attrs(), &exception);
    throw std::move(exception);
  } catch (platform::EOFException&) {
    std::rethrow_exception(std::current_exception());
  } catch (std::exception& ex) {
    LOG(WARNING) << Type() << " raises an exception "
                 << platform::demangle(typeid(ex).name()) << ", " << ex.what();
    std::rethrow_exception(std::current_exception());
  } catch (...) {
    LOG(WARNING) << Type() << " raises an unknown exception";
    std::rethrow_exception(std::current_exception());
  }
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(
      ins.size(), 1UL,
      platform::errors::InvalidArgument(
          "Operator %s's input %s should contain only one variable.", type_,
          name));
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE_NE(
      it, inputs_.end(),
      platform::errors::NotFound("Operator %s does not have the input %s.",
                                 type_, name));
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(
      outs.size(), 1UL,
      platform::errors::InvalidArgument(
          "Operator %s's output %s should contain only one variable.", type_,
          name));
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE_NE(
      it, outputs_.end(),
      platform::errors::NotFound(
          "Operator %s does not have an output called %s.", type_, name));
  return it->second;
}

std::string OperatorBase::DebugStringEx(const ScopeBase* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";

  const std::unordered_set<std::string>* no_need_buffer_vars = nullptr;
  if (info_ && info_->NoNeedBufferVarsInferer()) {
    no_need_buffer_vars =
        &(Info().NoNeedBufferVarsInferer()(Inputs(), Outputs(), Attrs()));
    if (no_need_buffer_vars->empty()) no_need_buffer_vars = nullptr;
  }

  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    bool is_no_need_buffer_var =
        (no_need_buffer_vars && no_need_buffer_vars->count(input.first) > 0);
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = is_no_need_buffer_var
                                  ? "unknown_dtype"
                                  : GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDimsDebug(*scope, var_name, true) << "]";
          ss << "(" << GetLoDDebug(*scope, var_name) << ")";
          ss << "(" << GetPlace(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
          ss << "[" << GetDimsDebug(*scope, var_name, true) << "]";
          ss << "(" << GetLoDDebug(*scope, var_name) << ")";
          ss << "(" << GetPlace(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type),
      inputs_(inputs),
      outputs_(outputs),
      attrs_(attrs),
      // NOTE(zjl): why op_info may be nullptr?
      info_(OpInfoMap::Instance().GetNullable(type)) {
  // In dygraph mode, all the OperatorBase will be constructed by function:
  // framework::OpRegistry::CreateOp(type, {}, {}, {}, false).
  // Inputs, outputs and attrs will be set to empty map
  // to improve the execution efficiency of dygraph.
  if (inputs_.size() > 0 || outputs_.size() > 0) {
    GenerateTemporaryNames();
    CheckAllInputOutputSet();
  }
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = Info();

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  if (info_ == nullptr || info_->proto_ == nullptr) return;

  for (auto& in : info_->Proto().inputs()) {
    if (!in.dispensable() && !in.extra()) {
      PADDLE_ENFORCE_NE(
          inputs_.find(in.name()), inputs_.end(),
          platform::errors::NotFound("Operator %s's input (%s) is not set.",
                                     Type(), in.name()));
    }
  }

  for (auto& out : info_->Proto().outputs()) {
    if (!out.dispensable() && !out.extra()) {
      PADDLE_ENFORCE_NE(
          outputs_.find(out.name()), outputs_.end(),
          platform::errors::NotFound("Operator %s's output (%s) is not set.",
                                     Type(), out.name()));
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

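// Unwraps a Variable to the tensor that carries its data: the LoDTensor
// itself, or the value() tensor of a SelectedRows; any other variable type
// is rejected with an InvalidArgument error.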
const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<phi::SelectedRows>()) {
    return &(var.Get<phi::SelectedRows>().value());
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Variable type is %s, expect LoDTensor or SelectedRows.",
        ToTypeName(var.Type())));
  }
}

Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<phi::SelectedRows>()) {
    return var->GetMutable<phi::SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Variable type is %s, expect LoDTensor or SelectedRows.",
        ToTypeName(var->Type())));
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  auto* var = InputVar(name);
  return var != nullptr;
}

bool ExecutionContext::HasInputs(const std::string& name) const {
  const auto& ins = ctx_.inputs;
  auto it = ins.find(name);
  if (it == ins.end() || it->second.empty()) {
    return false;
  }
  for (const auto* input : it->second) {
    if (input == nullptr) {
      return false;
    }
  }
  return true;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  auto* var = OutputVar(name);
  return var != nullptr;
}

const Variable* ExecutionContext::InputVar(const std::string& name) const {
  LogVarUsageIfUnusedVarCheckEnabled(name);

  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(
      it->second.size(), 1UL,
      platform::errors::InvalidArgument(
          "Operator %s's input %s should contain only one variable.",
          op_.Type(), name));
  return it->second.empty() ? nullptr : it->second[0];
}

Variable* ExecutionContext::OutputVar(const std::string& name) const {
  auto it = ctx_.outputs.find(name);
  if (it == ctx_.outputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(
      it->second.size(), 1UL,
      platform::errors::InvalidArgument(
          "Operator %s's output %s should contain only one variable.",
          op_.Type(), name));
  return it->second.empty() ? nullptr : it->second[0];
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  LogVarUsageIfUnusedVarCheckEnabled(name);

  auto vars = MultiInputVar(name);
  if (vars.size() == 0) {
    return {};
  }
  std::vector<const Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                 [&](const Variable* var) -> const Tensor* {
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE_EQ(var->IsType<LoDTensor>(), true,
                                     platform::errors::InvalidArgument(
                                         "Input variable should be LoDTensor, "
                                         "but the received type is %s.",
                                         ToTypeName(var->Type())));
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto vars = MultiOutputVar(name);

  if (vars.size() == 0) {
    return {};
  }
  std::vector<Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                 [&](Variable* var) -> Tensor* {
                   return var == nullptr ? nullptr
                                         : var->GetMutable<LoDTensor>();
                 });
  return res;
}

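// OpSupportGPU checks the new phi kernel factory first and then the legacy
// fluid kernel registry; an op that registers no kernels at all (e.g. a
// control-flow op) is treated as GPU-capable.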
bool OpSupportGPU(const std::string& op_type) {
  // check in new Function kernel first
  bool has_phi_kernel = false;
  auto& kernel_factory = phi::KernelFactory::Instance();
  auto kernel_key_map =
      kernel_factory.SelectKernelMap(phi::TransToPhiKernelName(op_type));
  for (auto& kernel : kernel_key_map) {
    has_phi_kernel = true;
    if (platform::is_gpu_place(phi::TransToPhiPlace(kernel.first.backend()))) {
      return true;
    }
  }

  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it != all_kernels.end()) {
    for (auto& kern_pair : it->second) {
      if (platform::is_gpu_place(kern_pair.first.place_)) {
        return true;
      }
    }
  } else {
    if (has_phi_kernel) {
      // if has phi kernel, but not find phi gpu kernel and fluid gpu kernel,
      // this op doesn't support GPU
      return false;
    } else {
      // All control operator must support GPU
      return true;
    }
  }

  return false;
}

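// RuntimeInferShapeContext adapts an (OperatorBase, RuntimeContext) pair to
// the InferShapeContext interface, so the same InferShape functions can run
// at execution time against real Variables instead of compile-time VarDescs.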
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const RuntimeContext& ctx)
      : op_(op), ctx_(ctx) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0) return false;
    PADDLE_ENFORCE_EQ(
        in.size(), 1UL,
        platform::errors::InvalidArgument(
            "Input %s should not contain more than one input.", name));
    return in[0] != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(
        out.size(), 1UL,
        platform::errors::InvalidArgument(
            "Output %s should not contain more than one output.", name));
    return out[0] != nullptr;
  }

  bool HasAttr(const std::string& name) const override {
    return op_.HasAttr(name);
  }

  bool HasInputs(const std::string& name) const override {
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end() || it->second.empty()) {
      return false;
    }
    for (auto& input : it->second) {
      if (input == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end() || it->second.empty()) {
      return false;
    }
    for (auto& output : it->second) {
      if (output == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  std::vector<std::string> Inputs(const std::string& name) const override {
    return op_.Inputs(name);
  }

  std::vector<std::string> Outputs(const std::string& name) const override {
    return op_.Outputs(name);
  }

  std::string GetInputNameByIdx(size_t idx) const override {
    auto& op_proto =
        paddle::framework::OpInfoMap::Instance().Get(op_.Type()).proto_;
    PADDLE_ENFORCE_LT(idx, op_proto->inputs().size(),
                      platform::errors::OutOfRange(
                          "The index should be less than the size of inputs of "
                          "operator %s, but got index is %d and size is %d",
                          op_.Type(), idx, op_proto->inputs().size()));
    return op_proto->inputs()[idx].name();
  }

  std::string GetOutputNameByIdx(size_t idx) const override {
    auto& op_proto =
        paddle::framework::OpInfoMap::Instance().Get(op_.Type()).proto_;
    PADDLE_ENFORCE_LT(
        idx, op_proto->outputs().size(),
        platform::errors::OutOfRange(
            "The index should be less than the size of outputs of "
            "operator %s, but got index is %d and size is %d",
            op_.Type(), idx, op_proto->outputs().size()));
    return op_proto->outputs()[idx].name();
  }

  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE_NE(
        in_it, ctx_.inputs.end(),
        platform::errors::NotFound("Input %s does not exist.", in));
    PADDLE_ENFORCE_NE(
        out_it, ctx_.outputs.end(),
        platform::errors::NotFound("Output %s does not exist.", out));
    PADDLE_ENFORCE_LT(i, in_it->second.size(),
                      platform::errors::InvalidArgument(
                          "The index of input dimension is out of range, "
                          "expected index less than %zu, but received %zu.",
                          in_it->second.size(), i));
    PADDLE_ENFORCE_LT(j, out_it->second.size(),
                      platform::errors::InvalidArgument(
                          "The index of output dimension is out of range, "
                          "expected index less than %zu, but received %zu.",
                          out_it->second.size(), j));

    Variable* in_var = in_it->second[i];
    Variable* out_var = out_it->second[j];

    PADDLE_ENFORCE_EQ(
        in_var->Type(), out_var->Type(),
        platform::errors::InvalidArgument(
            "The type of input (%s) and output (%s) are inconsistent.", in,
            out));

    if (in_var->IsType<phi::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<phi::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<phi::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(platform::errors::Unimplemented(
          "Currently, the input type of ShareDim can only be LoDTensor "
          "or SelectedRows."));
    }
  }

  void ShareAllLoD(const std::string& in,
                   const std::string& out) const override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE_NE(in_it, ctx_.inputs.end(),
                      platform::errors::NotFound(
                          "Input [%s] found error in Op [%s]", in, op_.Type()));
    PADDLE_ENFORCE_NE(
        out_it, ctx_.outputs.end(),
        platform::errors::NotFound("Output [%s] found error in Op [%s]", out,
                                   op_.Type()));

    auto& in_var_list = in_it->second;
    auto& out_var_list = out_it->second;

    PADDLE_ENFORCE_EQ(
        in_var_list.size(), out_var_list.size(),
        platform::errors::PreconditionNotMet(
            "Op [%s]: Input var size should be equal to output var size",
            op_.Type()));

    auto& out_var_names = op_.Outputs(out);

    for (size_t i = 0; i < in_var_list.size(); ++i) {
      if (out_var_names[i] == framework::kEmptyVarName) {
        continue;
      }

      Variable* in_var = in_var_list[i];
      if (!in_var->IsType<LoDTensor>()) return;
      Variable* out_var = out_var_list[i];
      PADDLE_ENFORCE_EQ(out_var->IsType<LoDTensor>(), true,
                        platform::errors::PreconditionNotMet(
                            "The %d-th output of Output(%s) must be LoDTensor.",
                            i, out_var_names[i]));
      auto& in_tensor = in_var->Get<LoDTensor>();
      auto* out_tensor = out_var->GetMutable<LoDTensor>();
      out_tensor->set_lod(in_tensor.lod());
#ifdef PADDLE_WITH_MKLDNN
      if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
        out_tensor->set_layout(in_tensor.layout());
    }
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE_NE(
        in_it, ctx_.inputs.end(),
        platform::errors::NotFound("Input %s does not exist.", in));
    PADDLE_ENFORCE_NE(
        out_it, ctx_.outputs.end(),
        platform::errors::NotFound("Output %s does not exist.", out));
    PADDLE_ENFORCE_LT(i, in_it->second.size(),
                      platform::errors::InvalidArgument(
                          "The index of input dimension is out of range, "
                          "expected index less than %zu, but received %zu.",
                          in_it->second.size(), i));
    PADDLE_ENFORCE_LT(j, out_it->second.size(),
                      platform::errors::InvalidArgument(
                          "The index of output dimension is out of range, "
                          "expected index less than %zu, but received %zu.",
                          out_it->second.size(), j));

    Variable* in_var = in_it->second.at(i);
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = out_it->second.at(j);
    PADDLE_ENFORCE_EQ(
        out_var->IsType<LoDTensor>(), true,
        platform::errors::InvalidArgument(
            "The %zu-th output of Output(%s) must be LoDTensor.", j, out));
    auto& in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to shared info between in/out Tensor?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPkernel, set_layout(kMKLDNN) should be called
    //    in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  int32_t GetLoDLevel(const std::string& in, size_t i = 0) const override {
    PADDLE_THROW(platform::errors::PreconditionNotMet(
        "GetLoDLevel is only used at compile time. The calculation of "
        "the output's actual lod differs among operators, so it should be "
        "set in the runtime kernel."));
  }

  void SetLoDLevel(const std::string& out, int32_t lod_level,
                   size_t j = 0) const override {
    PADDLE_THROW(platform::errors::PreconditionNotMet(
        "SetLoDLevel is only used at compile time. The calculation of "
        "the output's actual lod differs among operators, so it should be "
        "set in the runtime kernel."));
  }

  bool IsRuntime() const override { return true; }

  bool IsRunMKLDNNKernel() const override {
    try {
      auto& op_with_kernel = dynamic_cast<const OperatorWithKernel&>(op_);
      return ((op_with_kernel.kernel_type()) &&
              (op_with_kernel.kernel_type()->data_layout_ ==
               framework::DataLayout::kMKLDNN));
    } catch (const std::bad_cast&) {
      return false;
    }
  }

  // TODO(paddle-dev): Can this be template?
  std::vector<InferShapeVarPtr> GetInputVarPtrs(
      const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  std::vector<InferShapeVarPtr> GetOutputVarPtrs(
      const std::string& name) const override {
    const std::vector<Variable*>& vars = OutputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  DDim GetInputDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    PADDLE_ENFORCE_EQ(
        vars.size(), 1UL,
        platform::errors::InvalidArgument(
            "Input(%s) should hold one element, but now it holds %zu elements.",
            name, vars.size()));
    return this->GetDim(vars[0]);
  }

  std::vector<DDim> GetInputsDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    return GetDims(vars);
  }

  std::vector<proto::VarType::Type> GetInputsVarType(
      const std::string& name) const override {
    return GetVarTypes(InputVars(name));
  }

  std::vector<proto::VarType::Type> GetOutputsVarType(
      const std::string& name) const override {
    return GetVarTypes(OutputVars(name));
  }

  void SetOutputDim(const std::string& name, const DDim& dim) override {
    auto& vars = OutputVars(name);
    PADDLE_ENFORCE_EQ(
        vars.size(), 1UL,
        platform::errors::InvalidArgument("Output(%s) should hold one element, "
                                          "but now it holds %zu elements.",
                                          name, vars.size()));
    SetDim(vars[0], dim);
  }

  void SetOutputsDim(const std::string& name,
                     const std::vector<DDim>& dims) override {
    auto& vars = OutputVars(name);
    SetDims(vars, dims);
  }

 protected:
  DDim GetDim(Variable* var) const {
    PADDLE_ENFORCE_NOT_NULL(
        var, platform::errors::InvalidArgument("Input variable is nullptr."));
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<phi::SelectedRows>()) {
      return var->Get<phi::SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Only LoDTensor or SelectedRows support 'GetDim', but input "
          "Variable's type is %s.",
          ToTypeName(var->Type())));
    }
  }

  std::vector<DDim> GetDims(const std::vector<Variable*>& vars) const {
    std::vector<DDim> ret;
    ret.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(ret),
                   [this](Variable* var) { return this->GetDim(var); });
    return ret;
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW(platform::errors::PreconditionNotMet(
        "GetRepeatedDims method can only be used at compile time."));
  }

  void SetDim(Variable* var, const DDim& dim) {
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<phi::SelectedRows>()) {
      var->GetMutable<phi::SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW(platform::errors::Unimplemented(
          "Variable type error, expect LoDTensor or SelectedRows, but received "
          "(%s).",
          ToTypeName(var->Type())));
    }
  }

  void SetDims(const std::vector<Variable*>& vars,
               const std::vector<DDim>& dims) {
    size_t length = vars.size();
    PADDLE_ENFORCE_EQ(length, dims.size(),
                      platform::errors::InvalidArgument(
                          "The number of input variables does not match the "
                          "number of input dimensions, the number of variables "
                          "is %zu, the number of dimensions is %zu.",
                          length, dims.size()));
    for (size_t i = 0; i < length; ++i) {
      if (vars[i] == nullptr) {
        continue;
      }
      SetDim(vars[i], dims[i]);
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW(platform::errors::PreconditionNotMet(
        "SetRepeatedDims method can only be used at compile time."));
  }

  std::vector<proto::VarType::Type> GetVarTypes(
      const std::vector<Variable*>& vars) const {
    std::vector<proto::VarType::Type> retv;
    retv.resize(vars.size());
    std::transform(vars.begin(), vars.end(), retv.begin(),
                   std::bind(std::mem_fn(&RuntimeInferShapeContext::GetVarType),
                             this, std::placeholders::_1));
    return retv;
  }

  proto::VarType::Type GetVarType(Variable* var) const {
    return ToVarType(var->Type());
  }

 private:
  const std::vector<Variable*>& InputVars(const std::string& name) const {
    auto it = ctx_.inputs.find(name);
    PADDLE_ENFORCE_NE(
        it, ctx_.inputs.end(),
        platform::errors::NotFound(
            "Operator (%s) does not have the input (%s).", op_.Type(), name));
    return it->second;
  }

  const std::vector<Variable*>& OutputVars(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    PADDLE_ENFORCE_NE(
        it, ctx_.outputs.end(),
        platform::errors::NotFound(
            "Operator (%s) does not have the outputs (%s).", op_.Type(), name));
    return it->second;
  }

  const OperatorBase& op_;
  const RuntimeContext& ctx_;
};

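// Debug check enabled by FLAGS_check_nan_inf: empty tensors and dtypes other
// than FP32/FP64 are skipped; any Inf/NaN found is reported as a fatal error.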
static void CheckTensorNANOrInf(const std::string& op_type,
                                const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (framework::TransToProtoVarType(tensor.dtype()) != proto::VarType::FP32 &&
      framework::TransToProtoVarType(tensor.dtype()) != proto::VarType::FP64) {
    return;
  }
  PADDLE_ENFORCE_NE(
      framework::TensorContainsInf(tensor), true,
      platform::errors::Fatal("Operator %s output Tensor %s contains Inf.",
                              op_type, name));
  PADDLE_ENFORCE_NE(
      framework::TensorContainsNAN(tensor), true,
      platform::errors::Fatal("Operator %s output Tensor %s contains NAN.",
                              op_type, name));
}

bool OperatorWithKernel::SupportsMKLDNN(
    const proto::VarType::Type data_type) const {
  auto op_kernel_iter = OperatorWithKernel::AllOpKernels().find(type_);
  if (op_kernel_iter == OperatorWithKernel::AllOpKernels().end()) {
    VLOG(6) << "Warning: " << type_ << " cannot find its MKLDNN kernel among "
                                       "the registered fluid kernels, and we "
                                       "do not search the phi lib for its "
                                       "kernels, so SupportsMKLDNN() returns "
                                       "false.";
    return false;
  }
  auto& op_kernels = op_kernel_iter->second;
  return std::any_of(op_kernels.begin(), op_kernels.end(),
                     [data_type](OpKernelMap::const_reference kern_pair) {
                       return platform::is_cpu_place(kern_pair.first.place_) &&
                              kern_pair.first.library_type_ ==
                                  LibraryType::kMKLDNN &&
                              kern_pair.first.data_type_ == data_type;
                     });
}

bool OperatorWithKernel::CanMKLDNNBeUsed(const framework::ExecutionContext& ctx,
                                         proto::VarType::Type data_type) const {
  bool use_mkldnn_ctx = ctx.HasAttr("use_mkldnn") &&
                        ctx.Attr<bool>("use_mkldnn") &&
                        platform::is_cpu_place(ctx.GetPlace());
  return use_mkldnn_ctx && this->SupportsMKLDNN(data_type);
}

void OperatorWithKernel::InferShape(InferShapeContext* ctx) const {
  PADDLE_THROW(platform::errors::PermissionDenied(
      "The default InferShape function of OperatorWithKernel is not allowed to "
      "be called, please override corresponding InferShape function in the "
      "specific operator."));
}

void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
                                           const platform::Place& place,
                                           const RuntimeContext& ctx) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, ctx);
  this->Info().infer_shape_(&infer_shape_ctx);
}

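// This scope-level RunImpl decides whether the RuntimeContext can be reused:
// when the op carries kEnableCacheRuntimeContext, the context built for the
// current scope is cached and rebuilt only when the scope itself changes.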
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  // To reduce the elapsed time of HasAttr, we use bool variable to record the
  // result of HasAttr.
  if (!enable_cache_runtime_context_ && HasAttr(kEnableCacheRuntimeContext))
    enable_cache_runtime_context_ = true;
  if (!all_kernels_must_compute_runtime_shape_ &&
      HasAttr(kAllKernelsMustComputeRuntimeShape))
    all_kernels_must_compute_runtime_shape_ = true;
  const Scope* cur_scope = &scope;
  if (!enable_cache_runtime_context_) {
    RuntimeContext ctx(Inputs(), Outputs(), scope);
    RunImpl(scope, place, &ctx);
    pre_scope_ = cur_scope;
  } else {
    if (runtime_ctx_.get() == nullptr || pre_scope_ != cur_scope) {
      std::lock_guard<std::mutex> lock(cache_update_mutex_);
      if (runtime_ctx_.get() == nullptr || pre_scope_ != cur_scope) {
        runtime_ctx_.reset(new RuntimeContext(Inputs(), Outputs(), scope));
        pre_scope_ = cur_scope;
      }
    }
    RunImpl(scope, place, runtime_ctx_.get());
  }
}

void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place,
                                 RuntimeContext* runtime_ctx) const {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

#ifdef PADDLE_WITH_ASCEND_CL
  // NOTE(wangxi): nan/inf cannot be detected on NPU by checking the variable
  // values, but only through special `float_status` to checks whether
  // the operation is overflow. More about `float_status`, see:
  // https://gitee.com/ascend/modelzoo/issues/I3NF8V?from=project-issue
  if (FLAGS_check_nan_inf) {
    framework::details::NPUAllocAndClearFloatStatus(*this, scope, place);
  }
#endif

  auto exe_ctx = ExecutionContext(*this, scope, *dev_ctx, *runtime_ctx);
  // using cache
  if (kernel_type_.get()) {
    dev_ctx = pool.Get(kernel_type_->place_);
  }

  // TODO(chenweihang): Now we are still reusing a lot of the original fluid
  // implementation, this is a gradual replacement process
  // TODO(chenweihang): in the first phase of the project, we only support the
  // CPU, CUDA and ROCm backends; XPU, NPU and MKLDNN will be supported in the
  // second phase
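  // Kernel dispatch: prefer a matching phi kernel; if none is valid (or the
  // XPU build cannot support this op), fall back to the legacy fluid
  // OpKernel registry below, possibly falling back to CPU.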
  phi::KernelKey pt_kernel_key;
  std::string pt_kernel_name;
  if (phi::KernelFactory::Instance().HasCompatiblePhiKernel(type_)) {
    if (pt_kernel_signature_ == nullptr || pt_kernel_ == nullptr) {
      pt_kernel_signature_.reset(
          new KernelSignature(std::move(GetExpectedPhiKernelArgs(exe_ctx))));
      VLOG(6) << *pt_kernel_signature_.get();

      kernel_type_.reset(
          new OpKernelType(std::move(InnerGetExpectedKernelType(exe_ctx))));
      dev_ctx = pool.Get(kernel_type_->place_);

      pt_kernel_name = pt_kernel_signature_->name;
      pt_kernel_key = TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
      pt_kernel_.reset(
          new phi::Kernel(phi::KernelFactory::Instance().SelectKernel(
              pt_kernel_name, pt_kernel_key)));

      if (pt_kernel_->IsValid()) {
        VLOG(6) << "Static mode ChoosePhiKernel - kernel name: "
                << pt_kernel_name << " | kernel key: " << pt_kernel_key
                << " | kernel: " << *pt_kernel_;
      } else {
        VLOG(6) << "Static mode ChoosePhiKernel - kernel `" << pt_kernel_name
                << "` not found.";
      }
    } else {
      pt_kernel_name = pt_kernel_signature_->name;
      pt_kernel_key = TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
    }
#ifdef PADDLE_WITH_XPU
    bool is_xpu_unsupport =
        paddle::platform::is_xpu_place(kernel_type_->place_) &&
            !paddle::platform::is_xpu_support_op(type_, *kernel_type_.get()) ||
        paddle::platform::is_in_xpu_black_list(type_);
#endif
    if (pt_kernel_->IsValid()
#ifdef PADDLE_WITH_XPU
        && !is_xpu_unsupport
#endif
        ) {
      run_phi_kernel_ = true;
    } else {
      auto& all_op_kernels = AllOpKernels();
      auto kernels_iter = all_op_kernels.find(type_);
      if (kernels_iter == all_op_kernels.end() ||
          kernels_iter->second.find(*kernel_type_.get()) ==
              kernels_iter->second.end()
#ifdef PADDLE_WITH_XPU
          || is_xpu_unsupport
#endif
          ) {
        auto pt_cpu_kernel_key =
            FallBackToCpu(*kernel_type_.get(), pt_kernel_key, *this);
        pt_kernel_.reset(
            new phi::Kernel(phi::KernelFactory::Instance().SelectKernel(
                pt_kernel_name, pt_cpu_kernel_key)));

        dev_ctx = pool.Get(platform::CPUPlace());
        if (pt_kernel_->IsValid()) {
          VLOG(6) << "Static mode PrepareImpl - kernel name: " << pt_kernel_name
                  << " | kernel key: " << pt_cpu_kernel_key
                  << " | kernel: " << *pt_kernel_;
          run_phi_kernel_ = true;
        }
      }
    }
  }
  if (!run_phi_kernel_) {
    if (kernel_type_.get() == nullptr || kernel_func_.get() == nullptr) {
      ChooseKernel(exe_ctx);
      dev_ctx = pool.Get(kernel_type_->place_);
    }
  }

  // do data transform
  std::vector<std::string> transfered_inplace_vars;
  Scope* transfer_scope = nullptr;
  {
    platform::RecordEvent record_event("prepare_data",
                                       platform::TracerEventType::OperatorInner,
                                       1, platform::EventRole::kInnerOp);
    if (need_prepare_data_) {
      transfer_scope = PrepareData(scope, *kernel_type_,
                                   &transfered_inplace_vars, runtime_ctx);
    }
  }
  // exec scope is the scope that kernel actually executed on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!all_kernels_must_compute_runtime_shape_) {
    platform::RecordEvent record_event("infer_shape",
                                       platform::TracerEventType::OperatorInner,
                                       1, platform::EventRole::kInnerOp);
    RuntimeInferShapeContext infer_shape_ctx(*this, *runtime_ctx);
    this->Info().infer_shape_(&infer_shape_ctx);
  }

  if (FLAGS_enable_unused_var_check) {
    GetThreadLocalUsedVarNameSet()->clear();
  }

  // TODO(panyx0718): ExecutionContext should only depend on RuntimeContext
  // not Scope. Imperative mode only pass inputs and get outputs.
  {
    platform::RecordEvent record_event("compute",
                                       platform::TracerEventType::OperatorInner,
                                       1, platform::EventRole::kInnerOp);
    if (run_phi_kernel_) {
      phi::KernelContext pt_kernel_context;
      // Do data transform before building KernelContext
      // TODO(zhiqiu): support TransferInplaceVarsBack
      PreparePhiData(exec_scope, *pt_kernel_, *pt_kernel_signature_,
                     runtime_ctx);
      BuildPhiKernelContext(*runtime_ctx, dev_ctx, &pt_kernel_context);
      (*pt_kernel_)(&pt_kernel_context);
    } else {
      (*kernel_func_)(
          ExecutionContext(*this, exec_scope, *dev_ctx, *runtime_ctx));
    }
  }

  if (!transfered_inplace_vars.empty()) {
    // An inplace variable has been transferred.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  // See [ Why need handle complex gradient to real gradient? ]
  // Only handle the case where the current kernel data type is complex
  if (framework::IsComplexType(kernel_type_->data_type_)) {
    HandleComplexGradToRealGrad(scope, runtime_ctx);
  }

1350 1351 1352 1353 1354 1355 1356 1357
  if (FLAGS_enable_unused_var_check) {
    // skip op that uses mkldnn because it has different memory reuse strategy.
    // use attr here because some GradMakers (like ActivationGradOpMaker) add
    // input when use_mkldnn=true;
    if (!(HasAttr("use_mkldnn") && Attr<bool>("use_mkldnn"))) {
      CheckUnusedVar(*this, scope);
    }
  }
1358

D
dzhwinter 已提交
1359
  /*For profiling/benchmark only*/
D
dzhwinter 已提交
1360
  if (FLAGS_benchmark) {
Y
yuyang18 已提交
1361
    dev_ctx->Wait();
1362 1363
#if defined(PADDLE_WITH_CUDA) || defined(PADLDE_WITH_ROCM)
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
1364 1365
#endif
    VLOG(4) << "Operator(" << Type() << "): context wait and get last error";
D
dzhwinter 已提交
1366
  }
C
chengduoZH 已提交
1367 1368

  if (FLAGS_check_nan_inf) {
W
WangXi 已提交
1369
    framework::details::CheckOpHasNanOrInf(*this, exec_scope, place);
C
chengduoZH 已提交
1370
  }
1371 1372 1373 1374 1375 1376 1377

  // To solve issue #15032, have a discussion with @Luotao for cpu inference,
  // do not cache transfer scope, hence in this case delete transfer scope
  // after run to avoid memory leak
  if (transfer_scope && !run_by_executor_ && !enable_cache_transfer_scope_) {
    scope.DeleteScope(transfer_scope);
  }
Q
Qiao Longfei 已提交
1378
}
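
// Illustrative usage note (not part of the build): the debugging switches
// consumed by RunImpl above are ordinary gflags, so any binary that links the
// framework can toggle them from its command line, e.g.
//   ./my_program --benchmark --check_nan_inf=true
// (`my_program` is a placeholder). `--benchmark` makes each op wait on its
// device context and surface the last device error, while `--check_nan_inf`
// scans each op's results for NaN/Inf.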

OpKernelType OperatorWithKernel::InnerGetExpectedKernelType(
    const ExecutionContext& ctx) const {
  auto expected_kernel_key = this->GetExpectedKernelType(ctx);
  if (HasAttr("op_device")) {
    if (Attr<std::string>("op_device") == "cpu") {
      expected_kernel_key.place_ = platform::CPUPlace();
    } else if (Attr<std::string>("op_device").find("gpu") !=
               std::string::npos) {
      auto device = Attr<std::string>("op_device");
      size_t pos = device.find(':');
      if (pos != std::string::npos) {
        device = device.substr(0, pos);
        LOG_FIRST_N(WARNING, 1)
            << "Device index is only supported under pipeline parallelism, "
            << "so it will be ignored.";
      }
      // When an Op that has only a CPU kernel is assigned to GPU, the CPU
      // kernel will be executed and a warning will be given at the same time.
      expected_kernel_key.place_ = platform::CPUPlace();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      if (SupportGPU()) {
        auto& dev_ctx = ctx.device_context();
        expected_kernel_key.place_ = dev_ctx.GetPlace();
      }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
      if (SupportNPU()) {
        auto& dev_ctx = ctx.device_context();
        expected_kernel_key.place_ = dev_ctx.GetPlace();
      }
#endif
      if (platform::is_cpu_place(expected_kernel_key.place_)) {
        LOG_FIRST_N(WARNING, 1)
            << "Op(" << type_
            << ") has no CUDA implementation. It will be assigned to CPUPlace.";
      }
    }
  }
  VLOG(3) << "op type:" << type_
          << ", expected_kernel_key:" << expected_kernel_key;
  return expected_kernel_key;
}
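
// Sketch of how `op_device` is typically set (assumed caller-side code, for
// illustration only):
//   op_desc->SetAttr("op_device", std::string("gpu:0"));
// InnerGetExpectedKernelType above then overrides the place inferred from the
// inputs, ignoring the ":0" index outside pipeline parallelism as warned.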

phi::KernelKey OperatorWithKernel::ChoosePhiKernel(
    const ExecutionContext& ctx) const {
  pt_kernel_signature_.reset(
      new KernelSignature(std::move(GetExpectedPhiKernelArgs(ctx))));
  VLOG(6) << *pt_kernel_signature_.get();

  kernel_type_.reset(
      new OpKernelType(std::move(InnerGetExpectedKernelType(ctx))));

  auto pt_kernel_name = pt_kernel_signature_->name;
  auto pt_kernel_key = TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
  pt_kernel_.reset(new phi::Kernel(phi::KernelFactory::Instance().SelectKernel(
      pt_kernel_name, pt_kernel_key)));

  if (pt_kernel_->IsValid()) {
    VLOG(6) << "Static mode ChoosePhiKernel - kernel name: " << pt_kernel_name
            << " | kernel key: " << pt_kernel_key
            << " | kernel: " << *pt_kernel_;
  } else {
    VLOG(6) << "Static mode ChoosePhiKernel - kernel `" << pt_kernel_name
            << "` not found.";
  }
  return pt_kernel_key;
}
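
// Note: an invalid pt_kernel_ here is not an error by itself; callers treat
// it as "no phi kernel registered for this key" and fall back to the fluid
// kernel path selected by ChooseKernel() below.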

void OperatorWithKernel::ChooseKernel(const ExecutionContext& ctx) const {
  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  PADDLE_ENFORCE_NE(
      kernels_iter, all_op_kernels.end(),
      platform::errors::Unavailable(
          "There are no kernels which are registered in the %s operator.",
          type_));

  OpKernelMap& kernels = kernels_iter->second;

  auto expected_kernel_key = InnerGetExpectedKernelType(ctx);

  auto kernel_iter = kernels.find(expected_kernel_key);

#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: falling back to PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif

#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
  if (platform::is_xpu_place(expected_kernel_key.place_) &&
      (kernel_iter == kernels.end() ||
       !paddle::platform::is_xpu_support_op(type_, expected_kernel_key) ||
       paddle::platform::is_in_xpu_black_list(type_))) {
    VLOG(3) << "missing XPU kernel: " << type_
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif

#ifdef PADDLE_WITH_XPU_KP
  if (paddle::platform::is_xpu_place(expected_kernel_key.place_)) {
    bool use_xpu_kp_kernel_rt =
        FLAGS_run_kp_kernel &&
        paddle::platform::is_xpu_kp_support_op(type_, expected_kernel_key);
    bool use_xpu_kp_kernel_debug =
        paddle::platform::is_in_xpu_kpwhite_list(type_);
    if (use_xpu_kp_kernel_rt) {
      VLOG(3) << "xpu_kp using rt mode ";
    }
    if (use_xpu_kp_kernel_debug) {
      VLOG(3) << "xpu_kp using debug mode ";
    }
    bool is_xpu_kp_support = (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
    if (is_xpu_kp_support) {
      expected_kernel_key.library_type_ = LibraryType::kKP;
      kernel_iter = kernels.find(expected_kernel_key);
      VLOG(3) << "using XPU KP kernel: " << type_
              << ", using_kernel_key:" << expected_kernel_key;
    }
    bool is_xpu_unsupport =
        (!paddle::platform::is_xpu_support_op(type_, expected_kernel_key) ||
         paddle::platform::is_in_xpu_black_list(type_));
    if (!is_xpu_kp_support &&
        (kernel_iter == kernels.end() || is_xpu_unsupport)) {
      VLOG(3) << "missing XPU kernel: " << type_
              << ", expected_kernel_key:" << expected_kernel_key
              << ", falling back to CPU one!";
      expected_kernel_key.place_ = platform::CPUPlace();
      kernel_iter = kernels.find(expected_kernel_key);
    }
  }
#endif

#ifdef PADDLE_WITH_IPU
  if (kernel_iter == kernels.end() &&
      platform::is_ipu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing IPU kernel: " << type_
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  if (kernel_iter == kernels.end() &&
      platform::is_npu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing NPU kernel: " << type_
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_MLU
  if (kernel_iter == kernels.end() &&
      platform::is_mlu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing MLU kernel: " << type_
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  PADDLE_ENFORCE_NE(kernel_iter, kernels.end(),
                    platform::errors::NotFound(
                        "Operator (%s) does not have kernel for %s.", type_,
                        KernelTypeToString(expected_kernel_key)));

  std::lock_guard<std::mutex> lock(cache_update_mutex_);
  if (kernel_type_.get() == nullptr || kernel_func_.get() == nullptr) {
    kernel_type_.reset(new OpKernelType(expected_kernel_key));
    kernel_func_.reset(new OpKernelFunc(kernel_iter->second));
  }
}
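
// The fallback ladder above, summarized: exact (place, layout, library) hit;
// then MKLDNN -> plain library; then XPU/XPU-KP, IPU, NPU and MLU each fall
// back to CPUPlace when their kernel or device support is missing. Only if
// the final lookup still misses does ChooseKernel raise NotFound.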

void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* origin_var = scope.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(origin_var,
                            platform::errors::InvalidArgument(
                                "The variable[%s] is nullptr.", var_name));
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(origin_var);
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(var, platform::errors::InvalidArgument(
                                     "The variable[%s] is nullptr.", var_name));
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    auto original_dims = original_tensor->dims();
    original_tensor->ShareDataWith(*transformed_tensor);
    // In order to solve the problem that the output dimensions of the NPU
    // reshape operator are not updated when running inplace.
    if (type_ != "reshape2" && type_ != "reshape2_grad") {
      original_tensor->Resize(original_dims);
    }
  }
}
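
// Illustrative scenario (assumed op/variable names): for an inplace pair
// X -> Out computed by a kernel that ran in a transfer scope, the loop above
// shares the transformed buffer back into the original scope's variable so
// that downstream ops observe the computed result.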

void OperatorWithKernel::HandleComplexGradToRealGrad(
    const Scope& scope, RuntimeContext* ctx) const {
  for (auto& var_name_item : Outputs()) {
    std::vector<Variable*>& output_vars = ctx->outputs[var_name_item.first];
    for (size_t i = 0; i < var_name_item.second.size(); ++i) {
      // 1. find grad_var & check whether it is a complex tensor
      auto var_name = var_name_item.second[i];
      auto orig_var_name = GradOriginalVarName(var_name);
      // only focus on gradient vars
      if (var_name == orig_var_name) {
        continue;
      }
      auto* grad_var = output_vars[i];
      // skip nullptr var
      if (grad_var == nullptr) {
        continue;
      }
      // don't process LoDTensorArray temporarily,
      // add support if necessary for complex number calculations in the future
      if (!VarIsTensor(*grad_var)) {
        continue;
      }
      auto* grad_tensor =
          GetMutableLoDTensorOrSelectedRowsValueFromVar(grad_var);
      // skip nullptr tensor
      if (grad_tensor == nullptr || !grad_tensor->IsInitialized()) {
        continue;
      }
      // only focus on complex dtype now
      auto src_type = framework::TransToProtoVarType(grad_tensor->dtype());
      if (!IsComplexType(src_type)) {
        continue;
      }

      // 2. find forward var & check whether it needs to be cast
      auto* var = scope.FindVar(orig_var_name);
      // if the forward var does not exist, do nothing
      if (var == nullptr) {
        continue;
      }
      if (!VarIsTensor(*var)) {
        continue;
      }
      const auto* tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      PADDLE_ENFORCE_NOT_NULL(
          tensor,
          platform::errors::Unavailable(
              "Forward tensor is nullptr when handle complex data to real."));
      // only need to record the type; the allocation may have been released
      auto dst_type = framework::TransToProtoVarType(tensor->dtype());
      // only focus on real dtypes that need casting
      if (IsComplexType(dst_type)) {
        continue;
      }

      // 3. cast complex grad to real grad
      VLOG(6) << "Transform " << framework::DataTypeToString(src_type)
              << " var `" << var_name << "` to "
              << framework::DataTypeToString(dst_type)
              << " real var in static graph.";
      Tensor out;
      TransComplexToReal(dst_type, src_type, *grad_tensor, &out);
      SetTensorToVariable(*grad_var, out, grad_var);
    }
  }
}
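
// Example of the cast above (hypothetical dtypes): if a forward var X is FP32
// but X@GRAD was produced as COMPLEX64, TransComplexToReal(FP32, COMPLEX64,
// grad, &out) materializes an FP32 gradient from the complex one, so the
// real-typed kernels downstream receive a matching tensor.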


Scope* OperatorWithKernel::PrepareData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars,
    RuntimeContext* ctx) const {
  Scope* new_scope = nullptr;

  const std::unordered_set<std::string>* no_buffer_ins = nullptr;
  if (info_) {
    auto& no_buffer_inferer = info_->NoNeedBufferVarsInferer();
    // Some op may not register NoNeedBufferVarsInferer
    if (no_buffer_inferer) {
      no_buffer_ins = &(no_buffer_inferer(Inputs(), Outputs(), Attrs()));
      if (no_buffer_ins->empty()) no_buffer_ins = nullptr;
    }
  }

  for (auto& var_name_item : Inputs()) {
    bool should_skip_input =
        no_buffer_ins && no_buffer_ins->count(var_name_item.first) > 0;

    std::vector<Variable*>& input_vars = ctx->inputs[var_name_item.first];

    for (size_t i = 0; i < var_name_item.second.size(); ++i) {
      auto& var_name = var_name_item.second[i];
      auto* var = input_vars[i];

      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);

      // When the input is in no_buffer_ins, checking Tensor::holder_ is not
      // thread safe. And for the infershape scenario, the checks that would
      // be omitted are not really needed.
      if (should_skip_input == true) {
#ifdef PADDLE_WITH_MKLDNN
        // A Var without a buffer may still be needed in some situations,
        // like InferShape(). In such a situation we cannot skip the Var
        // analysis, as the MKL-DNN shape of the Var may differ from the
        // kNHWC Var, and a corresponding resized Var has to be created and
        // registered.
        if ((tensor_in->layout() == DataLayout::kMKLDNN) &&
            (var->IsType<LoDTensor>() == true) &&
            (expected_kernel_key.data_layout_ != DataLayout::kMKLDNN) &&
            (paddle::platform::MKLDNNDeviceContext::tls()
                 .get_cur_paddle_data_layout() == DataLayout::kNHWC)) {
          // Mixed execution: MKL-DNN and GPU is not supported!
          if (!new_scope) {
            new_scope = &scope.NewScope();
          }
          auto* trans_var = new_scope->Var(var_name);
          input_vars[i] = trans_var;
          auto out = trans_var->GetMutable<LoDTensor>();
          out->Resize(tensor_in->dims());
          platform::MatchShapeToLayout(out, tensor_in->layout(),
                                       DataLayout::kNHWC);
          VLOG(7) << "Created reshaped dummy input based on MKL-DNN Tensor, "
                     "but kNHWC layout: "
                  << var_name_item.first << " in Operator " << type_;
        } else {
          VLOG(7) << "Skip scanning input " << var_name_item.first
                  << " in Operator " << type_;
        }
#endif
        continue;
      }

      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      // In the inference scenario, the scopes will be reused across batches,
      // so the `new_scope` here would result in GPU memory explosion over the
      // running of operators.
      // We use a thread_local cache to fix that issue: the key in the cache is
      // the combination of the `scope` argument, from_kernel_type, and
      // target_kernel_type.
      // Have a discussion with @Superjomn or the inference developers if some
      // changes on this logic might not be tested on the other scenarios.
      // If this op is not called by an Executor or ParallelExecutor, it should
      // be called by a NaiveExecutor; the NaiveExecutor will cache the scopes
      // and variables, which behaves quite differently.
      //
      // To solve issue #15032, after a discussion with @Luotao about cpu
      // inference: for all cpu-kernel cases without GPU participation, we do
      // not do transfer scope caching here, and cpu inference performance is
      // not impacted by test.
      enable_cache_transfer_scope_ = false;
      if (!run_by_executor_ &&
          (platform::is_gpu_place(kernel_type_for_var.place_) ||
           platform::is_gpu_place(expected_kernel_key.place_))) {
        new_scope = TryCreateTransferScope(kernel_type_for_var,
                                           expected_kernel_key, &scope);
        enable_cache_transfer_scope_ = true;
      }
      if (!new_scope) {
        new_scope = &scope.NewScope();
      }
      // For inference, if a gpu model has an op which can only run on CPU,
      // the result for every different input would be the same as for the
      // first one. The reason is that if a gpu tensor is the input of a cpu
      // kernel, we will create a new cpu tensor in the new scope.
      // However, if enable_cache_runtime_context_, we get the cpu tensor each
      // time, not the gpu tensor. Thus, we set pre_scope_ = nullptr
      // to trigger `new RuntimeContext()` in RunImpl().
      if (enable_cache_runtime_context_) {
        pre_scope_ = nullptr;
      }

      // Create a new var with the same name in the transfer scope
      auto* trans_var = new_scope->Var(var_name);
      input_vars[i] = trans_var;

      // Find if inplace exists between input and output.
      // If inplace exists, set the newly created var to the inplaced output,
      // and record its name in transfered_inplace_vars.
      for (auto& pair : Outputs()) {
        for (size_t j = 0; j < pair.second.size(); ++j) {
          if (pair.second[j] == var_name) {
            VLOG(4) << "Found inplace between input(" << var_name_item.first
                    << ") and output(" << pair.first
                    << "), the variable name is " << var_name;
            ctx->outputs[pair.first][j] = trans_var;
            transfered_inplace_vars->emplace_back(var_name);
          }
        }
      }

      // Do transfer
      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  // If pre_scope_ = &scope, it means that the scope is cached and the op is
  // not in a while block. If new_scope = nullptr, it means that for each input
  // of this Op, there is no need to do PrepareData. So PrepareData can be
  // skipped in the remaining iterations to save the elapsed time.
  // We do not support skipping PrepareData in a while block, because the Op's
  // input may be changed by subsequent Ops, which may cause an error.

  // For inference, ops behind a conditional branch aren't supported well,
  // so disable the prepare optimization conservatively.
  bool force_prepare_data = HasAttr("inference_force_prepare_data") &&
                            Attr<bool>("inference_force_prepare_data");
  if (pre_scope_ == &scope && new_scope == nullptr && !force_prepare_data) {
    need_prepare_data_ = false;
  }

  return new_scope;
}
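
// Caching model of PrepareData, in brief: the first run may build a transfer
// scope; once the scope is reused (pre_scope_ == &scope), no input needed a
// transfer, and nothing forces re-preparation, need_prepare_data_ flips to
// false and later runs skip this function entirely.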

void OperatorWithKernel::ParseInputDataType(
    const std::vector<Variable*>& vars, const std::string& name,
    proto::VarType::Type* data_type) const {
  proto::VarType::Type default_data_type =
      static_cast<proto::VarType::Type>(-1);
  for (size_t i = 0; i < vars.size(); ++i) {
    const Variable* var = vars[i];
    if (var != nullptr) {
      const Tensor* t = nullptr;
      if (var->IsType<Tensor>()) {
        t = &var->Get<Tensor>();
      } else if (var->IsType<LoDTensor>()) {
        t = &var->Get<LoDTensor>();
      } else if (var->IsType<phi::SelectedRows>()) {
        t = &(var->Get<phi::SelectedRows>().value());
      } else if (var->IsType<LoDTensorArray>()) {
        auto t_arr = &var->Get<LoDTensorArray>();
        for (size_t j = 0; j < t_arr->size(); j++) {
          if (t_arr->at(j).IsInitialized()) {
            t = &(t_arr->at(j));
          }
        }
      }
      if (t != nullptr) {
        PADDLE_ENFORCE_EQ(
            t->IsInitialized(), true,
            platform::errors::InvalidArgument("The %s Op's Input Variable `%s` "
                                              "contains uninitialized Tensor.",
                                              Type(), name));
        proto::VarType::Type tmp =
            paddle::framework::TransToProtoVarType(t->dtype());
        PADDLE_ENFORCE(tmp == *data_type || *data_type == default_data_type,
                       platform::errors::InvalidArgument(
                           "The DataType of %s Op's duplicable or different "
                           "slot Variable %s must be "
                           "consistent, or register GetExpectedKernelType. The "
                           "current variable type is (%s), but the "
                           "previous variable type is (%s).",
                           Type(), name, DataTypeToString(tmp),
                           DataTypeToString(*data_type)));
        *data_type = tmp;
      }
    }
  }
}

proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  proto::VarType::Type default_data_type =
      static_cast<proto::VarType::Type>(-1);
  proto::VarType::Type data_type = default_data_type;
  for (auto& input : ctx.InNameList()) {
    const std::vector<Variable*> vars = ctx.MultiInputVar(input);
    ParseInputDataType(vars, input, &data_type);
  }
  PADDLE_ENFORCE_NE(
      data_type, default_data_type,
      platform::errors::NotFound(
          "DataType should be indicated by input Variable at %s.", Type()));
  return data_type;
}
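
// For example (illustrative): an op whose inputs are all FP32 LoDTensors
// resolves to proto::VarType::FP32 here, while mixing FP32 and FP64 inputs
// trips the consistency check in ParseInputDataType unless the op overrides
// GetExpectedKernelType.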

proto::VarType::Type OperatorWithKernel::IndicateVarDataType(
    const ExecutionContext& ctx, const std::string& name) const {
  proto::VarType::Type default_data_type =
      static_cast<proto::VarType::Type>(-1);
  proto::VarType::Type data_type = default_data_type;
  ParseInputDataType(ctx.MultiInputVar(name), name, &data_type);
  PADDLE_ENFORCE_NE(
      data_type, default_data_type,
      platform::errors::InvalidArgument(
          "The Input Variable(%s) of (%s) Operator used to determine kernel "
          "data type is empty or not LoDTensor or SelectedRows or "
          "LoDTensorArray.",
          name, Type()));
  return data_type;
}

Tensor* OperatorWithKernel::GetTensorFormInputSafely(
    const ExecutionContext& ctx, const std::string& name) const {
  // 1. get variable and check
  // NOTE: only supports a single input var for now
  // NOTE: const_cast is used because we don't have a method that can get a
  // single mutable var; we will not change the var's data here, only use
  // some of its attributes
  Variable* var = const_cast<Variable*>(ctx.InputVar(name));
  PADDLE_ENFORCE_NOT_NULL(
      var,
      platform::errors::NotFound(
          "The variable %s is not found when promoting complex types.", name));
  // 2. get tensor and check
  Tensor* t = nullptr;
  if (var->IsType<Tensor>()) {
    t = var->GetMutable<Tensor>();
  } else if (var->IsType<LoDTensor>()) {
    t = var->GetMutable<LoDTensor>();
  } else if (var->IsType<phi::SelectedRows>()) {
    t = var->GetMutable<phi::SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Unsupported input variable type in complex type promotion."));
  }
  PADDLE_ENFORCE_NOT_NULL(
      t,
      platform::errors::InvalidArgument(
          "The Tensor of variable %s is nullptr when promoting complex types.",
          name));
  PADDLE_ENFORCE_EQ(t->IsInitialized(), true,
                    platform::errors::InvalidArgument(
                        "The Tensor in the %s Op's Input Variable %s(%s) is "
                        "not initialized.",
                        Type(), name, ctx.InputName(name)));
  return t;
}

/** NOTE(chenweihang): For safety reasons, we now only
 * perform type promotion for binary operations with
 * complex type inputs, which is used to support the
 * paddle quantum function.
 * In other cases, the first input's data type is used as
 * the kernel data type.
 */
proto::VarType::Type OperatorWithKernel::IndicateOrPromoteVarDataTypes(
    const ExecutionContext& ctx, const std::string& name1,
    const std::string& name2) const {
  // 1. Get the tensors
  auto* tensor_a = GetTensorFormInputSafely(ctx, name1);
  auto* tensor_b = GetTensorFormInputSafely(ctx, name2);

  // 2. Get the two input types
  auto type_a = framework::TransToProtoVarType(tensor_a->dtype());
  auto type_b = framework::TransToProtoVarType(tensor_b->dtype());

  // 3. Get the first input type or promote complex types
  auto target_type = PromoteTypesIfComplexExists(type_a, type_b);

  return target_type;
}
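
// Promotion example (illustrative dtypes): for a binary op with a COMPLEX64
// input and an FP32 input, PromoteTypesIfComplexExists returns COMPLEX64, so
// the kernel is selected on the promoted type rather than on the first
// input's type alone.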

OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

KernelSignature OperatorWithKernel::GetExpectedPhiKernelArgs(
    const ExecutionContext& ctx) const {
  InitDefaultKernelSignatureMap();
  ExecutionArgumentMappingContext arg_mapping_ctx(ctx);
  return phi::OpUtilsMap::Instance().GetArgumentMappingFn(Type())(
      arg_mapping_ctx);
}
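
// Rough shape of what the mapping function returns (assumed example op): for
// "scale" the KernelSignature is approximately
//   {"scale", {"X"}, {"scale", "bias", "bias_after_scale"}, {"Out"}}
// i.e. fluid input/attribute/output names lined up in phi kernel argument
// order, which PreparePhiData and BuildPhiKernelContext below rely on.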

Scope* OperatorWithKernel::PreparePhiData(
    const Scope& scope, const phi::Kernel& pt_kernel,
    const KernelSignature& pt_kernel_signature, RuntimeContext* ctx) const {
  auto& input_names = std::get<0>(pt_kernel_signature.args);
  auto input_defs = pt_kernel.args_def().input_defs();
  PADDLE_ENFORCE_EQ(input_names.size(), input_defs.size(),
                    platform::errors::InvalidArgument(
                        "The size of inputs_args names (%d) must be equal to "
                        "the size of kernel input_defs (%d).",
                        input_names.size(), input_defs.size()));
  Scope* new_scope = nullptr;
  auto& name_map = Inputs();
  const std::unordered_set<std::string>* no_buffer_ins = nullptr;
  if (info_) {
    auto& no_buffer_inferer = info_->NoNeedBufferVarsInferer();
    // Some op may not register NoNeedBufferVarsInferer
    if (no_buffer_inferer) {
      no_buffer_ins = &(no_buffer_inferer(Inputs(), Outputs(), Attrs()));
      if (no_buffer_ins->empty()) no_buffer_ins = nullptr;
    }
  }

  for (size_t i = 0; i < input_defs.size(); ++i) {
    auto& in_def = input_defs.at(i);
    if (ctx->inputs.find(input_names[i]) == ctx->inputs.end()) {
      continue;
    }
    auto& ins_vector = ctx->inputs.at(input_names[i]);
    auto& name_vec = name_map.at(input_names[i]);
    bool should_skip_input =
        no_buffer_ins && no_buffer_ins->count(input_names[i]) > 0;

    for (size_t offset = 0; offset < ins_vector.size(); ++offset) {
      // Only tensors can be transferred to another device.
      auto* var = ins_vector[offset];
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }
      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);

      // When the input is in no_buffer_ins, checking Tensor::holder_ is not
      // thread safe. And for the infershape scenario, the checks that would
      // be omitted are not really needed.
      if (should_skip_input == true) {
        // TODO(YuanRisheng): the MKLDNN code needs to be supplemented here
        // later
        continue;
      }

      if (!tensor_in->IsInitialized()) {
        continue;
      }

      if (in_def.backend == phi::Backend::ALL_BACKEND) {
        continue;
      }
      auto expected_place = phi::TransToPhiPlace(in_def.backend);
      if (platform::is_same_place(tensor_in->place(), expected_place)) {
        continue;
      }

      VLOG(3) << "phi Transform Variable " << input_names[i] << " from "
              << tensor_in->place() << " to " << expected_place;

      if (!new_scope) {
        new_scope = &scope.NewScope();
      }

      // Create a new var with the same name in the transfer scope
      auto* trans_var = new_scope->Var(name_vec[offset]);
      ins_vector[offset] = trans_var;

      // Do transfer
      Tensor out;
      framework::TensorCopySync(*tensor_in, expected_place, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}
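
// Example transfer (hypothetical): if a phi kernel declares an input backend
// of GPU while the variable currently holds a CPUPlace tensor, the loop above
// copies it with TensorCopySync to phi::TransToPhiPlace(phi::Backend::GPU)
// inside a fresh transfer scope, leaving the original variable untouched.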

void OperatorWithKernel::BuildPhiKernelContext(
    const RuntimeContext& ctx, platform::DeviceContext* dev_ctx,
    phi::KernelContext* pt_kernel_context) const {
  pt_kernel_context->SetDeviceContext(dev_ctx);

  auto& input_names = std::get<0>(pt_kernel_signature_->args);
  auto& attr_names = std::get<1>(pt_kernel_signature_->args);
  auto& output_names = std::get<2>(pt_kernel_signature_->args);

  auto input_defs = pt_kernel_->args_def().input_defs();
  auto attr_defs = pt_kernel_->args_def().attribute_defs();
  auto output_defs = pt_kernel_->args_def().output_defs();

  PADDLE_ENFORCE_EQ(input_names.size(), input_defs.size(),
                    platform::errors::InvalidArgument(
                        "The size of inputs_args names (%d) must be equal to "
                        "the size of kernel input_defs (%d).",
                        input_names.size(), input_defs.size()));

  PADDLE_ENFORCE_EQ(output_names.size(), output_defs.size(),
                    platform::errors::InvalidArgument(
                        "The size of outputs_args names (%d) must be equal to "
                        "the size of kernel output_defs (%d).",
                        output_names.size(), output_defs.size()));

  PADDLE_ENFORCE_EQ(attr_names.size(), attr_defs.size(),
                    platform::errors::InvalidArgument(
                        "The size of attribute_args names (%d) must be equal "
                        "to the size of kernel attribute_defs (%d).",
                        attr_names.size(), attr_defs.size()));

  for (size_t i = 0; i < input_names.size(); ++i) {
    auto it = ctx.inputs.find(input_names[i]);

    // calculate the start and end index of the input tensors
    size_t start_idx =
        (i == 0 ? 0 : pt_kernel_context->InputRangeAt(i - 1).second);

    // deal with optional inputs here
    if ((it == ctx.inputs.end() || it->second.size() == 0) &&
        (input_defs[i].type_index ==
             std::type_index(
                 typeid(paddle::optional<const phi::DenseTensor&>)) ||
         input_defs[i].type_index ==
             std::type_index(
                 typeid(paddle::optional<const phi::SelectedRows&>)))) {
      pt_kernel_context->EmplaceBackInputWithoutSetRange(nullptr);
      auto end_idx = start_idx + 1;
      pt_kernel_context->AssignInputRange(std::make_pair(start_idx, end_idx),
                                          i);
      continue;
    }
    auto ins_vector = it->second;
    size_t end_idx = start_idx + ins_vector.size();
    for (size_t offset = 0; offset < ins_vector.size(); ++offset) {
      const phi::TensorBase* tensor_in = nullptr;
      auto* var = ins_vector[offset];
      if (var->IsType<framework::LoDTensor>()) {
        tensor_in = &(var->Get<framework::LoDTensor>());
        pt_kernel_context->EmplaceBackInputWithoutSetRange(tensor_in);
      } else if (var->IsType<phi::SelectedRows>()) {
        tensor_in = &(var->Get<phi::SelectedRows>());
        pt_kernel_context->EmplaceBackInputWithoutSetRange(tensor_in);
      } else if (var->IsType<framework::LoDTensorArray>()) {
        paddle::SmallVector<const phi::TensorBase*> tensor_vector;
        auto& tensor_array = var->Get<framework::LoDTensorArray>();
        for (auto& t : tensor_array) {
          tensor_vector.emplace_back(&t);
        }
        pt_kernel_context->EmplaceBackInputsWithoutSetRange(tensor_vector);
        end_idx += tensor_array.size() - 1;
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "Unsupported input `%s` type when call pt kernel.",
            framework::ToTypeName(var->Type())));
      }
    }
    // Note: here we cannot deal with vector<LoDTensorArray> input
    pt_kernel_context->AssignInputRange(std::make_pair(start_idx, end_idx), i);
  }
  VLOG(4) << "Done inputs";

  for (size_t i = 0; i < output_names.size(); ++i) {
    auto it = ctx.outputs.find(output_names[i]);
    size_t start_idx =
        (i == 0 ? 0 : pt_kernel_context->OutputRangeAt(i - 1).second);

    if (it == ctx.outputs.end() || it->second.empty()) {
      // Deal with the case that some outputs are not found or are NULL when
      // running the kernel.
      // For example: the outputs of matmul_grad are dx and dy, and
      // sometimes dx or dy may be NULL.
      pt_kernel_context->EmplaceBackOutputWithoutSetRange(nullptr);
      auto end_idx = start_idx + 1;
      pt_kernel_context->AssignOutputRange(std::make_pair(start_idx, end_idx),
                                           i);
      continue;
    }
    auto& outs_vector = it->second;

    size_t end_idx = start_idx + outs_vector.size();

    for (size_t offset = 0; offset < outs_vector.size(); ++offset) {
      phi::TensorBase* tensor_out = nullptr;
      auto* var = outs_vector[offset];
      if (var) {
        if (var->template IsType<framework::LoDTensor>()) {
          tensor_out = var->template GetMutable<framework::LoDTensor>();
          pt_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
        } else if (var->template IsType<phi::SelectedRows>()) {
          tensor_out = var->template GetMutable<phi::SelectedRows>();
          pt_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
        } else if (var->template IsType<framework::LoDTensorArray>()) {
          paddle::SmallVector<phi::TensorBase*> tensor_vector;
          auto* tensor_array =
              var->template GetMutable<framework::LoDTensorArray>();
          // Note: If the input LoDTensorArray size is 0, the output
          // LoDTensorArray is also 0
          for (auto& t : *tensor_array) {
            tensor_vector.emplace_back(&t);
          }
          pt_kernel_context->EmplaceBackOutputsWithoutSetRange(tensor_vector);
          end_idx += tensor_array->size() - 1;
        } else {
          PADDLE_THROW(platform::errors::Unimplemented(
              "Unsupported output `%s` type when call pt kernel.",
              framework::ToTypeName(var->Type())));
        }
      } else {
        pt_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
      }
    }
    pt_kernel_context->AssignOutputRange(std::make_pair(start_idx, end_idx), i);
  }
  VLOG(4) << "Done outputs";

  for (size_t i = 0; i < attr_names.size(); ++i) {
    if (attr_defs[i].type_index == std::type_index(typeid(phi::ScalarArray))) {
      auto attr_iter = Attrs().find(attr_names[i]);
      if (attr_iter != Attrs().end()) {  // shape is in the attribute
        if (std::type_index(attr_iter->second.type()) ==
            std::type_index(typeid(std::vector<int64_t>))) {
          pt_kernel_context->EmplaceBackAttr(std::move(phi::ScalarArray(
              BOOST_GET_CONST(std::vector<int64_t>, attr_iter->second))));
        } else if (std::type_index(attr_iter->second.type()) ==
                   std::type_index(typeid(std::vector<int32_t>))) {
          pt_kernel_context->EmplaceBackAttr(std::move(phi::ScalarArray(
              BOOST_GET_CONST(std::vector<int32_t>, attr_iter->second))));
        } else if (std::type_index(attr_iter->second.type()) ==
                   std::type_index(typeid(int32_t))) {
          pt_kernel_context->EmplaceBackAttr(std::move(phi::ScalarArray(
              &BOOST_GET_CONST(int32_t, attr_iter->second), 1)));
        } else {
          PADDLE_THROW(platform::errors::Unimplemented(
              "Unsupported cast op attribute `%s` to ScalarArray when "
              "construct KernelContext.",
              attr_names[i]));
        }
      } else {  // shape is in the input
        auto& ins_vector = ctx.inputs.at(attr_names[i]);
        if (ins_vector.size() == 1) {  // ShapeTensor
          pt_kernel_context->EmplaceBackAttr(std::move(
              experimental::MakePhiScalarArrayFromVar(*ins_vector.front())));
        } else {  // ShapeTensorList
          pt_kernel_context->EmplaceBackAttr(std::move(
              experimental::MakePhiScalarArrayFromVarList(ins_vector)));
        }
      }
    } else if (attr_defs[i].type_index ==
               std::type_index(typeid(phi::Scalar))) {
      // TODO(chenweihang): support other attrs later
      // TODO(zhangyunfei): Scalar should hold a scalar type, and we should
      // check the attribute type by attr_defs
      auto attr_iter = Attrs().find(attr_names[i]);
      if (attr_iter != Attrs().end()) {  // scalar is in the attribute
        auto& attr = Attrs().at(attr_names[i]);
        if (std::type_index(attr.type()) == std::type_index(typeid(float))) {
          pt_kernel_context->EmplaceBackAttr(
              std::move(phi::Scalar(BOOST_GET_CONST(float, attr))));
        } else if (std::type_index(attr.type()) ==
                   std::type_index(typeid(std::string))) {
          pt_kernel_context->EmplaceBackAttr(
              std::move(phi::Scalar(BOOST_GET_CONST(std::string, attr))));
        } else if (std::type_index(attr.type()) ==
                   std::type_index(typeid(int))) {
          pt_kernel_context->EmplaceBackAttr(
              std::move(phi::Scalar(BOOST_GET_CONST(int, attr))));
        } else {
          PADDLE_THROW(platform::errors::Unimplemented(
              "Unsupported cast op attribute `%s` to Scalar when construct "
              "KernelContext.",
              attr_names[i]));
        }
      } else {  // scalar is in the input
        auto& ins_vector = ctx.inputs.at(attr_names[i]);
        pt_kernel_context->EmplaceBackAttr(
            std::move(experimental::MakePhiScalarFromVar(*ins_vector.front())));
      }
    } else if (attr_defs[i].type_index ==
               std::type_index(typeid(std::vector<phi::Scalar>))) {
      auto& attr = Attrs().at(attr_names[i]);
      if (std::type_index(attr.type()) ==
          std::type_index(typeid(std::vector<int32_t>))) {
        const auto& vec = BOOST_GET_CONST(std::vector<int32_t>, attr);
        std::vector<phi::Scalar> scalar_list;
        scalar_list.reserve(vec.size());
        for (const auto& val : vec) {
          scalar_list.emplace_back(val);
        }
        pt_kernel_context->EmplaceBackAttr(std::move(scalar_list));
      } else if (std::type_index(attr.type()) ==
                 std::type_index(typeid(std::vector<int64_t>))) {
        const auto& vec = BOOST_GET_CONST(std::vector<int64_t>, attr);
        std::vector<phi::Scalar> scalar_list;
        scalar_list.reserve(vec.size());
        for (const auto& val : vec) {
          scalar_list.emplace_back(val);
        }
        pt_kernel_context->EmplaceBackAttr(std::move(scalar_list));
      } else if (std::type_index(attr.type()) ==
                 std::type_index(typeid(std::vector<float>))) {
        const auto& vec = BOOST_GET_CONST(std::vector<float>, attr);
        std::vector<phi::Scalar> scalar_list;
        scalar_list.reserve(vec.size());
        for (const auto& val : vec) {
          scalar_list.emplace_back(val);
        }
        pt_kernel_context->EmplaceBackAttr(std::move(scalar_list));
      } else if (std::type_index(attr.type()) ==
                 std::type_index(typeid(std::vector<double>))) {
        const auto& vec = BOOST_GET_CONST(std::vector<double>, attr);
        std::vector<phi::Scalar> scalar_list;
        scalar_list.reserve(vec.size());
        for (const auto& val : vec) {
          scalar_list.emplace_back(val);
        }
        pt_kernel_context->EmplaceBackAttr(std::move(scalar_list));
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "Unsupported cast op attribute `%s` to vector<Scalar> when "
            "construct KernelContext.",
            attr_names[i]));
      }
    } else {
      // TODO(chenweihang): support other attrs later
      auto attr_it = attrs_.find(attr_names[i]);
      if (attr_defs[i].type_index == std::type_index(typeid(int))) {
        if (attr_it == attrs_.end()) {
          auto in_it = ctx.inputs.find(attr_names[i]);
          if (in_it != ctx.inputs.end()) {
            // get data from input
            auto val = experimental::MakePhiScalarFromVar(*(in_it->second[0]));
            int32_t val_int = val.template to<int32_t>();
            pt_kernel_context->EmplaceBackAttr(val_int);
          } else {
            PADDLE_THROW(platform::errors::NotFound(
                "cannot find attribute `%s` either in the attributes or the "
                "inputs",
                attr_names[i]));
          }
        } else {
          pt_kernel_context->EmplaceBackAttr(
              BOOST_GET_CONST(int, attr_it->second));
        }
      } else if (attr_defs[i].type_index == std::type_index(typeid(float))) {
        pt_kernel_context->EmplaceBackAttr(
            BOOST_GET_CONST(float, attr_it->second));
      } else if (attr_defs[i].type_index == std::type_index(typeid(bool))) {
        pt_kernel_context->EmplaceBackAttr(
            BOOST_GET_CONST(bool, attr_it->second));
      } else if (attr_defs[i].type_index == std::type_index(typeid(int64_t))) {
        pt_kernel_context->EmplaceBackAttr(
            BOOST_GET_CONST(int64_t, attr_it->second));
      } else if (attr_defs[i].type_index ==
                 std::type_index(typeid(std::string))) {
        pt_kernel_context->EmplaceBackAttr(
            BOOST_GET_CONST(std::string, attr_it->second));
      } else if (attr_defs[i].type_index ==
                 std::type_index(typeid(phi::DataType))) {
        auto data_type = paddle::framework::TransToPhiDataType(
            static_cast<framework::proto::VarType::Type>(
                BOOST_GET_CONST(int, attr_it->second)));
        pt_kernel_context->EmplaceBackAttr(data_type);
      } else if (attr_defs[i].type_index ==
                 std::type_index(typeid(std::vector<int64_t>))) {
        if (std::type_index(attr_it->second.type()) ==
            std::type_index(typeid(std::vector<int64_t>))) {
          pt_kernel_context->EmplaceBackAttr(
              BOOST_GET_CONST(std::vector<int64_t>, attr_it->second));
        } else if (std::type_index(attr_it->second.type()) ==
                   std::type_index(typeid(std::vector<int>))) {
          // Emplace Back Attr according to the type of the phi kernel args.
          const auto& vector_int_attr =
              BOOST_GET_CONST(std::vector<int>, attr_it->second);
          const std::vector<int64_t> vector_int64_attr(vector_int_attr.begin(),
                                                       vector_int_attr.end());
          pt_kernel_context->EmplaceBackAttr(vector_int64_attr);
        }
      } else if (attr_defs[i].type_index ==
                 std::type_index(typeid(std::vector<int32_t>))) {
        const auto& vector_int_attr =
            BOOST_GET_CONST(std::vector<int>, attr_it->second);
        pt_kernel_context->EmplaceBackAttr(vector_int_attr);
      } else if (attr_defs[i].type_index ==
                 std::type_index(typeid(std::vector<std::string>))) {
        pt_kernel_context->EmplaceBackAttr(
            BOOST_GET_CONST(std::vector<std::string>, attr_it->second));
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "Unsupported cast op attribute `%s` when construct "
            "KernelContext.",
            attr_names[i]));
      }
    }
  }
  VLOG(4) << "Done attributes";
}
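
// End-to-end, a phi kernel launch built above is schematically:
//   phi::KernelContext kc;                      // flat args in kernel order
//   BuildPhiKernelContext(runtime_ctx, dev_ctx, &kc);
//   (*pt_kernel_)(&kc);                         // invoked from RunImpl
// with Assign{Input,Output}Range recording how duplicable fluid slots map
// onto contiguous flat argument indices.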

}  // namespace framework
}  // namespace paddle