/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/operator.h"

#include <glog/logging.h>

#include <sstream>
#include <string>

#include "gflags/gflags.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/framework/op_call_stack.h"
#include "paddle/fluid/framework/phi_utils.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/transfer_scope_cache.h"
#include "paddle/fluid/framework/unused_var_check.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/device/device_wrapper.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/fluid/platform/profiler/supplement_tracing.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/core/kernel_context.h"
#include "paddle/phi/core/kernel_factory.h"
#include "paddle/phi/ops/compat/signatures.h"

namespace phi {
class DenseTensor;
}  // namespace phi

#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_info.h"
#include "paddle/fluid/platform/device/xpu/xpu_op_list.h"
#endif

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

#ifdef PADDLE_WITH_MLU
#include "paddle/fluid/platform/device/mlu/mlu_info.h"
#endif

DECLARE_bool(benchmark);
DECLARE_bool(check_nan_inf);
DECLARE_bool(enable_unused_var_check);
DECLARE_bool(run_kp_kernel);
DECLARE_bool(enable_host_event_recorder_hook);

namespace paddle {
namespace framework {

std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};
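// kKernelPriority above lists (place, library) pairs from most to least
// preferred: cuDNN on GPU, then plain CUDA, then MKLDNN on CPU, then plain
// CPU.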

static DDim GetDimsDebug(const ScopeBase& scope,
                         const std::string& name,
                         bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    return tensor.dims();
  } else if (var->IsType<phi::SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<phi::SelectedRows>().value().dims();
    } else {
      return var->Get<phi::SelectedRows>().GetCompleteDims();
    }
  } else if (var->IsType<Strings>()) {
    return DDim({static_cast<int64_t>(var->Get<Strings>().size())});
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const ScopeBase& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const ScopeBase& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(framework::TransToProtoVarType(tensor.dtype()));
  } else if (var->IsType<phi::SelectedRows>()) {
    auto tensor = var->Get<phi::SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(framework::TransToProtoVarType(tensor.dtype()));
    }
  } else if (var->IsType<Strings>()) {
    return "strings";
  } else {
    return "";
  }
}

static std::string GetPlace(const ScopeBase& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }
  auto to_string = [](const platform::Place& p) {
    std::stringstream sstream;
    sstream << p;
    return sstream.str();
  };

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return to_string(tensor.place());
  } else if (var->IsType<phi::SelectedRows>()) {
    auto tensor = var->Get<phi::SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return to_string(tensor.place());
    }
  } else {
    return "";
  }
}

static int GetRowSize(const ScopeBase& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<phi::SelectedRows>()) {
    return var->Get<phi::SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoDDebug(const ScopeBase& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    return tensor.lod();
  } else {
    return default_lod;
  }
}

RuntimeContext::RuntimeContext(const VariableNameMap& innames,
                               const VariableNameMap& outnames,
                               const Scope& scope) {
  for (auto& var_name_item : innames) {
    std::vector<Variable*>& input_vars = inputs[var_name_item.first];
    input_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      input_vars.push_back(scope.FindVar(var_name));
    }
  }
  for (auto& var_name_item : outnames) {
    std::vector<Variable*>& output_vars = outputs[var_name_item.first];
    output_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      output_vars.push_back(scope.FindVar(var_name));
    }
  }
}
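// RuntimeContext resolves every input/output variable name to a Variable*
// once, so later stages can work with pointers instead of repeating
// Scope::FindVar lookups; names missing from the scope become nullptr entries.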

void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  try {
    VLOG(4) << place << " " << DebugStringEx(&scope);
    if (platform::is_gpu_place(place)) {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with CUDA support.",
          place));
#else
      auto dev_id = place.device;
      platform::SetDeviceId(dev_id);
#endif
    } else if (platform::is_xpu_place(place)) {
#ifndef PADDLE_WITH_XPU
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with XPU support.",
          place));
#else
      auto dev_id = place.device;
      platform::SetXPUDeviceId(dev_id);
#endif
    } else if (platform::is_npu_place(place)) {
#ifndef PADDLE_WITH_ASCEND_CL
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with NPU support.",
          place));
#else
      auto dev_id = place.device;
      platform::SetNPUDeviceId(dev_id);
#endif
    } else if (platform::is_mlu_place(place)) {
#ifndef PADDLE_WITH_MLU
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with MLU support.",
          place));
#else
      auto dev_id = place.device;
      platform::SetMLUDeviceId(dev_id);
#endif
    } else if (platform::is_custom_place(place)) {
#ifndef PADDLE_WITH_CUSTOM_DEVICE
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with CustomDevice support.",
          place));
#else
      phi::DeviceManager::SetDevice(place);
#endif
    }

    {
      // TODO(wangchaochaohu): refine code to use only one RecordEvent.
      // In order to record different op type cost time
      // and different op name cost time, we set two events.
      platform::RecordEvent op_type_record_event(
          Type(), platform::TracerEventType::Operator, 1);
      auto op_name = platform::OpName(outputs_, Type());
      platform::RecordEvent op_name_record_event(
          op_name,
          platform::TracerEventType::Operator,
          FLAGS_enable_host_event_recorder_hook ? 20 : 1,
          platform::EventRole::kUniqueOp);
      RunImpl(scope, place);
    }

    VLOG(3) << GetExecutionPlace(place) << " " << DebugStringEx(&scope);
  } catch (platform::EnforceNotMet& exception) {
    framework::InsertCallStackInfo(Type(), Attrs(), &exception);
    throw std::move(exception);
  } catch (platform::EOFException&) {
    std::rethrow_exception(std::current_exception());
  } catch (std::exception& ex) {
    LOG(WARNING) << Type() << " raises an exception "
                 << platform::demangle(typeid(ex).name()) << ", " << ex.what();
    std::rethrow_exception(std::current_exception());
  } catch (...) {
    LOG(WARNING) << Type() << " raises an unknown exception";
    std::rethrow_exception(std::current_exception());
  }
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(
      ins.size(),
      1UL,
      platform::errors::InvalidArgument(
          "Operator %s's input %s should contain only one variable.",
          type_,
          name));
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE_NE(
      it,
      inputs_.end(),
      platform::errors::NotFound(
          "Operator %s does not have the input %s.", type_, name));
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  if (outputs_.find(name) != outputs_.end()) {
    return true;
  } else {
    return false;
  }
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(
      outs.size(),
      1UL,
      platform::errors::InvalidArgument(
          "Operator %s's output %s should contain only one variable.",
          type_,
          name));
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE_NE(
      it,
      outputs_.end(),
      platform::errors::NotFound(
          "Operator %s does not have an output called %s.", type_, name));
  return it->second;
}
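// DebugStringEx (below) renders the operator together with per-variable debug
// info. Illustrative shape of the result (example values are made up):
//   Op(elementwise_add), inputs:{X[x:float[2, 3]({})(Place(cpu))]},
//   outputs:{Out[out:float[2, 3]({})(Place(cpu))]}.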

std::string OperatorBase::DebugStringEx(const ScopeBase* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";

  const std::unordered_set<std::string>* no_need_buffer_vars = nullptr;
  if (info_ && info_->NoNeedBufferVarsInferer()) {
    no_need_buffer_vars =
        &(Info().NoNeedBufferVarsInferer()(Inputs(), Outputs(), Attrs()));
    if (no_need_buffer_vars->empty()) no_need_buffer_vars = nullptr;
  }

  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    bool is_no_need_buffer_var =
        (no_need_buffer_vars && no_need_buffer_vars->count(input.first) > 0);
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = is_no_need_buffer_var
                                  ? "unknown_dtype"
                                  : GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDimsDebug(*scope, var_name, true) << "]";
          ss << "(" << GetLoDDebug(*scope, var_name) << ")";
          ss << "(" << GetPlace(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
          ss << "[" << GetDimsDebug(*scope, var_name, true) << "]";
          ss << "(" << GetLoDDebug(*scope, var_name) << ")";
          ss << "(" << GetPlace(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type),
      inputs_(inputs),
      outputs_(outputs),
      attrs_(attrs),
      // NOTE(zjl): why op_info may be nullptr?
      info_(OpInfoMap::Instance().GetNullable(type)) {
  // In dygraph mode, all the OperatorBase will be constructed by function:
  // framework::OpRegistry::CreateOp(type, {}, {}, {}, false).
  // Inputs, outputs and attrs will be set to empty map
  // to improve the execution efficiency of dygraph.
  if (inputs_.size() > 0 || outputs_.size() > 0) {
    GenerateTemporaryNames();
    CheckAllInputOutputSet();
  }
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = Info();

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  if (info_ == nullptr || info_->proto_ == nullptr) return;

  for (auto& in : info_->Proto().inputs()) {
    if (!in.dispensable() && !in.extra()) {
      PADDLE_ENFORCE_NE(
          inputs_.find(in.name()),
          inputs_.end(),
          platform::errors::NotFound(
              "Operator %s's input (%s) is not set.", Type(), in.name()));
    }
  }

  for (auto& out : info_->Proto().outputs()) {
    if (!out.dispensable() && !out.extra()) {
      PADDLE_ENFORCE_NE(
          outputs_.find(out.name()),
          outputs_.end(),
          platform::errors::NotFound(
              "Operator %s's output (%s) is not set.", Type(), out.name()));
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<phi::SelectedRows>()) {
    return &(var.Get<phi::SelectedRows>().value());
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Variable type is %s, expect LoDTensor or SelectedRows.",
        ToTypeName(var.Type())));
  }
}

Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<phi::SelectedRows>()) {
    return var->GetMutable<phi::SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Variable type is %s, expect LoDTensor or SelectedRows.",
        ToTypeName(var->Type())));
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  auto* var = InputVar(name);
  return var != nullptr;
}

bool ExecutionContext::HasInputs(const std::string& name) const {
  const auto& ins = ctx_.inputs;
  auto it = ins.find(name);
  if (it == ins.end() || it->second.empty()) {
    return false;
  }
  for (const auto* input : it->second) {
    if (input == nullptr) {
      return false;
    }
  }
  return true;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  auto* var = OutputVar(name);
  return var != nullptr;
}

const Variable* ExecutionContext::InputVar(const std::string& name) const {
  LogVarUsageIfUnusedVarCheckEnabled(name);

  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(
      it->second.size(),
      1UL,
      platform::errors::InvalidArgument(
          "Operator %s's input %s should contain only one variable.",
          op_.Type(),
          name));
  return it->second.empty() ? nullptr : it->second[0];
}

Variable* ExecutionContext::OutputVar(const std::string& name) const {
  auto it = ctx_.outputs.find(name);
  if (it == ctx_.outputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(
      it->second.size(),
      1UL,
      platform::errors::InvalidArgument(
          "Operator %s's output %s should contain only one variable.",
          op_.Type(),
          name));
  return it->second.empty() ? nullptr : it->second[0];
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  LogVarUsageIfUnusedVarCheckEnabled(name);

  auto vars = MultiInputVar(name);
  if (vars.size() == 0) {
    return {};
  }
  std::vector<const Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(),
                 vars.end(),
                 std::back_inserter(res),
                 [&](const Variable* var) -> const Tensor* {
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE_EQ(var->IsType<LoDTensor>(),
                                     true,
                                     platform::errors::InvalidArgument(
                                         "Input variable should be LoDTensor, "
                                         "but the received type is %s.",
                                         ToTypeName(var->Type())));
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto vars = MultiOutputVar(name);

  if (vars.size() == 0) {
    return {};
  }
  std::vector<Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(),
                 vars.end(),
                 std::back_inserter(res),
                 [&](Variable* var) -> Tensor* {
                   return var == nullptr ? nullptr
                                         : var->GetMutable<LoDTensor>();
                 });
  return res;
}

bool OpSupportGPU(const std::string& op_type) {
  // check in new Function kernel first
  bool has_phi_kernel = false;
  auto& kernel_factory = phi::KernelFactory::Instance();
  auto kernel_key_map =
      kernel_factory.SelectKernelMap(phi::TransToPhiKernelName(op_type));
  for (auto& kernel : kernel_key_map) {
    has_phi_kernel = true;
    if (platform::is_gpu_place(phi::TransToPhiPlace(kernel.first.backend()))) {
      return true;
    }
  }

  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it != all_kernels.end()) {
    for (auto& kern_pair : it->second) {
      if (platform::is_gpu_place(kern_pair.first.place_)) {
        return true;
      }
    }
  } else {
    if (has_phi_kernel) {
      // if has phi kernel, but not find phi gpu kernel and fluid gpu kernel,
      // this op doesn't support GPU
      return false;
    } else {
      // All control operator must support GPU
      return true;
    }
  }

  return false;
}
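
// RuntimeInferShapeContext adapts a RuntimeContext (real Variable* lists) to
// the InferShapeContext interface, so an operator's InferShape function can
// query and set shapes, LoD and layouts directly on runtime variables.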

class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const RuntimeContext& ctx)
      : op_(op), ctx_(ctx) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0) return false;
    PADDLE_ENFORCE_EQ(
        in.size(),
        1UL,
        platform::errors::InvalidArgument(
            "Input %s should not contain more than one inputs.", name));
    return in[0] != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(
        out.size(),
        1UL,
        platform::errors::InvalidArgument(
            "Output %s should not contain more than one outputs.", name));
    return out[0] != nullptr;
  }

  bool HasAttr(const std::string& name) const override {
    return op_.HasAttr(name);
  }

  bool HasInputs(const std::string& name) const override {
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end() || it->second.empty()) {
      return false;
    }
    for (auto& input : it->second) {
      if (input == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name,
                  bool allow_null = false) const override {
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end() || it->second.empty()) {
      return false;
    }
    if (allow_null) {
      for (auto& output : it->second) {
        if (output != nullptr) return true;
      }
      return false;
    } else {
      for (auto& output : it->second) {
        if (output == nullptr) return false;
      }
      return true;
    }
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  std::vector<std::string> Inputs(const std::string& name) const override {
    return op_.Inputs(name);
  }

  std::vector<std::string> Outputs(const std::string& name) const override {
    return op_.Outputs(name);
  }

  std::string GetInputNameByIdx(size_t idx) const override {
    auto& op_proto =
        paddle::framework::OpInfoMap::Instance().Get(op_.Type()).proto_;
    PADDLE_ENFORCE_LT(idx,
                      op_proto->inputs().size(),
                      platform::errors::OutOfRange(
                          "The index should be less than the size of inputs of "
                          "operator %s, but got index is %d and size is %d",
                          op_.Type(),
                          idx,
                          op_proto->inputs().size()));
    return op_proto->inputs()[idx].name();
  }

  std::string GetOutputNameByIdx(size_t idx) const override {
    auto& op_proto =
        paddle::framework::OpInfoMap::Instance().Get(op_.Type()).proto_;
    PADDLE_ENFORCE_LT(
        idx,
        op_proto->outputs().size(),
        platform::errors::OutOfRange(
            "The index should be less than the size of outputs of "
            "operator %s, but got index is %d and size is %d",
            op_.Type(),
            idx,
            op_proto->outputs().size()));
    return op_proto->outputs()[idx].name();
  }

  void ShareDim(const std::string& in,
                const std::string& out,
                size_t i = 0,
                size_t j = 0) override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE_NE(
        in_it,
        ctx_.inputs.end(),
        platform::errors::NotFound("Input %s does not exist.", in));
    PADDLE_ENFORCE_NE(
        out_it,
        ctx_.outputs.end(),
        platform::errors::NotFound("Output %s does not exist.", out));
    PADDLE_ENFORCE_LT(i,
                      in_it->second.size(),
                      platform::errors::InvalidArgument(
                          "The index of input dimension is out of range, "
                          "expected index less than %zu, but received %zu.",
                          in_it->second.size(),
                          i));
    PADDLE_ENFORCE_LT(j,
                      out_it->second.size(),
                      platform::errors::InvalidArgument(
                          "The index of output dimension is out of range, "
                          "expected index less than %zu, but received %zu.",
                          out_it->second.size(),
                          j));

    Variable* in_var = in_it->second[i];
    Variable* out_var = out_it->second[j];

    PADDLE_ENFORCE_EQ(
        in_var->Type(),
        out_var->Type(),
        platform::errors::InvalidArgument(
            "The type of input (%s) and output (%s) are inconsistent.",
            in,
            out));

    if (in_var->IsType<phi::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<phi::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<phi::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(platform::errors::Unimplemented(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows."));
    }
  }

  void ShareAllLoD(const std::string& in,
                   const std::string& out) const override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE_NE(in_it,
                      ctx_.inputs.end(),
                      platform::errors::NotFound(
                          "Input [%s] found error in Op [%s]", in, op_.Type()));
    PADDLE_ENFORCE_NE(
        out_it,
        ctx_.outputs.end(),
        platform::errors::NotFound(
            "Output [%s] found error in Op [%s]", out, op_.Type()));

    auto& in_var_list = in_it->second;
    auto& out_var_list = out_it->second;

    PADDLE_ENFORCE_EQ(
        in_var_list.size(),
        out_var_list.size(),
        platform::errors::PreconditionNotMet(
            "Op [%s]: Input var size should be equal with output var size",
            op_.Type()));

    auto& out_var_names = op_.Outputs(out);

    for (size_t i = 0; i < in_var_list.size(); ++i) {
      if (out_var_names[i] == framework::kEmptyVarName) {
        continue;
      }

      Variable* in_var = in_var_list[i];
      if (!in_var->IsType<LoDTensor>()) return;
      Variable* out_var = out_var_list[i];
      PADDLE_ENFORCE_EQ(out_var->IsType<LoDTensor>(),
                        true,
                        platform::errors::PreconditionNotMet(
                            "The %d-th output of Output(%s) must be LoDTensor.",
                            i,
                            out_var_names[i]));
      auto& in_tensor = in_var->Get<LoDTensor>();
      auto* out_tensor = out_var->GetMutable<LoDTensor>();
      out_tensor->set_lod(in_tensor.lod());
#ifdef PADDLE_WITH_MKLDNN
      if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
        out_tensor->set_layout(in_tensor.layout());
    }
  }
908 909 910
  void ShareLoD(const std::string& in,
                const std::string& out,
                size_t i = 0,
Q
Qiao Longfei 已提交
911
                size_t j = 0) const override {
X
Xin Pan 已提交
912 913
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
914
    PADDLE_ENFORCE_NE(
915 916
        in_it,
        ctx_.inputs.end(),
917 918
        platform::errors::NotFound("Input %s does not exist.", in));
    PADDLE_ENFORCE_NE(
919 920
        out_it,
        ctx_.outputs.end(),
921
        platform::errors::NotFound("Output %s does not exist.", out));
922 923
    PADDLE_ENFORCE_LT(i,
                      in_it->second.size(),
924 925 926
                      platform::errors::InvalidArgument(
                          "The index of input dimension is out of range, "
                          "excepted index less than %zu, but received %zu.",
927 928 929 930
                          in_it->second.size(),
                          i));
    PADDLE_ENFORCE_LT(j,
                      out_it->second.size(),
931 932 933
                      platform::errors::InvalidArgument(
                          "The index of output dimension is out of range, "
                          "excepted index less than %zu, but received %zu.",
934 935
                          out_it->second.size(),
                          j));
X
Xin Pan 已提交
936 937

    Variable* in_var = in_it->second.at(i);
Q
Qiao Longfei 已提交
938
    if (!in_var->IsType<LoDTensor>()) return;
X
Xin Pan 已提交
939
    Variable* out_var = out_it->second.at(j);
940
    PADDLE_ENFORCE_EQ(
941 942
        out_var->IsType<LoDTensor>(),
        true,
943 944
        platform::errors::InvalidArgument(
            "The %zu-th output of Output(%s) must be LoDTensor.", j, out));
945
    auto& in_tensor = in_var->Get<LoDTensor>();
Q
Qiao Longfei 已提交
946 947
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());
D
dzhwinter 已提交
948

M
mozga-intel 已提交
949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967
// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to shared info between in/out Tensor?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPkernel, set_layout(kMKLDNN) should be called
    //    in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
D
dzhwinter 已提交
968 969
  }

970
  int32_t GetLoDLevel(const std::string& in, size_t i = 0) const override {
971
    PADDLE_THROW(platform::errors::PreconditionNotMet(
972
        "GetLoDLevel is only used in compile time. The calculation of "
973
        "output's actual lod is different among operators so that should be "
974
        "set in the runtime kernel."));
975 976
  }

977 978
  void SetLoDLevel(const std::string& out,
                   int32_t lod_level,
979
                   size_t j = 0) const override {
980
    PADDLE_THROW(platform::errors::PreconditionNotMet(
981
        "SetLoDLevel is only used in compile time. The calculation of "
982
        "output's actual lod is different among operators so that should be "
983
        "set in the runtime kernel."));
C
chengduo 已提交
984 985
  }

986 987
  bool IsRuntime() const override { return true; }

988 989 990 991 992 993
  bool IsRunMKLDNNKernel() const override {
    try {
      auto& op_with_kernel = dynamic_cast<const OperatorWithKernel&>(op_);
      return ((op_with_kernel.kernel_type()) &&
              (op_with_kernel.kernel_type()->data_layout_ ==
               framework::DataLayout::kMKLDNN));
994
    } catch (const std::bad_cast& exp) {
995 996 997 998
      return false;
    }
  }

999
  // TODO(paddle-dev): Can this be template?
C
Chen Weihang 已提交
1000
  paddle::small_vector<InferShapeVarPtr, phi::kInputSmallVectorSize>
1001
  GetInputVarPtrs(const std::string& name) const override {
1002
    const std::vector<Variable*>& vars = InputVars(name);
C
Chen Weihang 已提交
1003
    paddle::small_vector<InferShapeVarPtr, phi::kInputSmallVectorSize> res;
1004 1005 1006 1007 1008
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

C
Chen Weihang 已提交
1009
  paddle::small_vector<InferShapeVarPtr, phi::kOutputSmallVectorSize>
1010
  GetOutputVarPtrs(const std::string& name) const override {
1011
    const std::vector<Variable*>& vars = OutputVars(name);
C
Chen Weihang 已提交
1012
    paddle::small_vector<InferShapeVarPtr, phi::kOutputSmallVectorSize> res;
1013 1014 1015 1016 1017
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

X
Xin Pan 已提交
1018 1019
  DDim GetInputDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
1020
    PADDLE_ENFORCE_EQ(
1021 1022
        vars.size(),
        1UL,
1023 1024
        platform::errors::InvalidArgument(
            "Input(%s) should hold one element, but now it holds %zu elements.",
1025 1026
            name,
            vars.size()));
X
Xin Pan 已提交
1027 1028 1029 1030 1031 1032 1033 1034
    return this->GetDim(vars[0]);
  }

  std::vector<DDim> GetInputsDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    return GetDims(vars);
  }

1035 1036 1037 1038
  proto::VarType::Type GetInputVarType(const std::string& name) const override {
    return GetVarType(InputVars(name).at(0));
  }

X
Xin Pan 已提交
1039 1040 1041 1042 1043 1044 1045 1046 1047 1048
  std::vector<proto::VarType::Type> GetInputsVarType(
      const std::string& name) const override {
    return GetVarTypes(InputVars(name));
  }

  std::vector<proto::VarType::Type> GetOutputsVarType(
      const std::string& name) const override {
    return GetVarTypes(OutputVars(name));
  }

X
Xin Pan 已提交
1049 1050
  void SetOutputDim(const std::string& name, const DDim& dim) override {
    auto& vars = OutputVars(name);
1051
    PADDLE_ENFORCE_EQ(
1052 1053
        vars.size(),
        1UL,
1054 1055
        platform::errors::InvalidArgument("Output(%s) should hold one element, "
                                          "but now it holds %zu elements.",
1056 1057
                                          name,
                                          vars.size()));
X
Xin Pan 已提交
1058 1059 1060 1061 1062 1063 1064 1065 1066
    SetDim(vars[0], dim);
  }

  void SetOutputsDim(const std::string& name,
                     const std::vector<DDim>& dims) override {
    auto& vars = OutputVars(name);
    SetDims(vars, dims);
  }

1067 1068 1069 1070 1071 1072 1073 1074
  const phi::ArgumentMappingFn* GetPhiArgumentMappingFn() const override {
    return phi::OpUtilsMap::Instance().GetArgumentMappingFn(op_.Type());
  }

  const phi::KernelSignature* GetPhiDefaultKernelSignature() const override {
    return &phi::DefaultKernelSignatureMap::Instance().Get(op_.Type());
  }

1075
 protected:
X
Xin Pan 已提交
1076
  DDim GetDim(Variable* var) const {
1077 1078
    PADDLE_ENFORCE_NOT_NULL(
        var, platform::errors::InvalidArgument("Input variable is nullptr."));
1079 1080
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
1081 1082
    } else if (var->IsType<phi::SelectedRows>()) {
      return var->Get<phi::SelectedRows>().GetCompleteDims();
1083
    } else {
1084 1085 1086 1087
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Only LoDTensor or SelectedRows support 'GetDim', but input "
          "Variable's type is %s.",
          ToTypeName(var->Type())));
F
fengjiayi 已提交
1088 1089 1090
    }
  }

X
Xin Pan 已提交
1091 1092 1093
  std::vector<DDim> GetDims(const std::vector<Variable*>& vars) const {
    std::vector<DDim> ret;
    ret.reserve(vars.size());
1094 1095 1096
    std::transform(vars.begin(),
                   vars.end(),
                   std::back_inserter(ret),
X
Xin Pan 已提交
1097 1098 1099 1100
                   [this](Variable* var) { return this->GetDim(var); });
    return ret;
  }

F
fengjiayi 已提交
1101
  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
1102 1103
    PADDLE_THROW(platform::errors::PreconditionNotMet(
        "GetRepeatedDims method only ban be used in compile time."));
1104 1105
  }

X
Xin Pan 已提交
1106
  void SetDim(Variable* var, const DDim& dim) {
1107 1108
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
1109 1110
    } else if (var->IsType<phi::SelectedRows>()) {
      var->GetMutable<phi::SelectedRows>()->set_height(dim[0]);
1111
    } else {
1112 1113 1114 1115
      PADDLE_THROW(platform::errors::Unimplemented(
          "Variable type error, expect LoDTensor or SelectedRows, but received "
          "(%s).",
          ToTypeName(var->Type())));
X
Xin Pan 已提交
1116 1117 1118 1119 1120 1121
    }
  }

  void SetDims(const std::vector<Variable*>& vars,
               const std::vector<DDim>& dims) {
    size_t length = vars.size();
1122 1123
    PADDLE_ENFORCE_EQ(length,
                      dims.size(),
1124 1125 1126 1127
                      platform::errors::InvalidArgument(
                          "The number of input variables do not match the "
                          "number of input dimensions, the number of variables "
                          "is %zu, the number of dimensions is %zu.",
1128 1129
                          length,
                          dims.size()));
X
Xin Pan 已提交
1130 1131 1132 1133 1134
    for (size_t i = 0; i < length; ++i) {
      if (vars[i] == nullptr) {
        continue;
      }
      SetDim(vars[i], dims[i]);
1135 1136 1137
    }
  }

F
fengjiayi 已提交
1138 1139
  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
1140 1141
    PADDLE_THROW(platform::errors::PreconditionNotMet(
        "SetRepeatedDims method only can be used in compile time."));
F
fengjiayi 已提交
1142 1143
  }

X
Xin Pan 已提交
1144 1145 1146 1147
  std::vector<proto::VarType::Type> GetVarTypes(
      const std::vector<Variable*>& vars) const {
    std::vector<proto::VarType::Type> retv;
    retv.resize(vars.size());
1148 1149 1150
    std::transform(vars.begin(),
                   vars.end(),
                   retv.begin(),
X
Xin Pan 已提交
1151
                   std::bind(std::mem_fn(&RuntimeInferShapeContext::GetVarType),
1152 1153
                             this,
                             std::placeholders::_1));
X
Xin Pan 已提交
1154 1155 1156 1157
    return retv;
  }

  proto::VarType::Type GetVarType(Variable* var) const {
1158 1159 1160
    return ToVarType(var->Type());
  }

1161 1162 1163
 private:
  const std::vector<Variable*>& InputVars(const std::string& name) const {
    auto it = ctx_.inputs.find(name);
1164
    PADDLE_ENFORCE_NE(
1165 1166
        it,
        ctx_.inputs.end(),
1167 1168
        platform::errors::NotFound(
            "Operator (%s) does not have the input (%s).", op_.Type(), name));
1169 1170 1171 1172 1173
    return it->second;
  }

  const std::vector<Variable*>& OutputVars(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
1174
    PADDLE_ENFORCE_NE(
1175 1176
        it,
        ctx_.outputs.end(),
1177 1178
        platform::errors::NotFound(
            "Operator (%s) does not have the outputs (%s).", op_.Type(), name));
1179
    return it->second;
F
fengjiayi 已提交
1180 1181
  }

1182
  const OperatorBase& op_;
X
Xin Pan 已提交
1183
  const RuntimeContext& ctx_;
1184 1185
};
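
// CacheImpl owns a prepared phi::KernelContext and RuntimeInferShapeContext so
// that, when caching is enabled, a repeated run can reuse them instead of
// rebuilding both objects (see the cached fast path in RunImpl below).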

struct OperatorWithKernel::CacheImpl {
  explicit CacheImpl(phi::KernelContext* kernel_ctx,
                     RuntimeInferShapeContext* infer_shape_ctx)
      : kernel_ctx_(kernel_ctx), infer_shape_ctx_(infer_shape_ctx) {}

  phi::KernelContext* getKernelContext() { return kernel_ctx_.get(); }
  RuntimeInferShapeContext* getRuntimeInferShapeContext() {
    return infer_shape_ctx_.get();
  }

 private:
  std::unique_ptr<phi::KernelContext> kernel_ctx_;
  std::unique_ptr<RuntimeInferShapeContext> infer_shape_ctx_;
};

static void CheckTensorNANOrInf(const std::string& op_type,
                                const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (framework::TransToProtoVarType(tensor.dtype()) != proto::VarType::FP32 &&
      framework::TransToProtoVarType(tensor.dtype()) != proto::VarType::FP64) {
    return;
  }
  PADDLE_ENFORCE_NE(
      framework::TensorContainsInf(tensor),
      true,
      platform::errors::Fatal(
          "Operator %s output Tensor %s contains Inf.", op_type, name));
  PADDLE_ENFORCE_NE(
      framework::TensorContainsNAN(tensor),
      true,
      platform::errors::Fatal(
          "Operator %s output Tensor %s contains NAN.", op_type, name));
}
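
// The Support*() queries below all follow the same pattern: first look for a
// phi kernel registered for the target backend, then fall back to scanning the
// fluid OpKernelMap for a kernel whose place matches.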

bool OperatorWithKernel::SupportGPU() const {
  auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap(
      phi::TransToPhiKernelName(type_));
  auto has_phi_kernel =
      std::any_of(phi_kernels.begin(),
                  phi_kernels.end(),
                  [](phi::KernelKeyMap::const_reference kern_pair) {
                    return kern_pair.first.backend() == phi::Backend::GPU;
                  });
  if (has_phi_kernel) {
    return true;
  } else {
    auto kernel_iter = OperatorWithKernel::AllOpKernels().find(type_);
    if (kernel_iter == OperatorWithKernel::AllOpKernels().end()) {
      return false;
    } else {
      auto& op_kernels = kernel_iter->second;
      return std::any_of(
          op_kernels.begin(),
          op_kernels.end(),
          [](OpKernelMap::const_reference kern_pair) {
            return platform::is_gpu_place(kern_pair.first.place_);
          });
    }
  }
}

bool OperatorWithKernel::SupportNPU() const {
  auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap(
      phi::TransToPhiKernelName(type_));
  auto has_phi_kernel =
      std::any_of(phi_kernels.begin(),
                  phi_kernels.end(),
                  [](phi::KernelKeyMap::const_reference kern_pair) {
                    return kern_pair.first.backend() == phi::Backend::NPU;
                  });
  if (has_phi_kernel) {
    return true;
  } else {
    auto kernel_iter = OperatorWithKernel::AllOpKernels().find(type_);
    if (kernel_iter == OperatorWithKernel::AllOpKernels().end()) {
      return false;
    } else {
      auto& op_kernels = kernel_iter->second;
      return std::any_of(
          op_kernels.begin(),
          op_kernels.end(),
          [](OpKernelMap::const_reference kern_pair) {
            return platform::is_npu_place(kern_pair.first.place_);
          });
    }
  }
}

bool OperatorWithKernel::SupportsMKLDNN(
    const proto::VarType::Type data_type) const {
  auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap(
      phi::TransToPhiKernelName(type_));
  auto has_phi_kernel =
      std::any_of(phi_kernels.begin(),
                  phi_kernels.end(),
                  [](phi::KernelKeyMap::const_reference kern_pair) {
                    return kern_pair.first.backend() == phi::Backend::ONEDNN;
                  });
  if (has_phi_kernel) {
    return true;
  } else {
    auto op_kernel_iter = OperatorWithKernel::AllOpKernels().find(type_);
    if (op_kernel_iter == OperatorWithKernel::AllOpKernels().end()) {
      return false;
    } else {
      auto& op_kernels = op_kernel_iter->second;
      return std::any_of(
          op_kernels.begin(),
          op_kernels.end(),
          [data_type](OpKernelMap::const_reference kern_pair) {
            return platform::is_cpu_place(kern_pair.first.place_) &&
                   kern_pair.first.library_type_ == LibraryType::kMKLDNN &&
                   kern_pair.first.data_type_ == data_type;
          });
    }
  }
}

bool OperatorWithKernel::SupportsKernelType(
    const OpKernelType& kernel_type) const {
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) return false;
  OpKernelMap& kernels = kernels_iter->second;
  auto kernel_iter = kernels.find(kernel_type);

#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
  if (paddle::platform::is_xpu_place(kernel_type.place_)) {
    return kernel_iter != kernels.end() &&
           paddle::platform::is_xpu_support_op(type_, kernel_type) &&
           !paddle::platform::is_in_xpu_black_list(type_);
  }
#endif

#ifdef PADDLE_WITH_XPU_KP
  if (paddle::platform::is_xpu_place(kernel_type.place_)) {
    bool use_xpu_kp_kernel_rt =
        FLAGS_run_kp_kernel &&
        paddle::platform::is_xpu_kp_support_op(type_, kernel_type);
    bool use_xpu_kp_kernel_debug =
        paddle::platform::is_in_xpu_kpwhite_list(type_);
    bool is_xpu_kp_support = (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
    if (is_xpu_kp_support) {
      auto tmp_kernel_type = kernel_type;
      tmp_kernel_type.library_type_ = LibraryType::kKP;
      return kernels.find(tmp_kernel_type) != kernels.end();
    }
    return kernel_iter != kernels.end() &&
           paddle::platform::is_xpu_support_op(type_, kernel_type) &&
           !paddle::platform::is_in_xpu_black_list(type_);
  }
#endif

  return kernel_iter != kernels.end();
}

bool OperatorWithKernel::CanMKLDNNBeUsed(const framework::ExecutionContext& ctx,
                                         proto::VarType::Type data_type) const {
  const auto& attrs_map = ctx.Attrs();
  auto iter = attrs_map.find("use_mkldnn");
  bool use_mkldnn_ctx = iter != attrs_map.end() &&
                        PADDLE_GET_CONST(bool, iter->second) &&
                        platform::is_cpu_place(ctx.GetPlace());
  return use_mkldnn_ctx && this->SupportsMKLDNN(data_type);
}
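// Per CanMKLDNNBeUsed above, MKLDNN is only picked when the op instance sets
// use_mkldnn, the op runs on CPUPlace, and an MKLDNN (oneDNN) kernel is
// registered for this data type.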

void OperatorWithKernel::InferShape(InferShapeContext* ctx) const {
  PADDLE_THROW(platform::errors::PermissionDenied(
      "The default InferShape function of OperatorWithKernel is not allowed to "
      "be called, please override corresponding InferShape function in the "
      "specific operator."));
}

void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
                                           const platform::Place& place,
                                           const RuntimeContext& ctx) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, ctx);
  this->Info().infer_shape_(&infer_shape_ctx);
}

void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  // To reduce the elapsed time of HasAttr, we use bool variable to record the
  // result of HasAttr.
  if (!enable_cache_runtime_context_ && HasAttr(kEnableCacheRuntimeContext))
    enable_cache_runtime_context_ = true;
  if (!all_kernels_must_compute_runtime_shape_ &&
      HasAttr(kAllKernelsMustComputeRuntimeShape))
    all_kernels_must_compute_runtime_shape_ = true;
  const Scope* cur_scope = &scope;
  if (!enable_cache_runtime_context_) {
    RuntimeContext ctx(Inputs(), Outputs(), scope);
    RunImpl(scope, place, &ctx);
    pre_scope_ = cur_scope;
  } else if (run_phi_kernel_ && impl_ != nullptr && !need_prepare_data_ &&
             !need_prepare_phi_data_) {
    if (!all_kernels_must_compute_runtime_shape_)
      this->Info().infer_shape_(impl_->getRuntimeInferShapeContext());
    (*pt_kernel_)(impl_->getKernelContext());
  } else {
    if (runtime_ctx_.get() == nullptr || pre_scope_ != cur_scope) {
      std::lock_guard<std::mutex> lock(cache_update_mutex_);
      if (runtime_ctx_.get() == nullptr || pre_scope_ != cur_scope) {
        runtime_ctx_.reset(new RuntimeContext(Inputs(), Outputs(), scope));
        pre_scope_ = cur_scope;
      }
    }
    RunImpl(scope, place, runtime_ctx_.get());
  }
}

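// A high-level map of the dispatch below: first try to select a phi kernel
// from the cached kernel signature; on XPU, a KP (Kernel Primitive) variant
// may override the library type; if no valid phi kernel is found, fall back
// to the fluid kernel registry, and finally to a phi CPU kernel.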
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place,
                                 RuntimeContext* runtime_ctx) const {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

#ifdef PADDLE_WITH_ASCEND_CL
  // NOTE(wangxi): nan/inf cannot be detected on NPU by checking the variable
  // values; it can only be detected through the special `float_status`, which
  // records whether the operation overflowed. More about `float_status`, see:
  // https://gitee.com/ascend/modelzoo/issues/I3NF8V?from=project-issue
  if (FLAGS_check_nan_inf) {
    framework::details::NPUAllocAndClearFloatStatus(*this, scope, place);
  }
#endif

  auto exe_ctx = ExecutionContext(*this, scope, *dev_ctx, *runtime_ctx);
  // using cache
  if (kernel_type_.get()) {
    dev_ctx = pool.Get(kernel_type_->place_);
  }

// TODO(Liu-xiandong): Now we are using too many if-else branches and too much
// hard code for the XPU device; it's ugly, and we will refactor it in the
// future.
#if defined(PADDLE_WITH_XPU_KP)
  bool use_phi_xpu_kp = false;
#endif

  // TODO(chenweihang): Now we are still reusing a lot of the original fluid
  // implementation, this is a gradual replacement process
  // TODO(chenweihang): in the first phase of the project, we only support the
  // CPU, CUDA and ROCm backends; XPU, NPU and MKLDNN will be supported in the
  // second phase
  phi::KernelKey pt_kernel_key;
  std::string pt_kernel_name;
  if (phi::KernelFactory::Instance().HasCompatiblePhiKernel(type_)) {
    if (kernel_signature_ == nullptr || pt_kernel_ == nullptr) {
      kernel_signature_.reset(new phi::KernelSignature(
          std::move(GetExpectedPhiKernelArgs(exe_ctx))));
      VLOG(6) << *kernel_signature_.get();

      kernel_type_.reset(
          new OpKernelType(std::move(InnerGetExpectedKernelType(exe_ctx))));
      dev_ctx = pool.Get(kernel_type_->place_);

      pt_kernel_name = kernel_signature_->name;
// NOTE(Liu-xiandong): The register kernel used KP have library_type[KP],
// But the default library_type is Plain, so we need to modify the
// library_type here, otherwise it can't work.
#ifdef PADDLE_WITH_XPU_KP
      if (paddle::platform::is_xpu_place(kernel_type_->place_)) {
        bool use_xpu_kp_kernel_rt =
            FLAGS_run_kp_kernel &&
            paddle::platform::is_xpu_kp_support_op(type_, *kernel_type_);
        bool use_xpu_kp_kernel_debug =
            paddle::platform::is_in_xpu_kpwhite_list(type_);
        if (use_xpu_kp_kernel_rt) {
          VLOG(3) << "phi xpu_kp using rt mode in static graph";
        }
        if (use_xpu_kp_kernel_debug) {
          VLOG(3) << "phi xpu_kp using debug mode in static graph";
        }
        bool is_xpu_kp_support =
            (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
        if (is_xpu_kp_support) {
          auto expected_kernel_key_library_type = kernel_type_->library_type_;
          kernel_type_->library_type_ = LibraryType::kKP;
          VLOG(3) << "modifing XPU KP kernel in static graph: "
                  << pt_kernel_name
                  << ", using_kernel_key:" << *kernel_type_.get();
          auto try_pt_kernel_key =
              TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
          if (!phi::KernelFactory::Instance().HasKernel(pt_kernel_name,
                                                        try_pt_kernel_key)) {
            kernel_type_->library_type_ = expected_kernel_key_library_type;
            VLOG(3) << "modify XPU KP kernel in static graph: "
                    << pt_kernel_name << " is failed " << *kernel_type_.get();
          } else {
            use_phi_xpu_kp = true;
            VLOG(3) << "modify XPU KP kernel in static graph: "
                    << pt_kernel_name << " is succeed " << *kernel_type_.get();
          }
        }
      }
#endif
      pt_kernel_key = TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
      pt_kernel_.reset(
          new phi::Kernel(phi::KernelFactory::Instance().SelectKernel(
              pt_kernel_name, pt_kernel_key)));

      if (pt_kernel_->IsValid()) {
        VLOG(6) << "Static mode ChoosePhiKernel - kernel name: "
                << pt_kernel_name << " | kernel key: " << pt_kernel_key
                << " | kernel: " << *pt_kernel_;
      } else {
        VLOG(6) << "Static mode ChoosePhiKernel - kernel `" << pt_kernel_name
                << "` not found.";
      }
    } else {
      pt_kernel_name = kernel_signature_->name;
// NOTE(Liu-xiandong): In my ctest runs this branch is not executed,
// which I can't understand; it's really confusing.
// But we still need to keep it to avoid errors.
#ifdef PADDLE_WITH_XPU_KP
      if (paddle::platform::is_xpu_place(kernel_type_->place_)) {
        bool use_xpu_kp_kernel_rt =
            FLAGS_run_kp_kernel &&
            paddle::platform::is_xpu_kp_support_op(type_, *kernel_type_);
        bool use_xpu_kp_kernel_debug =
            paddle::platform::is_in_xpu_kpwhite_list(type_);
        if (use_xpu_kp_kernel_rt) {
          VLOG(3) << "phi xpu_kp using rt mode in static graph";
        }
        if (use_xpu_kp_kernel_debug) {
          VLOG(3) << "phi xpu_kp using debug mode in static graph";
        }
        bool is_xpu_kp_support =
            (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
        if (is_xpu_kp_support) {
          auto expected_kernel_key_library_type = kernel_type_->library_type_;
          kernel_type_->library_type_ = LibraryType::kKP;
          VLOG(3) << "modifying XPU KP kernel in static graph: "
                  << pt_kernel_name
                  << ", using_kernel_key:" << *kernel_type_.get();
          auto try_pt_kernel_key =
              TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
          if (!phi::KernelFactory::Instance().HasKernel(pt_kernel_name,
                                                        try_pt_kernel_key)) {
            kernel_type_->library_type_ = expected_kernel_key_library_type;
            VLOG(3) << "modifying XPU KP kernel in static graph: "
                    << pt_kernel_name << " failed " << *kernel_type_.get();
          } else {
            use_phi_xpu_kp = true;
            VLOG(3) << "modifying XPU KP kernel in static graph: "
                    << pt_kernel_name << " succeeded " << *kernel_type_.get();
          }
        }
      }
#endif
      pt_kernel_key = TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
    }

// NOTE(Liu-xiandong): Determine whether the selected kernel is valid.
// If not, use the kernel registered in fluid; and if fluid does not
// contain the related heterogeneous kernel, use the phi CPU kernel.
#if defined(PADDLE_WITH_XPU)
    bool is_xpu_unsupport =
        paddle::platform::is_xpu_place(kernel_type_->place_) &&
            !paddle::platform::is_xpu_support_op(type_, *kernel_type_.get()) ||
        paddle::platform::is_in_xpu_black_list(type_);
#endif
#ifdef PADDLE_WITH_XPU_KP
    bool use_xpu_kp_kernel_rt =
        paddle::platform::is_xpu_place(kernel_type_->place_) &&
        FLAGS_run_kp_kernel &&
        paddle::platform::is_xpu_kp_support_op(type_, *kernel_type_);
    bool use_xpu_kp_kernel_debug =
        paddle::platform::is_xpu_place(kernel_type_->place_) &&
        paddle::platform::is_in_xpu_kpwhite_list(type_);
    bool is_xpu_kp_support = (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
#endif

    if (pt_kernel_->IsValid()
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
        && !is_xpu_unsupport
#endif
#if defined(PADDLE_WITH_XPU_KP)
        && (!is_xpu_unsupport || use_phi_xpu_kp)
#endif
    ) {
      run_phi_kernel_ = true;
    } else {
      auto& all_op_kernels = AllOpKernels();
      auto kernels_iter = all_op_kernels.find(type_);

// NOTE(Liu-xiandong): If we can't find a heterogeneous kernel in phi,
// we need to select the heterogeneous kernel in fluid, but the kernel
// registered for KP uses library_type[KP], so we need to modify it.
#ifdef PADDLE_WITH_XPU_KP
      if (is_xpu_kp_support) {
        kernel_type_->library_type_ = LibraryType::kKP;
      }
#endif

      if (kernels_iter == all_op_kernels.end() ||
          kernels_iter->second.find(*kernel_type_.get()) ==
              kernels_iter->second.end()
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
          || is_xpu_unsupport
#endif
#if defined(PADDLE_WITH_XPU_KP)
          || (is_xpu_unsupport && !is_xpu_kp_support)
#endif
      ) {
        auto pt_cpu_kernel_key =
            FallBackToCpu(*kernel_type_.get(), pt_kernel_key, *this);
        pt_kernel_.reset(
            new phi::Kernel(phi::KernelFactory::Instance().SelectKernel(
                pt_kernel_name, pt_cpu_kernel_key)));

        dev_ctx = pool.Get(platform::CPUPlace());
        if (pt_kernel_->IsValid()) {
          VLOG(6) << "Static mode PrepareImpl - kernel name: " << pt_kernel_name
                  << " | kernel key: " << pt_cpu_kernel_key
                  << " | kernel: " << *pt_kernel_;
          run_phi_kernel_ = true;
        }
      }
    }
  }
  if (!run_phi_kernel_) {
    if (kernel_type_.get() == nullptr || kernel_func_.get() == nullptr) {
      ChooseKernel(exe_ctx);
      dev_ctx = pool.Get(kernel_type_->place_);
    }
  }

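  // At this point a kernel (phi or fluid) has been selected and dev_ctx
  // matches its place; the steps below prepare input data, run infer-shape,
  // and launch the chosen kernel.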
  // do data transform
  std::vector<std::string> transfered_inplace_vars;
  Scope* transfer_scope = nullptr;
  {
    platform::RecordEvent record_event("prepare_data",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);
    if (need_prepare_data_) {
      transfer_scope = PrepareData(
          scope, *kernel_type_, &transfered_inplace_vars, runtime_ctx);
    }
  }
  // exec scope is the scope that the kernel actually executes on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!all_kernels_must_compute_runtime_shape_) {
    platform::RecordEvent record_event("infer_shape",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);
    RuntimeInferShapeContext infer_shape_ctx(*this, *runtime_ctx);
    this->Info().infer_shape_(&infer_shape_ctx);
    record_event.End();
    platform::RecordOpInfoSupplement(
        Type(), Attrs(), infer_shape_ctx, *runtime_ctx);
  }

  if (FLAGS_enable_unused_var_check) {
    GetThreadLocalUsedVarNameSet()->clear();
  }

  // TODO(panyx0718): ExecutionContext should only depend on RuntimeContext
  // not Scope. Imperative mode only passes inputs and gets outputs.
  {
    platform::RecordEvent record_event("compute",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);
    if (run_phi_kernel_) {
      if (enable_cache_runtime_context_ && !need_prepare_phi_data_ &&
          !need_prepare_data_) {
        impl_ =
            new CacheImpl(new phi::KernelContext(),
                          new RuntimeInferShapeContext(*this, *runtime_ctx));
        BuildPhiKernelContext(*runtime_ctx, dev_ctx, impl_->getKernelContext());
        (*pt_kernel_)(impl_->getKernelContext());
      } else {
        phi::KernelContext pt_kernel_context;
        // Do data transform before building KernelContext
        // TODO(zhiqiu): support TransferInplaceVarsBack
        BuildPhiKernelContext(*runtime_ctx, dev_ctx, &pt_kernel_context);
        (*pt_kernel_)(&pt_kernel_context);
      }
    } else {
      (*kernel_func_)(
          ExecutionContext(*this, exec_scope, *dev_ctx, *runtime_ctx));
    }
  }

  if (!transfered_inplace_vars.empty()) {
    // some inplace variables have been transferred.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  // See [ Why need handle complex gradient to real gradient? ]
  // Only handle the case where the current kernel data type is complex
  if (framework::IsComplexType(kernel_type_->data_type_)) {
    HandleComplexGradToRealGrad(scope, runtime_ctx);
  }

  if (FLAGS_enable_unused_var_check) {
    // skip ops that use mkldnn because they have a different memory reuse
    // strategy; use the attr here because some GradMakers (like
    // ActivationGradOpMaker) add an input when use_mkldnn=true
    if (!(HasAttr("use_mkldnn") && Attr<bool>("use_mkldnn"))) {
      CheckUnusedVar(*this, scope);
    }
  }

  /* For profiling/benchmark only */
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
#endif
    VLOG(4) << "Operator(" << Type() << "): context wait and get last error";
  }

  if (FLAGS_check_nan_inf) {
    framework::details::CheckOpHasNanOrInf(*this, exec_scope, place);
  }

  // To solve issue #15032 (after a discussion with @Luotao about cpu
  // inference), do not cache the transfer scope; in this case delete the
  // transfer scope after the run to avoid a memory leak.
  if (transfer_scope && !run_by_executor_ && !enable_cache_transfer_scope_) {
    scope.DeleteScope(transfer_scope);
  }
}

OpKernelType OperatorWithKernel::InnerGetExpectedKernelType(
    const ExecutionContext& ctx) const {
  auto expected_kernel_key = this->GetExpectedKernelType(ctx);
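  // The `op_device` attribute, when present, can pin the op to a device; a
  // "gpu" request silently degrades to CPUPlace when no GPU (or NPU) kernel
  // support is compiled in.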
  if (HasAttr("op_device")) {
    if (Attr<std::string>("op_device") == "cpu") {
      expected_kernel_key.place_ = platform::CPUPlace();
    } else if (Attr<std::string>("op_device").find("gpu") !=
               std::string::npos) {
      auto device = Attr<std::string>("op_device");
      size_t pos = device.find(':');
      if (pos != std::string::npos) {
        device = device.substr(0, pos);
        LOG_FIRST_N(WARNING, 1)
            << "Device index is only supported under pipeline parallelism, "
            << "so it will be ignored.";
      }
      // when an Op that only has a CPUKernel is assigned to GPU, the CPUKernel
      // will be executed and a warning will be given at the same time.
      expected_kernel_key.place_ = platform::CPUPlace();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      if (SupportGPU()) {
        auto& dev_ctx = ctx.device_context();
        expected_kernel_key.place_ = dev_ctx.GetPlace();
      }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
      if (SupportNPU()) {
        auto& dev_ctx = ctx.device_context();
        expected_kernel_key.place_ = dev_ctx.GetPlace();
      }
#endif
      if (platform::is_cpu_place(expected_kernel_key.place_)) {
        LOG_FIRST_N(WARNING, 1)
            << "Op(" << type_
            << ") has no CUDA implementation. It will be assigned to CPUPlace.";
      }
    }
  }
  VLOG(3) << "op type:" << type_
          << ", expected_kernel_key:" << expected_kernel_key;
  return expected_kernel_key;
}

phi::KernelKey OperatorWithKernel::ChoosePhiKernel(
    const ExecutionContext& ctx) const {
  kernel_signature_.reset(
      new phi::KernelSignature(std::move(GetExpectedPhiKernelArgs(ctx))));
  VLOG(6) << *kernel_signature_.get();

  kernel_type_.reset(
      new OpKernelType(std::move(InnerGetExpectedKernelType(ctx))));

  auto pt_kernel_name = kernel_signature_->name;
  auto pt_kernel_key = TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
  pt_kernel_.reset(new phi::Kernel(phi::KernelFactory::Instance().SelectKernel(
      pt_kernel_name, pt_kernel_key)));

  if (pt_kernel_->IsValid()) {
    VLOG(6) << "Static mode ChoosePhiKernel - kernel name: " << pt_kernel_name
            << " | kernel key: " << pt_kernel_key
            << " | kernel: " << *pt_kernel_;
  } else {
    VLOG(6) << "Static mode ChoosePhiKernel - kernel `" << pt_kernel_name
            << "` not found.";
  }
  return pt_kernel_key;
}

void OperatorWithKernel::ChooseKernel(const ExecutionContext& ctx) const {
  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  PADDLE_ENFORCE_NE(
      kernels_iter,
      all_op_kernels.end(),
      platform::errors::Unavailable(
          "There are no kernels which are registered in the %s operator.",
          type_));

  OpKernelMap& kernels = kernels_iter->second;

  auto expected_kernel_key = InnerGetExpectedKernelType(ctx);

  auto kernel_iter = kernels.find(expected_kernel_key);

#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: fallbacking to PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif

#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
  if (platform::is_xpu_place(expected_kernel_key.place_) &&
      (kernel_iter == kernels.end() ||
       !paddle::platform::is_xpu_support_op(type_, expected_kernel_key) ||
       paddle::platform::is_in_xpu_black_list(type_))) {
    VLOG(3) << "fluid missing XPU kernel: " << type_
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif

#ifdef PADDLE_WITH_XPU_KP
  if (paddle::platform::is_xpu_place(expected_kernel_key.place_)) {
    bool use_xpu_kp_kernel_rt =
        FLAGS_run_kp_kernel &&
        paddle::platform::is_xpu_kp_support_op(type_, expected_kernel_key);
    bool use_xpu_kp_kernel_debug =
        paddle::platform::is_in_xpu_kpwhite_list(type_);
    if (use_xpu_kp_kernel_rt) {
      VLOG(3) << "fluid xpu_kp using rt mode ";
    }
    if (use_xpu_kp_kernel_debug) {
      VLOG(3) << "fluid xpu_kp using debug mode ";
    }
    bool is_xpu_kp_support = (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
    if (is_xpu_kp_support) {
      auto cache_expected_kernel_key_library_type =
          expected_kernel_key.library_type_;
      expected_kernel_key.library_type_ = LibraryType::kKP;
      kernel_iter = kernels.find(expected_kernel_key);
      // if we can't find the corresponding kernel when is_xpu_kp_support is
      // on, and fluid does not register the related kernel, it can't work and
      // will raise an error as before
      if (kernel_iter == kernels.end()) {
        expected_kernel_key.library_type_ =
            cache_expected_kernel_key_library_type;
        expected_kernel_key.place_ = platform::CPUPlace();
        kernel_iter = kernels.find(expected_kernel_key);
      } else {
        VLOG(3) << "fluid using XPU KP kernel: " << type_
                << ", using_kernel_key:" << expected_kernel_key;
      }
    }
    bool is_xpu_unsupport =
        (!paddle::platform::is_xpu_support_op(type_, expected_kernel_key) ||
         paddle::platform::is_in_xpu_black_list(type_));
    if (!is_xpu_kp_support &&
        (kernel_iter == kernels.end() || is_xpu_unsupport)) {
      VLOG(3) << "fluid missing XPU kernel: " << type_
              << ", expected_kernel_key:" << expected_kernel_key
              << ", falling back to CPU one!";
      expected_kernel_key.place_ = platform::CPUPlace();
      kernel_iter = kernels.find(expected_kernel_key);
    }
  }
#endif

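// The device-specific fallbacks below all follow one pattern: if the expected
// place has no registered kernel for this op, retarget the kernel key to
// CPUPlace and look the kernel up again.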
#ifdef PADDLE_WITH_IPU
  if (kernel_iter == kernels.end() &&
      platform::is_ipu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing IPU kernel: " << type_
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  if (kernel_iter == kernels.end() &&
      platform::is_npu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing NPU kernel: " << type_
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_MLU
  if (kernel_iter == kernels.end() &&
      platform::is_mlu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing MLU kernel: " << type_
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  if (kernel_iter == kernels.end() &&
      platform::is_custom_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing " << expected_kernel_key.place_.GetDeviceType()
            << " kernel: " << type_
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  PADDLE_ENFORCE_NE(
      kernel_iter,
      kernels.end(),
      platform::errors::NotFound("Operator (%s) does not have kernel for %s.",
                                 type_,
                                 KernelTypeToString(expected_kernel_key)));

  std::lock_guard<std::mutex> lock(cache_update_mutex_);
  if (kernel_type_.get() == nullptr || kernel_func_.get() == nullptr) {
    kernel_type_.reset(new OpKernelType(expected_kernel_key));
    kernel_func_.reset(new OpKernelFunc(kernel_iter->second));
  }
}

void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope,
    const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* origin_var = scope.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(origin_var,
                            platform::errors::InvalidArgument(
                                "The variable[%s] is nullptr.", var_name));
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(origin_var);
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(var,
                            platform::errors::InvalidArgument(
                                "The variable[%s] is nullptr.", var_name));
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    auto original_dims = original_tensor->dims();
    original_tensor->ShareDataWith(*transformed_tensor);
    // In order to solve the problem that the output shape of the NPU reshape
    // operator is not changed when inplace.
    if (type_ != "reshape2" && type_ != "reshape2_grad") {
      original_tensor->Resize(original_dims);
    }
  }
}

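// Cast a complex-typed gradient back to the real dtype of its forward
// variable, so that downstream ops see the dtype they expect.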
void OperatorWithKernel::HandleComplexGradToRealGrad(
    const Scope& scope, RuntimeContext* ctx) const {
  for (auto& var_name_item : Outputs()) {
    std::vector<Variable*>& output_vars = ctx->outputs[var_name_item.first];
    for (size_t i = 0; i < var_name_item.second.size(); ++i) {
      // 1. find grad_var & check whether is complex tensor
      auto var_name = var_name_item.second[i];
      auto orig_var_name = GradOriginalVarName(var_name);
      // only focus on gradient var
      if (var_name == orig_var_name) {
        continue;
      }
      auto* grad_var = output_vars[i];
      // skip nullptr var
      if (grad_var == nullptr) {
        continue;
      }
      // don't process LoDTensorArray temporarily,
      // add support if necessary for complex number calculations in the future
      if (!VarIsTensor(*grad_var)) {
        continue;
      }
      auto* grad_tensor =
          GetMutableLoDTensorOrSelectedRowsValueFromVar(grad_var);
      // skip nullptr tensor
      if (grad_tensor == nullptr || !grad_tensor->IsInitialized()) {
        continue;
      }
      // only focus on complex dtype now
      auto src_type = framework::TransToProtoVarType(grad_tensor->dtype());
      if (!IsComplexType(src_type)) {
        continue;
      }

      // 2. find forward var & check whether need to cast
      auto* var = scope.FindVar(orig_var_name);
      // if forward var not exists, do nothing
      if (var == nullptr) {
        continue;
      }
      if (!VarIsTensor(*var)) {
        continue;
      }
      const auto* tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      PADDLE_ENFORCE_NOT_NULL(
          tensor,
          platform::errors::Unavailable(
              "Forward tensor is nullptr when handle complex data to real."));
      // only need record type, the allocation may have been released
      auto dst_type = framework::TransToProtoVarType(tensor->dtype());
      // only focus on real dtype and need casting
      if (IsComplexType(dst_type)) {
        continue;
      }

      // 3. cast complex grad to real grad
      VLOG(6) << "Transform " << framework::DataTypeToString(src_type)
              << " var `" << var_name << "` to "
              << framework::DataTypeToString(dst_type)
              << " real var in static graph.";
      Tensor out;
      TransComplexToReal(dst_type, src_type, *grad_tensor, &out);
      SetTensorToVariable(*grad_var, out, grad_var);
    }
  }
}

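// PrepareData returns a transfer scope holding transformed copies of the
// inputs whose place, layout or dtype differ from what the selected kernel
// expects, or nullptr when every input can be used as-is.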
Scope* OperatorWithKernel::PrepareData(
    const Scope& scope,
    const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars,
    RuntimeContext* ctx) const {
  Scope* new_scope = nullptr;

  const std::unordered_set<std::string>* no_buffer_ins = nullptr;
  if (info_) {
    auto& no_buffer_inferer = info_->NoNeedBufferVarsInferer();
    // Some ops may not register NoNeedBufferVarsInferer
    if (no_buffer_inferer) {
      no_buffer_ins = &(no_buffer_inferer(Inputs(), Outputs(), Attrs()));
      if (no_buffer_ins->empty()) no_buffer_ins = nullptr;
    }
  }

  const auto& name_map = Inputs();
  auto prepare_input_data = [&](const std::string& in_name,
                                std::vector<Variable*>* in_vars,
                                const phi::TensorArgDef* in_def,
                                bool should_skip_input) -> void {
    auto& name_vec = name_map.at(in_name);
    for (size_t i = 0; i < in_vars->size(); ++i) {
      const auto& var_name = name_vec[i];
      auto* var = in_vars->at(i);

      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);

      // When no_buffer_ins is set, checking Tensor::holder_ is not
      // thread safe, and for the infershape scenario the checks to be
      // omitted are not really needed
      if (should_skip_input == true) {
#ifdef PADDLE_WITH_MKLDNN
        // A Var without a buffer may still be needed
        // for some situations like InferShape().
        // In this situation we cannot skip Var analysis, as
        // the MKL-DNN shape of the Var may differ from the kNHWC Var.
        // In such a situation a corresponding resized Var
        // has to be created and registered
        if ((tensor_in->layout() == DataLayout::kMKLDNN) &&
            (var->IsType<LoDTensor>() == true) &&
            (expected_kernel_key.data_layout_ != DataLayout::kMKLDNN) &&
            (paddle::platform::MKLDNNDeviceContext::tls()
                 .get_cur_paddle_data_layout() == DataLayout::kNHWC) &&
            (tensor_in->dims().size() >= 3)) {
          // Mixed execution: MKL-DNN together with GPU is not supported!
          if (!new_scope) {
            new_scope = &scope.NewScope();
          }
          auto* trans_var = new_scope->Var(var_name);
          in_vars->at(i) = trans_var;
          auto out = trans_var->GetMutable<LoDTensor>();
          out->Resize(tensor_in->dims());
          platform::MatchShapeToLayout(
              out, tensor_in->layout(), DataLayout::kNHWC);
          VLOG(7) << "Created reshaped dummy input based on MKL-DNN Tensor, "
                     "but with kNHWC layout, for "
                  << in_name << " in Operator " << type_;
        } else {
          VLOG(7) << "Skip scanning input " << in_name << " in Operator "
                  << type_;
        }
#endif
        continue;
      }

      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var =
          GetKernelTypeForVar(in_name, *tensor_in, expected_kernel_key);
      bool need_trans_dtype =
          kernel_type_for_var.data_type_ != expected_kernel_key.data_type_;
      bool need_trans_layout = NeedTransformLayout(
          kernel_type_for_var.data_layout_, expected_kernel_key.data_layout_);
      if (!need_trans_dtype && !need_trans_layout) {
        if (!run_phi_kernel_ &&
            platform::places_are_same_class(kernel_type_for_var.place_,
                                            expected_kernel_key.place_)) {
          continue;
        }
      }

      std::unique_ptr<OpKernelType> new_expected_kernel_key = nullptr;
      if (run_phi_kernel_ && in_def->backend != phi::Backend::ALL_BACKEND) {
        auto tensor_backend = phi::TransToPhiBackend(tensor_in->place());
        if ((in_def->backend != tensor_backend &&
             (in_def->backend != phi::Backend::GPUDNN ||
              tensor_backend != phi::Backend::GPU) &&
             (in_def->backend != phi::Backend::KPS ||
              tensor_backend != phi::Backend::XPU)) ||
            tensor_in->place().GetType() == AllocationType::GPUPINNED) {
          new_expected_kernel_key = std::make_unique<OpKernelType>(
              expected_kernel_key.data_type_,
              phi::TransToPhiPlace(in_def->backend),
              expected_kernel_key.data_layout_,
              expected_kernel_key.library_type_,
              expected_kernel_key.customized_type_value_);
        }
      }

      if (!need_trans_dtype && !need_trans_layout) {
        if (run_phi_kernel_ && new_expected_kernel_key == nullptr) {
          continue;
        }
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to "
              << (new_expected_kernel_key ? *new_expected_kernel_key
                                          : expected_kernel_key);

      // In the inference scenario, the scopes will be reused across
      // batches, so the `new_scope` here would result in GPU memory
      // explosion over the running of operators.
      // We use a thread_local cache to fix that issue: the key in the cache is
      // the combination of the `scope` argument, from_kernel_type, and
      // target_kernel_type.
      // Have a discussion with @Superjomn or the inference developers if some
      // changes on this logic might not be tested on the other scenarios.
      // If this op is not called by an Executor or ParallelExecutor, it
      // should be called by a NaiveExecutor; the NaiveExecutor will cache the
      // scopes and variables, and that behavior is quite different.
      //
      // To solve issue #15032 (after a discussion with @Luotao about cpu
      // inference), for all cpu-kernel cases without GPU participation we do
      // not do transfer scope caching here; cpu inference performance is not
      // impacted, by test.
      enable_cache_transfer_scope_ = false;
      if (!run_by_executor_) {
        if (new_expected_kernel_key) {
          if ((platform::is_gpu_place(kernel_type_for_var.place_) ||
               platform::is_gpu_place(new_expected_kernel_key->place_))) {
            new_scope = TryCreateTransferScope(
                kernel_type_for_var, *new_expected_kernel_key, &scope);
            enable_cache_transfer_scope_ = true;
          }
        } else if ((platform::is_gpu_place(kernel_type_for_var.place_) ||
                    platform::is_gpu_place(expected_kernel_key.place_))) {
          new_scope = TryCreateTransferScope(
              kernel_type_for_var, expected_kernel_key, &scope);
          enable_cache_transfer_scope_ = true;
        }
      }

      if (!new_scope) {
        new_scope = &scope.NewScope();
      }
      // For inference, if a gpu model has an op which could only run on CPU,
      // each result of different input will be the same with the first one.
      // The reason is that if a gpu tensor is the input of a cpu kernel,
      // we will create a new cpu tensor in new scope.
      // However, if enable_cache_runtime_context_, we would get the cpu
      // tensor each time, not the gpu tensor. Thus, we set pre_scope_ =
      // nullptr to trigger `new RuntimeContext()` in RunImpl().
      if (enable_cache_runtime_context_) {
        pre_scope_ = nullptr;
      }

      // Create a new var with the same name in the transfer scope
      auto* trans_var = new_scope->Var(var_name);
      in_vars->at(i) = trans_var;

      // Find if an inplace pair exists between input and output.
      // If inplace exists, set the newly created var as the inplaced output,
      // and record its name in transfered_inplace_vars.
      for (auto& pair : Outputs()) {
        for (size_t j = 0; j < pair.second.size(); ++j) {
          if (pair.second[j] == var_name) {
            VLOG(4) << "Found inplace between input(" << in_name
                    << ") and output(" << pair.first
                    << "), the variable name is " << var_name;
            ctx->outputs[pair.first][j] = trans_var;
            transfered_inplace_vars->emplace_back(var_name);
          }
        }
      }

      // Do transfer
      Tensor out;
      TransformData(new_expected_kernel_key ? *new_expected_kernel_key
                                            : expected_kernel_key,
                    kernel_type_for_var,
                    *tensor_in,
                    &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  };

  if (run_phi_kernel_) {
    const auto& input_names = kernel_signature_->input_names;
    const auto& input_defs = pt_kernel_->args_def().input_defs();
    PADDLE_ENFORCE_EQ(input_names.size(),
                      input_defs.size(),
                      platform::errors::InvalidArgument(
                          "The size of inputs_args names (%d) must be equal to "
                          "the size of kernel input_defs (%d).",
                          input_names.size(),
                          input_defs.size()));
    for (size_t i = 0; i < input_defs.size(); ++i) {
      auto& in_def = input_defs.at(i);
      std::string input_name = input_names[i];
      auto iter = ctx->inputs.find(input_name);
      if (iter == ctx->inputs.end()) {
        continue;
      }
      auto& ins_vector = iter->second;
      bool should_skip_input =
          no_buffer_ins && no_buffer_ins->count(input_name) > 0;
      prepare_input_data(input_name, &ins_vector, &in_def, should_skip_input);
    }
  } else {
    for (auto& var_name_item : Inputs()) {
      bool should_skip_input =
          no_buffer_ins && no_buffer_ins->count(var_name_item.first) > 0;

      std::vector<Variable*>& input_vars = ctx->inputs[var_name_item.first];
      prepare_input_data(
          var_name_item.first, &input_vars, nullptr, should_skip_input);
    }
  }

  // If pre_scope = &scope, it means that scope is cached and the op is not in
  // while block. If new_scope = nullptr, it means that for each input of this
  // Op, there is no need to do PrepareData. So PrepareData could be skipped at
  // the rest iterations to save the elapsed time.
  // We do not support skipping PrepareData in while block, because the Op's
  // input may be changed by subsequent Ops, which may cause an error.

  // For inference, ops that sit behind a conditional branch aren't supported
  // well, so disable the prepare optimization conservatively.
  bool force_prepare_data = HasAttr("inference_force_prepare_data") &&
                            Attr<bool>("inference_force_prepare_data");
  if (pre_scope_ == &scope && new_scope == nullptr && !force_prepare_data) {
    need_prepare_data_ = false;
  }

  return new_scope;
}
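
// ParseInputDataType and ParseMultiInputDataType scan one input slot and
// record the dtype of the first initialized tensor they find; IndicateDataType
// then folds the result across all inputs and enforces consistency.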
void OperatorWithKernel::ParseInputDataType(
    const Variable* var,
    const std::string& name,
    proto::VarType::Type* data_type) const {
  if (var != nullptr) {
    const Tensor* t = nullptr;
    if (var->IsType<Tensor>()) {
      t = &var->Get<Tensor>();
    } else if (var->IsType<LoDTensor>()) {
      t = &var->Get<LoDTensor>();
    } else if (var->IsType<phi::SelectedRows>()) {
      t = &(var->Get<phi::SelectedRows>().value());
    } else if (var->IsType<LoDTensorArray>()) {
      auto t_arr = &var->Get<LoDTensorArray>();
      for (size_t j = 0; j < t_arr->size(); j++) {
        if (t_arr->at(j).IsInitialized()) {
          t = &(t_arr->at(j));
        }
      }
    }
    if (t != nullptr) {
      PADDLE_ENFORCE_EQ(
          t->IsInitialized(),
          true,
          platform::errors::InvalidArgument("The %s Op's Input Variable `%s` "
                                            "contains uninitialized Tensor.",
                                            Type(),
                                            name));
      *data_type = paddle::framework::TransToProtoVarType(t->dtype());
    }
  }
}

void OperatorWithKernel::ParseMultiInputDataType(
    const std::vector<Variable*>& vars,
    const std::string& name,
    proto::VarType::Type* data_type) const {
  proto::VarType::Type default_data_type =
      static_cast<proto::VarType::Type>(-1);
  for (size_t i = 0; i < vars.size(); ++i) {
    const Variable* var = vars[i];
    if (var != nullptr) {
      const Tensor* t = nullptr;
      if (var->IsType<Tensor>()) {
        t = &var->Get<Tensor>();
      } else if (var->IsType<LoDTensor>()) {
        t = &var->Get<LoDTensor>();
      } else if (var->IsType<phi::SelectedRows>()) {
        t = &(var->Get<phi::SelectedRows>().value());
      } else if (var->IsType<LoDTensorArray>()) {
        auto t_arr = &var->Get<LoDTensorArray>();
        for (size_t j = 0; j < t_arr->size(); j++) {
          if (t_arr->at(j).IsInitialized()) {
            t = &(t_arr->at(j));
          }
        }
      }
      if (t != nullptr) {
        PADDLE_ENFORCE_EQ(
            t->IsInitialized(),
            true,
            platform::errors::InvalidArgument("The %s Op's Input Variable `%s` "
                                              "contains uninitialized Tensor.",
                                              Type(),
                                              name));
        proto::VarType::Type tmp =
            paddle::framework::TransToProtoVarType(t->dtype());
        PADDLE_ENFORCE(tmp == *data_type || *data_type == default_data_type,
                       platform::errors::InvalidArgument(
                           "The DataType of %s Op's duplicable or different "
                           "slot Variable %s must be "
                           "consistent or register GetExpectedKernelType. The "
                           "current variable type is (%s), but the "
                           "previous variable type is (%s).",
                           Type(),
                           name,
                           DataTypeToString(tmp),
                           DataTypeToString(*data_type)));
        *data_type = tmp;
      }
    }
  }
}

proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  proto::VarType::Type default_data_type =
      static_cast<proto::VarType::Type>(-1);
  proto::VarType::Type data_type = default_data_type;
  for (auto* name : ctx.InNameList()) {
    if (ctx.InputSize(*name) == 1UL) {
      ParseInputDataType(ctx.InputVar(*name), *name, &data_type);
    } else {
      ParseMultiInputDataType(ctx.MultiInputVar(*name), *name, &data_type);
    }
  }
  PADDLE_ENFORCE_NE(
      data_type,
      default_data_type,
      platform::errors::NotFound(
          "DataType should be indicated by input Variable at %s.", Type()));
  return data_type;
}

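// A typical GetExpectedKernelType override uses the single-slot variant, for
// example (a sketch, not taken from this file):
//   return OpKernelType(IndicateVarDataType(ctx, "X"), ctx.GetPlace());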
proto::VarType::Type OperatorWithKernel::IndicateVarDataType(
    const ExecutionContext& ctx, const std::string& name) const {
  proto::VarType::Type default_data_type =
      static_cast<proto::VarType::Type>(-1);
  proto::VarType::Type data_type = default_data_type;
  if (ctx.InputSize(name) == 1UL) {
    ParseInputDataType(ctx.InputVar(name), name, &data_type);
  } else {
    ParseMultiInputDataType(ctx.MultiInputVar(name), name, &data_type);
  }
  PADDLE_ENFORCE_NE(
      data_type,
      default_data_type,
      platform::errors::InvalidArgument(
          "The Input Variable(%s) of (%s) Operator used to determine kernel "
          "data type is empty or is not a LoDTensor, SelectedRows, or "
          "LoDTensorArray.",
          name,
          Type()));
  return data_type;
}

Tensor* OperatorWithKernel::GetTensorFormInputSafely(
    const ExecutionContext& ctx, const std::string& name) const {
  // 1. get variable and check
  // NOTE: only supports a single input var for now
  // NOTE: we use const_cast here because there is no method
  // to get a single mutable var; we will not change
  // the var's data, only use some attributes
  Variable* var = const_cast<Variable*>(ctx.InputVar(name));
  PADDLE_ENFORCE_NOT_NULL(
      var,
      platform::errors::NotFound(
          "The variable %s is not found when promote complex types.", name));
  // 2. get tensor and check
  Tensor* t = nullptr;
  if (var->IsType<Tensor>()) {
    t = var->GetMutable<Tensor>();
  } else if (var->IsType<LoDTensor>()) {
    t = var->GetMutable<LoDTensor>();
  } else if (var->IsType<phi::SelectedRows>()) {
    t = var->GetMutable<phi::SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Unsupported input variable type in complex type promotion."));
  }
  PADDLE_ENFORCE_NOT_NULL(
      t,
      platform::errors::InvalidArgument(
          "The Tensor of variable %s is nullptr when promoting complex types.",
          name));
  PADDLE_ENFORCE_EQ(t->IsInitialized(),
                    true,
                    platform::errors::InvalidArgument(
                        "The Tensor in the %s Op's Input Variable %s(%s) is "
                        "not initialized.",
                        Type(),
                        name,
                        ctx.InputName(name)));
  return t;
}

/** NOTE(chenweihang): For safety reasons, we now only
 * perform type promotion for binary operations with
 * complex type inputs, which is used to support the
 * paddle quantum function.
 * In other cases, the first input data type is used as
 * the kernel data type.
 */
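// For example, binary elementwise ops call something like
// IndicateOrPromoteVarDataTypes(ctx, "X", "Y") from their
// GetExpectedKernelType, so that complex64 + float32 promotes to complex64.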
proto::VarType::Type OperatorWithKernel::IndicateOrPromoteVarDataTypes(
    const ExecutionContext& ctx,
    const std::string& name1,
    const std::string& name2) const {
  // 1. Get tensor
  auto* tensor_a = GetTensorFormInputSafely(ctx, name1);
  auto* tensor_b = GetTensorFormInputSafely(ctx, name2);

  // 2. Get two input types
  auto type_a = framework::TransToProtoVarType(tensor_a->dtype());
  auto type_b = framework::TransToProtoVarType(tensor_b->dtype());

  // 3. Get first input type or promote complex types
  auto target_type = PromoteTypesIfComplexExists(type_a, type_b);

  return target_type;
}

OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name,
    const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(
      expected_kernel_type.data_type_, tensor.place(), tensor.layout());
}

phi::KernelSignature OperatorWithKernel::GetExpectedPhiKernelArgs(
    const ExecutionContext& ctx) const {
  ExecutionArgumentMappingContext arg_mapping_ctx(ctx);
  if (arg_map_fn_ == nullptr) {
    auto* arg_map_fn = phi::OpUtilsMap::Instance().GetArgumentMappingFn(type_);
    if (arg_map_fn) {
      arg_map_fn_.reset(new phi::ArgumentMappingFn(*arg_map_fn));
    } else {
      auto func =
          [this](
              const phi::ArgumentMappingContext& ctx) -> phi::KernelSignature {
        return phi::DefaultKernelSignatureMap::Instance().Get(type_);
      };
      arg_map_fn_.reset(new phi::ArgumentMappingFn(func));
    }
  }
  return (*arg_map_fn_)(arg_mapping_ctx);
}

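// BuildPhiKernelContext packs the variables in the fluid RuntimeContext plus
// the op attributes into a phi::KernelContext, recording a per-argument index
// range so that multi-tensor slots stay addressable by position.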
void OperatorWithKernel::BuildPhiKernelContext(
    const RuntimeContext& ctx,
    platform::DeviceContext* dev_ctx,
    phi::KernelContext* pt_kernel_context) const {
  pt_kernel_context->SetDeviceContext(dev_ctx);

  auto& input_names = kernel_signature_->input_names;
  auto& attr_names = kernel_signature_->attr_names;
  auto& output_names = kernel_signature_->output_names;

  auto input_defs = pt_kernel_->args_def().input_defs();
  auto attr_defs = pt_kernel_->args_def().attribute_defs();
  auto output_defs = pt_kernel_->args_def().output_defs();

  PADDLE_ENFORCE_EQ(input_names.size(),
                    input_defs.size(),
                    platform::errors::InvalidArgument(
                        "The size of inputs_args names (%d) must be equal to "
                        "the size of kernel input_defs (%d).",
                        input_names.size(),
                        input_defs.size()));

  PADDLE_ENFORCE_EQ(output_names.size(),
                    output_defs.size(),
                    platform::errors::InvalidArgument(
                        "The size of outputs_args names (%d) must be equal to "
                        "the size of kernel output_defs (%d).",
                        output_names.size(),
                        output_defs.size()));

  PADDLE_ENFORCE_EQ(attr_names.size(),
                    attr_defs.size(),
                    platform::errors::InvalidArgument(
                        "The size of attribute_args names (%d) must be equal "
                        "to the size of kernel attribute_defs (%d).",
                        attr_names.size(),
                        attr_defs.size()));

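  // Each input slot occupies the contiguous index range [start_idx, end_idx);
  // an absent optional input still consumes one slot, filled with nullptr.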
  for (size_t i = 0; i < input_names.size(); ++i) {
    auto it = ctx.inputs.find(input_names[i]);

    // calculate the start and end index of the input tensors
    size_t start_idx =
        (i == 0 ? 0 : pt_kernel_context->InputRangeAt(i - 1).second);
    // deal with optional inputs here
    if ((it == ctx.inputs.end() || it->second.size() == 0) &&
        (input_defs[i].type_index ==
             std::type_index(typeid(paddle::optional<phi::DenseTensor>)) ||
         input_defs[i].type_index ==
             std::type_index(typeid(paddle::optional<phi::SelectedRows>)) ||
         input_defs[i].type_index ==
             std::type_index(typeid(
                 paddle::optional<std::vector<const phi::DenseTensor*>>)))) {
      pt_kernel_context->EmplaceBackInputWithoutSetRange(nullptr);
      auto end_idx = start_idx + 1;
      pt_kernel_context->AssignInputRange(std::make_pair(start_idx, end_idx),
                                          i);

      continue;
    }
    auto ins_vector = it->second;
    size_t end_idx = start_idx + ins_vector.size();
    for (size_t offset = 0; offset < ins_vector.size(); ++offset) {
      const phi::TensorBase* tensor_in = nullptr;
      auto* var = ins_vector[offset];
      if (var->IsType<framework::LoDTensor>()) {
        tensor_in = &(var->Get<framework::LoDTensor>());
        pt_kernel_context->EmplaceBackInputWithoutSetRange(tensor_in);
      } else if (var->IsType<phi::SelectedRows>()) {
        tensor_in = &(var->Get<phi::SelectedRows>());
        pt_kernel_context->EmplaceBackInputWithoutSetRange(tensor_in);
      } else if (var->IsType<framework::LoDTensorArray>()) {
        need_prepare_phi_data_ = true;
        paddle::small_vector<const phi::TensorBase*> tensor_vector;
        auto& tensor_array = var->Get<framework::LoDTensorArray>();
        for (auto& t : tensor_array) {
          tensor_vector.emplace_back(&t);
        }
        pt_kernel_context->EmplaceBackInputsWithoutSetRange(tensor_vector);
        end_idx += tensor_array.size() - 1;
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "Unsupported input `%s` type when calling pt kernel.",
            framework::ToTypeName(var->Type())));
      }
    }
    // Note: here we cannot deal with vector<LoDTensorArray> input
    pt_kernel_context->AssignInputRange(std::make_pair(start_idx, end_idx), i);
  }
  VLOG(4) << "Done inputs";

  for (size_t i = 0; i < output_names.size(); ++i) {
    auto it = ctx.outputs.find(output_names[i]);
    size_t start_idx =
        (i == 0 ? 0 : pt_kernel_context->OutputRangeAt(i - 1).second);

    if (it == ctx.outputs.end() || it->second.empty()) {
      // Deal with the case that some outputs are not found or be NULL when run
      // the kernel.
      // For example : the outputs of matmul_grad are dx and dy,
      // sometimes dx or dy may be NULL.
      pt_kernel_context->EmplaceBackOutputWithoutSetRange(nullptr);
      auto end_idx = start_idx + 1;
      pt_kernel_context->AssignOutputRange(std::make_pair(start_idx, end_idx),
                                           i);
      continue;
    }
    auto& outs_vector = it->second;

2604
    size_t end_idx = start_idx + outs_vector.size();
2605 2606

    for (size_t offset = 0; offset < outs_vector.size(); ++offset) {
      phi::TensorBase* tensor_out = nullptr;
      auto* var = outs_vector[offset];
      if (var) {
        if (var->template IsType<framework::LoDTensor>()) {
          tensor_out = var->template GetMutable<framework::LoDTensor>();
          pt_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
        } else if (var->template IsType<phi::SelectedRows>()) {
          tensor_out = var->template GetMutable<phi::SelectedRows>();
          pt_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
        } else if (var->template IsType<framework::LoDTensorArray>()) {
          paddle::small_vector<phi::TensorBase*> tensor_vector;
          auto* tensor_array =
              var->template GetMutable<framework::LoDTensorArray>();
          // Note: if the input LoDTensorArray is empty, the output
          // LoDTensorArray is empty as well.
          for (auto& t : *tensor_array) {
            tensor_vector.emplace_back(&t);
          }
          pt_kernel_context->EmplaceBackOutputsWithoutSetRange(tensor_vector);
          end_idx += tensor_array->size() - 1;
        } else {
          PADDLE_THROW(platform::errors::Unimplemented(
              "Unsupported output `%s` type when calling pt kernel.",
              framework::ToTypeName(var->Type())));
        }
      } else {
        pt_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
      }
    }
    pt_kernel_context->AssignOutputRange(std::make_pair(start_idx, end_idx), i);
  }
  VLOG(4) << "Done outputs";

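  // Attributes are dispatched on the phi attribute type declared in the
  // kernel signature. Scalar and IntArray attributes may come either from the
  // op's AttributeMap or from runtime input tensors; the latter case sets
  // need_prepare_phi_data_ to signal that tensor-backed attributes must be
  // prepared at run time.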
  for (size_t i = 0; i < attr_names.size(); ++i) {
    VLOG(6) << "BuildPhiKernelContext: " << attr_names[i] << ": "
            << attr_defs[i].type_index;
    auto attr_iter = Attrs().find(attr_names[i]);
    switch (attr_defs[i].type_index) {
      case phi::AttributeType::SCALAR:
        if (attr_iter != Attrs().end()) {
          // scalar is in the attribute
          switch (AttrTypeID(attr_iter->second)) {
            case proto::AttrType::FLOAT:
              pt_kernel_context->EmplaceBackAttr(std::move(
                  phi::Scalar(PADDLE_GET_CONST(float, attr_iter->second))));
              break;
            case proto::AttrType::INT:
              pt_kernel_context->EmplaceBackAttr(std::move(
                  phi::Scalar(PADDLE_GET_CONST(int, attr_iter->second))));
              break;
            case proto::AttrType::STRING:
              pt_kernel_context->EmplaceBackAttr(std::move(phi::Scalar(
                  PADDLE_GET_CONST(std::string, attr_iter->second))));
              break;
            default:
              PADDLE_THROW(platform::errors::Unimplemented(
                  "Unsupported cast op attribute `%s` to Scalar when "
                  "constructing KernelContext in dygraph.",
                  attr_names[i]));
          }
        } else {  // scalar is in the input
          need_prepare_phi_data_ = true;
          auto& ins_vector = ctx.inputs.at(attr_names[i]);
          pt_kernel_context->EmplaceBackAttr(std::move(
              experimental::MakePhiScalarFromVar(*ins_vector.front())));
        }
        break;
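      // IntArray attributes: either materialized from the AttributeMap or,
      // when absent there, read from a ShapeTensor / ShapeTensorList input at
      // runtime.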
      case phi::AttributeType::INT_ARRAY:
        if (attr_iter != Attrs().end()) {
          switch (AttrTypeID(attr_iter->second)) {
            case proto::AttrType::INTS:
              pt_kernel_context->EmplaceBackAttr(std::move(phi::IntArray(
                  PADDLE_GET_CONST(std::vector<int32_t>, attr_iter->second))));
              break;
            case proto::AttrType::LONGS:
              pt_kernel_context->EmplaceBackAttr(std::move(phi::IntArray(
                  PADDLE_GET_CONST(std::vector<int64_t>, attr_iter->second))));
              break;
            case proto::AttrType::INT:
              pt_kernel_context->EmplaceBackAttr(std::move(phi::IntArray(
                  &PADDLE_GET_CONST(int32_t, attr_iter->second), 1)));
              break;
            case proto::AttrType::LONG:
              pt_kernel_context->EmplaceBackAttr(std::move(phi::IntArray(
                  &PADDLE_GET_CONST(int64_t, attr_iter->second), 1)));
              break;
            default:
              PADDLE_THROW(platform::errors::Unimplemented(
                  "Unsupported cast op attribute `%s` to IntArray when "
                  "constructing KernelContext.",
                  attr_names[i]));
          }
        } else {  // shape is in the input
          need_prepare_phi_data_ = true;
          auto& ins_vector = ctx.inputs.at(attr_names[i]);
          if (ins_vector.size() == 1) {  // ShapeTensor
            pt_kernel_context->EmplaceBackAttr(std::move(
                experimental::MakePhiIntArrayFromVar(*ins_vector.front())));
          } else {  // ShapeTensorList
            pt_kernel_context->EmplaceBackAttr(std::move(
                experimental::MakePhiIntArrayFromVarList(ins_vector)));
          }
        }
        break;
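      // SCALARS attributes are homogeneous vectors (ints, longs, floats,
      // doubles, or bools) converted element by element into
      // std::vector<phi::Scalar>.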
      case phi::AttributeType::SCALARS: {
        PADDLE_ENFORCE_NE(
            attr_iter,
            Attrs().end(),
            platform::errors::NotFound("(%s) is not found in AttributeMap when "
                                       "building static KernelContext.",
                                       attr_names[i]));
        switch (AttrTypeID(attr_iter->second)) {
          case proto::AttrType::INTS: {
            const auto& vec =
                PADDLE_GET_CONST(std::vector<int32_t>, attr_iter->second);
            std::vector<phi::Scalar> scalar_list;
            scalar_list.reserve(vec.size());
            for (const auto& val : vec) {
              scalar_list.emplace_back(val);
            }
            pt_kernel_context->EmplaceBackAttr(std::move(scalar_list));
          } break;
          case proto::AttrType::LONGS: {
            const auto& vec =
                PADDLE_GET_CONST(std::vector<int64_t>, attr_iter->second);
            std::vector<phi::Scalar> scalar_list;
            scalar_list.reserve(vec.size());
            for (const auto& val : vec) {
              scalar_list.emplace_back(val);
            }
            pt_kernel_context->EmplaceBackAttr(std::move(scalar_list));
          } break;
          case proto::AttrType::FLOATS: {
            const auto& vec =
                PADDLE_GET_CONST(std::vector<float>, attr_iter->second);
            std::vector<phi::Scalar> scalar_list;
            scalar_list.reserve(vec.size());
            for (const auto& val : vec) {
              scalar_list.emplace_back(val);
            }
            pt_kernel_context->EmplaceBackAttr(std::move(scalar_list));
          } break;
          case proto::AttrType::FLOAT64S: {
            const auto& vec =
                PADDLE_GET_CONST(std::vector<double>, attr_iter->second);
            std::vector<phi::Scalar> scalar_list;
            scalar_list.reserve(vec.size());
            for (const auto& val : vec) {
              scalar_list.emplace_back(val);
            }
            pt_kernel_context->EmplaceBackAttr(std::move(scalar_list));
          } break;
          case proto::AttrType::BOOLEANS: {
            const auto& vec =
                PADDLE_GET_CONST(std::vector<bool>, attr_iter->second);
            std::vector<phi::Scalar> scalar_list;
            scalar_list.reserve(vec.size());
            for (const auto& val : vec) {
              scalar_list.emplace_back(val);
            }
            pt_kernel_context->EmplaceBackAttr(std::move(scalar_list));
          } break;
          default:
            PADDLE_THROW(platform::errors::Unimplemented(
                "Unsupported cast op attribute `%s` to vector<Scalar> when "
                "constructing KernelContext.",
                attr_names[i]));
        }
      } break;
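      // All remaining attribute kinds map one-to-one from the fluid
      // AttributeMap onto plain phi attribute types (float, int, bool, int64,
      // strings, DataType, and their vector forms).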
      default: {
        PADDLE_ENFORCE_NE(
            attr_iter,
            Attrs().end(),
            platform::errors::NotFound("(%s) is not found in AttributeMap when "
                                       "building static KernelContext.",
                                       attr_names[i]));
        switch (attr_defs[i].type_index) {
          case phi::AttributeType::FLOAT32:
            pt_kernel_context->EmplaceBackAttr(
                PADDLE_GET_CONST(float, attr_iter->second));
            break;
          case phi::AttributeType::INT32:
            pt_kernel_context->EmplaceBackAttr(
                PADDLE_GET_CONST(int, attr_iter->second));
            break;
          case phi::AttributeType::BOOL:
            pt_kernel_context->EmplaceBackAttr(
                PADDLE_GET_CONST(bool, attr_iter->second));
            break;
          case phi::AttributeType::INT64:
            pt_kernel_context->EmplaceBackAttr(
                PADDLE_GET_CONST(int64_t, attr_iter->second));
            break;
          case phi::AttributeType::INT32S:
            pt_kernel_context->EmplaceBackAttr(
                PADDLE_GET_CONST(std::vector<int>, attr_iter->second));
            break;
          case phi::AttributeType::DATA_TYPE: {
            auto data_type = framework::TransToPhiDataType(
                static_cast<framework::proto::VarType::Type>(
                    PADDLE_GET_CONST(int, attr_iter->second)));
            pt_kernel_context->EmplaceBackAttr(data_type);
          } break;
          case phi::AttributeType::STRING:
            pt_kernel_context->EmplaceBackAttr(
                std::move(PADDLE_GET_CONST(std::string, attr_iter->second)));
            break;
          case phi::AttributeType::INT64S:
            switch (AttrTypeID(attr_iter->second)) {
              case proto::AttrType::LONGS:
                pt_kernel_context->EmplaceBackAttr(
                    PADDLE_GET_CONST(std::vector<int64_t>, attr_iter->second));
                break;
              case proto::AttrType::INTS: {
                const auto& vector_int_attr =
                    PADDLE_GET_CONST(std::vector<int>, attr_iter->second);
                const std::vector<int64_t> vector_int64_attr(
                    vector_int_attr.begin(), vector_int_attr.end());
                pt_kernel_context->EmplaceBackAttr(vector_int64_attr);
              } break;
              default:
                PADDLE_THROW(platform::errors::Unimplemented(
                    "Unsupported cast op attribute `%s` to vector<int64_t> "
                    "when constructing KernelContext.",
                    attr_names[i]));
            }
            break;
          case phi::AttributeType::FLOAT32S:
            pt_kernel_context->EmplaceBackAttr(
                PADDLE_GET_CONST(std::vector<float>, attr_iter->second));
            break;
          case phi::AttributeType::STRINGS:
            pt_kernel_context->EmplaceBackAttr(
                PADDLE_GET_CONST(std::vector<std::string>, attr_iter->second));
            break;
          default:
            PADDLE_THROW(platform::errors::Unimplemented(
                "Unsupported cast op attribute `%s` when constructing "
                "KernelContext in dygraph.",
                attr_names[i]));
        }
      }
    }
  }
  VLOG(4) << "Done attributes";
}

}  // namespace framework
}  // namespace paddle