/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/operator.h"

#include <glog/logging.h>

#include <sstream>
#include <string>

#include "gflags/gflags.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/framework/op_call_stack.h"
#include "paddle/fluid/framework/phi_utils.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/transfer_scope_cache.h"
#include "paddle/fluid/framework/unused_var_check.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/device/device_wrapper.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/fluid/platform/profiler/supplement_tracing.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/core/kernel_context.h"
#include "paddle/phi/core/kernel_factory.h"
#include "paddle/phi/ops/compat/signatures.h"

namespace phi {
class DenseTensor;
}  // namespace phi

#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_info.h"
#include "paddle/fluid/platform/device/xpu/xpu_op_list.h"
#endif

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

#ifdef PADDLE_WITH_MLU
#include "paddle/fluid/platform/device/mlu/mlu_info.h"
#endif

DECLARE_bool(benchmark);
DECLARE_bool(check_nan_inf);
DECLARE_bool(enable_unused_var_check);
DECLARE_bool(run_kp_kernel);
DECLARE_bool(enable_host_event_recorder_hook);

namespace paddle {
namespace framework {

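// Candidate (place, library) pairs ordered from highest to lowest priority
// when choosing a kernel for an operator.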
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

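// The static helpers below (GetDimsDebug, VarInited, GetDtype, GetPlace,
// GetRowSize and GetLoDDebug) look up a variable by name in the scope and
// return printable information about it for the operator debug strings.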
static DDim GetDimsDebug(const ScopeBase& scope,
                         const std::string& name,
                         bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    return tensor.dims();
  } else if (var->IsType<phi::SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<phi::SelectedRows>().value().dims();
    } else {
      return var->Get<phi::SelectedRows>().GetCompleteDims();
    }
  } else if (var->IsType<Strings>()) {
    return DDim({static_cast<int64_t>(var->Get<Strings>().size())});
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const ScopeBase& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const ScopeBase& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(framework::TransToProtoVarType(tensor.dtype()));
  } else if (var->IsType<phi::SelectedRows>()) {
    auto tensor = var->Get<phi::SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(framework::TransToProtoVarType(tensor.dtype()));
    }
  } else if (var->IsType<Strings>()) {
    return "strings";
  } else {
    return "";
  }
}

static std::string GetPlace(const ScopeBase& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }
  auto to_string = [](const platform::Place& p) {
    std::stringstream sstream;
    sstream << p;
    return sstream.str();
  };

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return to_string(tensor.place());
  } else if (var->IsType<phi::SelectedRows>()) {
    auto tensor = var->Get<phi::SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return to_string(tensor.place());
    }
  } else {
    return "";
  }
}

static int GetRowSize(const ScopeBase& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<phi::SelectedRows>()) {
    return var->Get<phi::SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoDDebug(const ScopeBase& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    return tensor.lod();
  } else {
    return default_lod;
  }
}

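// RuntimeContext resolves every input and output variable name once and
// caches the resulting Variable* pointers for the duration of a run.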
RuntimeContext::RuntimeContext(const VariableNameMap& innames,
                               const VariableNameMap& outnames,
                               const Scope& scope) {
  for (auto& var_name_item : innames) {
    std::vector<Variable*>& input_vars = inputs[var_name_item.first];
    input_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      input_vars.push_back(scope.FindVar(var_name));
    }
  }
  for (auto& var_name_item : outnames) {
    std::vector<Variable*>& output_vars = outputs[var_name_item.first];
    output_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      output_vars.push_back(scope.FindVar(var_name));
    }
  }
}

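// Sets the device for the target place, wraps the run in profiler events for
// the op type and op name, and dispatches to RunImpl; exceptions are annotated
// with the operator call stack before being rethrown.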
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  try {
    VLOG(4) << place << " " << DebugStringEx(&scope);
    if (platform::is_gpu_place(place)) {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with CUDA support.",
          place));
#else
      auto dev_id = place.device;
      platform::SetDeviceId(dev_id);
#endif
    } else if (platform::is_xpu_place(place)) {
#ifndef PADDLE_WITH_XPU
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with XPU support.",
          place));
#else
      auto dev_id = place.device;
      platform::SetXPUDeviceId(dev_id);
#endif
    } else if (platform::is_npu_place(place)) {
#ifndef PADDLE_WITH_ASCEND_CL
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with NPU support.",
          place));
#else
      auto dev_id = place.device;
      platform::SetNPUDeviceId(dev_id);
#endif
    } else if (platform::is_mlu_place(place)) {
#ifndef PADDLE_WITH_MLU
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with MLU support.",
          place));
#else
      auto dev_id = place.device;
      platform::SetMLUDeviceId(dev_id);
#endif
    } else if (platform::is_custom_place(place)) {
#ifndef PADDLE_WITH_CUSTOM_DEVICE
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with CustomDevice support.",
          place));
#else
      phi::DeviceManager::SetDevice(place);
#endif
    }

    {
      // TODO(wangchaochaohu): refine code to use only one RecordEvent.
      // In order to record the cost time of different op types and different
      // op names, we set two events.
      platform::RecordEvent op_type_record_event(
          Type(), platform::TracerEventType::Operator, 1);
      auto op_name = platform::OpName(outputs_, Type());
      platform::RecordEvent op_name_record_event(
          op_name,
          platform::TracerEventType::Operator,
          FLAGS_enable_host_event_recorder_hook ? 20 : 1,
          platform::EventRole::kUniqueOp);
      RunImpl(scope, place);
    }

    VLOG(3) << GetExecutionPlace(place) << " " << DebugStringEx(&scope);
  } catch (platform::EnforceNotMet& exception) {
    framework::InsertCallStackInfo(Type(), Attrs(), &exception);
    throw std::move(exception);
  } catch (platform::EOFException&) {
    std::rethrow_exception(std::current_exception());
  } catch (std::exception& ex) {
    LOG(WARNING) << Type() << " raises an exception "
                 << platform::demangle(typeid(ex).name()) << ", " << ex.what();
    std::rethrow_exception(std::current_exception());
  } catch (...) {
    LOG(WARNING) << Type() << " raises an unknown exception";
    std::rethrow_exception(std::current_exception());
  }
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(
      ins.size(),
      1UL,
      platform::errors::InvalidArgument(
          "Operator %s's input %s should contain only one variable.",
          type_,
          name));
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE_NE(
      it,
      inputs_.end(),
      platform::errors::NotFound(
          "Operator %s does not have the input %s.", type_, name));
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(
      outs.size(),
      1UL,
      platform::errors::InvalidArgument(
          "Operator %s's output %s should contain only one variable.",
          type_,
          name));
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE_NE(
      it,
      outputs_.end(),
      platform::errors::NotFound(
          "Operator %s does not have an output called %s.", type_, name));
  return it->second;
}

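// Builds a human-readable description of the operator; when a scope is given,
// each variable is annotated with its dtype, dims, LoD and place.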
std::string OperatorBase::DebugStringEx(const ScopeBase* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";

  const std::unordered_set<std::string>* no_need_buffer_vars = nullptr;
  if (info_ && info_->NoNeedBufferVarsInferer()) {
    no_need_buffer_vars =
        &(Info().NoNeedBufferVarsInferer()(Inputs(), Outputs(), Attrs()));
    if (no_need_buffer_vars->empty()) no_need_buffer_vars = nullptr;
  }

  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    bool is_no_need_buffer_var =
        (no_need_buffer_vars && no_need_buffer_vars->count(input.first) > 0);
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = is_no_need_buffer_var
                                  ? "unknown_dtype"
                                  : GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDimsDebug(*scope, var_name, true) << "]";
          ss << "(" << GetLoDDebug(*scope, var_name) << ")";
          ss << "(" << GetPlace(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
          ss << "[" << GetDimsDebug(*scope, var_name, true) << "]";
          ss << "(" << GetLoDDebug(*scope, var_name) << ")";
          ss << "(" << GetPlace(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type),
      inputs_(inputs),
      outputs_(outputs),
      attrs_(attrs),
      // NOTE(zjl): why op_info may be nullptr?
      info_(OpInfoMap::Instance().GetNullable(type)) {
  // In dygraph mode, all the OperatorBase will be constructed by function:
  // framework::OpRegistry::CreateOp(type, {}, {}, {}, false).
  // Inputs, outputs and attrs will be set to empty map
  // to improve the execution efficiency of dygraph.
  if (inputs_.size() > 0 || outputs_.size() > 0) {
    GenerateTemporaryNames();
    CheckAllInputOutputSet();
  }
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = Info();

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  if (info_ == nullptr || info_->proto_ == nullptr) return;

  for (auto& in : info_->Proto().inputs()) {
    if (!in.dispensable() && !in.extra()) {
      PADDLE_ENFORCE_NE(
          inputs_.find(in.name()),
          inputs_.end(),
          platform::errors::NotFound(
              "Operator %s's input (%s) is not set.", Type(), in.name()));
    }
  }

  for (auto& out : info_->Proto().outputs()) {
    if (!out.dispensable() && !out.extra()) {
      PADDLE_ENFORCE_NE(
          outputs_.find(out.name()),
          outputs_.end(),
          platform::errors::NotFound(
              "Operator %s's output (%s) is not set.", Type(), out.name()));
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

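// Helpers that expose the Tensor stored inside a Variable, whether it holds a
// LoDTensor or a SelectedRows; any other variable type is an error.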
const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<phi::SelectedRows>()) {
    return &(var.Get<phi::SelectedRows>().value());
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Variable type is %s, expect LoDTensor or SelectedRows.",
        ToTypeName(var.Type())));
  }
}

Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<phi::SelectedRows>()) {
    return var->GetMutable<phi::SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Variable type is %s, expect LoDTensor or SelectedRows.",
        ToTypeName(var->Type())));
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  auto* var = InputVar(name);
  return var != nullptr;
}

bool ExecutionContext::HasInputs(const std::string& name) const {
  const auto& ins = ctx_.inputs;
  auto it = ins.find(name);
  if (it == ins.end() || it->second.empty()) {
    return false;
  }
  for (const auto* input : it->second) {
    if (input == nullptr) {
      return false;
    }
  }
  return true;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  auto* var = OutputVar(name);
  return var != nullptr;
}

const Variable* ExecutionContext::InputVar(const std::string& name) const {
  LogVarUsageIfUnusedVarCheckEnabled(name);

  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(
      it->second.size(),
      1UL,
      platform::errors::InvalidArgument(
          "Operator %s's input %s should contain only one variable.",
          op_.Type(),
          name));
  return it->second.empty() ? nullptr : it->second[0];
}

Variable* ExecutionContext::OutputVar(const std::string& name) const {
  auto it = ctx_.outputs.find(name);
  if (it == ctx_.outputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(
      it->second.size(),
      1UL,
      platform::errors::InvalidArgument(
          "Operator %s's output %s should contain only one variable.",
          op_.Type(),
          name));
  return it->second.empty() ? nullptr : it->second[0];
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  LogVarUsageIfUnusedVarCheckEnabled(name);

  auto vars = MultiInputVar(name);
  if (vars.size() == 0) {
    return {};
  }
  std::vector<const Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(),
                 vars.end(),
                 std::back_inserter(res),
                 [&](const Variable* var) -> const Tensor* {
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE_EQ(var->IsType<LoDTensor>(),
                                     true,
                                     platform::errors::InvalidArgument(
                                         "Input variable should be LoDTensor, "
                                         "but the received type is %s.",
                                         ToTypeName(var->Type())));
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto vars = MultiOutputVar(name);

  if (vars.size() == 0) {
    return {};
  }
  std::vector<Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(),
                 vars.end(),
                 std::back_inserter(res),
                 [&](Variable* var) -> Tensor* {
                   return var == nullptr ? nullptr
                                         : var->GetMutable<LoDTensor>();
                 });
  return res;
}

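// An op supports GPU if a phi kernel with a GPU backend or a fluid OpKernel
// registered on a GPU place exists; ops with no kernels at all are treated as
// control ops and are assumed to run on any place.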
bool OpSupportGPU(const std::string& op_type) {
  // check in new Function kernel first
  bool has_phi_kernel = false;
  auto& kernel_factory = phi::KernelFactory::Instance();
  auto kernel_key_map =
      kernel_factory.SelectKernelMap(phi::TransToPhiKernelName(op_type));
  for (auto& kernel : kernel_key_map) {
    has_phi_kernel = true;
    if (platform::is_gpu_place(phi::TransToPhiPlace(kernel.first.backend()))) {
      return true;
    }
  }

  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it != all_kernels.end()) {
    for (auto& kern_pair : it->second) {
      if (platform::is_gpu_place(kern_pair.first.place_)) {
        return true;
      }
    }
  } else {
    if (has_phi_kernel) {
      // if the op has a phi kernel, but neither a phi GPU kernel nor a fluid
      // GPU kernel is found, this op doesn't support GPU
      return false;
    } else {
      // All control operators must support GPU
      return true;
    }
  }

  return false;
}

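// InferShapeContext implementation used at runtime: shape and type queries
// are answered directly from the Variables cached in the RuntimeContext.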
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const RuntimeContext& ctx)
      : op_(op), ctx_(ctx) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0) return false;
    PADDLE_ENFORCE_EQ(
        in.size(),
        1UL,
        platform::errors::InvalidArgument(
            "Input %s should not contain more than one input.", name));
    return in[0] != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(
        out.size(),
        1UL,
        platform::errors::InvalidArgument(
            "Output %s should not contain more than one output.", name));
    return out[0] != nullptr;
  }

  bool HasAttr(const std::string& name) const override {
    return op_.HasAttr(name);
  }

  bool HasInputs(const std::string& name) const override {
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end() || it->second.empty()) {
      return false;
    }
    for (auto& input : it->second) {
      if (input == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name,
                  bool allow_null = false) const override {
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end() || it->second.empty()) {
      return false;
    }
    if (allow_null) {
      for (auto& output : it->second) {
        if (output != nullptr) return true;
      }
      return false;
    } else {
      for (auto& output : it->second) {
        if (output == nullptr) return false;
757
      }
758
      return true;
759 760 761 762 763
    }
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

H
hong 已提交
764
  std::vector<std::string> Inputs(const std::string& name) const override {
765 766 767
    return op_.Inputs(name);
  }

H
hong 已提交
768
  std::vector<std::string> Outputs(const std::string& name) const override {
769 770 771
    return op_.Outputs(name);
  }

772 773 774
  std::string GetInputNameByIdx(size_t idx) const override {
    auto& op_proto =
        paddle::framework::OpInfoMap::Instance().Get(op_.Type()).proto_;
775 776
    PADDLE_ENFORCE_LT(idx,
                      op_proto->inputs().size(),
777 778 779
                      platform::errors::OutOfRange(
                          "The index should be less than the size of inputs of "
                          "operator %s, but got index is %d and size is %d",
780 781 782
                          op_.Type(),
                          idx,
                          op_proto->inputs().size()));
783 784 785 786 787 788 789
    return op_proto->inputs()[idx].name();
  }

  std::string GetOutputNameByIdx(size_t idx) const override {
    auto& op_proto =
        paddle::framework::OpInfoMap::Instance().Get(op_.Type()).proto_;
    PADDLE_ENFORCE_LT(
        idx,
        op_proto->outputs().size(),
        platform::errors::OutOfRange(
            "The index should be less than the size of outputs of "
            "operator %s, but got index is %d and size is %d",
            op_.Type(),
            idx,
            op_proto->outputs().size()));
    return op_proto->outputs()[idx].name();
  }

  void ShareDim(const std::string& in,
                const std::string& out,
                size_t i = 0,
                size_t j = 0) override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE_NE(
        in_it,
        ctx_.inputs.end(),
        platform::errors::NotFound("Input %s does not exist.", in));
    PADDLE_ENFORCE_NE(
        out_it,
        ctx_.outputs.end(),
        platform::errors::NotFound("Output %s does not exist.", out));
    PADDLE_ENFORCE_LT(i,
                      in_it->second.size(),
                      platform::errors::InvalidArgument(
                          "The index of input dimension is out of range, "
                          "expected index less than %zu, but received %zu.",
                          in_it->second.size(),
                          i));
    PADDLE_ENFORCE_LT(j,
                      out_it->second.size(),
                      platform::errors::InvalidArgument(
                          "The index of output dimension is out of range, "
                          "expected index less than %zu, but received %zu.",
                          out_it->second.size(),
                          j));

    Variable* in_var = in_it->second[i];
    Variable* out_var = out_it->second[j];

    PADDLE_ENFORCE_EQ(
        in_var->Type(),
        out_var->Type(),
        platform::errors::InvalidArgument(
            "The type of input (%s) and output (%s) are inconsistent.",
            in,
            out));

    if (in_var->IsType<phi::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<phi::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<phi::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(platform::errors::Unimplemented(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows."));
    }
  }

  void ShareAllLoD(const std::string& in,
                   const std::string& out) const override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE_NE(in_it,
                      ctx_.inputs.end(),
                      platform::errors::NotFound(
                          "Input [%s] found error in Op [%s]", in, op_.Type()));
    PADDLE_ENFORCE_NE(
        out_it,
        ctx_.outputs.end(),
        platform::errors::NotFound(
            "Output [%s] found error in Op [%s]", out, op_.Type()));

    auto& in_var_list = in_it->second;
    auto& out_var_list = out_it->second;

    PADDLE_ENFORCE_EQ(
        in_var_list.size(),
        out_var_list.size(),
        platform::errors::PreconditionNotMet(
            "Op [%s]: Input var size should be equal to output var size",
            op_.Type()));

    auto& out_var_names = op_.Outputs(out);

    for (size_t i = 0; i < in_var_list.size(); ++i) {
      if (out_var_names[i] == framework::kEmptyVarName) {
        continue;
      }

      Variable* in_var = in_var_list[i];
      if (!in_var->IsType<LoDTensor>()) return;
      Variable* out_var = out_var_list[i];
      PADDLE_ENFORCE_EQ(out_var->IsType<LoDTensor>(),
                        true,
                        platform::errors::PreconditionNotMet(
                            "The %d-th output of Output(%s) must be LoDTensor.",
                            i,
                            out_var_names[i]));
      auto& in_tensor = in_var->Get<LoDTensor>();
      auto* out_tensor = out_var->GetMutable<LoDTensor>();
      out_tensor->set_lod(in_tensor.lod());
#ifdef PADDLE_WITH_MKLDNN
      if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
        out_tensor->set_layout(in_tensor.layout());
    }
  }

  void ShareLoD(const std::string& in,
                const std::string& out,
                size_t i = 0,
                size_t j = 0) const override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE_NE(
        in_it,
        ctx_.inputs.end(),
        platform::errors::NotFound("Input %s does not exist.", in));
    PADDLE_ENFORCE_NE(
        out_it,
        ctx_.outputs.end(),
        platform::errors::NotFound("Output %s does not exist.", out));
    PADDLE_ENFORCE_LT(i,
                      in_it->second.size(),
                      platform::errors::InvalidArgument(
                          "The index of input dimension is out of range, "
                          "expected index less than %zu, but received %zu.",
                          in_it->second.size(),
                          i));
    PADDLE_ENFORCE_LT(j,
                      out_it->second.size(),
                      platform::errors::InvalidArgument(
                          "The index of output dimension is out of range, "
                          "expected index less than %zu, but received %zu.",
                          out_it->second.size(),
                          j));

    Variable* in_var = in_it->second.at(i);
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = out_it->second.at(j);
    PADDLE_ENFORCE_EQ(
        out_var->IsType<LoDTensor>(),
        true,
        platform::errors::InvalidArgument(
            "The %zu-th output of Output(%s) must be LoDTensor.", j, out));
    auto& in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to shared info between in/out Tensor?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPkernel, set_layout(kMKLDNN) should be called
    //    in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  int32_t GetLoDLevel(const std::string& in, size_t i = 0) const override {
    PADDLE_THROW(platform::errors::PreconditionNotMet(
        "GetLoDLevel is only used in compile time. The calculation of "
        "output's actual lod is different among operators so that should be "
        "set in the runtime kernel."));
  }

  void SetLoDLevel(const std::string& out,
                   int32_t lod_level,
                   size_t j = 0) const override {
    PADDLE_THROW(platform::errors::PreconditionNotMet(
        "SetLoDLevel is only used in compile time. The calculation of "
        "output's actual lod is different among operators so that should be "
        "set in the runtime kernel."));
  }

  bool IsRuntime() const override { return true; }

  bool IsRunMKLDNNKernel() const override {
    try {
      auto& op_with_kernel = dynamic_cast<const OperatorWithKernel&>(op_);
      return ((op_with_kernel.kernel_type()) &&
              (op_with_kernel.kernel_type()->data_layout_ ==
               framework::DataLayout::kMKLDNN));
    } catch (const std::bad_cast& exp) {
      return false;
    }
  }

  // TODO(paddle-dev): Can this be template?
  paddle::small_vector<InferShapeVarPtr, phi::kInputSmallVectorSize>
  GetInputVarPtrs(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    paddle::small_vector<InferShapeVarPtr, phi::kInputSmallVectorSize> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  paddle::small_vector<InferShapeVarPtr, phi::kOutputSmallVectorSize>
  GetOutputVarPtrs(const std::string& name) const override {
    const std::vector<Variable*>& vars = OutputVars(name);
    paddle::small_vector<InferShapeVarPtr, phi::kOutputSmallVectorSize> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  DDim GetInputDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    PADDLE_ENFORCE_EQ(
        vars.size(),
        1UL,
        platform::errors::InvalidArgument(
            "Input(%s) should hold one element, but now it holds %zu elements.",
            name,
            vars.size()));
    return this->GetDim(vars[0]);
  }

  std::vector<DDim> GetInputsDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    return GetDims(vars);
  }

  proto::VarType::Type GetInputVarType(const std::string& name) const override {
    return GetVarType(InputVars(name).at(0));
  }

  std::vector<proto::VarType::Type> GetInputsVarType(
      const std::string& name) const override {
    return GetVarTypes(InputVars(name));
  }

  std::vector<proto::VarType::Type> GetOutputsVarType(
      const std::string& name) const override {
    return GetVarTypes(OutputVars(name));
  }

  void SetOutputDim(const std::string& name, const DDim& dim) override {
    auto& vars = OutputVars(name);
    PADDLE_ENFORCE_EQ(
        vars.size(),
        1UL,
        platform::errors::InvalidArgument("Output(%s) should hold one element, "
                                          "but now it holds %zu elements.",
                                          name,
                                          vars.size()));
    SetDim(vars[0], dim);
  }

  void SetOutputsDim(const std::string& name,
                     const std::vector<DDim>& dims) override {
    auto& vars = OutputVars(name);
    SetDims(vars, dims);
  }

  const phi::ArgumentMappingFn* GetPhiArgumentMappingFn() const override {
    return phi::OpUtilsMap::Instance().GetArgumentMappingFn(op_.Type());
  }

  const phi::KernelSignature* GetPhiDefaultKernelSignature() const override {
    return &phi::DefaultKernelSignatureMap::Instance().Get(op_.Type());
  }

 protected:
  DDim GetDim(Variable* var) const {
    PADDLE_ENFORCE_NOT_NULL(
        var, platform::errors::InvalidArgument("Input variable is nullptr."));
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<phi::SelectedRows>()) {
      return var->Get<phi::SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Only LoDTensor or SelectedRows support 'GetDim', but input "
          "Variable's type is %s.",
          ToTypeName(var->Type())));
    }
  }

  std::vector<DDim> GetDims(const std::vector<Variable*>& vars) const {
    std::vector<DDim> ret;
    ret.reserve(vars.size());
    std::transform(vars.begin(),
                   vars.end(),
                   std::back_inserter(ret),
                   [this](Variable* var) { return this->GetDim(var); });
    return ret;
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW(platform::errors::PreconditionNotMet(
        "GetRepeatedDims method can only be used at compile time."));
  }

  void SetDim(Variable* var, const DDim& dim) {
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<phi::SelectedRows>()) {
      var->GetMutable<phi::SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW(platform::errors::Unimplemented(
          "Variable type error, expect LoDTensor or SelectedRows, but received "
          "(%s).",
          ToTypeName(var->Type())));
    }
  }

  void SetDims(const std::vector<Variable*>& vars,
               const std::vector<DDim>& dims) {
    size_t length = vars.size();
    PADDLE_ENFORCE_EQ(length,
                      dims.size(),
                      platform::errors::InvalidArgument(
                          "The number of input variables does not match the "
                          "number of input dimensions, the number of variables "
                          "is %zu, the number of dimensions is %zu.",
                          length,
                          dims.size()));
    for (size_t i = 0; i < length; ++i) {
      if (vars[i] == nullptr) {
        continue;
      }
      SetDim(vars[i], dims[i]);
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW(platform::errors::PreconditionNotMet(
        "SetRepeatedDims method can only be used at compile time."));
  }

  std::vector<proto::VarType::Type> GetVarTypes(
      const std::vector<Variable*>& vars) const {
    std::vector<proto::VarType::Type> retv;
    retv.resize(vars.size());
    std::transform(vars.begin(),
                   vars.end(),
                   retv.begin(),
                   std::bind(std::mem_fn(&RuntimeInferShapeContext::GetVarType),
                             this,
                             std::placeholders::_1));
    return retv;
  }

  proto::VarType::Type GetVarType(Variable* var) const {
    return ToVarType(var->Type());
  }

 private:
  const std::vector<Variable*>& InputVars(const std::string& name) const {
    auto it = ctx_.inputs.find(name);
    PADDLE_ENFORCE_NE(
        it,
        ctx_.inputs.end(),
        platform::errors::NotFound(
            "Operator (%s) does not have the input (%s).", op_.Type(), name));
    return it->second;
  }

  const std::vector<Variable*>& OutputVars(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    PADDLE_ENFORCE_NE(
        it,
        ctx_.outputs.end(),
        platform::errors::NotFound(
            "Operator (%s) does not have the output (%s).", op_.Type(), name));
    return it->second;
  }

  const OperatorBase& op_;
  const RuntimeContext& ctx_;
};

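// Caches the phi KernelContext and the RuntimeInferShapeContext so that they
// can be reused by later runs of the same operator instance.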
struct OperatorWithKernel::CacheImpl {
  explicit CacheImpl(phi::KernelContext* kernel_ctx,
                     RuntimeInferShapeContext* infer_shape_ctx)
      : kernel_ctx_(kernel_ctx), infer_shape_ctx_(infer_shape_ctx) {}

  phi::KernelContext* getKernelContext() { return kernel_ctx_.get(); }
  RuntimeInferShapeContext* getRuntimeInferShapeContext() {
    return infer_shape_ctx_.get();
  }

 private:
  std::unique_ptr<phi::KernelContext> kernel_ctx_;
  std::unique_ptr<RuntimeInferShapeContext> infer_shape_ctx_;
};

static void CheckTensorNANOrInf(const std::string& op_type,
                                const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (framework::TransToProtoVarType(tensor.dtype()) != proto::VarType::FP32 &&
      framework::TransToProtoVarType(tensor.dtype()) != proto::VarType::FP64) {
    return;
  }
  PADDLE_ENFORCE_NE(
      framework::TensorContainsInf(tensor),
      true,
      platform::errors::Fatal(
          "Operator %s output Tensor %s contains Inf.", op_type, name));
  PADDLE_ENFORCE_NE(
      framework::TensorContainsNAN(tensor),
      true,
      platform::errors::Fatal(
          "Operator %s output Tensor %s contains NAN.", op_type, name));
}

bool OperatorWithKernel::SupportGPU() const {
  auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap(
      phi::TransToPhiKernelName(type_));
  auto has_phi_kernel =
      std::any_of(phi_kernels.begin(),
                  phi_kernels.end(),
                  [](phi::KernelKeyMap::const_reference kern_pair) {
                    return kern_pair.first.backend() == phi::Backend::GPU;
                  });
  if (has_phi_kernel) {
    return true;
  } else {
    auto kernel_iter = OperatorWithKernel::AllOpKernels().find(type_);
    if (kernel_iter == OperatorWithKernel::AllOpKernels().end()) {
      return false;
    } else {
      auto& op_kernels = kernel_iter->second;
      return std::any_of(
          op_kernels.begin(),
          op_kernels.end(),
          [](OpKernelMap::const_reference kern_pair) {
            return platform::is_gpu_place(kern_pair.first.place_);
          });
    }
  }
}

bool OperatorWithKernel::SupportNPU() const {
  auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap(
      phi::TransToPhiKernelName(type_));
  auto has_phi_kernel =
      std::any_of(phi_kernels.begin(),
                  phi_kernels.end(),
                  [](phi::KernelKeyMap::const_reference kern_pair) {
                    return kern_pair.first.backend() == phi::Backend::NPU;
                  });
  if (has_phi_kernel) {
    return true;
  } else {
    auto kernel_iter = OperatorWithKernel::AllOpKernels().find(type_);
    if (kernel_iter == OperatorWithKernel::AllOpKernels().end()) {
      return false;
    } else {
      auto& op_kernels = kernel_iter->second;
      return std::any_of(
          op_kernels.begin(),
          op_kernels.end(),
          [](OpKernelMap::const_reference kern_pair) {
            return platform::is_npu_place(kern_pair.first.place_);
          });
    }
  }
}

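// An MKLDNN kernel here means a kernel registered for a CPU place with
// library type kMKLDNN and a matching data type; only the fluid registry is
// consulted.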
bool OperatorWithKernel::SupportsMKLDNN(
    const proto::VarType::Type data_type) const {
  auto op_kernel_iter = OperatorWithKernel::AllOpKernels().find(type_);
  if (op_kernel_iter == OperatorWithKernel::AllOpKernels().end()) {
    VLOG(6) << "Warning: " << type_
            << " does not have an MKLDNN kernel among the Fluid "
               "registered kernels. Kernels in the phi lib are not "
               "searched, so SupportsMKLDNN() returns false.";
    return false;
  }
  auto& op_kernels = op_kernel_iter->second;
  return std::any_of(op_kernels.begin(),
                     op_kernels.end(),
                     [data_type](OpKernelMap::const_reference kern_pair) {
                       return platform::is_cpu_place(kern_pair.first.place_) &&
                              kern_pair.first.library_type_ ==
                                  LibraryType::kMKLDNN &&
                              kern_pair.first.data_type_ == data_type;
                     });
}

bool OperatorWithKernel::CanMKLDNNBeUsed(const framework::ExecutionContext& ctx,
                                         proto::VarType::Type data_type) const {
  const auto& attrs_map = ctx.Attrs();
  auto iter = attrs_map.find("use_mkldnn");
  bool use_mkldnn_ctx = iter != attrs_map.end() &&
                        BOOST_GET_CONST(bool, iter->second) &&
                        platform::is_cpu_place(ctx.GetPlace());
  return use_mkldnn_ctx && this->SupportsMKLDNN(data_type);
}

void OperatorWithKernel::InferShape(InferShapeContext* ctx) const {
  PADDLE_THROW(platform::errors::PermissionDenied(
      "The default InferShape function of OperatorWithKernel is not allowed to "
      "be called, please override corresponding InferShape function in the "
      "specific operator."));
}

void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
                                           const platform::Place& place,
                                           const RuntimeContext& ctx) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, ctx);
  this->Info().infer_shape_(&infer_shape_ctx);
}

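// Top-level RunImpl: builds a RuntimeContext (or reuses the cached one when
// kEnableCacheRuntimeContext is set and the scope is unchanged) and forwards
// to the three-argument RunImpl below.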
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  // To reduce the elapsed time of HasAttr, we use a bool variable to record
  // the result of HasAttr.
  if (!enable_cache_runtime_context_ && HasAttr(kEnableCacheRuntimeContext))
    enable_cache_runtime_context_ = true;
  if (!all_kernels_must_compute_runtime_shape_ &&
      HasAttr(kAllKernelsMustComputeRuntimeShape))
    all_kernels_must_compute_runtime_shape_ = true;
  const Scope* cur_scope = &scope;
  if (!enable_cache_runtime_context_) {
    RuntimeContext ctx(Inputs(), Outputs(), scope);
    RunImpl(scope, place, &ctx);
    pre_scope_ = cur_scope;
  } else {
    if (runtime_ctx_.get() == nullptr || pre_scope_ != cur_scope) {
      std::lock_guard<std::mutex> lock(cache_update_mutex_);
      if (runtime_ctx_.get() == nullptr || pre_scope_ != cur_scope) {
        runtime_ctx_.reset(new RuntimeContext(Inputs(), Outputs(), scope));
        pre_scope_ = cur_scope;
      }
    }
    RunImpl(scope, place, runtime_ctx_.get());
  }
}

void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place,
                                 RuntimeContext* runtime_ctx) const {
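  // Select the kernel to run: a compatible phi kernel is preferred when one
  // is registered for this op type; otherwise the op falls back to the fluid
  // kernel path.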
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

#ifdef PADDLE_WITH_ASCEND_CL
  // NOTE(wangxi): nan/inf cannot be detected on NPU by checking the variable
  // values, but only through special `float_status` to checks whether
  // the operation is overflow. More about `float_status`, see:
  // https://gitee.com/ascend/modelzoo/issues/I3NF8V?from=project-issue
  if (FLAGS_check_nan_inf) {
    framework::details::NPUAllocAndClearFloatStatus(*this, scope, place);
  }
#endif

  auto exe_ctx = ExecutionContext(*this, scope, *dev_ctx, *runtime_ctx);
  // using cache
  if (kernel_type_.get()) {
    dev_ctx = pool.Get(kernel_type_->place_);
  }

// TODO(Liu-xiandong): Now we are using too much if-else and hard code in XPU
// device, it's ugly, and we will refactor in the future.
#if defined(PADDLE_WITH_XPU_KP)
  bool use_phi_xpu_kp = false;
#endif

  // TODO(chenweihang): Now we are still reusing a lot of the original fluid
  // implementation; this is a gradual replacement process.
  // TODO(chenweihang): In the first phase of the project, we only support the
  // CPU, CUDA and ROCM backends; the XPU, NPU and MKLDNN backends will be
  // supported in the second phase.
  phi::KernelKey pt_kernel_key;
  std::string pt_kernel_name;
  if (phi::KernelFactory::Instance().HasCompatiblePhiKernel(type_)) {
    if (kernel_signature_ == nullptr || pt_kernel_ == nullptr) {
      kernel_signature_.reset(new phi::KernelSignature(
          std::move(GetExpectedPhiKernelArgs(exe_ctx))));
      VLOG(6) << *kernel_signature_.get();
1389 1390 1391 1392 1393

      kernel_type_.reset(
          new OpKernelType(std::move(InnerGetExpectedKernelType(exe_ctx))));
      dev_ctx = pool.Get(kernel_type_->place_);

      pt_kernel_name = kernel_signature_->name;
// NOTE(Liu-xiandong): Kernels registered for KP use library_type[KP],
// but the default library_type is Plain, so we need to modify the
// library_type here, otherwise the KP kernel cannot be found.
#ifdef PADDLE_WITH_XPU_KP
      if (paddle::platform::is_xpu_place(kernel_type_->place_)) {
        bool use_xpu_kp_kernel_rt =
            FLAGS_run_kp_kernel &&
            paddle::platform::is_xpu_kp_support_op(type_, *kernel_type_);
        bool use_xpu_kp_kernel_debug =
            paddle::platform::is_in_xpu_kpwhite_list(type_);
        if (use_xpu_kp_kernel_rt) {
          VLOG(3) << "phi xpu_kp using rt mode in static graph";
        }
        if (use_xpu_kp_kernel_debug) {
          VLOG(3) << "phi xpu_kp using debug mode in static graph";
        }
        bool is_xpu_kp_support =
            (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
        if (is_xpu_kp_support) {
          auto expected_kernel_key_library_type = kernel_type_->library_type_;
          kernel_type_->library_type_ = LibraryType::kKP;
          VLOG(3) << "modifing XPU KP kernel in static graph: "
                  << pt_kernel_name
                  << ", using_kernel_key:" << *kernel_type_.get();
          auto try_pt_kernel_key =
              TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
          if (!phi::KernelFactory::Instance().HasKernel(pt_kernel_name,
                                                        try_pt_kernel_key)) {
            kernel_type_->library_type_ = expected_kernel_key_library_type;
            VLOG(3) << "modify XPU KP kernel in static graph: "
                    << pt_kernel_name << " is failed " << *kernel_type_.get();
          } else {
            use_phi_xpu_kp = true;
            VLOG(3) << "modify XPU KP kernel in static graph: "
                    << pt_kernel_name << " is succeed " << *kernel_type_.get();
          }
        }
      }
#endif
      pt_kernel_key = TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
      pt_kernel_.reset(
          new phi::Kernel(phi::KernelFactory::Instance().SelectKernel(
              pt_kernel_name, pt_kernel_key)));

      if (pt_kernel_->IsValid()) {
        VLOG(6) << "Static mode ChoosePhiKernel - kernel name: "
                << pt_kernel_name << " | kernel key: " << pt_kernel_key
                << " | kernel: " << *pt_kernel_;
      } else {
        VLOG(6) << "Static mode ChoosePhiKernel - kernel `" << pt_kernel_name
                << "` not found.";
      }
    } else {
      pt_kernel_name = kernel_signature_->name;
// NOTE(Liu-xiandong): In my ctest runs this branch is never executed;
// I can't understand why, it's really confusing.
// But we still need to keep it to avoid errors.
#ifdef PADDLE_WITH_XPU_KP
      if (paddle::platform::is_xpu_place(kernel_type_->place_)) {
        bool use_xpu_kp_kernel_rt =
            FLAGS_run_kp_kernel &&
            paddle::platform::is_xpu_kp_support_op(type_, *kernel_type_);
        bool use_xpu_kp_kernel_debug =
            paddle::platform::is_in_xpu_kpwhite_list(type_);
        if (use_xpu_kp_kernel_rt) {
          VLOG(3) << "phi xpu_kp using rt mode in static graph";
        }
        if (use_xpu_kp_kernel_debug) {
          VLOG(3) << "phi xpu_kp using debug mode in static graph";
        }
        bool is_xpu_kp_support =
            (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
        if (is_xpu_kp_support) {
          auto expected_kernel_key_library_type = kernel_type_->library_type_;
          kernel_type_->library_type_ = LibraryType::kKP;
          VLOG(3) << "modifing XPU KP kernel in static graph: "
                  << pt_kernel_name
                  << ", using_kernel_key:" << *kernel_type_.get();
          auto try_pt_kernel_key =
              TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
          if (!phi::KernelFactory::Instance().HasKernel(pt_kernel_name,
                                                        try_pt_kernel_key)) {
            kernel_type_->library_type_ = expected_kernel_key_library_type;
            VLOG(3) << "modify XPU KP kernel in static graph: "
                    << pt_kernel_name << " is failed " << *kernel_type_.get();
          } else {
            use_phi_xpu_kp = true;
            VLOG(3) << "modify XPU KP kernel in static graph: "
                    << pt_kernel_name << " is succeed " << *kernel_type_.get();
          }
        }
      }
#endif
      pt_kernel_key = TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
    }

// NOTE(Liu-xiandong): Determine whether the selected kernel is valid.
// If not, use the kernel registered in fluid, and if fluid does not
// contain the related heterogeneous kernel, use the phi CPU kernel.
#if defined(PADDLE_WITH_XPU)
    bool is_xpu_unsupport =
        paddle::platform::is_xpu_place(kernel_type_->place_) &&
            !paddle::platform::is_xpu_support_op(type_, *kernel_type_.get()) ||
        paddle::platform::is_in_xpu_black_list(type_);
#endif
#ifdef PADDLE_WITH_XPU_KP
    bool use_xpu_kp_kernel_rt =
        paddle::platform::is_xpu_place(kernel_type_->place_) &&
        FLAGS_run_kp_kernel &&
        paddle::platform::is_xpu_kp_support_op(type_, *kernel_type_);
    bool use_xpu_kp_kernel_debug =
        paddle::platform::is_xpu_place(kernel_type_->place_) &&
        paddle::platform::is_in_xpu_kpwhite_list(type_);
    bool is_xpu_kp_support = (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
#endif

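    // Decide whether to run the selected phi kernel; if it is invalid or not
    // supported on the target device, fall back to a fluid kernel below, and
    // finally to a phi CPU kernel.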
    if (pt_kernel_->IsValid()
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
        && !is_xpu_unsupport
#endif
#if defined(PADDLE_WITH_XPU_KP)
        && (!is_xpu_unsupport || use_phi_xpu_kp)
#endif
    ) {
      run_phi_kernel_ = true;
    } else {
      auto& all_op_kernels = AllOpKernels();
      auto kernels_iter = all_op_kernels.find(type_);

// NOTE(Liu-xiandong): If we can't find a heterogeneous kernel in phi,
// we need to select the heterogeneous kernel in fluid, but the kernels
// registered for KP use library_type[KP], so we need to modify it.
#ifdef PADDLE_WITH_XPU_KP
      if (is_xpu_kp_support) {
        kernel_type_->library_type_ = LibraryType::kKP;
      }
#endif

      if (kernels_iter == all_op_kernels.end() ||
          kernels_iter->second.find(*kernel_type_.get()) ==
              kernels_iter->second.end()
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
          || is_xpu_unsupport
#endif
#if defined(PADDLE_WITH_XPU_KP)
          || (is_xpu_unsupport && !is_xpu_kp_support)
#endif
      ) {
        auto pt_cpu_kernel_key =
            FallBackToCpu(*kernel_type_.get(), pt_kernel_key, *this);
        pt_kernel_.reset(
            new phi::Kernel(phi::KernelFactory::Instance().SelectKernel(
                pt_kernel_name, pt_cpu_kernel_key)));

        dev_ctx = pool.Get(platform::CPUPlace());
        if (pt_kernel_->IsValid()) {
          VLOG(6) << "Static mode PrepareImpl - kernel name: " << pt_kernel_name
                  << " | kernel key: " << pt_cpu_kernel_key
                  << " | kernel: " << *pt_kernel_;
          run_phi_kernel_ = true;
        }
      }
    }
  }
  if (!run_phi_kernel_) {
    if (kernel_type_.get() == nullptr || kernel_func_.get() == nullptr) {
      ChooseKernel(exe_ctx);
      dev_ctx = pool.Get(kernel_type_->place_);
    }
  }

  // do data transform
  std::vector<std::string> transfered_inplace_vars;
  Scope* transfer_scope = nullptr;
  {
    platform::RecordEvent record_event("prepare_data",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);
    if (need_prepare_data_) {
      transfer_scope = PrepareData(
          scope, *kernel_type_, &transfered_inplace_vars, runtime_ctx);
    }
  }
  // exec scope is the scope that kernel actually executed on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!all_kernels_must_compute_runtime_shape_) {
    platform::RecordEvent record_event("infer_shape",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);
    RuntimeInferShapeContext infer_shape_ctx(*this, *runtime_ctx);
    this->Info().infer_shape_(&infer_shape_ctx);
    record_event.End();
    platform::RecordOpInfoSupplement(
        Type(), Attrs(), infer_shape_ctx, *runtime_ctx);
  }

  if (FLAGS_enable_unused_var_check) {
    GetThreadLocalUsedVarNameSet()->clear();
  }

  // TODO(panyx0718): ExecutionContext should only depend on RuntimeContext
  // not Scope. Imperative mode only pass inputs and get outputs.
  {
    platform::RecordEvent record_event("compute",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);
    if (run_phi_kernel_) {
      phi::KernelContext pt_kernel_context;
      // Do data transform before building KernelContext
      // TODO(zhiqiu): support TransferInplaceVarsBack
      PreparePhiData(exec_scope, *pt_kernel_, *kernel_signature_, runtime_ctx);
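      // If the RuntimeContext is cached and no data preparation is needed,
      // build the phi KernelContext once and reuse it through CacheImpl;
      // otherwise build a fresh KernelContext for this run.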
      if (enable_cache_runtime_context_ && !need_prepare_phi_data_ &&
          !need_prepare_data_) {
        impl_ =
            new CacheImpl(new phi::KernelContext(),
                          new RuntimeInferShapeContext(*this, *runtime_ctx));
        BuildPhiKernelContext(*runtime_ctx, dev_ctx, impl_->getKernelContext());
        (*pt_kernel_)(impl_->getKernelContext());
      } else {
        phi::KernelContext pt_kernel_context;
        // Do data transform before building KernelContext
        // TODO(zhiqiu): support TransferInplaceVarsBack
        BuildPhiKernelContext(*runtime_ctx, dev_ctx, &pt_kernel_context);
        (*pt_kernel_)(&pt_kernel_context);
      }
    } else {
      (*kernel_func_)(
          ExecutionContext(*this, exec_scope, *dev_ctx, *runtime_ctx));
    }
  }

  if (!transfered_inplace_vars.empty()) {
    // there are inplace variables that have been transferred.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  // See [ Why need handle complex gradient to real gradient? ]
  // Only handle the case where the current kernel data type is complex
  if (framework::IsComplexType(kernel_type_->data_type_)) {
    HandleComplexGradToRealGrad(scope, runtime_ctx);
  }

  if (FLAGS_enable_unused_var_check) {
    // skip op that uses mkldnn because it has different memory reuse strategy.
    // use attr here because some GradMakers (like ActivationGradOpMaker) add
    // input when use_mkldnn=true;
    if (!(HasAttr("use_mkldnn") && Attr<bool>("use_mkldnn"))) {
      CheckUnusedVar(*this, scope);
    }
  }

  /*For profiling/benchmark only*/
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
#endif
    VLOG(4) << "Operator(" << Type() << "): context wait and get last error";
  }

  if (FLAGS_check_nan_inf) {
    framework::details::CheckOpHasNanOrInf(*this, exec_scope, place);
  }

  // To solve issue #15032, have a discussion with @Luotao for cpu inference,
  // do not cache transfer scope, hence in this case delete transfer scope
  // after run to avoid memory leak
  if (transfer_scope && !run_by_executor_ && !enable_cache_transfer_scope_) {
    scope.DeleteScope(transfer_scope);
  }
}

OpKernelType OperatorWithKernel::InnerGetExpectedKernelType(
    const ExecutionContext& ctx) const {
  auto expected_kernel_key = this->GetExpectedKernelType(ctx);
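  // When present, the "op_device" attribute overrides the place chosen by
  // GetExpectedKernelType; only "cpu" and "gpu"-style values are handled
  // below.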
  if (HasAttr("op_device")) {
    if (Attr<std::string>("op_device") == "cpu") {
      expected_kernel_key.place_ = platform::CPUPlace();
    } else if (Attr<std::string>("op_device").find("gpu") !=
               std::string::npos) {
      auto device = Attr<std::string>("op_device");
      size_t pos = device.find(':');
      if (pos != std::string::npos) {
        device = device.substr(0, pos);
        LOG_FIRST_N(WARNING, 1)
            << "Device index is only supported under pipeline parallelism, "
            << "so it will be ignored.";
      }
      // when the Op that only has CPUKernel is assigned to GPU, the CPUKernel
      // will be executed and a warning will be given at the same time.
      expected_kernel_key.place_ = platform::CPUPlace();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      if (SupportGPU()) {
        auto& dev_ctx = ctx.device_context();
        expected_kernel_key.place_ = dev_ctx.GetPlace();
      }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
      if (SupportNPU()) {
        auto& dev_ctx = ctx.device_context();
        expected_kernel_key.place_ = dev_ctx.GetPlace();
      }
#endif
      if (platform::is_cpu_place(expected_kernel_key.place_)) {
        LOG_FIRST_N(WARNING, 1)
            << "Op(" << type_
            << ") has no CUDA implementation. It will be assigned to CPUPlace.";
      }
    }
  }
  VLOG(3) << "op type:" << type_
          << ", expected_kernel_key:" << expected_kernel_key;
  return expected_kernel_key;
}

phi::KernelKey OperatorWithKernel::ChoosePhiKernel(
    const ExecutionContext& ctx) const {
  kernel_signature_.reset(
      new phi::KernelSignature(std::move(GetExpectedPhiKernelArgs(ctx))));
  VLOG(6) << *kernel_signature_.get();

  kernel_type_.reset(
      new OpKernelType(std::move(InnerGetExpectedKernelType(ctx))));

  auto pt_kernel_name = kernel_signature_->name;
  auto pt_kernel_key = TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
  pt_kernel_.reset(new phi::Kernel(phi::KernelFactory::Instance().SelectKernel(
      pt_kernel_name, pt_kernel_key)));

  if (pt_kernel_->IsValid()) {
    VLOG(6) << "Static mode ChoosePhiKernel - kernel name: " << pt_kernel_name
            << " | kernel key: " << pt_kernel_key
            << " | kernel: " << *pt_kernel_;
  } else {
    VLOG(6) << "Static mode ChoosePhiKernel - kernel `" << pt_kernel_name
            << "` not found.";
  }
  return pt_kernel_key;
}

void OperatorWithKernel::ChooseKernel(const ExecutionContext& ctx) const {
  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  PADDLE_ENFORCE_NE(
      kernels_iter,
      all_op_kernels.end(),
      platform::errors::Unavailable(
          "There are no kernels which are registered in the %s operator.",
          type_));

  OpKernelMap& kernels = kernels_iter->second;

  auto expected_kernel_key = InnerGetExpectedKernelType(ctx);

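  // Look up the fluid kernel; the #ifdef blocks below fall back to a CPU
  // kernel when no kernel is registered for the expected place or library.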
  auto kernel_iter = kernels.find(expected_kernel_key);

#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: fallbacking to PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif

#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
  if (platform::is_xpu_place(expected_kernel_key.place_) &&
      (kernel_iter == kernels.end() ||
       !paddle::platform::is_xpu_support_op(type_, expected_kernel_key) ||
       paddle::platform::is_in_xpu_black_list(type_))) {
    VLOG(3) << "fluid missing XPU kernel: " << type_
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif

#ifdef PADDLE_WITH_XPU_KP
  if (paddle::platform::is_xpu_place(expected_kernel_key.place_)) {
    bool use_xpu_kp_kernel_rt =
        FLAGS_run_kp_kernel &&
        paddle::platform::is_xpu_kp_support_op(type_, expected_kernel_key);
    bool use_xpu_kp_kernel_debug =
        paddle::platform::is_in_xpu_kpwhite_list(type_);
    if (use_xpu_kp_kernel_rt) {
      VLOG(3) << "fluid xpu_kp using rt mode ";
    }
    if (use_xpu_kp_kernel_debug) {
      VLOG(3) << "fluid xpu_kp using debug mode ";
    }
    bool is_xpu_kp_support = (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
    if (is_xpu_kp_support) {
      auto cache_expected_kernel_key_library_type =
          expected_kernel_key.library_type_;
      expected_kernel_key.library_type_ = LibraryType::kKP;
      kernel_iter = kernels.find(expected_kernel_key);
      // If we can't find the corresponding kernel when is_xpu_kp_support is
      // on, and fluid does not register the related kernel either, it can't
      // work and will raise an error as before.
      if (kernel_iter == kernels.end()) {
        expected_kernel_key.library_type_ =
            cache_expected_kernel_key_library_type;
        expected_kernel_key.place_ = platform::CPUPlace();
        kernel_iter = kernels.find(expected_kernel_key);
      } else {
        VLOG(3) << "fluid using XPU KP kernel: " << type_
                << ", using_kernel_key:" << expected_kernel_key;
      }
    }
    bool is_xpu_unsupport =
        (!paddle::platform::is_xpu_support_op(type_, expected_kernel_key) ||
         paddle::platform::is_in_xpu_black_list(type_));
    if (!is_xpu_kp_support &&
        (kernel_iter == kernels.end() || is_xpu_unsupport)) {
      VLOG(3) << "fluid missing XPU kernel: " << type_
              << ", expected_kernel_key:" << expected_kernel_key
              << ", fallbacking to CPU one!";
      expected_kernel_key.place_ = platform::CPUPlace();
      kernel_iter = kernels.find(expected_kernel_key);
    }
  }
#endif

#ifdef PADDLE_WITH_IPU
  if (kernel_iter == kernels.end() &&
      platform::is_ipu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing IPU kernel: " << type_
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  if (kernel_iter == kernels.end() &&
      platform::is_npu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing NPU kernel: " << type_
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_MLU
  if (kernel_iter == kernels.end() &&
      platform::is_mlu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing MLU kernel: " << type_
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  if (kernel_iter == kernels.end() &&
      platform::is_custom_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing " << expected_kernel_key.place_.GetDeviceType()
            << " kernel: " << type_
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  PADDLE_ENFORCE_NE(
      kernel_iter,
      kernels.end(),
      platform::errors::NotFound("Operator (%s) does not have kernel for %s.",
                                 type_,
                                 KernelTypeToString(expected_kernel_key)));

  std::lock_guard<std::mutex> lock(cache_update_mutex_);
  if (kernel_type_.get() == nullptr || kernel_func_.get() == nullptr) {
    kernel_type_.reset(new OpKernelType(expected_kernel_key));
    kernel_func_.reset(new OpKernelFunc(kernel_iter->second));
  }
}

void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope,
    const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to it's original scope";
    auto* origin_var = scope.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(origin_var,
                            platform::errors::InvalidArgument(
                                "The variable[%s] is nullptr.", var_name));
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(origin_var);
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(var,
                            platform::errors::InvalidArgument(
                                "The variable[%s] is nullptr.", var_name));
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    auto original_dims = original_tensor->dims();
    original_tensor->ShareDataWith(*transformed_tensor);
    // In order to solve the problem that the output dimensions of the NPU
    // reshape operator are not changed when inplace.
    if (type_ != "reshape2" && type_ != "reshape2_grad") {
      original_tensor->Resize(original_dims);
    }
  }
}

void OperatorWithKernel::HandleComplexGradToRealGrad(
    const Scope& scope, RuntimeContext* ctx) const {
  for (auto& var_name_item : Outputs()) {
    std::vector<Variable*>& output_vars = ctx->outputs[var_name_item.first];
    for (size_t i = 0; i < var_name_item.second.size(); ++i) {
      // 1. find grad_var & check whether is complex tensor
      auto var_name = var_name_item.second[i];
      auto orig_var_name = GradOriginalVarName(var_name);
      // only focus on gradient var
      if (var_name == orig_var_name) {
        continue;
      }
      auto* grad_var = output_vars[i];
      // skip nullptr var
      if (grad_var == nullptr) {
        continue;
      }
      // don't process LoDTensorArray temporarily,
      // add support if necessary for complex number calculations in the future
      if (!VarIsTensor(*grad_var)) {
        continue;
      }
      auto* grad_tensor =
          GetMutableLoDTensorOrSelectedRowsValueFromVar(grad_var);
      // skip nullptr tensor
      if (grad_tensor == nullptr || !grad_tensor->IsInitialized()) {
        continue;
      }
      // only focus on complex dtype now
      auto src_type = framework::TransToProtoVarType(grad_tensor->dtype());
      if (!IsComplexType(src_type)) {
        continue;
      }

      // 2. find forward var & check whether need to cast
      auto* var = scope.FindVar(orig_var_name);
      // if forward var not exists, do nothing
      if (var == nullptr) {
        continue;
      }
      if (!VarIsTensor(*var)) {
        continue;
      }
      const auto* tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      PADDLE_ENFORCE_NOT_NULL(
          tensor,
          platform::errors::Unavailable(
              "Forward tensor is nullptr when handle complex data to real."));
      // only need record type, the allocation may have been released
      auto dst_type = framework::TransToProtoVarType(tensor->dtype());
      // only focus on real dtype and need casting
      if (IsComplexType(dst_type)) {
        continue;
      }

      // 3. cast complex grad to real grad
      VLOG(6) << "Transform " << framework::DataTypeToString(src_type)
              << " var `" << var_name << "` to "
              << framework::DataTypeToString(dst_type)
              << " real var in static graph.";
      Tensor out;
      TransComplexToReal(dst_type, src_type, *grad_tensor, &out);
      SetTensorToVariable(*grad_var, out, grad_var);
    }
  }
}

Scope* OperatorWithKernel::PrepareData(
    const Scope& scope,
    const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars,
    RuntimeContext* ctx) const {
  Scope* new_scope = nullptr;

  const std::unordered_set<std::string>* no_buffer_ins = nullptr;
  if (info_) {
    auto& no_buffer_inferer = info_->NoNeedBufferVarsInferer();
    // Some op may not register NoNeedBufferVarsInferer
    if (no_buffer_inferer) {
      no_buffer_ins = &(no_buffer_inferer(Inputs(), Outputs(), Attrs()));
      if (no_buffer_ins->empty()) no_buffer_ins = nullptr;
    }
  }

  for (auto& var_name_item : Inputs()) {
    bool should_skip_input =
        no_buffer_ins && no_buffer_ins->count(var_name_item.first) > 0;

    std::vector<Variable*>& input_vars = ctx->inputs[var_name_item.first];

    for (size_t i = 0; i < var_name_item.second.size(); ++i) {
      auto& var_name = var_name_item.second[i];
      auto* var = input_vars[i];

      // Only a tensor can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);

      // When an input is in no_buffer_ins, checking Tensor::holder_ is
      // not thread safe, and for the infershape scenario the checks
      // that are omitted are not really needed.
      if (should_skip_input == true) {
#ifdef PADDLE_WITH_MKLDNN
        // A Var without a buffer may still be needed
        // in some situations, such as InferShape().
        // In this situation we cannot skip Var analysis, as
        // the MKL-DNN shape of the Var may differ from the kNHWC Var.
        // In such a situation the corresponding resized Var
        // has to be created and registered.
        if ((tensor_in->layout() == DataLayout::kMKLDNN) &&
            (var->IsType<LoDTensor>() == true) &&
            (expected_kernel_key.data_layout_ != DataLayout::kMKLDNN) &&
            (paddle::platform::MKLDNNDeviceContext::tls()
                 .get_cur_paddle_data_layout() == DataLayout::kNHWC) &&
            (tensor_in->dims().size() >= 3)) {
          // Mixed execution : MKL-DNN and GPU is not supported!
          if (!new_scope) {
            new_scope = &scope.NewScope();
          }
          auto* trans_var = new_scope->Var(var_name);
          input_vars[i] = trans_var;
          auto out = trans_var->GetMutable<LoDTensor>();
          out->Resize(tensor_in->dims());
          platform::MatchShapeToLayout(
              out, tensor_in->layout(), DataLayout::kNHWC);
          VLOG(7) << "Created reshaped dummy input based on MKL-DNN Tensor , "
                     "but kNHWC layout"
                  << var_name_item.first << " in Operator " << type_;
        } else {
          VLOG(7) << "Skip scanning input " << var_name_item.first
                  << " in Operator " << type_;
        }
#endif
        continue;
      }

      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      // In the inference scenario, the scopes will be reused across the
      // batches, so the `new_scope` here will result in GPU memory explosion
      // over the running of operators.
      // We use a thread_local cache to fix that issue, the key in the cache is
      // the combination of the `scope` argument, from_kernel_type,
      // target_kernel_type.
      // Have a discussion with @Superjomn or the inference developers if some
      // changes on this logic for this macro might not be tested on the other
      // scenarios.
      // If this op is not called by an Executor or ParallelExecutor, it should
      // be called by a NaiveExecutor; the NaiveExecutor will cache the scopes
      // and variables, and that behaves a lot differently.
      //
      // To solve issue #15032, have a discussion with @Luotao for cpu
      // inference, for all cpu kernels cases without GPU participation, here
      // not do transfer scope caching, and cpu inference performance is not
      // impacted by test.
      enable_cache_transfer_scope_ = false;
      if (!run_by_executor_ &&
          (platform::is_gpu_place(kernel_type_for_var.place_) ||
           platform::is_gpu_place(expected_kernel_key.place_))) {
        new_scope = TryCreateTransferScope(
            kernel_type_for_var, expected_kernel_key, &scope);
        enable_cache_transfer_scope_ = true;
      }
      if (!new_scope) {
        new_scope = &scope.NewScope();
      }
      // For inference, if a gpu model has an op which could only run on CPU,
      // each result of different input will be the same with the first one.
      // The reason is that if a gpu tensor is the input of a cpu kernel,
      // we will create a new cpu tensor in new scope.
      // However, if enable_cache_runtime_context_, we get the cpu tensor each
      // time, not the gpu tensor. Thus, we set pre_scope_ = nullptr
      // to trigger `new RuntimeContext()` in RunImpl().
      if (enable_cache_runtime_context_) {
        pre_scope_ = nullptr;
      }

      // Create new var with the same name in transfer scopes
      auto* trans_var = new_scope->Var(var_name);
      input_vars[i] = trans_var;

      // Find if inplace exists between input and output
      // If inplace exists, set the new created var to inplaced output, and
      // record its name in transfered_inplace_vars.
      for (auto& pair : Outputs()) {
        for (size_t j = 0; j < pair.second.size(); ++j) {
          if (pair.second[j] == var_name) {
            VLOG(4) << "Found inplace between input(" << var_name_item.first
                    << ") and output(" << pair.first
                    << "), the variable name is " << var_name;
            ctx->outputs[pair.first][j] = trans_var;
            transfered_inplace_vars->emplace_back(var_name);
          }
        }
      }

      // Do transfer
Y
yuyang18 已提交
2121
      Tensor out;
Y
yuyang18 已提交
2122
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
Y
yuyang18 已提交
2123 2124 2125
      SetTensorToVariable(*var, out, trans_var);
    }
  }
L
2127 2128 2129 2130 2131 2132
  // If pre_scope = &scope, it means that scope is cached and the op is not in
  // while block. If new_scope = nullptr, it means that for each input of this
  // Op, there is no need to do PrepareData. So PrepareData could be skipped at
  // the rest iterations to save the elapsed time.
  // We do not support skipping PrepareData in while block, because the Op's
  // input may be changed by subsequent Ops, which may cause an error.
W
wenbin 已提交
2133 2134 2135 2136 2137 2138

  // For inference, ops that behind conditional branch aren't supported well,
  // so disable prepare optimization conservatively.
  bool force_prepare_data = HasAttr("inference_force_prepare_data") &&
                            Attr<bool>("inference_force_prepare_data");
  if (pre_scope_ == &scope && new_scope == nullptr && !force_prepare_data) {
2139 2140
    need_prepare_data_ = false;
  }
Y
yuyang18 已提交
2141 2142 2143

  return new_scope;
}
Q
2145
void OperatorWithKernel::ParseInputDataType(
2146 2147
    const Variable* var,
    const std::string& name,
2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166
    proto::VarType::Type* data_type) const {
  if (var != nullptr) {
    const Tensor* t = nullptr;
    if (var->IsType<Tensor>()) {
      t = &var->Get<Tensor>();
    } else if (var->IsType<LoDTensor>()) {
      t = &var->Get<LoDTensor>();
    } else if (var->IsType<phi::SelectedRows>()) {
      t = &(var->Get<phi::SelectedRows>().value());
    } else if (var->IsType<LoDTensorArray>()) {
      auto t_arr = &var->Get<LoDTensorArray>();
      for (size_t j = 0; j < t_arr->size(); j++) {
        if (t_arr->at(j).IsInitialized()) {
          t = &(t_arr->at(j));
        }
      }
    }
    if (t != nullptr) {
      PADDLE_ENFORCE_EQ(
2167 2168
          t->IsInitialized(),
          true,
2169 2170
          platform::errors::InvalidArgument("The %s Op's Input Variable `%s` "
                                            "contains uninitialized Tensor.",
2171 2172
                                            Type(),
                                            name));
2173 2174 2175 2176 2177 2178
      *data_type = paddle::framework::TransToProtoVarType(t->dtype());
    }
  }
}

void OperatorWithKernel::ParseMultiInputDataType(
2179 2180
    const std::vector<Variable*>& vars,
    const std::string& name,
2181
    proto::VarType::Type* data_type) const {
2182
  proto::VarType::Type default_data_type =
2183 2184 2185 2186 2187 2188 2189 2190 2191
      static_cast<proto::VarType::Type>(-1);
  for (size_t i = 0; i < vars.size(); ++i) {
    const Variable* var = vars[i];
    if (var != nullptr) {
      const Tensor* t = nullptr;
      if (var->IsType<Tensor>()) {
        t = &var->Get<Tensor>();
      } else if (var->IsType<LoDTensor>()) {
        t = &var->Get<LoDTensor>();
2192 2193
      } else if (var->IsType<phi::SelectedRows>()) {
        t = &(var->Get<phi::SelectedRows>().value());
2194
      } else if (var->IsType<LoDTensorArray>()) {
2195 2196 2197 2198
        auto t_arr = &var->Get<LoDTensorArray>();
        for (size_t j = 0; j < t_arr->size(); j++) {
          if (t_arr->at(j).IsInitialized()) {
            t = &(t_arr->at(j));
2199 2200
          }
        }
2201 2202
      }
      if (t != nullptr) {
2203
        PADDLE_ENFORCE_EQ(
2204 2205
            t->IsInitialized(),
            true,
2206 2207
            platform::errors::InvalidArgument("The %s Op's Input Variable `%s` "
                                              "contains uninitialized Tensor.",
2208 2209
                                              Type(),
                                              name));
2210 2211
        proto::VarType::Type tmp =
            paddle::framework::TransToProtoVarType(t->dtype());
2212 2213 2214 2215 2216 2217 2218
        PADDLE_ENFORCE(tmp == *data_type || *data_type == default_data_type,
                       platform::errors::InvalidArgument(
                           "The DataType of %s Op's duplicable or different "
                           "slot Variable %s must be "
                           "consistent or reigster GetExpectedKernelType. The "
                           "current variable type is (%s), but the "
                           "previous variable type is (%s).",
2219 2220 2221
                           Type(),
                           name,
                           DataTypeToString(tmp),
2222
                           DataTypeToString(*data_type)));
2223 2224 2225 2226 2227 2228
        *data_type = tmp;
      }
    }
  }
}

proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  proto::VarType::Type dafault_data_type =
      static_cast<proto::VarType::Type>(-1);
  proto::VarType::Type data_type = dafault_data_type;
  for (auto* name : ctx.InNameList()) {
    if (ctx.InputSize(*name) == 1UL) {
      ParseInputDataType(ctx.InputVar(*name), *name, &data_type);
    } else {
      ParseMultiInputDataType(ctx.MultiInputVar(*name), *name, &data_type);
    }
  }
  PADDLE_ENFORCE_NE(
      data_type,
      dafault_data_type,
      platform::errors::NotFound(
          "DataType should be indicated by input Variable at %s.", Type()));
  return data_type;
}

proto::VarType::Type OperatorWithKernel::IndicateVarDataType(
    const ExecutionContext& ctx, const std::string& name) const {
  proto::VarType::Type dafault_data_type =
      static_cast<proto::VarType::Type>(-1);
  proto::VarType::Type data_type = dafault_data_type;
  if (ctx.InputSize(name) == 1UL) {
    ParseInputDataType(ctx.InputVar(name), name, &data_type);
  } else {
    ParseMultiInputDataType(ctx.MultiInputVar(name), name, &data_type);
  }
  PADDLE_ENFORCE_NE(
      data_type,
      dafault_data_type,
      platform::errors::InvalidArgument(
          "The Input Variable(%s) of (%s) Operator used to determine kernel "
          "data type is empty or not LoDTensor or SelectedRows or "
          "LoDTensorArray.",
          name,
          Type()));
  return data_type;
}

Tensor* OperatorWithKernel::GetTensorFormInputSafely(
    const ExecutionContext& ctx, const std::string& name) const {
  // 1. get variable and check
  // NOTE: only supports a single input var now
  // NOTE: using const_cast here because we don't have a method
  // that can get a single mutable var, and we will not change
  // the var's data here, only use some of its attributes
  Variable* var = const_cast<Variable*>(ctx.InputVar(name));
  PADDLE_ENFORCE_NOT_NULL(
      var,
      platform::errors::NotFound(
          "The variable %s is not found when promote complex types.", name));
  // 2. get tensor and check
  Tensor* t = nullptr;
  if (var->IsType<Tensor>()) {
    t = var->GetMutable<Tensor>();
  } else if (var->IsType<LoDTensor>()) {
    t = var->GetMutable<LoDTensor>();
  } else if (var->IsType<phi::SelectedRows>()) {
    t = var->GetMutable<phi::SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Unsupported input variable type in complex type promotion."));
  }
  PADDLE_ENFORCE_NOT_NULL(
      t,
      platform::errors::InvalidArgument(
          "The Tensor of variable %s is nullptr when promote complex types."));
  PADDLE_ENFORCE_EQ(t->IsInitialized(),
                    true,
                    platform::errors::InvalidArgument(
                        "The Tensor in the %s Op's Input Variable %s(%s) is "
                        "not initialized.",
                        Type(),
                        name,
                        ctx.InputName(name)));
  return t;
}

/** NOTE(chenweihang): For safety reasons, we now only
 * perform type promotes for binary operations with
 * complex type inputs, which is used to support the
 * paddle quantum function.
 * In other cases, the first input data type is used as
 * the kernel data type.
 */
proto::VarType::Type OperatorWithKernel::IndicateOrPromoteVarDataTypes(
    const ExecutionContext& ctx,
    const std::string& name1,
    const std::string& name2) const {
  // 1. Get tensor
  auto* tensor_a = GetTensorFormInputSafely(ctx, name1);
  auto* tensor_b = GetTensorFormInputSafely(ctx, name2);

  // 2. Get two input types
  auto type_a = framework::TransToProtoVarType(tensor_a->dtype());
  auto type_b = framework::TransToProtoVarType(tensor_b->dtype());

  // 3. Get first input type or promote complex types
  auto target_type = PromoteTypesIfComplexExists(type_a, type_b);

  return target_type;
}

OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name,
    const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(
      expected_kernel_type.data_type_, tensor.place(), tensor.layout());
}

phi::KernelSignature OperatorWithKernel::GetExpectedPhiKernelArgs(
    const ExecutionContext& ctx) const {
  ExecutionArgumentMappingContext arg_mapping_ctx(ctx);
  if (arg_map_fn_ == nullptr) {
    auto* arg_map_fn = phi::OpUtilsMap::Instance().GetArgumentMappingFn(type_);
    if (arg_map_fn) {
      arg_map_fn_.reset(new phi::ArgumentMappingFn(*arg_map_fn));
    } else {
      auto func =
          [this](
              const phi::ArgumentMappingContext& ctx) -> phi::KernelSignature {
        return phi::DefaultKernelSignatureMap::Instance().Get(type_);
      };
      arg_map_fn_.reset(new phi::ArgumentMappingFn(func));
    }
  }
  return (*arg_map_fn_)(arg_mapping_ctx);
}

Scope* OperatorWithKernel::PreparePhiData(
    const Scope& scope,
    const phi::Kernel& pt_kernel,
    const phi::KernelSignature& pt_kernel_signature,
    RuntimeContext* ctx) const {
  const auto& input_names = pt_kernel_signature.input_names;
  auto input_defs = pt_kernel.args_def().input_defs();
  PADDLE_ENFORCE_EQ(input_names.size(),
                    input_defs.size(),
                    platform::errors::InvalidArgument(
                        "The size of inputs_args names (%d) must be equal to "
                        "the size of kernel input_defs (%d).",
                        input_names.size(),
                        input_defs.size()));
  Scope* new_scope = nullptr;
  auto& name_map = Inputs();
  const std::unordered_set<std::string>* no_buffer_ins = nullptr;
  if (info_) {
    auto& no_buffer_inferer = info_->NoNeedBufferVarsInferer();
    // Some op may not register NoNeedBufferVarsInferer
    if (no_buffer_inferer) {
      no_buffer_ins = &(no_buffer_inferer(Inputs(), Outputs(), Attrs()));
      if (no_buffer_ins->empty()) no_buffer_ins = nullptr;
    }
  }

  for (size_t i = 0; i < input_defs.size(); ++i) {
    auto& in_def = input_defs.at(i);
    if (ctx->inputs.find(input_names[i]) == ctx->inputs.end()) {
      continue;
    }
    auto& ins_vector = ctx->inputs.at(input_names[i]);
    auto& name_vec = name_map.at(input_names[i]);
    bool should_skip_input =
        no_buffer_ins && no_buffer_ins->count(input_names[i]) > 0;

    for (size_t offset = 0; offset < ins_vector.size(); ++offset) {
      // Only a tensor can be transferred to another device.
      auto* var = ins_vector[offset];
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }
      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);

      // When an input is in no_buffer_ins, checking Tensor::holder_ is
      // not thread safe, and for the infershape scenario the checks
      // that are omitted are not really needed.
      if (should_skip_input == true) {
        // TODO(YuanRisheng) : There need to supplement MKLDNN code later
        continue;
      }

      if (!tensor_in->IsInitialized()) {
        continue;
      }

      if (in_def.backend == phi::Backend::ALL_BACKEND) {
        continue;
      }

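      // Transfer this input only when its backend differs from the kernel's
      // expected backend; GPUDNN kernels accept plain GPU tensors.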
      auto tensor_backend = phi::TransToPhiBackend(tensor_in->place());
      if (in_def.backend == tensor_backend ||
          (in_def.backend == phi::Backend::GPUDNN &&
           tensor_backend == phi::Backend::GPU)) {
        continue;
      }

      auto expected_place = phi::TransToPhiPlace(in_def.backend);
      VLOG(3) << "phi Transform Variable " << input_names[i] << " from "
              << tensor_in->place() << " to " << expected_place;

      if (!new_scope) {
        new_scope = &scope.NewScope();
      }
      // For inference, if a gpu model has an op which could only run on CPU,
      // each result of different input will be the same with the first one.
      // The reason is that if a gpu tensor is the input of a cpu kernel,
      // we will create a new cpu tensor in new scope.
      // However, if enable_cache_runtime_context_, we get the cpu tensor each
      // time, not the gpu tensor. Thus, we set pre_scope_ = nullptr
      // to trigger `new RuntimeContext()` in RunImpl().
      if (enable_cache_runtime_context_) {
        pre_scope_ = nullptr;
      }

      // Create new var with the same name in transfer scopes
      auto* trans_var = new_scope->Var(name_vec[offset]);
      ins_vector[offset] = trans_var;

      // Do transfer
      Tensor out;
      framework::TensorCopySync(*tensor_in, expected_place, &out);
      SetTensorToVariable(*var, out, trans_var);

      need_prepare_phi_data_ = true;
    }
  }

  return new_scope;
}

void OperatorWithKernel::BuildPhiKernelContext(
    const RuntimeContext& ctx,
    platform::DeviceContext* dev_ctx,
    phi::KernelContext* pt_kernel_context) const {
  pt_kernel_context->SetDeviceContext(dev_ctx);

  auto& input_names = kernel_signature_->input_names;
  auto& attr_names = kernel_signature_->attr_names;
  auto& output_names = kernel_signature_->output_names;

  auto input_defs = pt_kernel_->args_def().input_defs();
  auto attr_defs = pt_kernel_->args_def().attribute_defs();
  auto output_defs = pt_kernel_->args_def().output_defs();

  PADDLE_ENFORCE_EQ(input_names.size(),
                    input_defs.size(),
                    platform::errors::InvalidArgument(
                        "The size of inputs_args names (%d) must be equal to "
                        "the size of kernel input_defs (%d).",
                        input_names.size(),
                        input_defs.size()));

  PADDLE_ENFORCE_EQ(output_names.size(),
                    output_defs.size(),
                    platform::errors::InvalidArgument(
                        "The size of outputs_args names (%d) must be equal to "
                        "the size of kernel output_defs (%d).",
                        output_names.size(),
                        output_defs.size()));

  PADDLE_ENFORCE_EQ(attr_names.size(),
                    attr_defs.size(),
                    platform::errors::InvalidArgument(
                        "The size of attribute_args names (%d) must be equal "
                        "to the size of kernel attribute_defs (%d).",
                        attr_names.size(),
                        attr_defs.size()));

  for (size_t i = 0; i < input_names.size(); ++i) {
    auto it = ctx.inputs.find(input_names[i]);

    // calculate the start and end index of the input tensors
    size_t start_idx =
        (i == 0 ? 0 : pt_kernel_context->InputRangeAt(i - 1).second);
    // deal with optional here
    if ((it == ctx.inputs.end() || it->second.size() == 0) &&
        (input_defs[i].type_index ==
             std::type_index(typeid(paddle::optional<phi::DenseTensor>)) ||
         input_defs[i].type_index ==
             std::type_index(typeid(paddle::optional<phi::SelectedRows>)) ||
         input_defs[i].type_index ==
             std::type_index(typeid(
                 paddle::optional<std::vector<const phi::DenseTensor*>>)))) {
      pt_kernel_context->EmplaceBackInputWithoutSetRange(nullptr);
      auto end_idx = start_idx + 1;
      pt_kernel_context->AssignInputRange(std::make_pair(start_idx, end_idx),
                                          i);

      continue;
    }
    auto ins_vector = it->second;
    size_t end_idx = start_idx + ins_vector.size();
    for (size_t offset = 0; offset < ins_vector.size(); ++offset) {
      const phi::TensorBase* tensor_in = nullptr;
      auto* var = ins_vector[offset];
      if (var->IsType<framework::LoDTensor>()) {
        tensor_in = &(var->Get<framework::LoDTensor>());
        pt_kernel_context->EmplaceBackInputWithoutSetRange(tensor_in);
      } else if (var->IsType<phi::SelectedRows>()) {
        tensor_in = &(var->Get<phi::SelectedRows>());
        pt_kernel_context->EmplaceBackInputWithoutSetRange(tensor_in);
      } else if (var->IsType<framework::LoDTensorArray>()) {
        need_prepare_phi_data_ = true;
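        // A LoDTensorArray is flattened into one TensorBase input per element,
        // so the end index of this input's range is extended accordingly.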
        paddle::small_vector<const phi::TensorBase*> tensor_vector;
        auto& tensor_array = var->Get<framework::LoDTensorArray>();
        for (auto& t : tensor_array) {
          tensor_vector.emplace_back(&t);
        }
        pt_kernel_context->EmplaceBackInputsWithoutSetRange(tensor_vector);
        end_idx += tensor_array.size() - 1;
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "Unsupported input `%s` type when call pt kernel.",
            framework::ToTypeName(var->Type())));
      }
    }
    // Note: vector<LoDTensorArray> inputs cannot be handled here
    pt_kernel_context->AssignInputRange(std::make_pair(start_idx, end_idx), i);
  }
  VLOG(4) << "Done inputs";

  for (size_t i = 0; i < output_names.size(); ++i) {
    auto it = ctx.outputs.find(output_names[i]);
    size_t start_idx =
        (i == 0 ? 0 : pt_kernel_context->OutputRangeAt(i - 1).second);

    if (it == ctx.outputs.end() || it->second.empty()) {
      // Deal with the case that some outputs are not found or be NULL when run
      // the kernel.
      // For example : the outputs of matmul_grad are dx and dy,
      // sometimes dx or dy may be NULL.
      pt_kernel_context->EmplaceBackOutputWithoutSetRange(nullptr);
      auto end_idx = start_idx + 1;
      pt_kernel_context->AssignOutputRange(std::make_pair(start_idx, end_idx),
                                           i);
      continue;
    }
    auto& outs_vector = it->second;

    size_t end_idx = start_idx + outs_vector.size();

    for (size_t offset = 0; offset < outs_vector.size(); ++offset) {
      phi::TensorBase* tensor_out = nullptr;
      auto* var = outs_vector[offset];
      if (var) {
        if (var->template IsType<framework::LoDTensor>()) {
          tensor_out = var->template GetMutable<framework::LoDTensor>();
          pt_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
        } else if (var->template IsType<phi::SelectedRows>()) {
          tensor_out = var->template GetMutable<phi::SelectedRows>();
          pt_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
        } else if (var->template IsType<framework::LoDTensorArray>()) {
          paddle::small_vector<phi::TensorBase*> tensor_vector;
          auto* tensor_array =
              var->template GetMutable<framework::LoDTensorArray>();
          // Note: If the input LoDTensorArray size is 0, the output
          // LoDTensorArray is also 0
          for (auto& t : *tensor_array) {
            tensor_vector.emplace_back(&t);
          }
          pt_kernel_context->EmplaceBackOutputsWithoutSetRange(tensor_vector);
          end_idx += tensor_array->size() - 1;
        } else {
          PADDLE_THROW(platform::errors::Unimplemented(
              "Unsupported output `%s` type when call pt kernel.",
              framework::ToTypeName(var->Type())));
        }
      } else {
        pt_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
      }
    }
    pt_kernel_context->AssignOutputRange(std::make_pair(start_idx, end_idx), i);
  }
  VLOG(4) << "Done outputs";

  for (size_t i = 0; i < attr_names.size(); ++i) {
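    // Each kernel attribute is either read from the op's attribute map or, for
    // Scalar/IntArray attributes, taken from an input variable at runtime.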
    VLOG(6) << "BuildPhiKernelContext: " << attr_names[i] << ": "
            << attr_defs[i].type_index;
    auto attr_iter = Attrs().find(attr_names[i]);
    switch (attr_defs[i].type_index) {
      case phi::AttributeType::SCALAR:
        if (attr_iter != Attrs().end()) {
          // scalar is in the attribute
          switch (AttrTypeID(attr_iter->second)) {
            case proto::AttrType::FLOAT:
              pt_kernel_context->EmplaceBackAttr(std::move(
                  phi::Scalar(BOOST_GET_CONST(float, attr_iter->second))));
              break;
            case proto::AttrType::INT:
              pt_kernel_context->EmplaceBackAttr(std::move(
                  phi::Scalar(BOOST_GET_CONST(int, attr_iter->second))));
              break;
            case proto::AttrType::STRING:
              pt_kernel_context->EmplaceBackAttr(std::move(phi::Scalar(
                  BOOST_GET_CONST(std::string, attr_iter->second))));
              break;
            default:
              PADDLE_THROW(platform::errors::Unimplemented(
                  "Unsupported cast op attribute `%s` to Scalar when construct "
                  "KernelContext in dygraph.",
                  attr_names[i]));
          }
        } else {  // scalar is in the input
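          // The Scalar value is carried by an input variable rather than an
          // attribute, so phi data preparation will be required at run time.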
          need_prepare_phi_data_ = true;
          auto& ins_vector = ctx.inputs.at(attr_names[i]);
          pt_kernel_context->EmplaceBackAttr(std::move(
              experimental::MakePhiScalarFromVar(*ins_vector.front())));
        }
        break;
      case phi::AttributeType::INT_ARRAY:
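        // An IntArray (typically a shape) may be provided either as a plain
        // attribute or through ShapeTensor / ShapeTensorList inputs.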
        if (attr_iter != Attrs().end()) {
          switch (AttrTypeID(attr_iter->second)) {
            case proto::AttrType::INTS:
              pt_kernel_context->EmplaceBackAttr(std::move(phi::IntArray(
                  BOOST_GET_CONST(std::vector<int32_t>, attr_iter->second))));
              break;
            case proto::AttrType::LONGS:
              pt_kernel_context->EmplaceBackAttr(std::move(phi::IntArray(
                  BOOST_GET_CONST(std::vector<int64_t>, attr_iter->second))));
              break;
            case proto::AttrType::INT:
              pt_kernel_context->EmplaceBackAttr(std::move(phi::IntArray(
                  &BOOST_GET_CONST(int32_t, attr_iter->second), 1)));
              break;
            case proto::AttrType::LONG:
              pt_kernel_context->EmplaceBackAttr(std::move(phi::IntArray(
                  &BOOST_GET_CONST(int64_t, attr_iter->second), 1)));
              break;
            default:
              PADDLE_THROW(platform::errors::Unimplemented(
                  "Unsupported cast op attribute `%s` to IntArray when "
                  "construct KernelContext.",
                  attr_names[i]));
          }
        } else {  // shape is in the input
          need_prepare_phi_data_ = true;
          auto& ins_vector = ctx.inputs.at(attr_names[i]);
          if (ins_vector.size() == 1) {  // ShapeTensor
            pt_kernel_context->EmplaceBackAttr(std::move(
                experimental::MakePhiIntArrayFromVar(*ins_vector.front())));
          } else {  // ShapeTensorList
            pt_kernel_context->EmplaceBackAttr(std::move(
                experimental::MakePhiIntArrayFromVarList(ins_vector)));
          }
        }
        break;
      case phi::AttributeType::SCALARS: {
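        // A vector<Scalar> attribute has no input fallback, so it must be
        // present in the AttributeMap.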
        PADDLE_ENFORCE_NE(
            attr_iter,
            Attrs().end(),
            platform::errors::NotFound("(%s) is not found in AttributeMap when "
                                       "buildind static KernelContext.",
                                       attr_names[i]));
        switch (AttrTypeID(attr_iter->second)) {
          case proto::AttrType::INTS: {
            const auto& vec =
                BOOST_GET_CONST(std::vector<int32_t>, attr_iter->second);
            std::vector<phi::Scalar> scalar_list;
            scalar_list.reserve(vec.size());
            for (const auto& val : vec) {
              scalar_list.emplace_back(val);
            }
            pt_kernel_context->EmplaceBackAttr(std::move(scalar_list));
          } break;
          case proto::AttrType::LONGS: {
            const auto& vec =
                BOOST_GET_CONST(std::vector<int64_t>, attr_iter->second);
            std::vector<phi::Scalar> scalar_list;
            scalar_list.reserve(vec.size());
            for (const auto& val : vec) {
              scalar_list.emplace_back(val);
            }
            pt_kernel_context->EmplaceBackAttr(std::move(scalar_list));
          } break;
          case proto::AttrType::FLOATS: {
            const auto& vec =
                BOOST_GET_CONST(std::vector<float>, attr_iter->second);
            std::vector<phi::Scalar> scalar_list;
            scalar_list.reserve(vec.size());
            for (const auto& val : vec) {
              scalar_list.emplace_back(val);
            }
            pt_kernel_context->EmplaceBackAttr(std::move(scalar_list));
          } break;
          case proto::AttrType::FLOAT64S: {
            const auto& vec =
                BOOST_GET_CONST(std::vector<double>, attr_iter->second);
            std::vector<phi::Scalar> scalar_list;
            scalar_list.reserve(vec.size());
            for (const auto& val : vec) {
              scalar_list.emplace_back(val);
            }
            pt_kernel_context->EmplaceBackAttr(std::move(scalar_list));
          } break;
          case proto::AttrType::BOOLEANS: {
            const auto& vec =
                BOOST_GET_CONST(std::vector<bool>, attr_iter->second);
            std::vector<phi::Scalar> scalar_list;
            scalar_list.reserve(vec.size());
            for (const auto& val : vec) {
              scalar_list.emplace_back(val);
            }
            pt_kernel_context->EmplaceBackAttr(std::move(scalar_list));
          } break;
          default:
            PADDLE_THROW(platform::errors::Unimplemented(
                "Unsupported cast op attribute `%s` to vector<Scalar> when "
                "construct KernelContext.",
                attr_names[i]));
        }
      } break;
      default: {
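        // The remaining attribute kinds are mapped directly from the fluid
        // attribute value, so the attribute must exist in the AttributeMap.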
        PADDLE_ENFORCE_NE(
            attr_iter,
            Attrs().end(),
            platform::errors::NotFound("(%s) is not found in AttributeMap when "
                                       "buildind static KernelContext.",
                                       attr_names[i]));
        switch (attr_defs[i].type_index) {
          case phi::AttributeType::FLOAT32:
            pt_kernel_context->EmplaceBackAttr(
                BOOST_GET_CONST(float, attr_iter->second));
            break;
          case phi::AttributeType::INT32:
            pt_kernel_context->EmplaceBackAttr(
                BOOST_GET_CONST(int, attr_iter->second));
            break;
          case phi::AttributeType::BOOL:
            pt_kernel_context->EmplaceBackAttr(
                BOOST_GET_CONST(bool, attr_iter->second));
            break;
          case phi::AttributeType::INT64:
            pt_kernel_context->EmplaceBackAttr(
                BOOST_GET_CONST(int64_t, attr_iter->second));
            break;
          case phi::AttributeType::INT32S:
            pt_kernel_context->EmplaceBackAttr(
                BOOST_GET_CONST(std::vector<int>, attr_iter->second));
            break;
          case phi::AttributeType::DATA_TYPE: {
            auto data_type = framework::TransToPhiDataType(
                static_cast<framework::proto::VarType::Type>(
                    BOOST_GET_CONST(int, attr_iter->second)));
            pt_kernel_context->EmplaceBackAttr(data_type);
          } break;
          case phi::AttributeType::STRING:
            pt_kernel_context->EmplaceBackAttr(
                std::move(BOOST_GET_CONST(std::string, attr_iter->second)));
            break;
          case phi::AttributeType::INT64S:
            switch (AttrTypeID(attr_iter->second)) {
              case proto::AttrType::LONGS:
                pt_kernel_context->EmplaceBackAttr(
                    BOOST_GET_CONST(std::vector<int64_t>, attr_iter->second));
                break;
              case proto::AttrType::INTS: {
                const auto& vector_int_attr =
                    BOOST_GET_CONST(std::vector<int>, attr_iter->second);
                const std::vector<int64_t> vector_int64_attr(
                    vector_int_attr.begin(), vector_int_attr.end());
                pt_kernel_context->EmplaceBackAttr(vector_int64_attr);
              } break;
              default:
                PADDLE_THROW(platform::errors::Unimplemented(
                    "Unsupported cast op attribute `%s` to vector<int64_t> "
                    "when "
                    "construct KernelContext.",
                    attr_names[i]));
            }
            break;
          case phi::AttributeType::FLOAT32S:
            pt_kernel_context->EmplaceBackAttr(
                BOOST_GET_CONST(std::vector<float>, attr_iter->second));
            break;
          case phi::AttributeType::STRINGS:
            pt_kernel_context->EmplaceBackAttr(
                BOOST_GET_CONST(std::vector<std::string>, attr_iter->second));
            break;
          default:
            PADDLE_THROW(platform::errors::Unimplemented(
                "Unsupported cast op attribute `%s` when construct "
                "KernelContext in dygraph.",
                attr_names[i]));
        }
      }
    }
  }
  VLOG(4) << "Done attributes";
}

}  // namespace framework
}  // namespace paddle