/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/operator.h"

#include <glog/logging.h>

#include <sstream>
#include <string>
#include <unordered_set>

#include "gflags/gflags.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/framework/op_call_stack.h"
#include "paddle/fluid/framework/phi_utils.h"
#include "paddle/fluid/framework/raw_tensor.h"
#include "paddle/fluid/framework/transfer_scope_cache.h"
#include "paddle/fluid/framework/unused_var_check.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/operators/isfinite_op.h"
#include "paddle/fluid/operators/ops_extra_info.h"
#include "paddle/fluid/platform/device/device_wrapper.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/fluid/platform/profiler/supplement_tracing.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/core/compat/get_kerneltype_forvar_utils.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/flags.h"
#include "paddle/phi/core/kernel_context.h"
#include "paddle/phi/core/kernel_factory.h"
#include "paddle/phi/ops/compat/signatures.h"

namespace phi {
class DenseTensor;
}  // namespace phi

#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_info.h"
#include "paddle/fluid/platform/device/xpu/xpu_op_list.h"
#endif

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#include "paddle/fluid/platform/mkldnn_op_list.h"
#endif

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#endif

DECLARE_bool(benchmark);
PHI_DECLARE_bool(check_nan_inf);
DECLARE_bool(enable_unused_var_check);
PHI_DECLARE_bool(run_kp_kernel);
PHI_DECLARE_bool(enable_host_event_recorder_hook);

namespace paddle {
namespace framework {

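// Kernel search priority: cuDNN on GPU first, then a plain GPU kernel, then
// MKLDNN on CPU, then a plain CPU kernel.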
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

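// The Get*Debug helpers below look a variable up in the scope and return
// printable information for DebugStringEx(). They return sentinel values
// (e.g. DDim({-1}), "", "uninited") instead of throwing when the variable is
// missing or uninitialized.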
static DDim GetDimsDebug(const Scope& scope,
                         const std::string& name,
                         bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<phi::DenseTensor>()) {
    const phi::DenseTensor& tensor = var->Get<phi::DenseTensor>();
    return tensor.dims();
  } else if (var->IsType<phi::SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<phi::SelectedRows>().value().dims();
    } else {
      return var->Get<phi::SelectedRows>().GetCompleteDims();
    }
  } else if (var->IsType<Strings>()) {
    return DDim({static_cast<int64_t>(var->Get<Strings>().size())});
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<phi::DenseTensor>()) {
    const phi::DenseTensor& tensor = var->Get<phi::DenseTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(framework::TransToProtoVarType(tensor.dtype()));
  } else if (var->IsType<phi::SelectedRows>()) {
    auto tensor = var->Get<phi::SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(framework::TransToProtoVarType(tensor.dtype()));
    }
  } else if (var->IsType<Strings>()) {
    return "strings";
  } else {
    return "";
  }
}

static std::string GetPlace(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }
  auto to_string = [](const platform::Place& p) {
    std::stringstream sstream;
    sstream << p;
    return sstream.str();
  };

  if (var->IsType<phi::DenseTensor>()) {
    const phi::DenseTensor& tensor = var->Get<phi::DenseTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return to_string(tensor.place());
  } else if (var->IsType<phi::SelectedRows>()) {
    auto tensor = var->Get<phi::SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return to_string(tensor.place());
    }
  } else {
    return "";
  }
}

static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<phi::SelectedRows>()) {
    return var->Get<phi::SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoDDebug(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<phi::DenseTensor>()) {
    const phi::DenseTensor& tensor = var->Get<phi::DenseTensor>();
    return tensor.lod();
  } else {
    return default_lod;
  }
}

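// RuntimeContext resolves every input/output variable name to its Variable*
// once, so kernels do not have to repeat scope lookups during execution.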
RuntimeContext::RuntimeContext(const VariableNameMap& innames,
                               const VariableNameMap& outnames,
                               const Scope& scope) {
  for (auto& var_name_item : innames) {
    std::vector<Variable*>& input_vars = inputs[var_name_item.first];
    input_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      input_vars.push_back(scope.FindVar(var_name));
    }
  }
  for (auto& var_name_item : outnames) {
    std::vector<Variable*>& output_vars = outputs[var_name_item.first];
    output_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      output_vars.push_back(scope.FindVar(var_name));
    }
  }
}

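// RuntimeInferShapeContext adapts an OperatorBase plus its RuntimeContext to
// the InferShapeContext interface, so the same InferShape functions can run
// at execution time against the live variables.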
RuntimeInferShapeContext::RuntimeInferShapeContext(const OperatorBase& op,
                                                   const RuntimeContext& ctx)
    : op_(op), ctx_(ctx) {}

bool RuntimeInferShapeContext::HasInput(const std::string& name) const {
  // has only one input
  const auto& ins = ctx_.inputs;
  auto it = ins.find(name);
  if (it == ins.end()) {
    return false;
  }
  const auto& in = it->second;
  if (in.size() == 0) return false;
  PADDLE_ENFORCE_EQ(
      in.size(),
      1UL,
      platform::errors::InvalidArgument(
          "Input %s should not contain more than one inputs.", name));
  return in[0] != nullptr;
}

bool RuntimeInferShapeContext::HasOutput(const std::string& name) const {
  // has only one output
  const auto& outs = ctx_.outputs;
  auto it = outs.find(name);
  if (it == outs.end()) {
    return false;
  }
  const auto& out = it->second;
  if (out.size() == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(
      out.size(),
      1UL,
      platform::errors::InvalidArgument(
          "Output %s should not contain more than one outputs.", name));
  return out[0] != nullptr;
}

bool RuntimeInferShapeContext::HasAttr(const std::string& name) const {
  return op_.HasAttr(name);
}

bool RuntimeInferShapeContext::HasInputs(const std::string& name) const {
  const auto& ins = ctx_.inputs;
  auto it = ins.find(name);
  if (it == ins.end() || it->second.empty()) {
    return false;
  }
  for (auto& input : it->second) {
    if (input == nullptr) {
      return false;
    }
  }
  return true;
}

bool RuntimeInferShapeContext::HasOutputs(const std::string& name,
                                          bool allow_null) const {
  const auto& outs = ctx_.outputs;
  auto it = outs.find(name);
  if (it == outs.end() || it->second.empty()) {
    return false;
  }
  if (!allow_null) {
    for (auto& output : it->second) {
      if (output == nullptr) return false;
    }
  }
  return true;
}

AttrReader RuntimeInferShapeContext::Attrs() const {
  return AttrReader(op_.Attrs(), op_.RuntimeAttrs());
}

std::vector<std::string> RuntimeInferShapeContext::Inputs(
    const std::string& name) const {
  return op_.Inputs(name);
}

std::vector<std::string> RuntimeInferShapeContext::Outputs(
    const std::string& name) const {
  return op_.Outputs(name);
}

std::string RuntimeInferShapeContext::GetInputNameByIdx(size_t idx) const {
  auto& op_proto =
      paddle::framework::OpInfoMap::Instance().Get(op_.Type()).proto_;
  PADDLE_ENFORCE_LT(idx,
                    op_proto->inputs().size(),
                    platform::errors::OutOfRange(
                        "The index should be less than the size of inputs of "
                        "operator %s, but got index is %d and size is %d",
                        op_.Type(),
                        idx,
                        op_proto->inputs().size()));
  return op_proto->inputs()[idx].name();
}

std::string RuntimeInferShapeContext::GetOutputNameByIdx(size_t idx) const {
  auto& op_proto =
      paddle::framework::OpInfoMap::Instance().Get(op_.Type()).proto_;
  PADDLE_ENFORCE_LT(idx,
                    op_proto->outputs().size(),
                    platform::errors::OutOfRange(
                        "The index should be less than the size of outputs of "
                        "operator %s, but got index is %d and size is %d",
                        op_.Type(),
                        idx,
                        op_proto->outputs().size()));
  return op_proto->outputs()[idx].name();
}

void RuntimeInferShapeContext::ShareDim(const std::string& in,
                                        const std::string& out,
                                        size_t i,
                                        size_t j) {
  auto in_it = ctx_.inputs.find(in);
  auto out_it = ctx_.outputs.find(out);
  PADDLE_ENFORCE_NE(in_it,
                    ctx_.inputs.end(),
                    platform::errors::NotFound("Input %s does not exist.", in));
  PADDLE_ENFORCE_NE(
      out_it,
      ctx_.outputs.end(),
      platform::errors::NotFound("Output %s does not exist.", out));
  PADDLE_ENFORCE_LT(i,
                    in_it->second.size(),
                    platform::errors::InvalidArgument(
                        "The index of input dimension is out of range, "
                        "excepted index less than %zu, but received %zu.",
                        in_it->second.size(),
                        i));
  PADDLE_ENFORCE_LT(j,
                    out_it->second.size(),
                    platform::errors::InvalidArgument(
                        "The index of output dimension is out of range, "
                        "excepted index less than %zu, but received %zu.",
                        out_it->second.size(),
                        j));

  Variable* in_var = in_it->second[i];
  Variable* out_var = out_it->second[j];

  PADDLE_ENFORCE_EQ(
      in_var->Type(),
      out_var->Type(),
      platform::errors::InvalidArgument(
          "The type of input (%s) and output (%s) are inconsistent.", in, out));

  if (in_var->IsType<phi::SelectedRows>()) {
    auto& in_sele_rows = in_var->Get<phi::SelectedRows>();
    auto out_sele_rows = out_var->GetMutable<phi::SelectedRows>();
    out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
    out_sele_rows->set_rows(in_sele_rows.rows());
    out_sele_rows->set_height(in_sele_rows.height());
  } else if (in_var->IsType<phi::DenseTensor>()) {
    auto& in_lod_tensor = in_var->Get<phi::DenseTensor>();
    auto* out_lod_tensor = out_var->GetMutable<phi::DenseTensor>();
    out_lod_tensor->Resize(in_lod_tensor.dims());
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Currently, the input type of ShareDim only can be phi::DenseTensor "
        "or SelectedRows."));
  }
}

void RuntimeInferShapeContext::ShareAllLoD(const std::string& in,
                                           const std::string& out) const {
  auto in_it = ctx_.inputs.find(in);
  auto out_it = ctx_.outputs.find(out);
  PADDLE_ENFORCE_NE(in_it,
                    ctx_.inputs.end(),
                    platform::errors::NotFound(
                        "Input [%s] found error in Op [%s]", in, op_.Type()));
  PADDLE_ENFORCE_NE(out_it,
                    ctx_.outputs.end(),
                    platform::errors::NotFound(
                        "Output [%s] found error in Op [%s]", out, op_.Type()));

  auto& in_var_list = in_it->second;
  auto& out_var_list = out_it->second;

  PADDLE_ENFORCE_EQ(
      in_var_list.size(),
      out_var_list.size(),
      platform::errors::PreconditionNotMet(
          "Op [%s]: Input var size should be equal with output var size",
          op_.Type()));

  auto& out_var_names = op_.Outputs(out);

  for (size_t i = 0; i < in_var_list.size(); ++i) {
    if (out_var_names[i] == framework::kEmptyVarName) {
      continue;
    }

    Variable* in_var = in_var_list[i];
    if (!in_var->IsType<phi::DenseTensor>()) return;
    Variable* out_var = out_var_list[i];
    PADDLE_ENFORCE_EQ(
        out_var->IsType<phi::DenseTensor>(),
        true,
        platform::errors::PreconditionNotMet(
            "The %d-th output of Output(%s) must be phi::DenseTensor.",
            i,
            out_var_names[i]));
    auto& in_tensor = in_var->Get<phi::DenseTensor>();
    auto* out_tensor = out_var->GetMutable<phi::DenseTensor>();
    out_tensor->set_lod(in_tensor.lod());
#ifdef PADDLE_WITH_MKLDNN
    if (in_tensor.layout() != DataLayout::ONEDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }
}

void RuntimeInferShapeContext::ShareLoD(const std::string& in,
                                        const std::string& out,
                                        size_t i,
                                        size_t j) const {
  if (can_skip_lod_) {
    return;
  }
  auto in_it = ctx_.inputs.find(in);
  auto out_it = ctx_.outputs.find(out);
  PADDLE_ENFORCE_NE(in_it,
                    ctx_.inputs.end(),
                    platform::errors::NotFound("Input %s does not exist.", in));
  PADDLE_ENFORCE_NE(
      out_it,
      ctx_.outputs.end(),
      platform::errors::NotFound("Output %s does not exist.", out));
  PADDLE_ENFORCE_LT(i,
                    in_it->second.size(),
                    platform::errors::InvalidArgument(
                        "The index of input dimension is out of range, "
                        "excepted index less than %zu, but received %zu.",
                        in_it->second.size(),
                        i));
  PADDLE_ENFORCE_LT(j,
                    out_it->second.size(),
                    platform::errors::InvalidArgument(
                        "The index of output dimension is out of range, "
                        "excepted index less than %zu, but received %zu.",
                        out_it->second.size(),
                        j));

  Variable* in_var = in_it->second.at(i);
  if (!in_var->IsType<phi::DenseTensor>()) return;
  Variable* out_var = out_it->second.at(j);
  PADDLE_ENFORCE_EQ(
      out_var->IsType<phi::DenseTensor>(),
      true,
      platform::errors::InvalidArgument(
          "The %zu-th output of Output(%s) must be phi::DenseTensor.", j, out));
  auto& in_tensor = in_var->Get<phi::DenseTensor>();
  auto* out_tensor = out_var->GetMutable<phi::DenseTensor>();
  out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter): reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence-related ops.
// Shall we have a better method to share info between in/out phi::DenseTensor?
#ifdef PADDLE_WITH_MKLDNN
  // FIXME: ugly workaround below
  // Correct solution:
  //    set_layout() should NOT be called here (i.e. in ShareLoD). Instead,
  //    the layout of the output tensor should be set "manually" in Compute()
  //    of each OpKernel. The reason layout should NOT be shared between
  //    input and output "automatically" (now by InferShape()->ShareLoD())
  //    is that a layout transform may occur after InferShape().
  // Workaround:
  //    Skip set_layout() when the input layout is kMKLDNN.
  //    This avoids kMKLDNN being wrongly propagated into a non-MKLDNN
  //    OpKernel. Every MKLDNN OpKernel should call set_layout(kMKLDNN)
  //    in Compute().
  if (in_tensor.layout() != DataLayout::ONEDNN)
#endif
    out_tensor->set_layout(in_tensor.layout());
}

int32_t RuntimeInferShapeContext::GetLoDLevel(const std::string& in,
                                              size_t i) const {
  PADDLE_THROW(platform::errors::PreconditionNotMet(
      "GetLoDLevel is only used in compile time. The calculation of "
      "output's actual lod is different among operators so that should be "
      "set in the runtime kernel."));
}

void RuntimeInferShapeContext::SetLoDLevel(const std::string& out,
                                           int32_t lod_level,
                                           size_t j) const {
  PADDLE_THROW(platform::errors::PreconditionNotMet(
      "SetLoDLevel is only used in compile time. The calculation of "
      "output's actual lod is different among operators so that should be "
      "set in the runtime kernel."));
}

bool RuntimeInferShapeContext::IsRuntime() const { return true; }

bool RuntimeInferShapeContext::IsRunMKLDNNKernel() const {
  try {
    auto& op_with_kernel = dynamic_cast<const OperatorWithKernel&>(op_);
    return ((op_with_kernel.kernel_type()) &&
            (op_with_kernel.kernel_type()->data_layout_ ==
             phi::DataLayout::ONEDNN));
  } catch (std::bad_cast& exp) {
    return false;
  }
}

// TODO(paddle-dev): Can this be a template?
paddle::small_vector<InferShapeVarPtr, phi::kInputSmallVectorSize>
RuntimeInferShapeContext::GetInputVarPtrs(const std::string& name) const {
  const std::vector<Variable*>& vars = InputVars(name);
  paddle::small_vector<InferShapeVarPtr, phi::kInputSmallVectorSize> res;
  res.reserve(vars.size());
  res.insert(res.begin(), vars.begin(), vars.end());
  return res;
}

paddle::small_vector<InferShapeVarPtr, phi::kOutputSmallVectorSize>
RuntimeInferShapeContext::GetOutputVarPtrs(const std::string& name) const {
  const std::vector<Variable*>& vars = OutputVars(name);
  paddle::small_vector<InferShapeVarPtr, phi::kOutputSmallVectorSize> res;
  res.reserve(vars.size());
  res.insert(res.begin(), vars.begin(), vars.end());
  return res;
}

DDim RuntimeInferShapeContext::GetInputDim(const std::string& name) const {
  const std::vector<Variable*>& vars = InputVars(name);
  PADDLE_ENFORCE_EQ(
      vars.size(),
      1UL,
      platform::errors::InvalidArgument(
          "Input(%s) should hold one element, but now it holds %zu elements.",
          name,
          vars.size()));
  return this->GetDim(vars[0]);
}

std::vector<DDim> RuntimeInferShapeContext::GetInputsDim(
    const std::string& name) const {
  const std::vector<Variable*>& vars = InputVars(name);
  return GetDims(vars);
}

proto::VarType::Type RuntimeInferShapeContext::GetInputVarType(
    const std::string& name) const {
  return GetVarType(InputVars(name).at(0));
}

std::vector<proto::VarType::Type> RuntimeInferShapeContext::GetInputsVarType(
    const std::string& name) const {
  return GetVarTypes(InputVars(name));
}

std::vector<proto::VarType::Type> RuntimeInferShapeContext::GetOutputsVarType(
    const std::string& name) const {
  return GetVarTypes(OutputVars(name));
}

void RuntimeInferShapeContext::SetOutputDim(const std::string& name,
                                            const DDim& dim) {
  auto& vars = OutputVars(name);
  PADDLE_ENFORCE_EQ(
      vars.size(),
      1UL,
      platform::errors::InvalidArgument("Output(%s) should hold one element, "
                                        "but now it holds %zu elements.",
                                        name,
                                        vars.size()));
  SetDim(vars[0], dim);
}

void RuntimeInferShapeContext::SetOutputsDim(const std::string& name,
                                             const std::vector<DDim>& dims) {
  auto& vars = OutputVars(name);
  SetDims(vars, dims);
}

const phi::ArgumentMappingFn*
RuntimeInferShapeContext::GetPhiArgumentMappingFn() const {
  return phi::OpUtilsMap::Instance().GetArgumentMappingFn(op_.Type());
}

const phi::KernelSignature*
RuntimeInferShapeContext::GetPhiDefaultKernelSignature() const {
  return &phi::DefaultKernelSignatureMap::Instance().Get(op_.Type());
}

void RuntimeInferShapeContext::SetSkipLoD(bool skip) { can_skip_lod_ = skip; }

std::vector<LoD> RuntimeInferShapeContext::GetOutputsLod(
    const std::string& out) const {
  auto out_it = ctx_.outputs.find(out);
  auto& out_var_list = out_it->second;

  std::vector<LoD> ret;
  for (size_t i = 0; i < out_var_list.size(); ++i) {
    Variable* out_var = out_var_list[i];
    if (out_var != nullptr) {
      auto* out_tensor = out_var->GetMutable<phi::DenseTensor>();
      ret.push_back(out_tensor->lod());
    }
  }
  return ret;
}

std::vector<DDim> RuntimeInferShapeContext::GetOutputsDim(
    const std::string& name) const {
  const std::vector<Variable*>& vars = OutputVars(name);
  std::vector<Variable*> vars_res;
  for (auto var : vars) {
    if (var != nullptr) {
      vars_res.push_back(var);
    }
  }
  return GetDims(vars_res);
}

DDim RuntimeInferShapeContext::GetDim(Variable* var) const {
  PADDLE_ENFORCE_NOT_NULL(
      var, platform::errors::InvalidArgument("Input variable is nullptr."));
  if (var->IsType<phi::DenseTensor>()) {
    return var->Get<phi::DenseTensor>().dims();
  } else if (var->IsType<phi::SelectedRows>()) {
    return var->Get<phi::SelectedRows>().GetCompleteDims();
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Only phi::DenseTensor or SelectedRows support 'GetDim', but input "
        "Variable's type is %s.",
        ToTypeName(var->Type())));
  }
}

std::vector<DDim> RuntimeInferShapeContext::GetDims(
    const std::vector<Variable*>& vars) const {
  std::vector<DDim> ret;
  ret.reserve(vars.size());
  std::transform(
      vars.begin(), vars.end(), std::back_inserter(ret), [this](Variable* var) {
        return this->GetDim(var);
      });
  return ret;
}

std::vector<DDim> RuntimeInferShapeContext::GetRepeatedDims(
    const std::string& name) const {
  PADDLE_THROW(platform::errors::PreconditionNotMet(
      "GetRepeatedDims method only ban be used in compile time."));
}

void RuntimeInferShapeContext::SetDim(Variable* var, const DDim& dim) {
  if (var->IsType<phi::DenseTensor>()) {
    var->GetMutable<phi::DenseTensor>()->Resize(dim);
  } else if (var->IsType<phi::SelectedRows>()) {
    var->GetMutable<phi::SelectedRows>()->set_height(dim[0]);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Variable type error, expect phi::DenseTensor or SelectedRows, but "
        "received "
        "(%s).",
        ToTypeName(var->Type())));
  }
}

void RuntimeInferShapeContext::SetDims(const std::vector<Variable*>& vars,
                                       const std::vector<DDim>& dims) {
  size_t length = vars.size();
  PADDLE_ENFORCE_EQ(length,
                    dims.size(),
                    platform::errors::InvalidArgument(
                        "The number of input variables do not match the "
                        "number of input dimensions, the number of variables "
                        "is %zu, the number of dimensions is %zu.",
                        length,
                        dims.size()));
  for (size_t i = 0; i < length; ++i) {
    if (vars[i] == nullptr) {
      continue;
    }
    SetDim(vars[i], dims[i]);
  }
}

void RuntimeInferShapeContext::SetRepeatedDims(const std::string& name,
                                               const std::vector<DDim>& dims) {
  PADDLE_THROW(platform::errors::PreconditionNotMet(
      "SetRepeatedDims method only can be used in compile time."));
}

std::vector<proto::VarType::Type> RuntimeInferShapeContext::GetVarTypes(
    const std::vector<Variable*>& vars) const {
  std::vector<proto::VarType::Type> retv;
  retv.resize(vars.size());
  std::transform(vars.begin(),
                 vars.end(),
                 retv.begin(),
                 std::bind(std::mem_fn(&RuntimeInferShapeContext::GetVarType),
                           this,
                           std::placeholders::_1));
  return retv;
}

proto::VarType::Type RuntimeInferShapeContext::GetVarType(Variable* var) const {
  return ToVarType(var->Type());
}

const std::vector<Variable*>& RuntimeInferShapeContext::InputVars(
    const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  PADDLE_ENFORCE_NE(
      it,
      ctx_.inputs.end(),
      platform::errors::NotFound(
          "Operator (%s) does not have the input (%s).", op_.Type(), name));
  return it->second;
}

const std::vector<Variable*>& RuntimeInferShapeContext::OutputVars(
    const std::string& name) const {
  auto it = ctx_.outputs.find(name);
  PADDLE_ENFORCE_NE(
      it,
      ctx_.outputs.end(),
      platform::errors::NotFound(
          "Operator (%s) does not have the outputs (%s).", op_.Type(), name));
  return it->second;
}

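// Binds the current thread to the device of `place` (if any), runs the
// operator via RunImpl() inside profiler RecordEvent scopes, and annotates
// enforce exceptions with the operator call stack before rethrowing.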
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  try {
    VLOG(4) << place << " " << DebugStringEx(&scope);
    if (platform::is_gpu_place(place)) {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with CUDA support.",
          place));
#else
      auto dev_id = place.device;
      platform::SetDeviceId(dev_id);
#endif
    } else if (platform::is_xpu_place(place)) {
#ifndef PADDLE_WITH_XPU
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with XPU support.",
          place));
#else
      auto dev_id = place.device;
      platform::SetXPUDeviceId(dev_id);
#endif
    } else if (platform::is_custom_place(place)) {
#ifndef PADDLE_WITH_CUSTOM_DEVICE
      PADDLE_THROW(platform::errors::Unavailable(
          "Cannot run operator on place %s, please recompile paddle or "
          "reinstall Paddle with CustomDevice support.",
          place));
#else
      phi::DeviceManager::SetDevice(place);
#endif
    }

    {
      // TODO(wangchaochaohu): refine the code to use only one RecordEvent.
      // To record both the op type cost time and the op name cost time, we
      // set two events.
      platform::RecordEvent op_type_record_event(
          Type(), platform::TracerEventType::Operator, 1);
      auto op_name = platform::OpName(outputs_, Type());
      platform::RecordEvent op_name_record_event(
          op_name,
          platform::TracerEventType::Operator,
          FLAGS_enable_host_event_recorder_hook ? 20 : 1,
          platform::EventRole::kUniqueOp);
      RunImpl(scope, place);
    }

    VLOG(3) << GetExecutionPlace(place) << " " << DebugStringEx(&scope);
  } catch (platform::EnforceNotMet& exception) {
    framework::InsertCallStackInfo(Type(), Attrs(), &exception);
    throw std::move(exception);
  } catch (platform::EOFException&) {
    std::rethrow_exception(std::current_exception());
  } catch (std::exception& ex) {
    LOG(WARNING) << Type() << " raises an exception "
                 << platform::demangle(typeid(ex).name()) << ", " << ex.what();
    std::rethrow_exception(std::current_exception());
  } catch (...) {
    LOG(WARNING) << Type() << " raises an unknown exception";
    std::rethrow_exception(std::current_exception());
  }
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(
      ins.size(),
      1UL,
      platform::errors::InvalidArgument(
          "Operator %s's input %s should contain only one variable.",
          type_,
          name));
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE_NE(
      it,
      inputs_.end(),
      platform::errors::NotFound(
          "Operator %s does not have the input %s.", type_, name));
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(
      outs.size(),
      1UL,
      platform::errors::InvalidArgument(
          "Operator %s's output %s should contain only one variable.",
          type_,
          name));
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE_NE(
      it,
      outputs_.end(),
      platform::errors::NotFound(
          "Operator %s does not have an output called %s.", type_, name));
  return it->second;
}

std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";

  const std::unordered_set<std::string>* no_need_buffer_vars = nullptr;
  if (info_ && info_->NoNeedBufferVarsInferer()) {
    no_need_buffer_vars =
        &(Info().NoNeedBufferVarsInferer()(Inputs(), Outputs(), Attrs()));
    if (no_need_buffer_vars->empty()) no_need_buffer_vars = nullptr;
  }

  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    bool is_no_need_buffer_var =
        (no_need_buffer_vars && no_need_buffer_vars->count(input.first) > 0);
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = is_no_need_buffer_var
                                  ? "unknown_dtype"
                                  : GetDtype(*scope, var_name);
          std::string place = is_no_need_buffer_var
                                  ? "unknown_place"
                                  : GetPlace(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDimsDebug(*scope, var_name, true) << "]";
          ss << "(" << GetLoDDebug(*scope, var_name) << ")";
          ss << "(" << place << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
          ss << "[" << GetDimsDebug(*scope, var_name, true) << "]";
          ss << "(" << GetLoDDebug(*scope, var_name) << ")";
          ss << "(" << GetPlace(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type),
      inputs_(inputs),
      outputs_(outputs),
      attrs_(attrs),
      // NOTE(zjl): why op_info may be nullptr?
      info_(OpInfoMap::Instance().GetNullable(type)) {
  // In dygraph mode, every OperatorBase is constructed by
  // framework::OpRegistry::CreateOp(type, {}, {}, {}, false), so inputs,
  // outputs and attrs are empty maps; skipping the checks below improves the
  // execution efficiency of dygraph.
  if (inputs_.size() > 0 || outputs_.size() > 0) {
    GenerateTemporaryNames();
    CheckAllInputOutputSet();
  }

  // canonicalize attrs
  if (info_ && info_->proto_) {
    CanonicalizeScalarAttrs(*info_->proto_, &attrs_);
  }
  // At the OperatorBase level, all attributes with VarDesc type are treated
  // as inputs.
  for (auto& attr : FilterAttrVar(attrs)) {
    VLOG(3) << "found Attribute with Variable type: " << attr.first;
    inputs_[attr.first] = std::move(AttrVarNames(attr.second));
    attrs_.erase(attr.first);
  }
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = Info();

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  if (info_ == nullptr || info_->proto_ == nullptr) return;

  for (auto& in : info_->Proto().inputs()) {
    if (!in.dispensable() && !in.extra()) {
      PADDLE_ENFORCE_NE(
          inputs_.find(in.name()),
          inputs_.end(),
          platform::errors::NotFound(
              "Operator %s's input (%s) is not set.", Type(), in.name()));
    }
  }

  for (auto& out : info_->Proto().outputs()) {
    if (!out.dispensable() && !out.extra() && !out.intermediate()) {
      PADDLE_ENFORCE_NE(
          outputs_.find(out.name()),
          outputs_.end(),
          platform::errors::NotFound(
              "Operator %s's output (%s) is not set.", Type(), out.name()));
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

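// The two helpers below return the underlying dense tensor of a variable:
// the tensor itself for phi::DenseTensor, or the value tensor for
// phi::SelectedRows.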
const phi::DenseTensor* GetLoDTensorOrSelectedRowsValueFromVar(
    const Variable& var) {
  if (var.IsType<phi::DenseTensor>()) {
    return static_cast<const phi::DenseTensor*>(&(var.Get<phi::DenseTensor>()));
  } else if (var.IsType<phi::SelectedRows>()) {
    return &(var.Get<phi::SelectedRows>().value());
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Variable type is %s, expect phi::DenseTensor or SelectedRows.",
        ToTypeName(var.Type())));
  }
}

phi::DenseTensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<phi::DenseTensor>()) {
    return var->GetMutable<phi::DenseTensor>();
  } else if (var->IsType<phi::SelectedRows>()) {
    return var->GetMutable<phi::SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Variable type is %s, expect phi::DenseTensor or SelectedRows.",
        ToTypeName(var->Type())));
  }
}

OperatorWithKernel::OperatorWithKernel(const std::string& type,
                                       const VariableNameMap& inputs,
                                       const VariableNameMap& outputs,
                                       const AttributeMap& attrs)
    : OperatorBase(type, inputs, outputs, attrs) {}

OperatorWithKernel::~OperatorWithKernel() = default;

bool ExecutionContext::HasInput(const std::string& name) const {
  auto* var = InputVar(name);
  return var != nullptr;
}

bool ExecutionContext::HasInputs(const std::string& name) const {
  const auto& ins = ctx_.inputs;
  auto it = ins.find(name);
  if (it == ins.end() || it->second.empty()) {
    return false;
  }
  for (const auto* input : it->second) {
    if (input == nullptr) {
      return false;
    }
  }
  return true;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  auto* var = OutputVar(name);
  return var != nullptr;
}

const Variable* ExecutionContext::InputVar(const std::string& name) const {
  LogVarUsageIfUnusedVarCheckEnabled(name);

  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(
      it->second.size(),
      1UL,
      platform::errors::InvalidArgument(
          "Operator %s's input %s should contain only one variable.",
          op_.Type(),
          name));
  return it->second.empty() ? nullptr : it->second[0];
}

Variable* ExecutionContext::OutputVar(const std::string& name) const {
  auto it = ctx_.outputs.find(name);
  if (it == ctx_.outputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(
      it->second.size(),
      1UL,
      platform::errors::InvalidArgument(
          "Operator %s's output %s should contain only one variable.",
          op_.Type(),
          name));
  return it->second.empty() ? nullptr : it->second[0];
}

template <>
const std::vector<const phi::DenseTensor*>
ExecutionContext::MultiInput<phi::DenseTensor>(const std::string& name) const {
  LogVarUsageIfUnusedVarCheckEnabled(name);

  auto vars = MultiInputVar(name);
  if (vars.size() == 0) {
    return {};
  }
  std::vector<const phi::DenseTensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(),
                 vars.end(),
                 std::back_inserter(res),
                 [&](const Variable* var) -> const phi::DenseTensor* {
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE_EQ(
                       var->IsType<phi::DenseTensor>(),
                       true,
                       platform::errors::InvalidArgument(
                           "Input variable should be phi::DenseTensor, "
                           "but the received type is %s.",
                           ToTypeName(var->Type())));
                   return &(var->Get<phi::DenseTensor>());
                 });
  return res;
}

template <>
std::vector<phi::DenseTensor*> ExecutionContext::MultiOutput<phi::DenseTensor>(
    const std::string& name) const {
  auto vars = MultiOutputVar(name);

  if (vars.size() == 0) {
    return {};
  }
  std::vector<phi::DenseTensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(),
                 vars.end(),
                 std::back_inserter(res),
                 [&](Variable* var) -> phi::DenseTensor* {
                   return var == nullptr ? nullptr
                                         : var->GetMutable<phi::DenseTensor>();
                 });
  return res;
}

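// An op supports GPU if either a phi kernel with a GPU backend or a fluid
// kernel on a GPU place is registered; ops found in neither registry are
// treated as control ops.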
bool OpSupportGPU(const std::string& op_type) {
  // check in new Function kernel first
  bool has_phi_kernel = false;
  auto& kernel_factory = phi::KernelFactory::Instance();
  auto kernel_key_map =
      kernel_factory.SelectKernelMap(phi::TransToPhiKernelName(op_type));
  for (auto& kernel : kernel_key_map) {
    has_phi_kernel = true;
    if (platform::is_gpu_place(phi::TransToPhiPlace(kernel.first.backend()))) {
      return true;
    }
  }

  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it != all_kernels.end()) {
    for (auto& kern_pair : it->second) {
      if (platform::is_gpu_place(kern_pair.first.place_)) {
        return true;
      }
    }
  } else {
    if (has_phi_kernel) {
      // The op has a phi kernel, but neither a phi GPU kernel nor a fluid
      // GPU kernel was found, so this op does not support GPU.
      return false;
    } else {
      // All control operators must support GPU.
      return true;
    }
  }

  return false;
}

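// CacheImpl caches the phi::KernelContext and RuntimeInferShapeContext built
// for an op so they can be reused across runs; NeedInferShape() compares the
// dims of the tracked tensors with the last recorded ones and only requests a
// new InferShape pass when they changed (or when caching is disallowed).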
struct OperatorWithKernel::CacheImpl {
  static const char kNotAllowInferShapeCahce[];
  explicit CacheImpl(phi::KernelContext* kernel_ctx,
                     RuntimeInferShapeContext* infer_shape_ctx,
                     const std::vector<phi::DenseTensor*>& tensors,
                     bool not_allow_infer_shape_cache)
      : kernel_ctx_(kernel_ctx),
        infer_shape_ctx_(infer_shape_ctx),
        tensors_(tensors),
        not_allow_infer_shape_cache_(not_allow_infer_shape_cache) {}

  phi::KernelContext* getKernelContext() { return kernel_ctx_.get(); }
  RuntimeInferShapeContext* getRuntimeInferShapeContext() {
    return infer_shape_ctx_.get();
  }

  bool NeedInferShape() {
    if (not_allow_infer_shape_cache_) return true;

    bool ret{false};
    if (last_ddims_.empty() || tensors_.empty()) ret = true;
    if (!ret) {
      CHECK_EQ(last_ddims_.size(), tensors_.size());
      for (size_t i = 0; i < last_ddims_.size(); ++i) {
        if (tensors_[i]->dims() != last_ddims_[i]) {
          ret = true;
          break;
        }
      }
    }
    if (ret) {
      last_ddims_.resize(tensors_.size());
      for (size_t i = 0; i < last_ddims_.size(); ++i) {
        last_ddims_[i] = tensors_[i]->dims();
      }
    }
    VLOG(3) << "need infer shape is " << ret;
    return ret;
  }

 private:
  std::unique_ptr<phi::KernelContext> kernel_ctx_;
  std::unique_ptr<RuntimeInferShapeContext> infer_shape_ctx_;
  std::vector<phi::DenseTensor*> tensors_;
  bool not_allow_infer_shape_cache_;
  std::vector<phi::DDim> last_ddims_;
};
const char OperatorWithKernel::CacheImpl::kNotAllowInferShapeCahce[] =
    "@NOT_ALLOW_INFERSHAPE_CACHE@";

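// Raises a fatal error if a non-empty FP32/FP64 output tensor of `op_type`
// contains Inf or NaN values.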
static void CheckTensorNANOrInf(const std::string& op_type,
                                const std::string& name,
                                const phi::DenseTensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (framework::TransToProtoVarType(tensor.dtype()) != proto::VarType::FP32 &&
      framework::TransToProtoVarType(tensor.dtype()) != proto::VarType::FP64) {
    return;
  }
  PADDLE_ENFORCE_NE(framework::TensorContainsInf(tensor),
                    true,
                    platform::errors::Fatal(
                        "Operator %s output phi::DenseTensor %s contains Inf.",
                        op_type,
                        name));
  PADDLE_ENFORCE_NE(framework::TensorContainsNAN(tensor),
                    true,
                    platform::errors::Fatal(
                        "Operator %s output phi::DenseTensor %s contains NAN.",
                        op_type,
                        name));
}

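// The Support*() queries below all follow the same pattern: check the phi
// kernel registry for a matching backend first, then fall back to scanning
// the fluid OpKernel registry.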
bool OperatorWithKernel::SupportGPU() const {
  auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap(
      phi::TransToPhiKernelName(type_));
  auto has_phi_kernel =
      std::any_of(phi_kernels.begin(),
                  phi_kernels.end(),
                  [](phi::KernelKeyMap::const_reference kern_pair) {
                    return kern_pair.first.backend() == phi::Backend::GPU;
                  });
  if (has_phi_kernel) {
    return true;
  } else {
    auto kernel_iter = OperatorWithKernel::AllOpKernels().find(type_);
    if (kernel_iter == OperatorWithKernel::AllOpKernels().end()) {
      return false;
    } else {
      auto& op_kernels = kernel_iter->second;
      return std::any_of(
          op_kernels.begin(),
          op_kernels.end(),
          [](OpKernelMap::const_reference kern_pair) {
            return platform::is_gpu_place(kern_pair.first.place_);
          });
    }
  }
}

bool OperatorWithKernel::SupportXPU() const {
#ifdef PADDLE_WITH_XPU
  auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap(
      phi::TransToPhiKernelName(type_));
  auto has_phi_kernel =
      std::any_of(phi_kernels.begin(),
                  phi_kernels.end(),
                  [](phi::KernelKeyMap::const_reference kern_pair) {
                    return kern_pair.first.backend() == phi::Backend::XPU;
                  });
  if (has_phi_kernel) {
    return true;
  } else {
    auto kernel_iter = OperatorWithKernel::AllOpKernels().find(type_);
    if (kernel_iter == OperatorWithKernel::AllOpKernels().end()) {
      return false;
    } else {
      auto& op_kernels = kernel_iter->second;
      return std::any_of(
          op_kernels.begin(),
          op_kernels.end(),
          [this](OpKernelMap::const_reference kern_pair) {
            return platform::is_xpu_place(kern_pair.first.place_) &&
                   paddle::platform::is_xpu_support_op(
                       type_,
                       framework::TransToPhiDataType(
                           kern_pair.first.data_type_));
          });
    }
  }
#else
  PADDLE_THROW(platform::errors::PreconditionNotMet(
      "Should not call OperatorWithKernel::SupportXPU() when not compiled "
      "with XPU support."));
  return false;
#endif
}

bool OperatorWithKernel::SupportCustomDevice() const {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap(
      phi::TransToPhiKernelName(type_));
  auto has_phi_kernel =
      std::any_of(phi_kernels.begin(),
                  phi_kernels.end(),
                  [](phi::KernelKeyMap::const_reference kern_pair) {
                    return platform::is_custom_place(
                        phi::TransToPhiPlace(kern_pair.first.backend()));
                  });
  if (has_phi_kernel) {
    return true;
  } else {
    auto kernel_iter = OperatorWithKernel::AllOpKernels().find(type_);
    if (kernel_iter == OperatorWithKernel::AllOpKernels().end()) {
      return false;
    } else {
      auto& op_kernels = kernel_iter->second;
      return std::any_of(
          op_kernels.begin(),
          op_kernels.end(),
          [this](OpKernelMap::const_reference kern_pair) {
            return platform::is_custom_place(kern_pair.first.place_);
          });
    }
  }
#else
  PADDLE_THROW(platform::errors::PreconditionNotMet(
      "should not call OperatorWithKernel::SupportCustomDevice() when not "
      "compiled with "
      "CustomDevice support."));
  return false;
#endif
}

1404
bool OperatorWithKernel::SupportsMKLDNN(const phi::DataType data_type) const {
1405 1406 1407 1408 1409
  auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap(
      phi::TransToPhiKernelName(type_));
  auto has_phi_kernel =
      std::any_of(phi_kernels.begin(),
                  phi_kernels.end(),
Y
YuanRisheng 已提交
1410 1411
                  [data_type](phi::KernelKeyMap::const_reference kern_pair) {
                    return kern_pair.first.backend() == phi::Backend::ONEDNN &&
1412
                           kern_pair.first.dtype() == data_type;
1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427
                  });
  if (has_phi_kernel) {
    return true;
  } else {
    auto op_kernel_iter = OperatorWithKernel::AllOpKernels().find(type_);
    if (op_kernel_iter == OperatorWithKernel::AllOpKernels().end()) {
      return false;
    } else {
      auto& op_kernels = op_kernel_iter->second;
      return std::any_of(
          op_kernels.begin(),
          op_kernels.end(),
          [data_type](OpKernelMap::const_reference kern_pair) {
            return platform::is_cpu_place(kern_pair.first.place_) &&
                   kern_pair.first.library_type_ == LibraryType::kMKLDNN &&
                   kern_pair.first.data_type_ ==
                       paddle::framework::TransToProtoVarType(data_type);
          });
    }
  }
}
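
// Example (hypothetical caller): before routing an op to oneDNN, a
// dispatcher can ask whether any oneDNN kernel exists for the dtype it
// plans to run with:
//   if (op->SupportsMKLDNN(phi::DataType::FLOAT32)) {
//     // a phi ONEDNN kernel or a fluid kMKLDNN OpKernel is registered
//   }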

bool OperatorWithKernel::SupportsCUDNN(const phi::DataType data_type) const {
  auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap(
      phi::TransToPhiKernelName(type_));
  auto has_phi_kernel =
      std::any_of(phi_kernels.begin(),
                  phi_kernels.end(),
                  [data_type](phi::KernelKeyMap::const_reference kern_pair) {
                    return kern_pair.first.backend() == phi::Backend::GPUDNN &&
                           kern_pair.first.dtype() == data_type;
                  });
  if (has_phi_kernel) {
    return true;
  } else {
    auto op_kernel_iter = OperatorWithKernel::AllOpKernels().find(type_);
    if (op_kernel_iter == OperatorWithKernel::AllOpKernels().end()) {
      return false;
    } else {
      auto& op_kernels = op_kernel_iter->second;
      proto::VarType::Type fluid_data_type =
          framework::TransToProtoVarType(data_type);
      return std::any_of(
          op_kernels.begin(),
          op_kernels.end(),
          [fluid_data_type](OpKernelMap::const_reference kern_pair) {
            return platform::is_gpu_place(kern_pair.first.place_) &&
                   kern_pair.first.library_type_ == LibraryType::kCUDNN &&
                   kern_pair.first.data_type_ == fluid_data_type;
          });
    }
  }
}
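
// Example (hypothetical): SupportsCUDNN is the GPU analogue of
// SupportsMKLDNN; it reports whether a cuDNN kernel is registered, either as
// a phi GPUDNN kernel or a fluid kCUDNN OpKernel:
//   bool has_fp16_cudnn = op->SupportsCUDNN(phi::DataType::FLOAT16);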

bool OperatorWithKernel::SupportsKernelType(
    const OpKernelType& kernel_type, const ExecutionContext& exe_ctx) const {
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) return false;
  OpKernelMap& kernels = kernels_iter->second;
  auto kernel_iter = kernels.find(kernel_type);

#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
  if (paddle::platform::is_xpu_place(kernel_type.place_)) {
    return kernel_iter != kernels.end() &&
           paddle::platform::is_xpu_support_op(
               type_, framework::TransToPhiDataType(kernel_type.data_type_));
  }
#endif

#ifdef PADDLE_WITH_XPU_KP
  if (paddle::platform::is_xpu_place(kernel_type.place_)) {
    bool use_xpu_kp_kernel_rt =
        FLAGS_run_kp_kernel &&
        paddle::platform::is_xpu_kp_support_op(
            type_, framework::TransToPhiDataType(kernel_type.data_type_));
    bool use_xpu_kp_kernel_debug =
        paddle::platform::is_in_xpu_kpwhite_list(type_);
    bool is_xpu_kp_support = (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
    if (is_xpu_kp_support) {
      auto tmp_kernel_type = kernel_type;
      tmp_kernel_type.library_type_ = LibraryType::kKP;
      return kernels.find(tmp_kernel_type) != kernels.end();
    }
    return kernel_iter != kernels.end() &&
           paddle::platform::is_xpu_support_op(
               type_, framework::TransToPhiDataType(kernel_type.data_type_));
  }
#endif

// NOTE(jiahongyu): If MKLDNN can be used, the function SupportsKernelType needs
// to check whether the current op supports an MKLDNN kernel. There are three
// statements in the if condition:
// 1. Whether the mkldnn kernel falls back to the plain kernel;
// 2. Whether this op has a specific implementation;
// 3. Whether the mkldnn kernel can be used.
#ifdef PADDLE_WITH_MKLDNN
  if (!this->DnnFallback() && !paddle::platform::in_mkldnn_white_list(type_) &&
      this->CanMKLDNNBeUsed(exe_ctx, kernel_type.data_type_)) {
    auto tmp_kernel_type = kernel_type;
    tmp_kernel_type.library_type_ = framework::LibraryType::kMKLDNN;
    tmp_kernel_type.data_layout_ = framework::DataLayout::ONEDNN;
    return kernels.find(tmp_kernel_type) != kernels.end();
  }
#endif

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (this->CanCUDNNBeUsed(exe_ctx, kernel_type.data_type_)) {
    auto tmp_kernel_type = kernel_type;
    tmp_kernel_type.library_type_ = framework::LibraryType::kCUDNN;
    return kernels.find(tmp_kernel_type) != kernels.end();
  }
#endif

  return kernel_iter != kernels.end();
}
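
// Example (hypothetical): because SupportsKernelType rewrites library_type_
// and data_layout_ before probing the oneDNN/cuDNN variants, a plain query
// such as
//   OpKernelType key(proto::VarType::FP32, platform::CPUPlace());
//   bool ok = op->SupportsKernelType(key, exe_ctx);
// may succeed through the kMKLDNN variant of `key` even when no plain CPU
// kernel is registered.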

bool OperatorWithKernel::CanMKLDNNBeUsed(const framework::ExecutionContext& ctx,
                                         phi::DataType data_type) const {
  return ctx.HasAttr("use_mkldnn") && ctx.Attr<bool>("use_mkldnn") &&
         platform::is_cpu_place(ctx.GetPlace()) &&
         this->SupportsMKLDNN(data_type);
}

bool OperatorWithKernel::CanMKLDNNBeUsed(const framework::ExecutionContext& ctx,
                                         proto::VarType::Type data_type) const {
  return this->CanMKLDNNBeUsed(ctx, phi::TransToPhiDataType(data_type));
}
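
// Example (hypothetical): CanMKLDNNBeUsed bundles three conditions: the op
// carries use_mkldnn=true, it is placed on CPU, and SupportsMKLDNN holds for
// the dtype. A caller therefore only needs a single check:
//   if (op->CanMKLDNNBeUsed(ctx, phi::DataType::FLOAT32)) {
//     // dispatch to the oneDNN kernel
//   }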

bool OperatorWithKernel::CanCUDNNBeUsed(const framework::ExecutionContext& ctx,
                                        phi::DataType data_type) const {
  bool use_cudnn = ctx.HasAttr("use_cudnn") && ctx.Attr<bool>("use_cudnn") &&
                   paddle::platform::is_gpu_place(ctx.GetPlace());

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (use_cudnn) {
    auto& dev_ctx = ctx.device_context<phi::GPUContext>();
    use_cudnn &= (dev_ctx.cudnn_handle() != nullptr);
  }
#endif  // PADDLE_WITH_CUDA || PADDLE_WITH_HIP

#if defined(PADDLE_WITH_CUDA)
  if (use_cudnn && data_type == phi::DataType::BFLOAT16) {
    PADDLE_ENFORCE_GE(
        platform::DnnVersion(),
        8100,
        platform::errors::InvalidArgument(
            "bfloat16 can only be used when CUDNN_VERSION >= 8100"));
  }
#endif  // PADDLE_WITH_CUDA

  return use_cudnn && this->SupportsCUDNN(data_type);
}

bool OperatorWithKernel::CanCUDNNBeUsed(const framework::ExecutionContext& ctx,
                                        proto::VarType::Type data_type) const {
  return this->CanCUDNNBeUsed(ctx, phi::TransToPhiDataType(data_type));
}
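
// Example (hypothetical): unlike the oneDNN path, CanCUDNNBeUsed also
// verifies that the device context actually owns a cuDNN handle and, on
// CUDA builds, that bfloat16 is only requested with cuDNN >= 8.1:
//   bool ok = op->CanCUDNNBeUsed(ctx, phi::DataType::BFLOAT16);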

void OperatorWithKernel::InferShape(InferShapeContext* ctx) const {
  PADDLE_THROW(platform::errors::PermissionDenied(
      "The default InferShape function of OperatorWithKernel is not allowed to "
      "be called, please override corresponding InferShape function in the "
      "specific operator."));
}

void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
                                           const platform::Place& place,
                                           const RuntimeContext& ctx) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, ctx);
  this->Info().infer_shape_(&infer_shape_ctx);
}

template <typename T>
bool HasSameTensorType(phi::TensorBase* phi_tensor, Variable* var) {
  if (phi_tensor == nullptr && var == nullptr) {
    return true;
  } else if (phi_tensor != nullptr && var != nullptr) {
    if (T::classof(phi_tensor) && var->IsType<T>()) {
      return true;
    }
  }
  return false;
}
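
// Example (hypothetical): HasSameTensorType pairs a phi::TensorBase* with a
// fluid Variable* and reports whether both hold the same concrete type:
//   bool same = HasSameTensorType<phi::DenseTensor>(phi_output, var_output);
// Two nullptrs also count as "same", which lets callers treat absent
// outputs uniformly.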

// TODO(YuanRisheng): We need to collect all `need_prepare_phi_data_` logic
// into this function.
void OperatorWithKernel::CheckWhetherPreparePhiData(
    const VariableNameMap& innames,
    const VariableNameMap& outnames,
    const Scope& scope) const {
  if (run_phi_kernel_ && impl_ != nullptr) {
    const auto& phi_kernel_context = impl_->getKernelContext();
    size_t phi_tensor_index = 0;
    // Check each tensor in the KernelContext; if any tensor's type differs
    // from its variable's type, the PhiKernelContext needs to be
    // reconstructed. We use kernel_signature_'s outputs to retrieve the
    // tensors, because the tensors in phi_kernel_context are stored in the
    // order of kernel_signature_'s outputs.
    if (phi_kernel_context->OutputsSize() >= phi_tensor_index ||
        kernel_signature_ == nullptr) {
      need_prepare_phi_data_ = true;
      return;
    }

    const auto& phi_output_names = kernel_signature_->output_names;
    for (auto& phi_output_name : phi_output_names) {
      const auto& iter = outnames.find(phi_output_name);
      if (iter != outnames.end()) {
        for (auto& var_name : iter->second) {
          auto var_output = scope.FindVar(var_name);
          auto phi_output =
              phi_kernel_context->MutableOutputAt<phi::TensorBase>(
                  phi_tensor_index);
          if (phi_output == nullptr) {
            continue;
          }
          if (!(HasSameTensorType<phi::DenseTensor>(phi_output, var_output) ||
                HasSameTensorType<phi::SparseCooTensor>(phi_output,
                                                        var_output) ||
                HasSameTensorType<framework::Strings>(phi_output,
                                                      var_output))) {
            need_prepare_phi_data_ = true;
          }
          phi_tensor_index++;
        }
      }
    }
  }
}
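
// Example (hypothetical): if a DenseTensor output cached in the
// PhiKernelContext is later re-bound to a SparseCooTensor variable in the
// scope, the type check above flips need_prepare_phi_data_, and RunImpl
// rebuilds the cached kernel context on the next invocation.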

void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  // To reduce the elapsed time of HasAttr, we use bool variable to record the
  // result of HasAttr.
  if (!enable_cache_runtime_context_ && HasAttr(kEnableCacheRuntimeContext))
    enable_cache_runtime_context_ = true;
  if (!all_kernels_must_compute_runtime_shape_ &&
      HasAttr(kAllKernelsMustComputeRuntimeShape))
    all_kernels_must_compute_runtime_shape_ = true;
  const Scope* cur_scope = &scope;
  CheckWhetherPreparePhiData(Inputs(), Outputs(), scope);
  if (!enable_cache_runtime_context_) {
    RuntimeContext ctx(Inputs(), Outputs(), scope);
    RunImpl(scope, place, &ctx);
  } else if (run_phi_kernel_ && impl_ != nullptr && !need_prepare_data_ &&
             !need_prepare_phi_data_) {
    if (!all_kernels_must_compute_runtime_shape_ && impl_->NeedInferShape()) {
      this->Info().infer_shape_(impl_->getRuntimeInferShapeContext());
    }
    (*phi_kernel_)(impl_->getKernelContext());
  } else {
    if (runtime_ctx_.get() == nullptr || pre_scope_ != cur_scope) {
      std::lock_guard<std::mutex> lock(cache_update_mutex_);
      if (runtime_ctx_.get() == nullptr || pre_scope_ != cur_scope) {
        runtime_ctx_.reset(new RuntimeContext(Inputs(), Outputs(), scope));
        pre_scope_ = cur_scope;
      }
    }
    RunImpl(scope, place, runtime_ctx_.get());
  }
}
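
// Dispatch summary (informal): the fast path above fires only when the phi
// kernel, its KernelContext, and the RuntimeContext are all cached and no
// data preparation is pending; a scope change invalidates the cache via
// pre_scope_, and the RuntimeContext is rebuilt under cache_update_mutex_.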

void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place,
                                 RuntimeContext* runtime_ctx) const {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  bool fallback_to_cpu = false;
  auto* dev_ctx = pool.Get(place);
  // using cache
  if (kernel_type_.get()) {
    dev_ctx = pool.Get(kernel_type_->place_);
  }
  auto exe_ctx = ExecutionContext(*this, scope, *dev_ctx, *runtime_ctx);

// TODO(Liu-xiandong): Now we are using too much if-else and hard code in XPU
// device, it's ugly, and we will refactor in the future.
#if defined(PADDLE_WITH_XPU_KP)
  bool use_phi_xpu_kp = false;
#endif

  // TODO(chenweihang): Now we are still reusing a lot of the original fluid
  // implementation, this is a gradual replacement process
  // TODO(chenweihang): in the first phase of the project, we only support the
  // CPU, CUDA and ROCM backends; XPU, NPU and MKLDNN will be supported in the
  // second phase
  phi::KernelKey phi_kernel_key;
  std::string phi_kernel_name;
  if (phi::KernelFactory::Instance().HasCompatiblePhiKernel(type_)) {
    if (kernel_signature_ == nullptr || phi_kernel_ == nullptr) {
      if (phi::KernelFactory::Instance().HasStructuredKernel(type_)) {
        kernel_signature_.reset(new phi::KernelSignature(type_.c_str()));
      } else {
        kernel_signature_.reset(new phi::KernelSignature(
            std::move(GetExpectedPhiKernelArgs(exe_ctx))));
      }

      VLOG(6) << *kernel_signature_.get();
      phi_kernel_name = kernel_signature_->name;
      kernel_type_.reset(
          new OpKernelType(std::move(InnerGetExpectedKernelType(exe_ctx))));
      dev_ctx = pool.Get(kernel_type_->place_);
// NOTE(Liu-xiandong): Kernels registered for KP have library_type[KP],
// but the default library_type is Plain, so we need to modify the
// library_type here, otherwise it can't work.
#ifdef PADDLE_WITH_XPU_KP
      if (paddle::platform::is_xpu_place(kernel_type_->place_)) {
        bool use_xpu_kp_kernel_rt =
            FLAGS_run_kp_kernel &&
            paddle::platform::is_xpu_kp_support_op(
                type_, framework::TransToPhiDataType(kernel_type_->data_type_));
        bool use_xpu_kp_kernel_debug =
            paddle::platform::is_in_xpu_kpwhite_list(type_);
        if (use_xpu_kp_kernel_rt) {
          VLOG(3) << "phi xpu_kp using rt mode in static graph";
        }
        if (use_xpu_kp_kernel_debug) {
          VLOG(3) << "phi xpu_kp using debug mode in static graph";
        }
        bool is_xpu_kp_support =
            (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
        if (is_xpu_kp_support) {
          auto expected_kernel_key_library_type = kernel_type_->library_type_;
          kernel_type_->library_type_ = LibraryType::kKP;
          VLOG(3) << "modifing XPU KP kernel in static graph: "
1737
                  << phi_kernel_name
1738
                  << ", using_kernel_key:" << *kernel_type_.get();
1739
          auto try_phi_kernel_key =
              TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
          if (!phi::KernelFactory::Instance().HasKernel(phi_kernel_name,
                                                        try_phi_kernel_key)) {
            kernel_type_->library_type_ = expected_kernel_key_library_type;
            VLOG(3) << "modify XPU KP kernel in static graph: "
1745
                    << phi_kernel_name << " is failed " << *kernel_type_.get();
          } else {
            use_phi_xpu_kp = true;
            VLOG(3) << "modify XPU KP kernel in static graph: "
1749
                    << phi_kernel_name << " is succeed " << *kernel_type_.get();
          }
        }
      }
#endif
      phi_kernel_key = TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
      phi_kernel_.reset(
          new phi::Kernel(phi::KernelFactory::Instance().SelectKernel(
              phi_kernel_name, phi_kernel_key)));

      if (phi_kernel_->IsValid()) {
        VLOG(6) << "Static graph mode ChoosePhiKernel - kernel name: "
                << phi_kernel_name << " | kernel key: " << phi_kernel_key
                << " | kernel: " << *phi_kernel_;
      } else {
        VLOG(6) << "Static graph mode ChoosePhiKernel - kernel `"
                << phi_kernel_name << "` not found.";
      }
    } else {
      phi_kernel_name = kernel_signature_->name;
// NOTE(jiahongyu): The registered MKLDNN kernels have library_type =
// LibraryType::kMKLDNN and data_layout_ = DataLayout::ONEDNN. But the default
// values are kPlain, so we need to modify the library_type and data_layout_
// here. There are three statements in the if condition:
// 1. Whether the mkldnn kernel falls back to the plain kernel;
// 2. Whether this op has a specific implementation;
// 3. Whether the mkldnn kernel can be used.
#ifdef PADDLE_WITH_MKLDNN
      if (!this->DnnFallback() &&
          !paddle::platform::in_mkldnn_white_list(type_) &&
          this->CanMKLDNNBeUsed(exe_ctx, kernel_type_->data_type_)) {
        kernel_type_->library_type_ = framework::LibraryType::kMKLDNN;
        kernel_type_->data_layout_ = framework::DataLayout::ONEDNN;
      }
#endif

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      if (this->CanCUDNNBeUsed(exe_ctx, kernel_type_->data_type_)) {
        kernel_type_->library_type_ = framework::LibraryType::kCUDNN;
      }
#endif

// NOTE(Liu-xiandong): In my ctest runs, this branch is never executed;
// I can't understand why, it's really confusing.
// But we still need to keep it to avoid errors.
#ifdef PADDLE_WITH_XPU_KP
      if (paddle::platform::is_xpu_place(kernel_type_->place_)) {
        bool use_xpu_kp_kernel_rt =
            FLAGS_run_kp_kernel &&
            paddle::platform::is_xpu_kp_support_op(
                type_, framework::TransToPhiDataType(kernel_type_->data_type_));
        bool use_xpu_kp_kernel_debug =
            paddle::platform::is_in_xpu_kpwhite_list(type_);
        if (use_xpu_kp_kernel_rt) {
          VLOG(3) << "phi xpu_kp using rt mode in static graph";
        }
        if (use_xpu_kp_kernel_debug) {
          VLOG(3) << "phi xpu_kp using debug mode in static graph";
        }
        bool is_xpu_kp_support =
            (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
        if (is_xpu_kp_support) {
          auto expected_kernel_key_library_type = kernel_type_->library_type_;
          kernel_type_->library_type_ = LibraryType::kKP;
          VLOG(3) << "modifying XPU KP kernel in static graph: "
                  << phi_kernel_name
                  << ", using_kernel_key:" << *kernel_type_.get();
          auto try_phi_kernel_key =
              TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
          if (!phi::KernelFactory::Instance().HasKernel(phi_kernel_name,
                                                        try_phi_kernel_key)) {
            kernel_type_->library_type_ = expected_kernel_key_library_type;
            VLOG(3) << "modifying XPU KP kernel in static graph: "
                    << phi_kernel_name << " failed " << *kernel_type_.get();
          } else {
            use_phi_xpu_kp = true;
            VLOG(3) << "modify XPU KP kernel in static graph: "
1826
                    << phi_kernel_name << " is succeed " << *kernel_type_.get();
          }
        }
      }
#endif
      phi_kernel_key = TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
    }

// NOTE(Liu-xiandong): Determine whether the selected kernel is valid.
// If not, use the kernel registered in fluid; and if fluid does not
// contain the related heterogeneous kernel, use the phi CPU kernel.
#if defined(PADDLE_WITH_XPU)
    bool is_xpu_unsupport =
        paddle::platform::is_xpu_place(kernel_type_->place_) &&
        !paddle::platform::is_xpu_support_op(
            type_, framework::TransToPhiDataType(kernel_type_->data_type_));
#endif
#ifdef PADDLE_WITH_XPU_KP
    bool use_xpu_kp_kernel_rt =
        paddle::platform::is_xpu_place(kernel_type_->place_) &&
        FLAGS_run_kp_kernel &&
        paddle::platform::is_xpu_kp_support_op(
            type_, framework::TransToPhiDataType(kernel_type_->data_type_));
    bool use_xpu_kp_kernel_debug =
        paddle::platform::is_xpu_place(kernel_type_->place_) &&
        paddle::platform::is_in_xpu_kpwhite_list(type_);
    bool is_xpu_kp_support = (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
#endif

    bool in_custom_back_list = false;
#if defined(PADDLE_WITH_CUSTOM_DEVICE)
    in_custom_back_list =
        phi::backends::custom_device::is_in_custom_black_list(phi_kernel_name);
#endif
    if (phi_kernel_->IsValid() && !in_custom_back_list
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
        && !is_xpu_unsupport
#endif
#if defined(PADDLE_WITH_XPU_KP)
        && (!is_xpu_unsupport || use_phi_xpu_kp)
#endif
    ) {
      run_phi_kernel_ = true;
    } else {
      auto& all_op_kernels = AllOpKernels();
      auto kernels_iter = all_op_kernels.find(type_);

// NOTE(Liu-xiandong): If we can't find a heterogeneous kernel in phi,
// we need to select the heterogeneous kernel in fluid; but the kernels
// registered for KP use library_type[KP], so we need to modify it.
#ifdef PADDLE_WITH_XPU_KP
      if (is_xpu_kp_support) {
        kernel_type_->library_type_ = LibraryType::kKP;
      }
#endif
      if (kernels_iter == all_op_kernels.end() ||
          kernels_iter->second.find(*kernel_type_.get()) ==
              kernels_iter->second.end()
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
          || is_xpu_unsupport
#endif
#if defined(PADDLE_WITH_XPU_KP)
          || (is_xpu_unsupport && !is_xpu_kp_support)
#endif
#if defined(PADDLE_WITH_CUSTOM_DEVICE)
          || in_custom_back_list
#endif
      ) {
        fallback_to_cpu = true;
        if (in_custom_back_list) {
          VLOG(3) << "fluid in black list: " << phi_kernel_name;
        }
        auto phi_cpu_kernel_key = FallBackToCpu(phi_kernel_key, *this);
        phi_kernel_.reset(
            new phi::Kernel(phi::KernelFactory::Instance().SelectKernel(
                phi_kernel_name, phi_cpu_kernel_key)));

        dev_ctx = pool.Get(platform::CPUPlace());
        if (phi_kernel_->IsValid()) {
          VLOG(6) << "Static graph mode PrepareImpl - kernel name: "
                  << phi_kernel_name << " | kernel key: " << phi_cpu_kernel_key
                  << " | kernel: " << *phi_kernel_;
          run_phi_kernel_ = true;
        }
      }
    }
  }
  if (!run_phi_kernel_) {
    if (kernel_type_.get() == nullptr || kernel_func_.get() == nullptr) {
      ChooseKernel(exe_ctx);
      dev_ctx = pool.Get(kernel_type_->place_);
    }
  }

  // do data transform
  std::vector<std::string> transfered_inplace_vars;
  Scope* transfer_scope = nullptr;
  {
    platform::RecordEvent record_event("prepare_data",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);
    if (need_prepare_data_) {
      transfer_scope =
          PrepareData(scope,
                      framework::TransOpKernelTypeToPhiKernelKey(*kernel_type_),
                      &transfered_inplace_vars,
                      runtime_ctx,
                      dev_ctx->GetPlace());
    }
  }
  // exec scope is the scope that kernel actually executed on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!all_kernels_must_compute_runtime_shape_) {
    platform::RecordEvent record_event("infer_shape",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);
    RuntimeInferShapeContext infer_shape_ctx(*this, *runtime_ctx);
    this->Info().infer_shape_(&infer_shape_ctx);
    record_event.End();
    platform::RecordOpInfoSupplement(
        Type(), Attrs(), infer_shape_ctx, *runtime_ctx, Id());
  }

  if (FLAGS_enable_unused_var_check) {
    GetThreadLocalUsedVarNameSet()->clear();
  }

  // TODO(panyx0718): ExecutionContext should only depend on RuntimeContext,
  // not Scope. Imperative mode only passes inputs and gets outputs.
  {
    platform::RecordEvent record_event("compute",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);
    if (run_phi_kernel_ && phi_kernel_->GetKernelRegisteredType() ==
                               phi::KernelRegisteredType::FUNCTION) {
      phi::KernelContext phi_kernel_context;
      if (enable_cache_runtime_context_ && !need_prepare_phi_data_ &&
          !need_prepare_data_) {
        // TODO(inference): Now we only support the dense_tensor cache; we may
        // support ScalarTensor and SparseTensor in the future.
        bool all_dense_tensor_input_{true};
        for (auto& iter : Inputs()) {
          for (auto& name : iter.second) {
            all_dense_tensor_input_ &=
                scope.FindVar(name)->IsType<phi::DenseTensor>();
          }
        }

        std::vector<phi::DenseTensor*> tensors;
        if (all_dense_tensor_input_) {
          for (auto& iter : Inputs()) {
            for (auto& name : iter.second) {
              auto* t = scope.FindVar(name)->GetMutable<phi::DenseTensor>();
              tensors.push_back(t);
            }
          }
        }

        impl_.reset(
            new CacheImpl(new phi::KernelContext(),
                          new RuntimeInferShapeContext(*this, *runtime_ctx),
                          tensors,
                          HasAttr(CacheImpl::kNotAllowInferShapeCahce)));
        BuildPhiKernelContext(*runtime_ctx, dev_ctx, impl_->getKernelContext());
        (*phi_kernel_)(impl_->getKernelContext());
      } else {
        phi::KernelContext phi_kernel_context;
        // Do data transform before building KernelContext
        // TODO(zhiqiu): support TransferInplaceVarsBack
        BuildPhiKernelContext(*runtime_ctx, dev_ctx, &phi_kernel_context);
        (*phi_kernel_)(&phi_kernel_context);
      }
    } else if (run_phi_kernel_ && phi_kernel_->GetKernelRegisteredType() ==
                                      phi::KernelRegisteredType::STRUCTURE) {
      ExecutionContext execution_context(
          *this, exec_scope, *dev_ctx, *runtime_ctx);
      (*phi_kernel_)(&execution_context);
    } else {
      (*kernel_func_)(
          ExecutionContext(*this, exec_scope, *dev_ctx, *runtime_ctx));
    }
    if (fallback_to_cpu) {
      phi_kernel_.release();
    }
  }

  if (!transfered_inplace_vars.empty()) {
    // an inplace variable has been transferred.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  // See [ Why need handle complex gradient to real gradient? ]
  // Only handle the case where the current kernel data type is complex
  if (framework::IsComplexType(kernel_type_->data_type_)) {
    HandleComplexGradToRealGrad(scope, runtime_ctx);
  }

  if (FLAGS_enable_unused_var_check) {
    // skip ops that use mkldnn because they have a different memory reuse
    // strategy.
    // use attr here because some GradMakers (like ActivationGradOpMaker) add
    // input when use_mkldnn=true;
    if (!(HasAttr("use_mkldnn") && Attr<bool>("use_mkldnn"))) {
      CheckUnusedVar(*this, scope);
    }
  }

  /*For profiling/benchmark only*/
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
#endif
    VLOG(4) << "Operator(" << Type() << "): context wait and get last error";
  }

  if (FLAGS_check_nan_inf) {
    try {
      framework::details::CheckOpHasNanOrInf(*this, exec_scope, place);
    } catch (...) {
      const std::vector<std::string>* callstack = nullptr;
      auto attrs = Attrs();
      auto iter =
          attrs.find(OpProtoAndCheckerMaker::OpCreationCallstackAttrName());
      if (iter != attrs.end()) {
        callstack = &PADDLE_GET_CONST(std::vector<std::string>, iter->second);
        if (callstack->empty()) callstack = nullptr;
      }
      std::ostringstream sout;
      if (callstack) {
        if (FLAGS_call_stack_level > 1) {
          sout << "\n\n  Compile Traceback (most recent call last):";
        } else {
          sout << "In user code:\n";
        }
        for (auto& line : *callstack) {
          sout << "\n  " << line;
        }
      }
      std::cout << sout.str() << std::endl;
      std::rethrow_exception(std::current_exception());
    }
  }

  // To solve issue #15032, have a discussion with @Luotao for cpu inference,
  // do not cache transfer scope, hence in this case delete transfer scope
  // after run to avoid memory leak
  if (transfer_scope && !run_by_executor_ && !enable_cache_transfer_scope_) {
    scope.DeleteScope(transfer_scope);
  }
}

OpKernelType OperatorWithKernel::InnerGetExpectedKernelType(
    const ExecutionContext& ctx) const {
  phi::KernelKey phi_kernel_key = this->GetExpectedKernelType(ctx);
  auto expected_kernel_key =
      framework::TransPhiKernelKeyToOpKernelType(phi_kernel_key);

// NOTE(jiahongyu): PADDLE_WITH_MKLDNN codes are moved outside function
// GetExpectedKernelType, so that if MKLDNN can be used, the library_type_ and
// data_layout_ of expected_kernel_key need to be adjusted. There are three
// statements in the if condition:
// 1. Whether the mkldnn kernel falls back to the plain kernel;
// 2. Whether this op has a specific implementation;
// 3. Whether the mkldnn kernel can be used.
#ifdef PADDLE_WITH_MKLDNN
  if (!this->DnnFallback() && !paddle::platform::in_mkldnn_white_list(type_) &&
      this->CanMKLDNNBeUsed(ctx, expected_kernel_key.data_type_)) {
    expected_kernel_key.library_type_ = framework::LibraryType::kMKLDNN;
    expected_kernel_key.data_layout_ = framework::DataLayout::ONEDNN;
  }
#endif

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (this->CanCUDNNBeUsed(ctx, expected_kernel_key.data_type_)) {
    expected_kernel_key.library_type_ = framework::LibraryType::kCUDNN;
  }
#endif

  if (HasAttr("op_device")) {
    if (Attr<std::string>("op_device") == "cpu") {
      expected_kernel_key.place_ = platform::CPUPlace();
    } else if (Attr<std::string>("op_device").find("gpu") !=
               std::string::npos) {
      auto device = Attr<std::string>("op_device");
      size_t pos = device.find(':');
      if (pos != std::string::npos) {
        device = device.substr(0, pos);
        LOG_FIRST_N(WARNING, 1)
            << "Device index is only supported under pipeline parallelism, "
            << "so it will be ignored.";
      }
      // when the Op that does not have GPUKernel is assigned to GPU, the
      // CPUKernel will be executed and a warning will be given at the same
      // time.
      expected_kernel_key.place_ = platform::CPUPlace();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      if (SupportGPU()) {
        auto& dev_ctx = ctx.device_context();
        expected_kernel_key.place_ = dev_ctx.GetPlace();
      }
#endif
      if (platform::is_cpu_place(expected_kernel_key.place_)) {
        LOG_FIRST_N(WARNING, 1)
            << "Op(" << type_
            << ") has no CUDA implementation. It will be assigned to CPUPlace.";
      }
    } else if (Attr<std::string>("op_device").find("npu") !=
               std::string::npos) {
      auto device = Attr<std::string>("op_device");
      size_t pos = device.find(':');
      if (pos != std::string::npos) {
        device = device.substr(0, pos);
        LOG_FIRST_N(WARNING, 1)
            << "Device index is only supported under pipeline parallelism, "
            << "so it will be ignored.";
      }
      // when the Op that does not have NPUKernel is assigned to NPU, the
      // CPUKernel will be executed and a warning will be given at the same
      // time.
      expected_kernel_key.place_ = platform::CPUPlace();
#ifdef PADDLE_WITH_CUSTOM_DEVICE
      if (SupportCustomDevice()) {
        auto& dev_ctx = ctx.device_context();
        expected_kernel_key.place_ = dev_ctx.GetPlace();
      }
#endif
      if (platform::is_cpu_place(expected_kernel_key.place_)) {
        LOG_FIRST_N(WARNING, 1)
            << "Op(" << type_
            << ") has no NPU implementation. It will be assigned to CPUPlace.";
      }
    } else if (Attr<std::string>("op_device").find("xpu") !=
               std::string::npos) {
      auto device = Attr<std::string>("op_device");
      size_t pos = device.find(':');
      if (pos != std::string::npos) {
        device = device.substr(0, pos);
        LOG_FIRST_N(WARNING, 1)
            << "Device index is only supported under pipeline parallelism, "
            << "so it will be ignored.";
      }
      // when the Op that does not have XPUKernel is assigned to XPU, the
      // CPUKernel will be executed and a warning will be given at the same
      // time.
      expected_kernel_key.place_ = platform::CPUPlace();
#ifdef PADDLE_WITH_XPU
      if (SupportXPU()) {
        auto& dev_ctx = ctx.device_context();
        expected_kernel_key.place_ = dev_ctx.GetPlace();
      }
#endif
      if (platform::is_cpu_place(expected_kernel_key.place_)) {
        LOG_FIRST_N(WARNING, 1)
            << "Op(" << type_
            << ") has no XPU implementation. It will be assigned to CPUPlace.";
2186 2187 2188
      }
    }
  }

  if (platform::places_are_same_class(expected_kernel_key.place_,
                                      ctx.GetPlace())) {
    expected_kernel_key.place_ = ctx.GetPlace();
  }

  VLOG(3) << "op type:" << type_
          << ", expected_kernel_key:" << expected_kernel_key;
  return expected_kernel_key;
}
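
// Example (hypothetical): an op carrying the "op_device" attribute is pinned
// accordingly, falling back to CPU when no matching kernel exists:
//   op_desc.SetAttr("op_device", std::string("gpu:0"));
//   // the ":0" index is ignored outside pipeline parallelism, and the op
//   // runs on CPU (with a one-time warning) if it has no CUDA kernel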

phi::KernelKey OperatorWithKernel::ChoosePhiKernel(
    const ExecutionContext& ctx) const {
  std::string phi_kernel_name;
  if (phi::KernelFactory::Instance().HasStructuredKernel(type_)) {
    kernel_signature_.reset(new phi::KernelSignature(type_.c_str()));
  } else {
    kernel_signature_.reset(
        new phi::KernelSignature(std::move(GetExpectedPhiKernelArgs(ctx))));
  }
  VLOG(6) << *kernel_signature_.get();
  phi_kernel_name = kernel_signature_->name;
  kernel_type_.reset(
      new OpKernelType(std::move(InnerGetExpectedKernelType(ctx))));

  auto phi_kernel_key = TransOpKernelTypeToPhiKernelKey(*kernel_type_.get());
  phi_kernel_.reset(new phi::Kernel(phi::KernelFactory::Instance().SelectKernel(
      phi_kernel_name, phi_kernel_key)));

  if (phi_kernel_->IsValid()) {
    VLOG(6) << "Static graph mode ChoosePhiKernel - kernel name: "
            << phi_kernel_name << " | kernel key: " << phi_kernel_key
            << " | kernel: " << *phi_kernel_;
  } else {
    VLOG(6) << "Static graph mode ChoosePhiKernel - kernel `" << phi_kernel_name
            << "` not found.";
  }
  return phi_kernel_key;
}
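
// Example (hypothetical): ChoosePhiKernel caches the kernel signature,
// kernel type, and phi kernel on the op; a caller typically only inspects
// the returned key and the validity of the selection:
//   auto key = op->ChoosePhiKernel(exe_ctx);
//   // phi_kernel_->IsValid() then tells whether a kernel was actually found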

void OperatorWithKernel::ChooseKernel(const ExecutionContext& ctx) const {
  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  PADDLE_ENFORCE_NE(
      kernels_iter,
      all_op_kernels.end(),
      platform::errors::Unimplemented(
          "There are no kernels which are registered in the %s operator.",
          type_));

  OpKernelMap& kernels = kernels_iter->second;

  auto expected_kernel_key = InnerGetExpectedKernelType(ctx);

  auto kernel_iter = kernels.find(expected_kernel_key);

#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: fallbacking to PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif

#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
  if (platform::is_xpu_place(expected_kernel_key.place_) &&
      (kernel_iter == kernels.end() ||
       !paddle::platform::is_xpu_support_op(
           type_,
           framework::TransToPhiDataType(expected_kernel_key.data_type_)))) {
    VLOG(3) << "fluid missing XPU kernel: " << type_
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif

#ifdef PADDLE_WITH_XPU_KP
  if (paddle::platform::is_xpu_place(expected_kernel_key.place_)) {
    bool use_xpu_kp_kernel_rt =
        FLAGS_run_kp_kernel &&
        paddle::platform::is_xpu_kp_support_op(
            type_,
            framework::TransToPhiDataType(expected_kernel_key.data_type_));
    bool use_xpu_kp_kernel_debug =
        paddle::platform::is_in_xpu_kpwhite_list(type_);
    if (use_xpu_kp_kernel_rt) {
      VLOG(3) << "fluid xpu_kp using rt mode ";
    }
    if (use_xpu_kp_kernel_debug) {
      VLOG(3) << "fluid xpu_kp using debug mode ";
    }
    bool is_xpu_kp_support = (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
    if (is_xpu_kp_support) {
      auto cache_expected_kernel_key_library_type =
          expected_kernel_key.library_type_;
      expected_kernel_key.library_type_ = LibraryType::kKP;
      kernel_iter = kernels.find(expected_kernel_key);
      // If no corresponding kernel is found while is_xpu_kp_support is on,
      // and fluid has not registered a related kernel either, it can't work
      // and errors out as before.
      if (kernel_iter == kernels.end()) {
        expected_kernel_key.library_type_ =
            cache_expected_kernel_key_library_type;
        expected_kernel_key.place_ = platform::CPUPlace();
        kernel_iter = kernels.find(expected_kernel_key);
      } else {
        VLOG(3) << "fluid using XPU KP kernel: " << type_
                << ", using_kernel_key:" << expected_kernel_key;
      }
    }
    bool is_xpu_unsupport = (!paddle::platform::is_xpu_support_op(
        type_, framework::TransToPhiDataType(expected_kernel_key.data_type_)));
    if (!is_xpu_kp_support &&
        (kernel_iter == kernels.end() || is_xpu_unsupport)) {
      VLOG(3) << "fluid missing XPU kernel: " << type_
              << ", expected_kernel_key:" << expected_kernel_key
              << ", fallbacking to CPU one!";
      expected_kernel_key.place_ = platform::CPUPlace();
      kernel_iter = kernels.find(expected_kernel_key);
    }
  }
#endif

#ifdef PADDLE_WITH_IPU
  if (kernel_iter == kernels.end() &&
      platform::is_ipu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing IPU kernel: " << type_
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif

#ifdef PADDLE_WITH_CUSTOM_DEVICE
  if (kernel_iter == kernels.end() &&
      platform::is_custom_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing " << expected_kernel_key.place_.GetDeviceType()
            << " kernel: " << type_
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  PADDLE_ENFORCE_NE(
      kernel_iter,
      kernels.end(),
      platform::errors::NotFound("Operator (%s) does not have kernel for %s.",
                                 type_,
                                 KernelTypeToString(expected_kernel_key)));

  std::lock_guard<std::mutex> lock(cache_update_mutex_);
  if (kernel_type_.get() == nullptr || kernel_func_.get() == nullptr) {
    kernel_type_.reset(new OpKernelType(expected_kernel_key));
    kernel_func_.reset(new OpKernelFunc(kernel_iter->second));
  }
}
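
// Fallback summary (informal): ChooseKernel first looks up the expected
// kernel, then relaxes it step by step: missing MKLDNN -> plain, unsupported
// XPU / XPU-KP -> CPU, missing IPU -> CPU, missing custom device -> CPU, and
// only raises NotFound when every candidate is absent.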

void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope,
    const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* origin_var = scope.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(origin_var,
                            platform::errors::InvalidArgument(
                                "The variable[%s] is nullptr.", var_name));
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(origin_var);
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(var,
                            platform::errors::InvalidArgument(
                                "The variable[%s] is nullptr.", var_name));
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}
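
// Note (informal): after the kernel has run in transfer_scope, each inplaced
// variable's original tensor is re-pointed (via ShareDataWith) at the
// transformed tensor, so the results become visible in the caller's scope.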

void OperatorWithKernel::HandleComplexGradToRealGrad(
    const Scope& scope, RuntimeContext* ctx) const {
  for (auto& var_name_item : Outputs()) {
    std::vector<Variable*>& output_vars = ctx->outputs[var_name_item.first];
    for (size_t i = 0; i < var_name_item.second.size(); ++i) {
      // 1. find grad_var & check whether is complex tensor
      auto var_name = var_name_item.second[i];
      auto orig_var_name = GradOriginalVarName(var_name);
      // only focus on gradient var
      if (var_name == orig_var_name) {
        continue;
      }
      auto* grad_var = output_vars[i];
      // skip nullptr var
      if (grad_var == nullptr) {
        continue;
      }
      // don't process LoDTensorArray temporarily,
      // add support if necessary for complex number calculations in the future
      if (!VarIsTensor(*grad_var)) {
        continue;
      }
      auto* grad_tensor =
          GetMutableLoDTensorOrSelectedRowsValueFromVar(grad_var);
      // skip nullptr tensor
      if (grad_tensor == nullptr || !grad_tensor->IsInitialized()) {
        continue;
      }
      // only focus on complex dtype now
      auto src_type = framework::TransToProtoVarType(grad_tensor->dtype());
      if (!IsComplexType(src_type)) {
        continue;
      }

      // 2. find forward var & check whether need to cast
      auto* var = scope.FindVar(orig_var_name);
      // if forward var not exists, do nothing
      if (var == nullptr) {
        continue;
      }
      if (!VarIsTensor(*var)) {
        continue;
      }
      const auto* tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      PADDLE_ENFORCE_NOT_NULL(
          tensor,
          platform::errors::Unavailable(
              "Forward tensor is nullptr when handle complex data to real."));
      // only need record type, the allocation may have been released
      auto dst_type = framework::TransToProtoVarType(tensor->dtype());
      // only focus on real dtype and need casting
      if (IsComplexType(dst_type)) {
        continue;
      }

      // 3. cast complex grad to real grad
      VLOG(6) << "Transform " << framework::DataTypeToString(src_type)
              << " var `" << var_name << "` to "
              << framework::DataTypeToString(dst_type)
              << " real var in static graph.";
      phi::DenseTensor out;
      TransComplexToReal(dst_type, src_type, *grad_tensor, &out);
      SetTensorToVariable(*grad_var, out, grad_var);
    }
  }
}
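
// Example (hypothetical): if a forward var `x` is float32 but its gradient
// `x@GRAD` was produced as complex64, the pass above casts `x@GRAD` back to
// float32, keeping only the real part (see the referenced note
// [ Why need handle complex gradient to real gradient? ]).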

Scope* OperatorWithKernel::PrepareData(
    const Scope& scope,
    const phi::KernelKey& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars,
    RuntimeContext* ctx,
    const phi::Place& place) const {
  Scope* new_scope = nullptr;

  const std::unordered_set<std::string>* no_buffer_ins = nullptr;
  if (info_) {
    auto& no_buffer_inferer = info_->NoNeedBufferVarsInferer();
    // Some op may not register NoNeedBufferVarsInferer
    if (no_buffer_inferer) {
      no_buffer_ins = &(no_buffer_inferer(Inputs(), Outputs(), Attrs()));
      if (no_buffer_ins->empty()) no_buffer_ins = nullptr;
    }
  }

  auto has_infer_varkernel_fn =
      (run_phi_kernel_ && phi_kernel_->get_kerneltype_forvar_fn_ != nullptr);
  phi::AttributeMap infer_attrs{};
  auto fluid_attrs = Attrs();
  phi::GetKernelTypeForVarContext infer_varkernel_context =
      BuildGetKernelTypeForVarContext(expected_kernel_key,
                                      fluid_attrs,
                                      &infer_attrs,
                                      has_infer_varkernel_fn);

  const auto& name_map = Inputs();
  auto prepare_input_data = [&](const std::string& in_name,
                                std::vector<Variable*>* in_vars,
                                const phi::TensorArgDef* in_def,
                                bool should_skip_input) -> void {
    auto& name_vec = name_map.at(in_name);
    for (size_t i = 0; i < in_vars->size(); ++i) {
      const auto& var_name = name_vec[i];
      auto* var = in_vars->at(i);

      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);

      // When no_buffer_ins is set, checking phi::DenseTensor::holder_ is
      // not thread safe, and for the infershape scenario the checks to be
      // omitted are not really needed.
      if (should_skip_input == true) {
#ifdef PADDLE_WITH_MKLDNN
        // Var without buffer may be needed
        // for some situation like InferShape().
        // In this situation We cannot skip Var analysis, as
2495
        // oneDNN shape of Var may differ from kNHWC Var
2496 2497
        // In such situation corressponding resized Var
        // has to be created and registered
2498
        if ((tensor_in->layout() == DataLayout::ONEDNN) &&
2499
            (var->IsType<phi::DenseTensor>() == true) &&
2500
            (expected_kernel_key.layout() != DataLayout::ONEDNN) &&
2501 2502
            (phi::OneDNNContext::tls().get_cur_paddle_data_layout() ==
             DataLayout::kNHWC) &&
2503
            (tensor_in->dims().size() >= 3)) {
2504
          // Mixed execution : oneDNN and GPU is not supported!
2505 2506 2507 2508
          if (!new_scope) {
            new_scope = &scope.NewScope();
          }
          auto* trans_var = new_scope->Var(var_name);
2509
          in_vars->at(i) = trans_var;
2510
          auto out = trans_var->GetMutable<phi::DenseTensor>();
2511
          out->Resize(tensor_in->dims());
2512
          phi::funcs::MatchShapeToLayout(
2513
              out, tensor_in->layout(), DataLayout::kNHWC);
2514
          VLOG(7) << "Created reshaped dummy input based on oneDNN "
2515
                     "phi::DenseTensor , "
2516
                     "but kNHWC layout"
2517
                  << in_name << " in Operator " << type_;
2518
        } else {
2519 2520
          VLOG(7) << "Skip scanning input " << in_name << " in Operator "
                  << type_;
2521 2522 2523 2524 2525
        }
#endif
        continue;
      }

      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var =
          GetKernelTypeForVar(in_name, *tensor_in, expected_kernel_key);
      if (has_infer_varkernel_fn) {
        infer_varkernel_context.SetVarName(const_cast<std::string*>(&in_name));
        infer_varkernel_context.SetDenseTensor(
            const_cast<phi::DenseTensor*>(tensor_in));
        kernel_type_for_var =
            phi_kernel_->get_kerneltype_forvar_fn_(&infer_varkernel_context);
      }
      bool need_trans_dtype =
          NeedTransformDataType(expected_kernel_key, kernel_type_for_var);
      bool need_trans_layout = NeedTransformLayout(
          kernel_type_for_var.layout(), expected_kernel_key.layout());
      if (!need_trans_dtype && !need_trans_layout) {
        if (!run_phi_kernel_ &&
            backends_are_same_class(kernel_type_for_var.backend(),
                                    expected_kernel_key.backend())) {
          continue;
        }
      }

      std::unique_ptr<phi::KernelKey> new_expected_kernel_key = nullptr;
      if (run_phi_kernel_) {
        if (phi_kernel_->GetKernelRegisteredType() ==
            phi::KernelRegisteredType::STRUCTURE) {
          if (!backends_are_same_class(kernel_type_for_var.backend(),
                                       expected_kernel_key.backend())) {
            new_expected_kernel_key =
                std::make_unique<phi::KernelKey>(expected_kernel_key.backend(),
                                                 expected_kernel_key.layout(),
                                                 expected_kernel_key.dtype());
          }
        } else if (in_def != nullptr &&  // KernelRegisteredType is Function
                   in_def->backend != phi::Backend::ALL_BACKEND) {
          auto tensor_backend = phi::TransToPhiBackend(tensor_in->place());
          if ((in_def->backend != tensor_backend &&
               !(in_def->backend == phi::Backend::GPUDNN &&
                 tensor_backend == phi::Backend::GPU) &&
               !(in_def->backend == phi::Backend::KPS &&
                 tensor_backend == phi::Backend::XPU) &&
               !(in_def->backend == phi::Backend::ONEDNN &&
                 tensor_backend == phi::Backend::CPU)) ||
              tensor_in->place().GetType() == AllocationType::GPUPINNED) {
            new_expected_kernel_key =
                std::make_unique<phi::KernelKey>(in_def->backend,
                                                 expected_kernel_key.layout(),
                                                 expected_kernel_key.dtype());
          }
        }
      }

      if (!need_trans_dtype && !need_trans_layout) {
        if (run_phi_kernel_ && new_expected_kernel_key == nullptr) {
          continue;
        }
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to "
              << (new_expected_kernel_key ? *new_expected_kernel_key
                                          : expected_kernel_key);

      // In the inference scenario, the scopes will be reused across the
      // batches, so the `new_scope` here will result in GPU memory explosion
      // over the running of operators.
      // We use a thread_local cache to fix that issue, the key in the cache is
      // the combination of the `scope` argument, from_kernel_type,
      // target_kernel_type.
      // Have a discussion with @Superjomn or the inference developers if some
      // changes on this logic for this macro might not be tested on the
      // other scenarios.
      // If this op is not called by an Executor or ParallelExecutor, it should
      // be called by a NaiveExecutor; the NaiveExecutor will cache the scopes
      // and variables, which behaves quite differently.
      //
      // To solve issue #15032, have a discussion with @Luotao for cpu
      // inference, for all cpu kernels cases without GPU participation, here
      // not do transfer scope caching, and cpu inference performance is not
      // impacted by test.
      enable_cache_transfer_scope_ = false;
      if (!run_by_executor_) {
        if (new_expected_kernel_key) {
          if (kernel_type_for_var.backend() == phi::Backend::GPU ||
              kernel_type_for_var.backend() == phi::Backend::GPUDNN ||
              new_expected_kernel_key->backend() == phi::Backend::GPU ||
              new_expected_kernel_key->backend() == phi::Backend::GPUDNN) {
            new_scope = TryCreateTransferScope(
                kernel_type_for_var, *new_expected_kernel_key, &scope);
            enable_cache_transfer_scope_ = true;
          }
        } else if (kernel_type_for_var.backend() == phi::Backend::GPU ||
                   kernel_type_for_var.backend() == phi::Backend::GPUDNN ||
                   expected_kernel_key.backend() == phi::Backend::GPU ||
                   expected_kernel_key.backend() == phi::Backend::GPUDNN) {
C
csy0225 已提交
2624
          new_scope = TryCreateTransferScope(
2625 2626 2627
              kernel_type_for_var, expected_kernel_key, &scope);
          enable_cache_transfer_scope_ = true;
        }
2628
      }
2629

2630
      if (!new_scope) {
Y
yuyang18 已提交
2631 2632
        new_scope = &scope.NewScope();
      }
C
csy0225 已提交
2633 2634 2635 2636 2637 2638 2639 2640 2641 2642
      // For inference, if a gpu model has an op which could only run on CPU,
      // each result of different input will be the same as the first one.
      // The reason is that if a gpu tensor is the input of a cpu kernel,
      // we will create a new cpu tensor in the new scope.
      // However, if enable_cache_runtime_context_, we get the cpu tensor each
      // time, not the gpu tensor. Thus, we set pre_scope_ = nullptr
      // to trigger `new RuntimeContext()` in RunImpl().
      if (enable_cache_runtime_context_) {
        pre_scope_ = nullptr;
      }

      // Create new var with the same name in transfer scopes
      auto* trans_var = new_scope->Var(var_name);
      in_vars->at(i) = trans_var;

      // Find if inplace exists between input and output.
      // If inplace exists, set the newly created var to the inplaced output,
      // and record its name in transfered_inplace_vars.
      for (auto& pair : Outputs()) {
        for (size_t j = 0; j < pair.second.size(); ++j) {
          if (pair.second[j] == var_name) {
            VLOG(4) << "Found inplace between input(" << in_name
                    << ") and output(" << pair.first
                    << "), the variable name is " << var_name;
            ctx->outputs[pair.first][j] = trans_var;
            transfered_inplace_vars->emplace_back(var_name);
          }
        }
      }

      // Do transfer
      phi::DenseTensor out;
      TransformData(
          new_expected_kernel_key ? *new_expected_kernel_key
                                  : expected_kernel_key,
          kernel_type_for_var,
          *tensor_in,
          &out,
          new_expected_kernel_key
              ? phi::TransToPhiPlace(new_expected_kernel_key->backend())
              : place);
      SetTensorToVariable(*var, out, trans_var);
    }
  };

  if (run_phi_kernel_ && phi_kernel_->GetKernelRegisteredType() ==
                             phi::KernelRegisteredType::FUNCTION) {
    const auto& input_names = kernel_signature_->input_names;
    const auto& input_defs = phi_kernel_->args_def().input_defs();
    PADDLE_ENFORCE_EQ(input_names.size(),
                      input_defs.size(),
                      platform::errors::InvalidArgument(
                          "The size of inputs_args names (%d) must be equal to "
                          "the size of kernel input_defs (%d).",
                          input_names.size(),
                          input_defs.size()));
    for (size_t i = 0; i < input_defs.size(); ++i) {
      std::string input_name = input_names[i];
      auto iter = ctx->inputs.find(input_name);
      if (iter == ctx->inputs.end()) {
        continue;
      }
      auto& ins_vector = iter->second;
      bool should_skip_input =
          no_buffer_ins && no_buffer_ins->count(input_name) > 0;

      phi::TensorArgDef in_def = input_defs.at(i);
#ifdef PADDLE_WITH_CUSTOM_DEVICE
      // When the backend of input tensor arg_def is CUSTOM, we need to set it
      // to the actual backend by expected_kernel_key.
      if (in_def.backend == phi::Backend::CUSTOM) {
        in_def.SetBackend(expected_kernel_key.backend());
      }
#endif
      prepare_input_data(input_name, &ins_vector, &in_def, should_skip_input);
    }
#ifdef PADDLE_WITH_MKLDNN
    // For inputs that are Extra, only the MKLDNN kernels will use them
    auto& extra_input_names =
        paddle::operators::ExtraInfoUtils::Instance().GetExtraInputNamesMap(
            Type());
    for (const auto& input_name : extra_input_names) {
      auto iter = ctx->inputs.find(input_name);
      if (iter == ctx->inputs.end()) {
        continue;
      }
      bool should_skip_input =
          no_buffer_ins && no_buffer_ins->count(input_name) > 0;
      std::vector<Variable*>& input_vars = iter->second;
      prepare_input_data(input_name, &input_vars, nullptr, should_skip_input);
    }
#endif
  } else {
    for (auto& var_name_item : Inputs()) {
      bool should_skip_input =
          no_buffer_ins && no_buffer_ins->count(var_name_item.first) > 0;

      std::vector<Variable*>& input_vars = ctx->inputs[var_name_item.first];
      prepare_input_data(
          var_name_item.first, &input_vars, nullptr, should_skip_input);
    }
  }

  // If pre_scope = &scope, it means that scope is cached and the op is not in
  // a while block. If new_scope = nullptr, it means that for each input of
  // this Op, there is no need to do PrepareData. So PrepareData could be
  // skipped in the remaining iterations to save the elapsed time.
  // We do not support skipping PrepareData in while blocks, because the Op's
  // input may be changed by subsequent Ops, which may cause an error.

  // For inference, ops behind a conditional branch aren't supported well,
  // so disable the prepare optimization conservatively.
  bool force_prepare_data = HasAttr("inference_force_prepare_data") &&
                            Attr<bool>("inference_force_prepare_data");
  if (pre_scope_ == &scope && new_scope == nullptr && !force_prepare_data) {
    need_prepare_data_ = false;
  }

  return new_scope;
}
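
// Illustrative sketch (not part of the build; the kernel keys below are
// hypothetical): what PrepareData does for one input var whose placement
// disagrees with the kernel.
//
//   phi::KernelKey kernel_type_for_var(phi::Backend::GPU,
//                                      phi::DataLayout::NCHW,
//                                      phi::DataType::FLOAT32);
//   phi::KernelKey expected_kernel_key(phi::Backend::CPU,
//                                      phi::DataLayout::NCHW,
//                                      phi::DataType::FLOAT32);
//   // PrepareData would then:
//   //   1. create (or fetch from the thread_local cache) a transfer scope,
//   //   2. create a var with the same name inside it,
//   //   3. copy/convert the tensor via TransformData, and
//   //   4. rebind any inplace output to the transferred variable.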

void OperatorWithKernel::ParseInputDataType(
    const Variable* var,
    const std::string& name,
    proto::VarType::Type* data_type) const {
  if (var != nullptr) {
    const phi::DenseTensor* t = nullptr;
    if (var->IsType<phi::DenseTensor>()) {
      t = &var->Get<phi::DenseTensor>();
    } else if (var->IsType<phi::SelectedRows>()) {
      t = &(var->Get<phi::SelectedRows>().value());
    } else if (var->IsType<phi::SparseCooTensor>()) {
      const phi::SparseCooTensor* sp_t = &(var->Get<phi::SparseCooTensor>());
      *data_type = paddle::framework::TransToProtoVarType(sp_t->dtype());
      return;
    } else if (var->IsType<LoDTensorArray>()) {
      auto t_arr = &var->Get<LoDTensorArray>();
      for (size_t j = 0; j < t_arr->size(); j++) {
        if (t_arr->at(j).IsInitialized()) {
          t = &(t_arr->at(j));
        }
      }
    }
    if (t != nullptr) {
      *data_type = paddle::framework::TransToProtoVarType(t->dtype());
    }
  }
}
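
// Illustrative sketch (the input name "X" is hypothetical, not compiled):
// ParseInputDataType leaves *data_type untouched when no initialized tensor
// is found, so callers detect "no dtype" via the -1 sentinel.
//
//   proto::VarType::Type dtype = static_cast<proto::VarType::Type>(-1);
//   ParseInputDataType(ctx.InputVar("X"), "X", &dtype);
//   if (dtype == static_cast<proto::VarType::Type>(-1)) {
//     // no initialized tensor was found under "X"
//   }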

void OperatorWithKernel::ParseMultiInputDataType(
    const std::vector<Variable*>& vars,
    const std::string& name,
    proto::VarType::Type* data_type) const {
  proto::VarType::Type default_data_type =
      static_cast<proto::VarType::Type>(-1);
  for (size_t i = 0; i < vars.size(); ++i) {
    const Variable* var = vars[i];
    if (var != nullptr) {
      const phi::DenseTensor* t = nullptr;
      if (var->IsType<phi::DenseTensor>()) {
        t = &var->Get<phi::DenseTensor>();
      } else if (var->IsType<phi::SelectedRows>()) {
        t = &(var->Get<phi::SelectedRows>().value());
      } else if (var->IsType<phi::SparseCooTensor>()) {
        const phi::SparseCooTensor* sp_t = &(var->Get<phi::SparseCooTensor>());
        PADDLE_ENFORCE_EQ(
            sp_t->initialized(),
            true,
            platform::errors::InvalidArgument("The %s Op's Input Variable `%s` "
                                              "contains uninitialized Tensor.",
                                              Type(),
                                              name));
        proto::VarType::Type tmp =
            paddle::framework::TransToProtoVarType(sp_t->dtype());
        PADDLE_ENFORCE(tmp == *data_type || *data_type == default_data_type,
                       platform::errors::InvalidArgument(
                           "The DataType of %s Op's duplicable or different "
                           "slot Variable %s must be "
                           "consistent or register GetExpectedKernelType. The "
                           "current variable type is (%s), but the "
                           "previous variable type is (%s).",
                           Type(),
                           name,
                           DataTypeToString(tmp),
                           DataTypeToString(*data_type)));
        *data_type = tmp;
      } else if (var->IsType<LoDTensorArray>()) {
        auto t_arr = &var->Get<LoDTensorArray>();
        for (size_t j = 0; j < t_arr->size(); j++) {
          if (t_arr->at(j).IsInitialized()) {
            t = &(t_arr->at(j));
          }
        }
      }
      if (t != nullptr) {
        PADDLE_ENFORCE_EQ(t->IsInitialized(),
                          true,
                          platform::errors::InvalidArgument(
                              "The %s Op's Input Variable `%s` "
                              "contains uninitialized phi::DenseTensor.",
                              Type(),
                              name));
        proto::VarType::Type tmp =
            paddle::framework::TransToProtoVarType(t->dtype());
        PADDLE_ENFORCE(tmp == *data_type || *data_type == default_data_type,
                       platform::errors::InvalidArgument(
                           "The DataType of %s Op's duplicable or different "
                           "slot Variable %s must be "
                           "consistent or register GetExpectedKernelType. The "
                           "current variable type is (%s), but the "
                           "previous variable type is (%s).",
                           Type(),
                           name,
                           DataTypeToString(tmp),
                           DataTypeToString(*data_type)));
        *data_type = tmp;
      }
    }
  }
}
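
// Illustrative sketch (hypothetical duplicable slot "X", not compiled): for a
// multi-input slot, every initialized tensor must share one dtype, otherwise
// the enforce above fires instead of silently picking the first dtype.
//
//   proto::VarType::Type dtype = static_cast<proto::VarType::Type>(-1);
//   ParseMultiInputDataType(ctx.MultiInputVar("X"), "X", &dtype);
//   // throws InvalidArgument if the vars under "X" mix, say, FP32 and FP64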

proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  proto::VarType::Type default_data_type =
      static_cast<proto::VarType::Type>(-1);
  proto::VarType::Type data_type = default_data_type;

  for (auto* name : ctx.InNameList()) {
    if (ctx.InputSize(*name) == 1UL) {
      ParseInputDataType(ctx.InputVar(*name), *name, &data_type);
    } else {
      ParseMultiInputDataType(ctx.MultiInputVar(*name), *name, &data_type);
    }
  }
  PADDLE_ENFORCE_NE(
      data_type,
      default_data_type,
      platform::errors::NotFound(
          "DataType should be indicated by input Variable at %s.", Type()));
  return data_type;
}

proto::VarType::Type OperatorWithKernel::IndicateVarDataType(
    const ExecutionContext& ctx, const std::string& name) const {
  proto::VarType::Type default_data_type =
      static_cast<proto::VarType::Type>(-1);
  proto::VarType::Type data_type = default_data_type;
  if (ctx.InputSize(name) == 1UL) {
    ParseInputDataType(ctx.InputVar(name), name, &data_type);
  } else {
    ParseMultiInputDataType(ctx.MultiInputVar(name), name, &data_type);
  }
  PADDLE_ENFORCE_NE(
      data_type,
      default_data_type,
      platform::errors::InvalidArgument(
          "The Input Variable(%s) of (%s) Operator used to determine kernel "
          "data type is empty or not phi::DenseTensor or SelectedRows or "
          "LoDTensorArray.",
          name,
          Type()));
  return data_type;
}
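
// Illustrative sketch (hypothetical derived op, not part of this file): ops
// commonly key their kernel off one named input instead of scanning all
// inputs, e.g.
//
//   phi::KernelKey MyOp::GetExpectedKernelType(
//       const ExecutionContext& ctx) const {
//     return phi::KernelKey(IndicateVarDataType(ctx, "X"), ctx.GetPlace());
//   }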

phi::DenseTensor* OperatorWithKernel::GetTensorFormInputSafely(
    const ExecutionContext& ctx, const std::string& name) const {
  // 1. get variable and check
  // NOTE: only supports a single input var for now
  // NOTE: const_cast is used because there is no method to get a single
  // mutable var; we do not change the var's data here, only read some
  // attributes
  Variable* var = const_cast<Variable*>(ctx.InputVar(name));
  PADDLE_ENFORCE_NOT_NULL(
      var,
      platform::errors::NotFound(
          "The variable %s is not found when promoting complex types.", name));
  // 2. get tensor and check
  phi::DenseTensor* t = nullptr;
  if (var->IsType<phi::DenseTensor>()) {
    t = var->GetMutable<phi::DenseTensor>();
  } else if (var->IsType<phi::SelectedRows>()) {
    t = var->GetMutable<phi::SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Unsupported input variable type in complex type promotion."));
  }
  PADDLE_ENFORCE_NOT_NULL(t,
                          platform::errors::InvalidArgument(
                              "The phi::DenseTensor of variable %s is nullptr "
                              "when promoting complex types.",
                              name));
  PADDLE_ENFORCE_EQ(
      t->IsInitialized(),
      true,
      platform::errors::InvalidArgument(
          "The phi::DenseTensor in the %s Op's Input Variable %s(%s) is "
          "not initialized.",
          Type(),
          name,
          ctx.InputName(name)));
  return t;
}

/** NOTE(chenweihang): For safety reasons, we now only
 * perform type promotion for binary operations with
 * complex type inputs, which is used to support the
 * paddle quantum function.
 * In other cases, the first input data type is used as
 * the kernel data type.
 */
proto::VarType::Type OperatorWithKernel::IndicateOrPromoteVarDataTypes(
    const ExecutionContext& ctx,
    const std::string& name1,
    const std::string& name2) const {
  // 1. Get tensor
  auto* tensor_a = GetTensorFormInputSafely(ctx, name1);
  auto* tensor_b = GetTensorFormInputSafely(ctx, name2);

  // 2. Get two input types
  auto type_a = framework::TransToProtoVarType(tensor_a->dtype());
  auto type_b = framework::TransToProtoVarType(tensor_b->dtype());

  // 3. Get first input type or promote complex types
  auto target_type = PromoteTypesIfComplexExists(type_a, type_b);

  return target_type;
}
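
// Illustrative sketch (hypothetical binary op): an elementwise-style op can
// promote complex inputs instead of taking the dtype of its first input:
//
//   phi::KernelKey MyBinaryOp::GetExpectedKernelType(
//       const ExecutionContext& ctx) const {
//     auto dtype = IndicateOrPromoteVarDataTypes(ctx, "X", "Y");
//     return phi::KernelKey(dtype, ctx.GetPlace());
//   }
//
// so e.g. complex64 + float32 selects a complex kernel rather than a float
// one.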

phi::KernelKey OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return phi::KernelKey(IndicateDataType(ctx), ctx.GetPlace());
}

phi::KernelKey OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name,
    const phi::DenseTensor& tensor,
    const phi::KernelKey& expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  // When the op is the first oneDNN op (there was some non-oneDNN op
  // previously), we also need to rotate the shape NHWC -> NCHW
  if ((expected_kernel_type.layout() == phi::DataLayout::ONEDNN) &&
      (tensor.layout() != phi::DataLayout::ONEDNN) &&
      phi::OneDNNContext::tls().get_cur_paddle_data_layout() ==
          phi::DataLayout::kNHWC) {
    return phi::KernelKey(
        tensor.place(), phi::DataLayout::kNHWC, expected_kernel_type.dtype());
  }
#endif
  return phi::KernelKey(
      tensor.place(), tensor.layout(), expected_kernel_type.dtype());
}
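
// Illustrative sketch (hypothetical override; "Index" is an assumed input
// name): ops whose auxiliary inputs must not be transformed typically
// override GetKernelTypeForVar to keep the variable's own layout/backend:
//
//   phi::KernelKey MyOp::GetKernelTypeForVar(
//       const std::string& var_name,
//       const phi::DenseTensor& tensor,
//       const phi::KernelKey& expected_kernel_type) const {
//     if (var_name == "Index") {
//       return phi::KernelKey(phi::Backend::ALL_BACKEND,
//                             tensor.layout(),
//                             expected_kernel_type.dtype());
//     }
//     return OperatorWithKernel::GetKernelTypeForVar(
//         var_name, tensor, expected_kernel_type);
//   }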

phi::KernelSignature OperatorWithKernel::GetExpectedPhiKernelArgs(
    const ExecutionContext& ctx) const {
  ExecutionArgumentMappingContext arg_mapping_ctx(ctx);
  if (arg_map_fn_ == nullptr) {
    auto* arg_map_fn = phi::OpUtilsMap::Instance().GetArgumentMappingFn(type_);
    if (arg_map_fn) {
      arg_map_fn_.reset(new phi::ArgumentMappingFn(*arg_map_fn));
    } else {
      auto func =
          [this](
              const phi::ArgumentMappingContext& ctx) -> phi::KernelSignature {
        return phi::DefaultKernelSignatureMap::Instance().Get(type_);
      };
      arg_map_fn_.reset(new phi::ArgumentMappingFn(func));
    }
  }
  return (*arg_map_fn_)(arg_mapping_ctx);
}
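
// Illustrative sketch (the mapping below is a simplified assumption, not the
// registered one): an ArgumentMappingFn translates fluid op slots into a
// phi::KernelSignature, e.g. for a scale-like op
//
//   phi::KernelSignature ScaleLikeOpArgumentMapping(
//       const phi::ArgumentMappingContext& ctx) {
//     return phi::KernelSignature(
//         "scale", {"X"}, {"scale", "bias", "bias_after_scale"}, {"Out"});
//   }
//
// Ops without a registered mapping fall back to the signature stored in
// phi::DefaultKernelSignatureMap, as handled above.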

static void SetDnnAttrIntoDeviceContext(
    phi::DeviceContext* dev_ctx,
    const Attribute& attr,
    const std::string& attr_name,
    const operators::ExtraAttrPropertySet& attr_propertys) {
#ifdef PADDLE_WITH_MKLDNN
  if (phi::OneDNNContext::classof(dev_ctx) &&
      attr_propertys.Support(operators::ExtraAttrProperty::ONEDNN)) {
    VLOG(4) << "Runtime attr `" << attr_name << "` is passed to OneDNNContext.";
    phi::OneDNNContext* one_dnn_ctx = static_cast<phi::OneDNNContext*>(dev_ctx);
    switch (AttrTypeID(attr)) {
      case proto::AttrType::FLOAT:
        one_dnn_ctx->SetDnnAttr(attr_name, PADDLE_GET_CONST(float, attr));
        break;
      case proto::AttrType::INT:
        one_dnn_ctx->SetDnnAttr(attr_name, PADDLE_GET_CONST(int, attr));
        break;
      case proto::AttrType::STRING:
        one_dnn_ctx->SetDnnAttr(attr_name, PADDLE_GET_CONST(std::string, attr));
        break;
      case proto::AttrType::INTS:
        one_dnn_ctx->SetDnnAttr(attr_name,
                                PADDLE_GET_CONST(std::vector<int>, attr));
        break;
      case proto::AttrType::FLOATS:
        one_dnn_ctx->SetDnnAttr(attr_name,
                                PADDLE_GET_CONST(std::vector<float>, attr));
        break;
      case proto::AttrType::BOOLEAN:
        one_dnn_ctx->SetDnnAttr(attr_name, PADDLE_GET_CONST(bool, attr));
        break;
      default:
        PADDLE_THROW(platform::errors::Unimplemented(
            "Unsupported Attribute value type `%s` for phi.",
            platform::demangle(attr.type().name())));
    }
  }
#endif
#ifdef PADDLE_WITH_CUDA
  if (phi::GPUContext::classof(dev_ctx) &&
      attr_propertys.Support(operators::ExtraAttrProperty::GPUDNN)) {
    VLOG(4) << "Runtime attr `" << attr_name << "` is passed to GPUDNNContext.";
    phi::GPUContext* gpu_dnn_ctx = static_cast<phi::GPUContext*>(dev_ctx);
    switch (AttrTypeID(attr)) {
      case proto::AttrType::INT:
        gpu_dnn_ctx->SetDnnAttr(attr_name, PADDLE_GET_CONST(int, attr));
        break;
      case proto::AttrType::BOOLEAN:
        gpu_dnn_ctx->SetDnnAttr(attr_name, PADDLE_GET_CONST(bool, attr));
        break;
      default:
        PADDLE_THROW(platform::errors::Unimplemented(
            "Unsupported Attribute value type `%s` for phi.",
            platform::demangle(attr.type().name())));
    }
  }
#endif
}
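
// Illustrative note (the attr name "fuse_relu" is an assumption): a runtime
// attr marked with ExtraAttrProperty::ONEDNN is not forwarded to the phi
// kernel's attribute list; instead BuildPhiKernelContext below stashes it on
// the device context, conceptually:
//
//   SetDnnAttrIntoDeviceContext(dev_ctx, attrs.at("fuse_relu"),
//                               "fuse_relu", attr_propertys);
//   // the oneDNN kernel later reads it back from the OneDNNContext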

void OperatorWithKernel::BuildPhiKernelContext(
    const RuntimeContext& ctx,
    platform::DeviceContext* dev_ctx,
    phi::KernelContext* phi_kernel_context) const {
  phi_kernel_context->SetDeviceContext(dev_ctx);

  auto& input_names = kernel_signature_->input_names;
  auto& attr_names = kernel_signature_->attr_names;
  auto& output_names = kernel_signature_->output_names;

  auto input_defs = phi_kernel_->args_def().input_defs();
  auto attr_defs = phi_kernel_->args_def().attribute_defs();
  auto output_defs = phi_kernel_->args_def().output_defs();

#if defined(PADDLE_WITH_MKLDNN)
  if (phi::OneDNNContext::classof(dev_ctx)) {
    // OneDNN holds this op's variable names and initializes them here.
    phi::OneDNNContext* one_dnn_ctx = static_cast<phi::OneDNNContext*>(dev_ctx);
    one_dnn_ctx->SetInputsName(Inputs());
    one_dnn_ctx->SetOutputsName(Outputs());
  }
#endif

  PADDLE_ENFORCE_EQ(input_names.size(),
                    input_defs.size(),
                    platform::errors::InvalidArgument(
                        "The size of inputs_args names (%d) must be equal to "
                        "the size of kernel input_defs (%d).",
                        input_names.size(),
                        input_defs.size()));

  PADDLE_ENFORCE_EQ(output_names.size(),
                    output_defs.size(),
                    platform::errors::InvalidArgument(
                        "The size of outputs_args names (%d) must be equal to "
                        "the size of kernel output_defs (%d).",
                        output_names.size(),
                        output_defs.size()));

  PADDLE_ENFORCE_EQ(attr_names.size(),
                    attr_defs.size(),
                    platform::errors::InvalidArgument(
                        "The size of attribute_args names (%d) must be equal "
                        "to the size of kernel attribute_defs (%d).",
                        attr_names.size(),
                        attr_defs.size()));
  for (size_t i = 0; i < input_names.size(); ++i) {
    auto it = ctx.inputs.find(input_names[i]);

    // calculate the start and end index of the input tensors
    size_t start_idx =
        (i == 0 ? 0 : phi_kernel_context->InputRangeAt(i - 1).second);
    // deal with optional here
    if ((it == ctx.inputs.end() || it->second.size() == 0) &&
        (input_defs[i].type_index ==
             std::type_index(typeid(paddle::optional<phi::DenseTensor>)) ||
         input_defs[i].type_index ==
             std::type_index(typeid(paddle::optional<phi::SelectedRows>)) ||
         input_defs[i].type_index ==
             std::type_index(typeid(
                 paddle::optional<std::vector<const phi::DenseTensor*>>)))) {
      phi_kernel_context->EmplaceBackInputWithoutSetRange(nullptr);
      auto end_idx = start_idx + 1;
      phi_kernel_context->AssignInputRange(std::make_pair(start_idx, end_idx),
                                           i);

      continue;
    }
    auto ins_vector = it->second;
    size_t end_idx = start_idx + ins_vector.size();
    for (size_t offset = 0; offset < ins_vector.size(); ++offset) {
      const phi::TensorBase* tensor_in = nullptr;
      auto* var = ins_vector[offset];
      if (var->IsType<phi::DenseTensor>()) {
        tensor_in = &(var->Get<phi::DenseTensor>());
        phi_kernel_context->EmplaceBackInputWithoutSetRange(tensor_in);
      } else if (var->IsType<phi::SelectedRows>()) {
        tensor_in = &(var->Get<phi::SelectedRows>());
        phi_kernel_context->EmplaceBackInputWithoutSetRange(tensor_in);
      } else if (var->IsType<phi::SparseCooTensor>()) {
        tensor_in = &(var->Get<phi::SparseCooTensor>());
        phi_kernel_context->EmplaceBackInputWithoutSetRange(tensor_in);
      } else if (var->IsType<framework::LoDTensorArray>()) {
        need_prepare_phi_data_ = true;
        tensor_in = &(var->Get<framework::LoDTensorArray>());
        phi_kernel_context->EmplaceBackInputWithoutSetRange(tensor_in);
      } else if (var->IsType<framework::Vocab>()) {
        tensor_in = &(var->Get<framework::Vocab>());
        phi_kernel_context->EmplaceBackInputWithoutSetRange(tensor_in);
      } else if (var->IsType<framework::FeedList>()) {
        tensor_in = &(var->Get<framework::FeedList>());
        phi_kernel_context->EmplaceBackInputWithoutSetRange(tensor_in);
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "Unsupported input `%s` type when calling pt kernel.",
            framework::ToTypeName(var->Type())));
      }
    }
    // Note: here cannot deal with vector<LoDTensorArray> input
    phi_kernel_context->AssignInputRange(std::make_pair(start_idx, end_idx), i);
  }
  VLOG(4) << "Done inputs";
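
  // Illustrative note (hypothetical sizes): the ranges assigned above record,
  // for kernel argument i, the [start, end) slice of the flattened tensor
  // list. E.g. a duplicable slot holding 3 vars followed by a single-tensor
  // slot yields ranges (0, 3) and (3, 4), which is how the phi kernel locates
  // the tensors belonging to each argument.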
  for (size_t i = 0; i < output_names.size(); ++i) {
    auto it = ctx.outputs.find(output_names[i]);
    size_t start_idx =
        (i == 0 ? 0 : phi_kernel_context->OutputRangeAt(i - 1).second);

    if (it == ctx.outputs.end() || it->second.empty()) {
      VLOG(4) << "Output " << output_names[i] << " not found";
      // Deal with the case that some outputs are not found or are NULL when
      // running the kernel.
      // For example: the outputs of matmul_grad are dx and dy;
      // sometimes dx or dy may be NULL.
      phi_kernel_context->EmplaceBackOutputWithoutSetRange(nullptr);
      auto end_idx = start_idx + 1;
      phi_kernel_context->AssignOutputRange(std::make_pair(start_idx, end_idx),
                                            i);
      continue;
    }
    auto& outs_vector = it->second;

    size_t end_idx = start_idx + outs_vector.size();

    for (size_t offset = 0; offset < outs_vector.size(); ++offset) {
      phi::TensorBase* tensor_out = nullptr;
      auto* var = outs_vector[offset];
      if (var) {
        if (var->template IsType<phi::DenseTensor>()) {
          tensor_out = var->template GetMutable<phi::DenseTensor>();
          phi_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
        } else if (var->template IsType<phi::SelectedRows>()) {
          tensor_out = var->template GetMutable<phi::SelectedRows>();
          phi_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
        } else if (var->template IsType<phi::SparseCooTensor>()) {
          tensor_out = var->template GetMutable<phi::SparseCooTensor>();
          phi_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
        } else if (var->template IsType<framework::LoDTensorArray>()) {
          tensor_out = var->template GetMutable<framework::LoDTensorArray>();
          // Note: If the input LoDTensorArray size is 0, the output
          // LoDTensorArray is also 0
          phi_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
        } else if (var->template IsType<framework::Strings>()) {
          tensor_out = var->template GetMutable<framework::Strings>();
          phi_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
        } else if (var->template IsType<paddle::framework::RawTensor>()) {
          tensor_out = var->template GetMutable<paddle::framework::RawTensor>();
          phi_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
        } else if (!var->IsInitialized()) {
          // The following is for RAW type of var
          tensor_out = var->template GetMutable<paddle::framework::RawTensor>();
          phi_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
        } else {
          PADDLE_THROW(platform::errors::Unimplemented(
              "Unsupported output `%s` type when calling pt kernel.",
              framework::ToTypeName(var->Type())));
        }
      } else {
        VLOG(4) << "Output " << output_names[i] << " is nullptr";
        phi_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
      }
    }
    phi_kernel_context->AssignOutputRange(std::make_pair(start_idx, end_idx),
                                          i);
  }
  VLOG(4) << "Done outputs";
  for (size_t i = 0; i < attr_names.size(); ++i) {
    VLOG(6) << "BuildPhiKernelContext: " << attr_names[i] << ": "
            << attr_defs[i].type_index;
    // attribute with Variable type has been placed into Inputs(), and
    // we can parse them from RuntimeContext.inputs.
    auto attr_iter = Attrs().find(attr_names[i]);
    switch (attr_defs[i].type_index) {
      case phi::AttributeType::SCALAR:
        if (attr_iter != Attrs().end()) {
          // scalar is in the attribute
          switch (AttrTypeID(attr_iter->second)) {
            case proto::AttrType::FLOAT:
              phi_kernel_context->EmplaceBackAttr(std::move(
                  phi::Scalar(PADDLE_GET_CONST(float, attr_iter->second))));
              break;
            case proto::AttrType::FLOAT64:
              phi_kernel_context->EmplaceBackAttr(std::move(
                  phi::Scalar(PADDLE_GET_CONST(double, attr_iter->second))));
              break;
            case proto::AttrType::INT:
              phi_kernel_context->EmplaceBackAttr(std::move(
                  phi::Scalar(PADDLE_GET_CONST(int, attr_iter->second))));
              break;
            case proto::AttrType::LONG:
              phi_kernel_context->EmplaceBackAttr(std::move(
                  phi::Scalar(PADDLE_GET_CONST(int64_t, attr_iter->second))));
              break;
            case proto::AttrType::STRING:
              phi_kernel_context->EmplaceBackAttr(std::move(phi::Scalar(
                  PADDLE_GET_CONST(std::string, attr_iter->second))));
              break;
            case proto::AttrType::BOOLEAN:
              phi_kernel_context->EmplaceBackAttr(std::move(
                  phi::Scalar(PADDLE_GET_CONST(bool, attr_iter->second))));
              break;
            case proto::AttrType::SCALAR:
              phi_kernel_context->EmplaceBackAttr(
                  std::move(phi::Scalar(PADDLE_GET_CONST(
                      paddle::experimental::Scalar, attr_iter->second))));
              break;
            default:
              PADDLE_THROW(platform::errors::Unimplemented(
                  "Unsupported cast op attribute `%s` to Scalar when construct "
                  "KernelContext in dygraph.",
                  attr_names[i]));
          }
        } else {  // scalar is in the input
          need_prepare_phi_data_ = true;
          auto& ins_vector = ctx.inputs.at(attr_names[i]);
          phi_kernel_context->EmplaceBackAttr(
              std::move(framework::MakePhiScalarFromVar(*ins_vector.front())));
        }
        break;
      case phi::AttributeType::INT_ARRAY:
        if (attr_iter != Attrs().end()) {
          switch (AttrTypeID(attr_iter->second)) {
            case proto::AttrType::INTS:
              phi_kernel_context->EmplaceBackAttr(std::move(phi::IntArray(
                  PADDLE_GET_CONST(std::vector<int32_t>, attr_iter->second))));
              break;
            case proto::AttrType::LONGS:
              phi_kernel_context->EmplaceBackAttr(std::move(phi::IntArray(
                  PADDLE_GET_CONST(std::vector<int64_t>, attr_iter->second))));
              break;
            case proto::AttrType::INT:
              phi_kernel_context->EmplaceBackAttr(std::move(phi::IntArray(
                  &PADDLE_GET_CONST(int32_t, attr_iter->second), 1)));
              break;
            case proto::AttrType::LONG:
              phi_kernel_context->EmplaceBackAttr(std::move(phi::IntArray(
                  &PADDLE_GET_CONST(int64_t, attr_iter->second), 1)));
              break;
            default:
              PADDLE_THROW(platform::errors::Unimplemented(
                  "Unsupported cast op attribute `%s` to IntArray when "
                  "construct KernelContext.",
                  attr_names[i]));
          }
        } else {  // shape is in the input
          need_prepare_phi_data_ = true;
          auto& ins_vector = ctx.inputs.at(attr_names[i]);
          if (ins_vector.size() == 1) {  // ShapeTensor
            phi_kernel_context->EmplaceBackAttr(std::move(
                framework::MakePhiIntArrayFromVar(*ins_vector.front())));
          } else {  // ShapeTensorList
            phi_kernel_context->EmplaceBackAttr(
                std::move(framework::MakePhiIntArrayFromVarList(ins_vector)));
          }
        }
        break;

      case phi::AttributeType::SCALARS: {
        PADDLE_ENFORCE_NE(
            attr_iter,
            Attrs().end(),
            platform::errors::NotFound("(%s) is not found in AttributeMap when "
                                       "building static KernelContext.",
                                       attr_names[i]));
        switch (AttrTypeID(attr_iter->second)) {
          case proto::AttrType::INTS: {
            const auto& vec =
                PADDLE_GET_CONST(std::vector<int32_t>, attr_iter->second);
            std::vector<phi::Scalar> scalar_list;
            scalar_list.reserve(vec.size());
            for (const auto& val : vec) {
              scalar_list.emplace_back(val);
            }
            phi_kernel_context->EmplaceBackAttr(std::move(scalar_list));
          } break;
          case proto::AttrType::LONGS: {
            const auto& vec =
                PADDLE_GET_CONST(std::vector<int64_t>, attr_iter->second);
            std::vector<phi::Scalar> scalar_list;
            scalar_list.reserve(vec.size());
            for (const auto& val : vec) {
              scalar_list.emplace_back(val);
            }
            phi_kernel_context->EmplaceBackAttr(std::move(scalar_list));
          } break;
          case proto::AttrType::FLOATS: {
            const auto& vec =
                PADDLE_GET_CONST(std::vector<float>, attr_iter->second);
            std::vector<phi::Scalar> scalar_list;
            scalar_list.reserve(vec.size());
            for (const auto& val : vec) {
              scalar_list.emplace_back(val);
            }
            phi_kernel_context->EmplaceBackAttr(std::move(scalar_list));
          } break;
          case proto::AttrType::FLOAT64S: {
            const auto& vec =
                PADDLE_GET_CONST(std::vector<double>, attr_iter->second);
            std::vector<phi::Scalar> scalar_list;
            scalar_list.reserve(vec.size());
            for (const auto& val : vec) {
              scalar_list.emplace_back(val);
            }
            phi_kernel_context->EmplaceBackAttr(std::move(scalar_list));
          } break;
          case proto::AttrType::BOOLEANS: {
            const auto& vec =
                PADDLE_GET_CONST(std::vector<bool>, attr_iter->second);
            std::vector<phi::Scalar> scalar_list;
            scalar_list.reserve(vec.size());
            for (const auto& val : vec) {
              scalar_list.emplace_back(val);
            }
            phi_kernel_context->EmplaceBackAttr(std::move(scalar_list));
          } break;
          case proto::AttrType::SCALARS: {
            const auto& vec = PADDLE_GET_CONST(
                std::vector<paddle::experimental::Scalar>, attr_iter->second);
            std::vector<phi::Scalar> scalar_list{vec.begin(), vec.end()};
            phi_kernel_context->EmplaceBackAttr(std::move(scalar_list));
          } break;
          default:
            PADDLE_THROW(platform::errors::Unimplemented(
                "Unsupported cast op attribute `%s` to vector<Scalar> when "
                "construct KernelContext.",
                attr_names[i]));
        }
      } break;
      default: {
        if (attr_iter == Attrs().end()) {
          // TODO(chenweihang): remove this backup searching later
          attr_iter = RuntimeAttrs().find(attr_names[i]);
          PADDLE_ENFORCE_NE(attr_iter,
                            RuntimeAttrs().end(),
                            platform::errors::NotFound(
                                "(%s) is not found in AttributeMap when "
                                "building static KernelContext.",
                                attr_names[i]));
        }

        switch (attr_defs[i].type_index) {
          case phi::AttributeType::FLOAT32:
            phi_kernel_context->EmplaceBackAttr(
                PADDLE_GET_CONST(float, attr_iter->second));
            break;
          case phi::AttributeType::FLOAT64:
            phi_kernel_context->EmplaceBackAttr(
                PADDLE_GET_CONST(double, attr_iter->second));
            break;
          case phi::AttributeType::INT32:
            phi_kernel_context->EmplaceBackAttr(
                PADDLE_GET_CONST(int, attr_iter->second));
            break;
          case phi::AttributeType::BOOL:
            phi_kernel_context->EmplaceBackAttr(
                PADDLE_GET_CONST(bool, attr_iter->second));
            break;
          case phi::AttributeType::INT64:
            phi_kernel_context->EmplaceBackAttr(
                PADDLE_GET_CONST(int64_t, attr_iter->second));
            break;
          case phi::AttributeType::INT32S:
            phi_kernel_context->EmplaceBackAttr(
                PADDLE_GET_CONST(std::vector<int>, attr_iter->second));
            break;
          case phi::AttributeType::BOOLS:
            phi_kernel_context->EmplaceBackAttr(
                PADDLE_GET_CONST(std::vector<bool>, attr_iter->second));
            break;
          case phi::AttributeType::DATA_TYPE: {
            auto data_type = framework::TransToPhiDataType(
                static_cast<framework::proto::VarType::Type>(
                    PADDLE_GET_CONST(int, attr_iter->second)));
            phi_kernel_context->EmplaceBackAttr(data_type);
          } break;
          case phi::AttributeType::STRING:
            phi_kernel_context->EmplaceBackAttr(
                std::move(PADDLE_GET_CONST(std::string, attr_iter->second)));
            break;
          case phi::AttributeType::INT64S:
            switch (AttrTypeID(attr_iter->second)) {
              case proto::AttrType::LONGS:
                phi_kernel_context->EmplaceBackAttr(
                    PADDLE_GET_CONST(std::vector<int64_t>, attr_iter->second));
                break;
              case proto::AttrType::INTS: {
                const auto& vector_int_attr =
                    PADDLE_GET_CONST(std::vector<int>, attr_iter->second);
                const std::vector<int64_t> vector_int64_attr(
                    vector_int_attr.begin(), vector_int_attr.end());
                phi_kernel_context->EmplaceBackAttr(vector_int64_attr);
              } break;
              default:
                PADDLE_THROW(platform::errors::Unimplemented(
                    "Unsupported cast op attribute `%s` to vector<int64_t> "
                    "when "
                    "construct KernelContext.",
                    attr_names[i]));
            }
            break;
          case phi::AttributeType::FLOAT32S:
            phi_kernel_context->EmplaceBackAttr(
                PADDLE_GET_CONST(std::vector<float>, attr_iter->second));
            break;
          case phi::AttributeType::STRINGS:
            phi_kernel_context->EmplaceBackAttr(
                PADDLE_GET_CONST(std::vector<std::string>, attr_iter->second));
            break;
          default:
            PADDLE_THROW(platform::errors::Unimplemented(
                "Unsupported cast op attribute `%s` when construct "
                "KernelContext in dygraph.",
                attr_names[i]));
        }
      }
    }
  }
  VLOG(4) << "Done attributes";

// Clear all old attrs before adding new attrs,
// because sometimes old attrs may be misused.
#if defined(PADDLE_WITH_MKLDNN)
  if (phi::OneDNNContext::classof(dev_ctx)) {
    phi::OneDNNContext* one_dnn_ctx = static_cast<phi::OneDNNContext*>(dev_ctx);
    one_dnn_ctx->ClearDnnAttr();
    if (!RuntimeAttrs().empty()) need_prepare_phi_data_ = true;
  }
#endif

  // Note(YuanRisheng): For now, we can't enable the code below, because some
  // unittests run the OLD dygraph mode and ExtraAttr is not supported there.
  // So here we rely on the trick that dev_ctx is a global object: we can
  // store ExtraAttr in static graph mode, and when a unittest runs the OLD
  // dygraph it can still obtain these ExtraAttr. We can enable this code once
  // the OLD dygraph is no longer used.
  /*
  #if defined(PADDLE_WITH_CUDA)
    if(phi::GPUContext::classof(dev_ctx)) {
      phi::GPUContext* gpu_dnn_ctx = static_cast<phi::GPUContext*>(dev_ctx);
      gpu_dnn_ctx->ClearDnnAttr();
    }
  #endif
  */
  // For compatibility with Ops with extra attrs for a specific backend
#if defined(PADDLE_WITH_MKLDNN) || defined(PADDLE_WITH_CUDA)
  auto& runtime_attrs = RuntimeAttrs();
  for (const auto& attr_iter : runtime_attrs) {
    auto& attr_name = attr_iter.first;
    auto& attr = attr_iter.second;
    auto attr_propertys = paddle::operators::GetExtraAttrProperties(attr_name);
    SetDnnAttrIntoDeviceContext(dev_ctx, attr, attr_name, attr_propertys);
  }
  // TODO(chenweihang): Since the pass will still `SetAttr` in the OpDesc,
  // we try to add these Attrs to the RuntimeAttrs, but these OpDesc will lose
  // the RuntimeAttrs information in the process of converting the Graph to
  // the Program, so additional record configuration would have to be
  // introduced, which increases the cost of development and understanding,
  // so we still get the attributes set by these passes from Attrs for the
  // time being. In the future, it is necessary to clarify the positioning of
  // RuntimeAttrs and expand related functions.
  auto& attrs = Attrs();
  for (const auto& attr_iter : attrs) {
    auto& attr_name = attr_iter.first;
    auto& attr = attr_iter.second;
    auto attr_propertys = paddle::operators::GetExtraAttrProperties(attr_name);
    SetDnnAttrIntoDeviceContext(dev_ctx, attr, attr_name, attr_propertys);
  }
  VLOG(4) << "Done runtime attributes";
#endif

// For compatibility with Ops with extra inputs for the onednn backend
#ifdef PADDLE_WITH_MKLDNN
  if (phi::OneDNNContext::classof(dev_ctx)) {
    phi::OneDNNContext* one_dnn_ctx = static_cast<phi::OneDNNContext*>(dev_ctx);
    auto& extra_input_names =
        paddle::operators::ExtraInfoUtils::Instance().GetExtraInputNamesMap(
            Type());
    for (const auto& input_name : extra_input_names) {
      auto it = ctx.inputs.find(input_name);
      if (it == ctx.inputs.end() || it->second.size() == 0) {
        one_dnn_ctx->SetDnnInput(input_name, nullptr);
      } else {
        auto ins_vector = it->second;
        PADDLE_ENFORCE_EQ(
            ins_vector.size(),
            1UL,
            phi::errors::InvalidArgument(
                "OneDNN's extra input only allows one input tensor."));
        auto* var = ins_vector[0];
        PADDLE_ENFORCE_EQ(var->IsType<phi::DenseTensor>(),
                          true,
                          phi::errors::InvalidArgument(
                              "OneDNN's extra input only can be DenseTensor."));
        one_dnn_ctx->SetDnnInput(input_name, &(var->Get<phi::DenseTensor>()));
      }
    }
  }
  VLOG(4) << "Done runtime extra inputs";
#endif
}
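
// Illustrative sketch (condensed and hypothetical, not part of the build):
// the phi-kernel path of RunImpl earlier in this file ties these pieces
// together roughly as
//
//   kernel_signature_ = GetExpectedPhiKernelArgs(exe_ctx);  // arg mapping
//   auto kernel_key   = GetExpectedKernelType(exe_ctx);     // kernel key
//   phi::KernelContext phi_kernel_context;
//   BuildPhiKernelContext(runtime_ctx, dev_ctx, &phi_kernel_context);
//   (*phi_kernel_)(&phi_kernel_context);                    // run the kernel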

}  // namespace framework
}  // namespace paddle