// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/prepared_operator.h"

#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/imperative/infer_shape_context.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/pten/common/scalar.h"
#include "paddle/pten/common/scalar_array.h"
#include "paddle/utils/small_vector.h"
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_op_list.h"
#endif
#include "paddle/fluid/platform/device/gpu/gpu_info.h"

DECLARE_bool(check_nan_inf);
DECLARE_bool(run_pten_kernel);
DECLARE_bool(benchmark);
DECLARE_bool(run_kp_kernel);

namespace paddle {
namespace imperative {

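// Overloads that let the templated code below obtain the underlying
// VariableWrapper uniformly from either a VarBase or a VariableWrapper.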
const std::shared_ptr<VariableWrapper>& GetVariableWrapper(
    const std::shared_ptr<paddle::imperative::VarBase>& var) {
  return var->SharedVar();
}

const std::shared_ptr<VariableWrapper>& GetVariableWrapper(
    const std::shared_ptr<VariableWrapper>& var) {
  return var;
}

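// Returns the tensor held by a Variable (a LoDTensor directly, or the value
// tensor of a SelectedRows); returns nullptr for other variable types.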
const framework::Tensor* GetTensorFromVar(const framework::Variable& var) {
  if (var.IsType<framework::LoDTensor>()) {
    return &(var.Get<framework::LoDTensor>());
  } else if (var.IsType<framework::SelectedRows>()) {
    return &(var.Get<framework::SelectedRows>().value());
  } else {
    return nullptr;
  }
}

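// Looks up an attribute by name, first in attrs and then in default_attrs,
// and fails with NotFound if it is in neither map.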
static const framework::Attribute& GetAttr(
    const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs, const std::string& name) {
  auto it = attrs.find(name);
  bool found = it != attrs.end();
  if (!found) {
    it = default_attrs.find(name);
    found = it != default_attrs.end();
  }
  PADDLE_ENFORCE_EQ(
      found, true,
      platform::errors::NotFound("(%s) is not found in AttributeMap.", name));
  return it->second;
}

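// If a grad var's forward data type is real but its computed grad is complex
// (due to type promotion in the grad op), cast the grad back to the forward
// real type. See the detailed note in PreparedOpRunImpl.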
template <typename VarType>
static void HandleComplexGradToRealGrad(const NameVarMap<VarType>& outs) {
  for (auto& pair : outs) {
    for (auto& var : pair.second) {
      if (var == nullptr) {
        continue;
      }
      if (var->ForwardDataType() ==
          static_cast<framework::proto::VarType::Type>(-1)) {
        VLOG(6) << "Var (" << var->Name()
                << ")'s forward data type is not set.";
        continue;
      }
      if (!framework::IsComplexType(var->DataType()) ||
          framework::IsComplexType(var->ForwardDataType())) {
        continue;
      }
      const auto* tensor = GetTensorFromVar(var->Var());
      if (tensor && tensor->IsInitialized()) {
        VLOG(6) << "Transform " << framework::DataTypeToString(var->DataType())
                << " var `" << var->Name() << "` to "
                << framework::DataTypeToString(var->ForwardDataType())
                << " real var in dynamic graph.";
        framework::Tensor out;
        framework::TransComplexToReal(var->ForwardDataType(), var->DataType(),
                                      *tensor, &out);
        SetTensorToVariable(var->Var(), out, var->MutableVar());
      }
    }
  }
}

PreparedOp::PreparedOp(const framework::OperatorBase& op,
                       const framework::RuntimeContext& ctx,
                       const framework::OpKernelType& kernel_type,
                       const framework::OperatorWithKernel::OpKernelFunc& func,
                       platform::DeviceContext* dev_ctx)
    : op_(op),
      ctx_(ctx),
      kernel_type_(kernel_type),
      func_(func),
      dev_ctx_(dev_ctx) {}

PreparedOp::PreparedOp(const framework::OperatorBase& op,
                       const framework::RuntimeContext& ctx,
                       const framework::OpKernelType& kernel_type,
                       const framework::KernelSignature& kernel_signature,
                       const pten::Kernel& pt_kernel,
                       pten::KernelContext* pt_kernel_context,
                       platform::DeviceContext* dev_ctx)
    : op_(op),
      ctx_(ctx),
      kernel_type_(kernel_type),
      func_(nullptr),
      dev_ctx_(dev_ctx),
      run_pten_kernel_(true),
      pt_kernel_signature_(kernel_signature),
      pt_kernel_(pt_kernel),
      pt_kernel_context_(pt_kernel_context) {}

template <typename VarType>
PreparedOp PrepareImpl(const NameVarMap<VarType>& ins,
                       const NameVarMap<VarType>& outs,
                       const framework::OperatorWithKernel& op,
                       const platform::Place& place,
                       const framework::AttributeMap& attrs,
                       const framework::AttributeMap& default_attrs,
                       pten::KernelContext* pt_kernel_context) {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  framework::RuntimeContext ctx({}, {});

#ifdef PADDLE_WITH_MKLDNN
  // The MKLDNN variant of the code reads attributes in some of the
  // GetKernelTypeForVar and GetKernelType functions, so we need to copy the
  // attributes there. The const qualifier of Attrs() has to be discarded to
  // overwrite them.
  if (FLAGS_use_mkldnn) {
    auto& mutable_op_attrs = const_cast<framework::AttributeMap&>(op.Attrs());
    mutable_op_attrs = default_attrs;
    for (auto& attr : attrs) {
      mutable_op_attrs[attr.first] = attr.second;
    }
  }
#endif

  // 1. get expected kernel key
  auto dygraph_exe_ctx = DygraphExecutionContext<VarType>(
      op, framework::Scope(), *dev_ctx, ctx, ins, outs, attrs, default_attrs);
  auto expected_kernel_key = op.GetExpectedKernelType(dygraph_exe_ctx);
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

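  // Prefer the compatible pten kernel for this op when the pten path is
  // enabled; otherwise fall through to the legacy fluid kernel lookup below.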
  if (FLAGS_run_pten_kernel &&
      pten::KernelFactory::Instance().HasCompatiblePtenKernel(op.Type())) {
    auto pt_kernel_signature = op.GetExpectedPtenKernelArgs(dygraph_exe_ctx);
    VLOG(6) << pt_kernel_signature;

    auto pt_kernel_name = pt_kernel_signature.name;
    auto pt_kernel_key = TransOpKernelTypeToPtenKernelKey(expected_kernel_key);
    auto pt_kernel = pten::KernelFactory::Instance().SelectKernel(
        pt_kernel_name, pt_kernel_key);

    if (pt_kernel.IsValid()) {
      VLOG(6) << "Dynamic mode PrepareImpl - kernel name: " << pt_kernel_name
              << " | kernel key: " << pt_kernel_key
              << " | kernel: " << pt_kernel;

      // TODO(chenweihang): use the CPU kernel when the device kernel is
      // missing
      return PreparedOp(op, ctx, expected_kernel_key, pt_kernel_signature,
                        pt_kernel, pt_kernel_context, dev_ctx);
    } else {
      VLOG(6) << "Dynamic mode PrepareImpl - pten kernel `" << pt_kernel_name
              << "` not found.";
    }
  }

  // 2. check if op[type] has a kernel registered.
  auto& all_op_kernels = op.AllOpKernels();
  auto kernels_iter = all_op_kernels.find(op.Type());
  PADDLE_ENFORCE_NE(
      kernels_iter, all_op_kernels.end(),
      platform::errors::NotFound(
          "There are no kernels registered for operator %s.",
          op.Type()));

  auto& kernels = kernels_iter->second;
  auto kernel_iter = kernels.find(expected_kernel_key);
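  // Fall back to the CPU kernel when the op has no kernel on the target
  // device (or, for XPU, when the op is unsupported or blacklisted).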
#ifdef PADDLE_WITH_XPU
  if (is_xpu_place(expected_kernel_key.place_) &&
      (kernel_iter == kernels.end() ||
       !paddle::platform::is_xpu_support_op(op.Type(), expected_kernel_key) ||
       paddle::platform::is_in_xpu_black_list(op.Type()))) {
    VLOG(3) << "missing XPU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to the CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  if (kernel_iter == kernels.end() &&
      is_npu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing NPU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to the CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_MLU
  if (kernel_iter == kernels.end() &&
      is_mlu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing MLU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to the CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  // TODO(jiabin): Add back the fallback logic around operator.cc's line 1000
  // when we need that case
  PADDLE_ENFORCE_NE(kernel_iter, kernels.end(),
                    platform::errors::NotFound(
                        "Operator %s does not have kernel for %s.", op.Type(),
                        KernelTypeToString(expected_kernel_key)));

  if (!(expected_kernel_key.place_ == place)) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  return PreparedOp(op, ctx, expected_kernel_key, kernel_iter->second, dev_ctx);
}

PreparedOp PreparedOp::Prepare(const NameVarMap<VarBase>& ins,
                               const NameVarMap<VarBase>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs,
                               pten::KernelContext* pt_kernel_context) {
  return PrepareImpl<VarBase>(ins, outs, op, place, attrs, default_attrs,
                              pt_kernel_context);
}

PreparedOp PreparedOp::Prepare(const NameVarMap<VariableWrapper>& ins,
                               const NameVarMap<VariableWrapper>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs,
                               pten::KernelContext* pt_kernel_context) {
  return PrepareImpl<VariableWrapper>(ins, outs, op, place, attrs,
                                      default_attrs, pt_kernel_context);
}

template <typename VarType>
static void BuildDygraphPtenKernelContext(
    const framework::KernelSignature& pt_kernel_signature,
    const pten::Kernel& pt_kernel, const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs, const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs,
    platform::DeviceContext* dev_ctx, pten::KernelContext* kernel_ctx) {
  // TODO(chenweihang): for now this only works for very simple cases;
  // many cases still need to be dealt with later:
  // 1. inputs and outputs that are not tensors
  // 2. dispensable and duplicable inputs and outputs
  // 3. removal of needless attributes
  // 4. using pt Tensor directly
  // 5. kernel inputs that are not DenseTensor
  kernel_ctx->SetDeviceContext(dev_ctx);

  auto& input_names = std::get<0>(pt_kernel_signature.args);
  auto& attr_names = std::get<1>(pt_kernel_signature.args);
  auto& output_names = std::get<2>(pt_kernel_signature.args);

  auto& input_defs = pt_kernel.args_def().input_defs();
  auto& output_defs = pt_kernel.args_def().output_defs();
  auto& attr_defs = pt_kernel.args_def().attribute_defs();

  PADDLE_ENFORCE_EQ(input_names.size(), input_defs.size(),
                    platform::errors::InvalidArgument(
                        "the size of inputs_args names (%d) must be equal to "
                        "the size of kernel input_defs (%d).",
                        input_names.size(), input_defs.size()));

  PADDLE_ENFORCE_EQ(output_names.size(), output_defs.size(),
                    platform::errors::InvalidArgument(
                        "the size of outputs_args names (%d) must be equal to "
                        "the size of kernel output_defs (%d).",
                        output_names.size(), output_defs.size()));

  PADDLE_ENFORCE_EQ(attr_names.size(), attr_defs.size(),
                    platform::errors::InvalidArgument(
                        "the size of attribute_args names (%d) must be equal "
                        "to the size of kernel attribute_defs (%d).",
                        attr_names.size(), attr_defs.size()));

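  // Wire each input variable into the pten kernel context, reusing slots
  // already allocated in a previous run when possible.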
  for (size_t i = 0; i < input_names.size(); ++i) {
    auto& in_def = input_defs.at(i);
    auto& ins_vector = ins.at(input_names[i]);

    size_t start_idx = (i == 0 ? 0 : kernel_ctx->InputRangeAt(i - 1).second);
    size_t end_idx = start_idx + ins_vector.size();
    auto current_vector_size = kernel_ctx->InputsSize();

    // If the memory needed is less than the current memory allocated, we will
    // reuse the current memory by using ReMakePtenDenseTensorFromVar.
    // Otherwise, we will create new storage.
    for (size_t offset = 0; offset < ins_vector.size(); ++offset) {
      const auto& variable = ins_vector[offset]->Var();
      if (current_vector_size > start_idx + offset) {
        auto& input_ptr = kernel_ctx->MutableInputPtrAt(start_idx + offset);
        if (input_ptr == nullptr) {
          input_ptr = experimental::MakePtenTensorBaseFromVar(variable, in_def);
        } else {
          experimental::ReMakePtenDenseTensorFromVar(
              variable, in_def, kernel_ctx->MutableInputAt<pten::DenseTensor>(
                                    start_idx + offset));
        }
      } else {
        kernel_ctx->EmplaceBackInputWithoutSetRange(
            experimental::MakePtenTensorBaseFromVar(variable, in_def));
      }
    }
    kernel_ctx->AssignInputRange(std::make_pair(start_idx, end_idx), i);
  }

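  // Wire each output variable into the kernel context; missing outputs get a
  // nullptr slot so that output indices still line up with output_defs.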
  for (size_t i = 0; i < output_names.size(); ++i) {
    auto& out_def = output_defs.at(i);

    size_t start_idx = (i == 0 ? 0 : kernel_ctx->OutputRangeAt(i - 1).second);
    auto current_vector_size = kernel_ctx->OutputsSize();

    auto iter = outs.find(output_names[i]);
    if (iter == outs.end()) {
      if (current_vector_size > start_idx) {
        kernel_ctx->SetOutputWithoutSetRange(start_idx, {nullptr});
      } else {
        kernel_ctx->EmplaceBackOutputWithoutSetRange({nullptr});
      }
      kernel_ctx->AssignOutputRange(std::make_pair(start_idx, start_idx + 1),
                                    i);
      continue;
    }

    auto& outs_vector = iter->second;
    size_t end_idx = start_idx + outs_vector.size();

    // If the memory needed is less than the current memory allocated, we will
    // reuse the current memory by using ReMakePtenDenseTensorFromVar.
    // Otherwise, we will create new storage.
    for (size_t offset = 0; offset < outs_vector.size(); ++offset) {
      if (current_vector_size > start_idx + offset) {
        auto* buffer_tensor =
            kernel_ctx->MutableOutputAt<pten::DenseTensor>(start_idx + offset);
        if (buffer_tensor) {
          experimental::ReMakePtenDenseTensorFromVar(
              outs_vector[offset]->MutableVar(), out_def, buffer_tensor);
        } else {
          kernel_ctx->SetOutputWithoutSetRange(
              start_idx + offset,
              experimental::MakePtenTensorBaseFromVar(
                  outs_vector[offset]->MutableVar(), out_def));
        }
      } else {
        kernel_ctx->EmplaceBackOutputWithoutSetRange(
            experimental::MakePtenTensorBaseFromVar(
                outs_vector[offset]->MutableVar(), out_def));
      }
    }
    kernel_ctx->AssignOutputRange(std::make_pair(start_idx, end_idx), i);
  }

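  // Translate op attributes into pten kernel attributes. Attributes that are
  // attribute-like inputs (e.g. ShapeTensor for a ScalarArray) are read from
  // the inputs instead of the attribute maps.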
  for (size_t i = 0; i < attr_names.size(); ++i) {
    if (attr_defs[i].type_index == std::type_index(typeid(pten::ScalarArray))) {
      if (attrs.find(attr_names[i]) !=
          attrs.end()) {  // shape is in the attribute
        auto& attr = GetAttr(attrs, default_attrs, attr_names[i]);
        if (std::type_index(attr.type()) ==
            std::type_index(typeid(std::vector<int64_t>))) {
          kernel_ctx->EmplaceBackAttr(std::move(
              pten::ScalarArray(BOOST_GET_CONST(std::vector<int64_t>, attr))));
        } else if (std::type_index(attr.type()) ==
                   std::type_index(typeid(std::vector<int32_t>))) {
          kernel_ctx->EmplaceBackAttr(std::move(
              pten::ScalarArray(BOOST_GET_CONST(std::vector<int32_t>, attr))));
        } else {
          PADDLE_THROW(platform::errors::Unimplemented(
              "Unsupported cast op attribute `%s` to VectorTensor when "
              "construct KernelContext.",
              attr_names[i]));
        }
      } else {  // shape is in the input
        auto& ins_vector = ins.at(attr_names[i]);
        if (ins_vector.size() == 1) {  // ShapeTensor
          kernel_ctx->EmplaceBackAttr(std::move(
              experimental::MakePtenScalarArrayFromVar(ins_vector[0]->Var())));
        } else {  // ShapeTensorList
          std::vector<framework::Variable*> variables;
          variables.reserve(ins_vector.size());
          for (const auto& var_base : ins_vector) {
            variables.push_back(var_base->MutableVar());
          }
          kernel_ctx->EmplaceBackAttr(std::move(
              experimental::MakePtenScalarArrayFromVarList(variables)));
        }
      }
    } else if (attr_defs[i].type_index ==
               std::type_index(typeid(pten::Scalar))) {
      // TODO(chenweihang): support other attrs later
      // TODO(zhangyunfei): Scalar should hold a scalar type, and we should
      // check the attribute type by attr_defs
      if (attrs.find(attr_names[i]) != attrs.end() ||
          default_attrs.find(attr_names[i]) !=
              default_attrs.end()) {  // scalar is in the attribute
        auto& attr = GetAttr(attrs, default_attrs, attr_names[i]);
        if (std::type_index(attr.type()) == std::type_index(typeid(float))) {
          kernel_ctx->EmplaceBackAttr(
              std::move(pten::Scalar(BOOST_GET_CONST(float, attr))));
        } else if (std::type_index(attr.type()) ==
                   std::type_index(typeid(std::string))) {
          kernel_ctx->EmplaceBackAttr(
              std::move(pten::Scalar(BOOST_GET_CONST(std::string, attr))));
        } else {
          PADDLE_THROW(platform::errors::Unimplemented(
              "Unsupported cast op attribute `%s` to Scalar when construct "
              "KernelContext in dygraph.",
              attr_names[i]));
        }
      } else {  // scalar is in the input
        auto& ins_vector = ins.at(attr_names[i]);
        kernel_ctx->EmplaceBackAttr(std::move(
            experimental::MakePtenScalarFromVar(ins_vector[0]->Var())));
      }

    } else {
      // TODO(chenweihang): support other attrs later
      auto& attr = GetAttr(attrs, default_attrs, attr_names[i]);
      if (attr_defs[i].type_index == std::type_index(typeid(int))) {
        kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(int, attr));
      } else if (attr_defs[i].type_index == std::type_index(typeid(float))) {
        kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(float, attr));
      } else if (attr_defs[i].type_index == std::type_index(typeid(bool))) {
        kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(bool, attr));
      } else if (attr_defs[i].type_index ==
                 std::type_index(typeid(pten::DataType))) {
        auto data_type = pten::TransToPtenDataType(
            static_cast<framework::proto::VarType::Type>(
                BOOST_GET_CONST(int, attr)));
        kernel_ctx->EmplaceBackAttr(data_type);
      } else if (attr_defs[i].type_index ==
                 std::type_index(typeid(std::vector<int64_t>))) {
        if (std::type_index(attr.type()) ==
            std::type_index(typeid(std::vector<int>))) {
          // Emplace back the attr according to the type of the pten kernel's
          // args.
          const auto& vector_int_attr = BOOST_GET_CONST(std::vector<int>, attr);
          const std::vector<int64_t> vector_int64_attr(vector_int_attr.begin(),
                                                       vector_int_attr.end());
          kernel_ctx->EmplaceBackAttr(vector_int64_attr);
        }
        // TODO(YuanRisheng): need to support vector<int64_t> attrs
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "Unsupported cast op attribute `%s` when construct "
            "KernelContext in dygraph.",
            attr_names[i]));
      }
    }
  }
}

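// Write the pten kernel's output tensors back into the corresponding output
// variables of the op.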
template <typename VarType>
static void WriteBackToOutputs(
    const framework::KernelSignature& pt_kernel_signature,
    const NameVarMap<VarType>& outs, pten::KernelContext* kernel_ctx) {
  auto& output_names = std::get<2>(pt_kernel_signature.args);

  for (size_t i = 0; i < output_names.size(); ++i) {
    auto iter = outs.find(output_names[i]);
    if (iter != outs.end()) {
      auto& outs_vector = iter->second;

      auto& range_pair = kernel_ctx->OutputRangeAt(i);
      auto pten_outs = kernel_ctx->MutableOutputBetween<pten::DenseTensor>(
          range_pair.first, range_pair.second);

      for (size_t j = 0; j < pten_outs.size(); ++j) {
        experimental::MakeVariableFromPtenTensor(pten_outs[j],
                                                 outs_vector[j]->MutableVar());
      }
    }
  }
}

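// Runs a legacy fluid op kernel: infer shapes, execute the kernel functor,
// then apply optional nan/inf checking, benchmark synchronization, and
// complex-to-real gradient handling.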
template <typename VarType>
static void PreparedOpRunImpl(
    const framework::OperatorBase& op, const framework::RuntimeContext& ctx,
    const framework::OpKernelType& kernel_type,
    const framework::OperatorWithKernel::OpKernelFunc& func,
    platform::DeviceContext* dev_ctx, const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs, const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs) {
  // TODO(zjl): remove scope in dygraph
  framework::Scope scope;

  DygraphInferShapeContext<VarType> infer_shape_ctx(
      &ins, &outs, &attrs, &default_attrs, op.Type(), &kernel_type);
  op.Info().infer_shape_(&infer_shape_ctx);

  func(DygraphExecutionContext<VarType>(op, scope, *dev_ctx, ctx, ins, outs,
                                        attrs, default_attrs));

  if (FLAGS_check_nan_inf) {
    framework::details::CheckOpHasNanOrInfInDygraph<VarType>(
        op.Type(), outs, dev_ctx->GetPlace());
  }

  if (FLAGS_benchmark) {
    dev_ctx->Wait();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
    VLOG(4) << "Operator(" << op.Type() << "): context wait and get last error";
#endif
  }

  /**
   * [ Why do we need to convert complex gradients to real gradients? ]
   *
   * Since the introduction of complex number calculations, ops that support
   * them generally support type promotion, e.g.
   * x(float32) + y(complex64) = out(complex64), so the types of the grad
   * tensors should be dout(complex64), dx(float32), dy(complex64).
   *
   * But because dout is complex64, dx is also complex64 after the grad op
   * kernel has executed. We need to recognize this situation and convert dx
   * back to float32; HandleComplexGradToRealGrad does exactly that.
   */
  if (framework::IsComplexType(kernel_type.data_type_)) {
    HandleComplexGradToRealGrad<VarType>(outs);
  }
}

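// Runs a pten kernel: infer shapes, build the pten KernelContext from the
// fluid inputs/outputs/attrs, execute the kernel, then write the results
// back into the op's output variables.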
template <typename VarType>
static void PreparedOpRunPtImpl(
    const framework::OperatorBase& op,
    const framework::OpKernelType& kernel_type,
    const framework::KernelSignature& pt_kernel_signature,
    const pten::Kernel& pt_kernel, pten::KernelContext* pt_kernel_context,
    platform::DeviceContext* dev_ctx, const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs, const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs) {
  DygraphInferShapeContext<VarType> infer_shape_ctx(
      &ins, &outs, &attrs, &default_attrs, op.Type(), &kernel_type);
  op.Info().infer_shape_(&infer_shape_ctx);

  BuildDygraphPtenKernelContext<VarType>(pt_kernel_signature, pt_kernel, ins,
                                         outs, attrs, default_attrs, dev_ctx,
                                         pt_kernel_context);

  pt_kernel(pt_kernel_context);

  if (FLAGS_benchmark) {
    dev_ctx->Wait();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
    VLOG(4) << "Operator(" << op.Type() << "): context wait and get last error";
#endif
  }

  WriteBackToOutputs<VarType>(pt_kernel_signature, outs, pt_kernel_context);

  // Clear cached tensor data so that the reused kernel context does not
  // affect VarBase life cycle management
  pt_kernel_context->ClearData();

  // TODO(chenweihang): add debug flags later
  if (framework::IsComplexType(kernel_type.data_type_)) {
    HandleComplexGradToRealGrad<VarType>(outs);
  }
}

void PreparedOp::Run(const NameVarMap<VarBase>& ins,
                     const NameVarMap<VarBase>& outs,
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
  if (run_pten_kernel_) {
    PreparedOpRunPtImpl<VarBase>(op_, kernel_type_, pt_kernel_signature_,
                                 pt_kernel_, pt_kernel_context_, dev_ctx_, ins,
                                 outs, attrs, default_attrs);
  } else {
    PreparedOpRunImpl<VarBase>(op_, ctx_, kernel_type_, func_, dev_ctx_, ins,
                               outs, attrs, default_attrs);
  }
}

void PreparedOp::Run(const NameVarMap<VariableWrapper>& ins,
                     const NameVarMap<VariableWrapper>& outs,
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
  if (run_pten_kernel_) {
    PreparedOpRunPtImpl<VariableWrapper>(
        op_, kernel_type_, pt_kernel_signature_, pt_kernel_, pt_kernel_context_,
        dev_ctx_, ins, outs, attrs, default_attrs);
  } else {
    PreparedOpRunImpl<VariableWrapper>(op_, ctx_, kernel_type_, func_, dev_ctx_,
                                       ins, outs, attrs, default_attrs);
  }
}

}  // namespace imperative
}  // namespace paddle