// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/prepared_operator.h"

#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/imperative/infer_shape_context.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/pten/common/scalar.h"
#include "paddle/pten/common/scalar_array.h"
#include "paddle/utils/small_vector.h"
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_op_list.h"
#endif
#include "paddle/fluid/platform/device/gpu/gpu_info.h"

DECLARE_bool(check_nan_inf);
DECLARE_bool(run_pten_kernel);
DECLARE_bool(benchmark);
DECLARE_bool(run_kp_kernel);

namespace paddle {
namespace imperative {

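// Both overloads expose the underlying VariableWrapper, so the templated
// code below can treat VarBase and VariableWrapper handles uniformly.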
const std::shared_ptr<VariableWrapper>& GetVariableWrapper(
    const std::shared_ptr<paddle::imperative::VarBase>& var) {
  return var->SharedVar();
}

const std::shared_ptr<VariableWrapper>& GetVariableWrapper(
    const std::shared_ptr<VariableWrapper>& var) {
  return var;
}

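// Return the tensor held by a Variable: the LoDTensor itself, or the value
// tensor of a SelectedRows; other variable types yield nullptr.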
const framework::Tensor* GetTensorFromVar(const framework::Variable& var) {
  if (var.IsType<framework::LoDTensor>()) {
    return &(var.Get<framework::LoDTensor>());
  } else if (var.IsType<framework::SelectedRows>()) {
    return &(var.Get<framework::SelectedRows>().value());
  } else {
    return nullptr;
  }
}

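// Look up an attribute by name, first in attrs and then in default_attrs;
// raise NotFound if it is missing from both maps.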
static const framework::Attribute& GetAttr(
    const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs, const std::string& name) {
  auto it = attrs.find(name);
  bool found = it != attrs.end();
  if (!found) {
    it = default_attrs.find(name);
    found = it != default_attrs.end();
  }
  PADDLE_ENFORCE_EQ(
      found, true,
      platform::errors::NotFound("(%s) is not found in AttributeMap.", name));
  return it->second;
}

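// If a var holds a complex grad but its forward data type is real, the grad
// was promoted by a complex kernel and is cast back to the forward type.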
template <typename VarType>
static void HandleComplexGradToRealGrad(const NameVarMap<VarType>& outs) {
  for (auto& pair : outs) {
    for (auto& var : pair.second) {
      if (var == nullptr) {
        continue;
      }
      if (var->ForwardDataType() ==
          static_cast<framework::proto::VarType::Type>(-1)) {
        VLOG(6) << "Var (" << var->Name()
                << ")'s forward data type is not set.";
        continue;
      }
      if (!framework::IsComplexType(var->DataType()) ||
          framework::IsComplexType(var->ForwardDataType())) {
        continue;
      }
      const auto* tensor = GetTensorFromVar(var->Var());
      if (tensor && tensor->IsInitialized()) {
        VLOG(6) << "Transform " << framework::DataTypeToString(var->DataType())
                << " var `" << var->Name() << "` to "
                << framework::DataTypeToString(var->ForwardDataType())
                << " real var in dynamic graph.";
        framework::Tensor out;
        framework::TransComplexToReal(var->ForwardDataType(), var->DataType(),
                                      *tensor, &out);
        SetTensorToVariable(var->Var(), out, var->MutableVar());
      }
    }
  }
}

PreparedOp::PreparedOp(const framework::OperatorBase& op,
                       const framework::RuntimeContext& ctx,
                       const framework::OpKernelType& kernel_type,
                       const framework::OperatorWithKernel::OpKernelFunc& func,
                       platform::DeviceContext* dev_ctx)
    : op_(op),
      ctx_(ctx),
      kernel_type_(kernel_type),
      func_(func),
      dev_ctx_(dev_ctx) {}

PreparedOp::PreparedOp(const framework::OperatorBase& op,
                       const framework::RuntimeContext& ctx,
                       const framework::OpKernelType& kernel_type,
                       const framework::KernelSignature& kernel_signature,
                       const pten::Kernel& pt_kernel,
                       pten::KernelContext* pt_kernel_context,
                       platform::DeviceContext* dev_ctx)
    : op_(op),
      ctx_(ctx),
      kernel_type_(kernel_type),
      func_(nullptr),
      dev_ctx_(dev_ctx),
      run_pten_kernel_(true),
      pt_kernel_signature_(kernel_signature),
      pt_kernel_(pt_kernel),
      pt_kernel_context_(pt_kernel_context) {}

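// Select the kernel (pten kernel first, fluid op kernel as fallback) and the
// device context for `op`, and package them into a PreparedOp.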
template <typename VarType>
PreparedOp PrepareImpl(const NameVarMap<VarType>& ins,
                       const NameVarMap<VarType>& outs,
                       const framework::OperatorWithKernel& op,
                       const platform::Place& place,
                       const framework::AttributeMap& attrs,
                       const framework::AttributeMap& default_attrs,
                       pten::KernelContext* pt_kernel_context) {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  framework::RuntimeContext ctx({}, {});

#ifdef PADDLE_WITH_MKLDNN
  // The MKLDNN code path reads attributes in some GetKernelTypeForVar and
  // GetKernelType implementations, so the attributes need to be copied there.
  // The const qualifier of Attrs() has to be cast away to overwrite them.
  if (FLAGS_use_mkldnn) {
    auto& mutable_op_attrs = const_cast<framework::AttributeMap&>(op.Attrs());
    mutable_op_attrs = default_attrs;
    for (auto& attr : attrs) {
      mutable_op_attrs[attr.first] = attr.second;
    }
  }
#endif

  // 1. get expected kernel key
  auto dygraph_exe_ctx = DygraphExecutionContext<VarType>(
      op, framework::Scope(), *dev_ctx, ctx, ins, outs, attrs, default_attrs);
  auto expected_kernel_key = op.GetExpectedKernelType(dygraph_exe_ctx);
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

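  // Prefer a compatible pten kernel when the pten library is enabled; if none
  // is found, fall through to the fluid op kernel lookup below.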
  if (FLAGS_run_pten_kernel &&
      pten::KernelFactory::Instance().HasCompatiblePtenKernel(op.Type())) {
    auto pt_kernel_signature = op.GetExpectedPtenKernelArgs(dygraph_exe_ctx);
    VLOG(6) << framework::KernelSignatureToString(pt_kernel_signature);

    auto pt_kernel_name = pt_kernel_signature.name;
    auto pt_kernel_key = TransOpKernelTypeToPtenKernelKey(expected_kernel_key);
    auto pt_kernel = pten::KernelFactory::Instance().SelectKernel(
        pt_kernel_name, pt_kernel_key);

    if (pt_kernel.IsValid()) {
      VLOG(6) << "Dynamic mode PrepareImpl - kernel name: " << pt_kernel_name
              << " | kernel key: " << pt_kernel_key
              << " | kernel: " << pt_kernel;

      // TODO(chenweihang): using CPUKernel when miss device kernel case
      return PreparedOp(op, ctx, expected_kernel_key, pt_kernel_signature,
                        pt_kernel, pt_kernel_context, dev_ctx);
    } else {
      VLOG(6) << "Dynamic mode ChoosePtenKernel - kernel `" << pt_kernel_name
              << "` not found.";
    }
  }

  // 2. check if op[type] has a kernel registered.
  auto& all_op_kernels = op.AllOpKernels();
  auto kernels_iter = all_op_kernels.find(op.Type());
  PADDLE_ENFORCE_NE(
      kernels_iter, all_op_kernels.end(),
      platform::errors::NotFound(
          "There are no kernels which are registered in the %s operator.",
          op.Type()));

  auto& kernels = kernels_iter->second;
  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_XPU
  if (is_xpu_place(expected_kernel_key.place_) &&
      (kernel_iter == kernels.end() ||
       !paddle::platform::is_xpu_support_op(op.Type(), expected_kernel_key) ||
       paddle::platform::is_in_xpu_black_list(op.Type()))) {
    VLOG(3) << "missing XPU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  if (kernel_iter == kernels.end() &&
      is_npu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing NPU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  // TODO(jiabin): Add operator.cc's line 1000 part back when we need that
  // case
  PADDLE_ENFORCE_NE(kernel_iter, kernels.end(),
                    platform::errors::NotFound(
                        "Operator %s does not have kernel for %s.", op.Type(),
                        KernelTypeToString(expected_kernel_key)));

  if (!(expected_kernel_key.place_ == place)) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  return PreparedOp(op, ctx, expected_kernel_key, kernel_iter->second, dev_ctx);
}

PreparedOp PreparedOp::Prepare(const NameVarMap<VarBase>& ins,
                               const NameVarMap<VarBase>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs,
                               pten::KernelContext* pt_kernel_context) {
  return PrepareImpl<VarBase>(ins, outs, op, place, attrs, default_attrs,
                              pt_kernel_context);
}

PreparedOp PreparedOp::Prepare(const NameVarMap<VariableWrapper>& ins,
                               const NameVarMap<VariableWrapper>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs,
                               pten::KernelContext* pt_kernel_context) {
  return PrepareImpl<VariableWrapper>(ins, outs, op, place, attrs,
                                      default_attrs, pt_kernel_context);
}

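// Translate fluid inputs, outputs and attributes into the flat tensor lists
// and per-argument index ranges of the pten KernelContext.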
template <typename VarType>
static void BuildDygraphPtenKernelContext(
    const framework::KernelSignature& pt_kernel_signature,
    const pten::Kernel& pt_kernel, const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs, const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs,
    platform::DeviceContext* dev_ctx, pten::KernelContext* kernel_ctx) {
  // TODO(chenweihang): this only works for very simple cases for now;
  // many cases still need to be dealt with later:
  // 1. inputs and outputs that are not tensors
  // 2. dispensable and duplicable inputs and outputs
  // 3. removing needless attributes
  // 4. using pt Tensor directly
  // 5. kernel inputs that are not DenseTensor
  kernel_ctx->SetDeviceContext(dev_ctx);

  auto& input_names = std::get<0>(pt_kernel_signature.args);
  auto& attr_names = std::get<1>(pt_kernel_signature.args);
  auto& output_names = std::get<2>(pt_kernel_signature.args);

  auto& input_defs = pt_kernel.args_def().input_defs();
  auto& output_defs = pt_kernel.args_def().output_defs();
  auto& attr_defs = pt_kernel.args_def().attribute_defs();

  PADDLE_ENFORCE_EQ(input_names.size(), input_defs.size(),
                    platform::errors::InvalidArgument(
                        "the size of inputs_args names (%d) must be equal to "
                        "the size of kernel input_defs (%d).",
                        input_names.size(), input_defs.size()));

  PADDLE_ENFORCE_EQ(output_names.size(), output_defs.size(),
                    platform::errors::InvalidArgument(
                        "the size of outputs_args names (%d) must be equal to "
                        "the size of kernel output_defs (%d).",
                        output_names.size(), output_defs.size()));

  PADDLE_ENFORCE_EQ(attr_names.size(), attr_defs.size(),
                    platform::errors::InvalidArgument(
                        "the size of attribute_args names (%d) must be equal "
                        "to the size of kernel attribute_defs (%d).",
                        attr_names.size(), attr_defs.size()));

  for (size_t i = 0; i < input_names.size(); ++i) {
    auto& in_def = input_defs.at(i);
    auto& ins_vector = ins.at(input_names[i]);

    size_t start_idx = (i == 0 ? 0 : kernel_ctx->InputRangeAt(i - 1).second);
    size_t end_idx = start_idx + ins_vector.size();
    auto current_vector_size = kernel_ctx->InputsSize();

    // If the memory needed is less than the current memory allocated, we will
    // reuse the current memory by using ReMakePtenDenseTensorFromVar.
    // Otherwise, we will create new storage.
    for (size_t offset = 0; offset < ins_vector.size(); ++offset) {
      const auto& variable = ins_vector[offset]->Var();
      if (current_vector_size > start_idx + offset) {
        auto& input_ptr = kernel_ctx->MutableInputPtrAt(start_idx + offset);
        if (input_ptr == nullptr) {
          input_ptr = experimental::MakePtenTensorBaseFromVar(variable, in_def);
        } else {
          experimental::ReMakePtenDenseTensorFromVar(
              variable, in_def, kernel_ctx->MutableInputAt<pten::DenseTensor>(
                                    start_idx + offset));
        }
      } else {
        kernel_ctx->EmplaceBackInputWithoutSetRange(
            experimental::MakePtenTensorBaseFromVar(variable, in_def));
      }
    }
    kernel_ctx->AssignInputRange(std::make_pair(start_idx, end_idx), i);
  }

  for (size_t i = 0; i < output_names.size(); ++i) {
    auto& out_def = output_defs.at(i);
    auto& outs_vector = outs.at(output_names[i]);

    size_t start_idx = (i == 0 ? 0 : kernel_ctx->OutputRangeAt(i - 1).second);
    size_t end_idx = start_idx + outs_vector.size();
    auto current_vector_size = kernel_ctx->OutputsSize();
    // If the memory needed is less than the current memory allocated, we will
    // reuse the current memory by using ReMakePtenDenseTensorFromVar.
    // Otherwise, we will create new storage.
    for (size_t offset = 0; offset < outs_vector.size(); ++offset) {
      if (current_vector_size > start_idx + offset) {
        experimental::ReMakePtenDenseTensorFromVar(
            outs_vector[offset]->MutableVar(), out_def,
            kernel_ctx->MutableOutputAt<pten::DenseTensor>(start_idx + offset));
      } else {
        kernel_ctx->EmplaceBackOutputWithoutSetRange(
            experimental::MakePtenTensorBaseFromVar(
                outs_vector[offset]->MutableVar(), out_def));
      }
    }
    kernel_ctx->AssignOutputRange(std::make_pair(start_idx, end_idx), i);
  }

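  // Translate attributes. A ScalarArray or Scalar argument may come either
  // from an attribute or from an input variable (e.g. ShapeTensor or
  // ShapeTensorList), so both places are checked.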
  for (size_t i = 0; i < attr_names.size(); ++i) {
    if (attr_defs[i].type_index == std::type_index(typeid(pten::ScalarArray))) {
      if (attrs.find(attr_names[i]) !=
          attrs.end()) {  // shape is in the attribute
        auto& attr = GetAttr(attrs, default_attrs, attr_names[i]);
        if (std::type_index(attr.type()) ==
            std::type_index(typeid(std::vector<int64_t>))) {
          kernel_ctx->EmplaceBackAttr(std::move(
              pten::ScalarArray(BOOST_GET_CONST(std::vector<int64_t>, attr))));
        } else if (std::type_index(attr.type()) ==
                   std::type_index(typeid(std::vector<int32_t>))) {
          kernel_ctx->EmplaceBackAttr(std::move(
              pten::ScalarArray(BOOST_GET_CONST(std::vector<int32_t>, attr))));
        } else {
          PADDLE_THROW(platform::errors::Unimplemented(
              "Unsupported cast op attribute `%s` to VectorTensor when "
              "construct KernelContext.",
              attr_names[i]));
        }
      } else {  // shape is in the input
        auto& ins_vector = ins.at(attr_names[i]);
        if (ins_vector.size() == 1) {  // ShapeTensor
          kernel_ctx->EmplaceBackAttr(std::move(
              experimental::MakePtenScalarArrayFromVar(ins_vector[0]->Var())));
        } else {  // ShapeTensorList
          std::vector<framework::Variable*> variables;
          variables.reserve(ins_vector.size());
          for (const auto& var_base : ins_vector) {
            variables.push_back(var_base->MutableVar());
          }
          kernel_ctx->EmplaceBackAttr(std::move(
              experimental::MakePtenScalarArrayFromVarList(variables)));
        }
      }
    } else if (attr_defs[i].type_index ==
               std::type_index(typeid(pten::Scalar))) {
      // TODO(chenweihang): support other attrs later
      // TODO(zhangyunfei): Scalar should hold a scalar type, and we should
      // check the attribute type against attr_defs
      if (attrs.find(attr_names[i]) != attrs.end() ||
          default_attrs.find(attr_names[i]) !=
              default_attrs.end()) {  // scalar is in the attribute
        auto& attr = GetAttr(attrs, default_attrs, attr_names[i]);
        if (std::type_index(attr.type()) == std::type_index(typeid(float))) {
          kernel_ctx->EmplaceBackAttr(
              std::move(pten::Scalar(BOOST_GET_CONST(float, attr))));
        } else if (std::type_index(attr.type()) ==
                   std::type_index(typeid(std::string))) {
          kernel_ctx->EmplaceBackAttr(
              std::move(pten::Scalar(BOOST_GET_CONST(std::string, attr))));
        } else {
          PADDLE_THROW(platform::errors::Unimplemented(
              "Unsupported cast op attribute `%s` to Scalar when construct "
              "KernelContext in dygraph.",
              attr_names[i]));
        }
      } else {  // scalar is in the input
        auto& ins_vector = ins.at(attr_names[i]);
        kernel_ctx->EmplaceBackAttr(std::move(
            experimental::MakePtenScalarFromVar(ins_vector[0]->Var())));
      }

    } else {
      // TODO(chenweihang): support other attrs later
      auto& attr = GetAttr(attrs, default_attrs, attr_names[i]);
      if (attr_defs[i].type_index == std::type_index(typeid(int))) {
        kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(int, attr));
      } else if (attr_defs[i].type_index == std::type_index(typeid(float))) {
        kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(float, attr));
      } else if (attr_defs[i].type_index == std::type_index(typeid(bool))) {
        kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(bool, attr));
      } else if (attr_defs[i].type_index ==
                 std::type_index(typeid(pten::DataType))) {
        auto data_type = pten::TransToPtenDataType(
            static_cast<framework::proto::VarType::Type>(
                BOOST_GET_CONST(int, attr)));
        kernel_ctx->EmplaceBackAttr(data_type);
      } else if (attr_defs[i].type_index ==
                 std::type_index(typeid(std::vector<int64_t>))) {
        if (std::type_index(attr.type()) ==
            std::type_index(typeid(std::vector<int>))) {
          // Emplace Back Attr according to the type of Pten_Kernel args.
          const auto& vector_int_attr = BOOST_GET_CONST(std::vector<int>, attr);
          const std::vector<int64_t> vector_int64_attr(vector_int_attr.begin(),
                                                       vector_int_attr.end());
          kernel_ctx->EmplaceBackAttr(vector_int64_attr);
        }
        // TODO(YuanRisheng) Need support vector<int64_t> attr
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "Unsupported cast op attribute `%s` when constructing "
            "KernelContext in dygraph.",
            attr_names[i]));
      }
    }
  }
}

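// Copy the pten kernel outputs back into the corresponding fluid Variables.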
template <typename VarType>
static void WriteBackToOutputs(
    const framework::KernelSignature& pt_kernel_signature,
    const NameVarMap<VarType>& outs, pten::KernelContext* kernel_ctx) {
  auto& output_names = std::get<2>(pt_kernel_signature.args);

  for (size_t i = 0; i < output_names.size(); ++i) {
    auto& outs_vector = outs.at(output_names[i]);

    auto& range_pair = kernel_ctx->OutputRangeAt(i);
    auto pten_outs = kernel_ctx->MutableOutputBetween<pten::DenseTensor>(
        range_pair.first, range_pair.second);

    for (size_t j = 0; j < pten_outs.size(); ++j) {
      experimental::MakeVariableFromPtenTensor(pten_outs[j],
                                               outs_vector[j]->MutableVar());
    }
  }
}

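// Run an op through the fluid kernel path: InferShape first, then the
// selected OpKernelFunc, followed by optional nan/inf checks and
// complex-to-real gradient handling.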
template <typename VarType>
static void PreparedOpRunImpl(
    const framework::OperatorBase& op, const framework::RuntimeContext& ctx,
    const framework::OpKernelType& kernel_type,
    const framework::OperatorWithKernel::OpKernelFunc& func,
    platform::DeviceContext* dev_ctx, const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs, const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs) {
  // TODO(zjl): remove scope in dygraph
  framework::Scope scope;

  DygraphInferShapeContext<VarType> infer_shape_ctx(&ins, &outs, &attrs,
                                                    &default_attrs, op.Type());
  static_cast<const framework::OperatorWithKernel&>(op).InferShape(
      &infer_shape_ctx);

  func(DygraphExecutionContext<VarType>(op, scope, *dev_ctx, ctx, ins, outs,
                                        attrs, default_attrs));

  if (FLAGS_check_nan_inf) {
    framework::details::CheckOpHasNanOrInfInDygraph<VarType>(
        op.Type(), outs, dev_ctx->GetPlace());
  }

  if (FLAGS_benchmark) {
    dev_ctx->Wait();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
    VLOG(4) << "Operator(" << op.Type() << "): context wait and get last error";
#endif
  }

  /**
   * [ Why do we need to handle complex gradients as real gradients? ]
   *
   * After the introduction of complex number calculations, ops that support
   * complex number calculations generally support type promotion, such as
   * x(float32) + y(complex64) = out(complex64); the types of the grad
   * tensors should then be dout(complex64), dx(float32), dy(complex64).
   *
   * But because dout is complex64, dx is also complex64 after the grad op
   * kernel executes, so we need to recognize this situation and convert dx
   * back to float32. HandleComplexGradToRealGrad does exactly that.
   */
  if (framework::IsComplexType(kernel_type.data_type_)) {
    HandleComplexGradToRealGrad<VarType>(outs);
  }
}

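// Run an op through the pten kernel path: InferShape, build the
// KernelContext, invoke the kernel, then write the results back.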
template <typename VarType>
static void PreparedOpRunPtImpl(
    const framework::OperatorBase& op,
    const framework::KernelSignature& pt_kernel_signature,
    const pten::Kernel& pt_kernel, pten::KernelContext* pt_kernel_context,
    platform::DeviceContext* dev_ctx, const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs, const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs) {
  DygraphInferShapeContext<VarType> infer_shape_ctx(&ins, &outs, &attrs,
                                                    &default_attrs, op.Type());
  static_cast<const framework::OperatorWithKernel&>(op).InferShape(
      &infer_shape_ctx);

  BuildDygraphPtenKernelContext<VarType>(pt_kernel_signature, pt_kernel, ins,
                                         outs, attrs, default_attrs, dev_ctx,
                                         pt_kernel_context);

  pt_kernel(pt_kernel_context);

  if (FLAGS_benchmark) {
    dev_ctx->Wait();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
    VLOG(4) << "Operator(" << op.Type() << "): context wait and get last error";
#endif
  }

  WriteBackToOutputs<VarType>(pt_kernel_signature, outs, pt_kernel_context);

  // Ensure that it does not affect the VarBase life cycle management
  pt_kernel_context->ClearData();

  // TODO(chenweihang): add debug flags later
  // TODO(chenweihang): deal with complex cases later
}

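// Dispatch to the pten kernel path when one was selected in Prepare;
// otherwise run the original fluid op kernel.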
void PreparedOp::Run(const NameVarMap<VarBase>& ins,
                     const NameVarMap<VarBase>& outs,
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
  if (run_pten_kernel_) {
    PreparedOpRunPtImpl<VarBase>(op_, pt_kernel_signature_, pt_kernel_,
                                 pt_kernel_context_, dev_ctx_, ins, outs, attrs,
                                 default_attrs);
  } else {
    PreparedOpRunImpl<VarBase>(op_, ctx_, kernel_type_, func_, dev_ctx_, ins,
                               outs, attrs, default_attrs);
  }
}

void PreparedOp::Run(const NameVarMap<VariableWrapper>& ins,
                     const NameVarMap<VariableWrapper>& outs,
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
  if (run_pten_kernel_) {
    PreparedOpRunPtImpl<VariableWrapper>(op_, pt_kernel_signature_, pt_kernel_,
                                         pt_kernel_context_, dev_ctx_, ins,
                                         outs, attrs, default_attrs);
  } else {
    PreparedOpRunImpl<VariableWrapper>(op_, ctx_, kernel_type_, func_, dev_ctx_,
                                       ins, outs, attrs, default_attrs);
  }
}

}  // namespace imperative
}  // namespace paddle