prepared_operator.cc
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/prepared_operator.h"

#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/imperative/infer_shape_context.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/pten/common/scalar.h"
#include "paddle/pten/common/scalar_array.h"
#include "paddle/utils/small_vector.h"
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_op_list.h"
#endif
DECLARE_bool(check_nan_inf);
DECLARE_bool(run_pten_kernel);
DECLARE_bool(benchmark);

namespace paddle {
namespace imperative {

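// Helper overloads that give uniform access to the underlying VariableWrapper,
// whether the caller holds a VarBase or a VariableWrapper.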
const std::shared_ptr<VariableWrapper>& GetVariableWrapper(
    const std::shared_ptr<paddle::imperative::VarBase>& var) {
  return var->SharedVar();
}

const std::shared_ptr<VariableWrapper>& GetVariableWrapper(
    const std::shared_ptr<VariableWrapper>& var) {
  return var;
}

const framework::Tensor* GetTensorFromVar(const framework::Variable& var) {
  if (var.IsType<framework::LoDTensor>()) {
    return &(var.Get<framework::LoDTensor>());
  } else if (var.IsType<framework::SelectedRows>()) {
    return &(var.Get<framework::SelectedRows>().value());
  } else {
    return nullptr;
  }
}

static const framework::Attribute& GetAttr(
    const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs, const std::string& name) {
  auto it = attrs.find(name);
  bool found = it != attrs.end();
  if (!found) {
    it = default_attrs.find(name);
    found = it != default_attrs.end();
  }
  PADDLE_ENFORCE_EQ(
      found, true,
      platform::errors::NotFound("(%s) is not found in AttributeMap.", name));
  return it->second;
}

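// If a grad var holds a complex-typed tensor while its forward data type is a
// real type, cast the gradient back to that real type (see the note in
// PreparedOpRunImpl below).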
template <typename VarType>
static void HandleComplexGradToRealGrad(const NameVarMap<VarType>& outs) {
  for (auto& pair : outs) {
    for (auto& var : pair.second) {
      if (var == nullptr) {
        continue;
      }
      if (var->ForwardDataType() ==
          static_cast<framework::proto::VarType::Type>(-1)) {
        VLOG(6) << "Var (" << var->Name()
                << ")'s forward data type is not set.";
        continue;
      }
      if (!framework::IsComplexType(var->DataType()) ||
          framework::IsComplexType(var->ForwardDataType())) {
        continue;
      }
      const auto* tensor = GetTensorFromVar(var->Var());
      if (tensor && tensor->IsInitialized()) {
        VLOG(6) << "Transform " << framework::DataTypeToString(var->DataType())
                << " var `" << var->Name() << "` to "
                << framework::DataTypeToString(var->ForwardDataType())
                << " real var in dynamic graph.";
        framework::Tensor out;
        framework::TransComplexToReal(var->ForwardDataType(), var->DataType(),
                                      *tensor, &out);
        SetTensorToVariable(var->Var(), out, var->MutableVar());
      }
    }
  }
}

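// A PreparedOp bundles everything needed to execute the kernel later: the
// operator, its runtime context, the chosen kernel (fluid or pten) and the
// device context it will run on.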
PreparedOp::PreparedOp(const framework::OperatorBase& op,
                       const framework::RuntimeContext& ctx,
                       const framework::OpKernelType& kernel_type,
                       const framework::OperatorWithKernel::OpKernelFunc& func,
                       platform::DeviceContext* dev_ctx)
    : op_(op),
      ctx_(ctx),
      kernel_type_(kernel_type),
      func_(func),
      dev_ctx_(dev_ctx) {}

PreparedOp::PreparedOp(const framework::OperatorBase& op,
                       const framework::RuntimeContext& ctx,
                       const framework::OpKernelType& kernel_type,
                       const framework::KernelSignature& kernel_signature,
                       const pten::Kernel& pt_kernel,
                       pten::KernelContext* pt_kernel_context,
                       platform::DeviceContext* dev_ctx)
    : op_(op),
      ctx_(ctx),
      kernel_type_(kernel_type),
      func_(nullptr),
      dev_ctx_(dev_ctx),
      run_pten_kernel_(true),
      pt_kernel_signature_(kernel_signature),
      pt_kernel_(pt_kernel),
      pt_kernel_context_(pt_kernel_context) {}

template <typename VarType>
PreparedOp PrepareImpl(const NameVarMap<VarType>& ins,
                       const NameVarMap<VarType>& outs,
                       const framework::OperatorWithKernel& op,
                       const platform::Place& place,
                       const framework::AttributeMap& attrs,
                       const framework::AttributeMap& default_attrs,
                       pten::KernelContext* pt_kernel_context) {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  framework::RuntimeContext ctx({}, {});

#ifdef PADDLE_WITH_MKLDNN
  // The MKLDNN variant of the code reads attributes in some of the
  // GetKernelTypeForVar and GetKernelType functions, so the attributes have to
  // be copied there. The const qualifier of Attrs() is cast away to overwrite
  // them.
  if (FLAGS_use_mkldnn) {
    auto& mutable_op_attrs = const_cast<framework::AttributeMap&>(op.Attrs());
    mutable_op_attrs = default_attrs;
    for (auto& attr : attrs) {
      mutable_op_attrs[attr.first] = attr.second;
    }
  }
#endif

  // 1. get expected kernel key
  auto dygraph_exe_ctx = DygraphExecutionContext<VarType>(
      op, framework::Scope(), *dev_ctx, ctx, ins, outs, attrs, default_attrs);
  auto expected_kernel_key = op.GetExpectedKernelType(dygraph_exe_ctx);
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

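  // Prefer a compatible pten kernel when FLAGS_run_pten_kernel is enabled and
  // one is registered for this op type; otherwise fall through to the fluid
  // op kernel lookup below.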
  if (FLAGS_run_pten_kernel &&
      pten::KernelFactory::Instance().HasCompatiblePtenKernel(op.Type())) {
    auto pt_kernel_signature = op.GetExpectedPtenKernelArgs(dygraph_exe_ctx);
    VLOG(6) << framework::KernelSignatureToString(pt_kernel_signature);

    auto pt_kernel_name = pten::KernelName(pt_kernel_signature.name);
    auto pt_kernel_key = TransOpKernelTypeToPtenKernelKey(expected_kernel_key);
    auto pt_kernel = pten::KernelFactory::Instance().SelectKernel(
        pt_kernel_name, pt_kernel_key);

    if (pt_kernel.IsValid()) {
      VLOG(6) << "Dynamic mode PrepareImpl - kernel name: " << pt_kernel_name
              << " | kernel key: " << pt_kernel_key
              << " | kernel: " << pt_kernel;

      // TODO(chenweihang): use CPUKernel when the device kernel is missing
      return PreparedOp(op, ctx, expected_kernel_key, pt_kernel_signature,
                        pt_kernel, pt_kernel_context, dev_ctx);
    } else {
      VLOG(6) << "Dynamic mode ChoosePtenKernel - kernel `" << pt_kernel_name
              << "` not found.";
    }
  }

  // 2. check if op[type] has kernel registered.
  auto& all_op_kernels = op.AllOpKernels();
  auto kernels_iter = all_op_kernels.find(op.Type());
  PADDLE_ENFORCE_NE(
      kernels_iter, all_op_kernels.end(),
      platform::errors::NotFound(
          "There are no kernels which are registered in the %s operator.",
          op.Type()));

  auto& kernels = kernels_iter->second;
  auto kernel_iter = kernels.find(expected_kernel_key);
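  // Device-specific fallback: if the expected place has no usable kernel
  // (missing, unsupported, or blacklisted), retry the lookup on CPU.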
#ifdef PADDLE_WITH_XPU
  if (is_xpu_place(expected_kernel_key.place_) &&
      (kernel_iter == kernels.end() ||
       !paddle::platform::is_xpu_support_op(op.Type(), expected_kernel_key) ||
       paddle::platform::is_in_xpu_black_list(op.Type()))) {
    VLOG(3) << "missing XPU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  if (kernel_iter == kernels.end() &&
      is_npu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing NPU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  // TODO(jiabin): Add operator.cc's line 1000 part back when we need that
  // case
  PADDLE_ENFORCE_NE(kernel_iter, kernels.end(),
                    platform::errors::NotFound(
                        "Operator %s does not have kernel for %s.", op.Type(),
                        KernelTypeToString(expected_kernel_key)));

  if (!(expected_kernel_key.place_ == place)) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  return PreparedOp(op, ctx, expected_kernel_key, kernel_iter->second, dev_ctx);
}

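// Prepare() is the public entry point; the two overloads only differ in the
// variable type they are instantiated with. A typical call site looks roughly
// like the following (a simplified sketch, not verbatim tracer code):
//
//   auto prepared_op = PreparedOp::Prepare(ins, outs, op, place, attrs,
//                                          default_attrs, pt_kernel_context);
//   prepared_op.Run(ins, outs, attrs, default_attrs);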
PreparedOp PreparedOp::Prepare(const NameVarMap<VarBase>& ins,
                               const NameVarMap<VarBase>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs,
                               pten::KernelContext* pt_kernel_context) {
  return PrepareImpl<VarBase>(ins, outs, op, place, attrs, default_attrs,
                              pt_kernel_context);
}

PreparedOp PreparedOp::Prepare(const NameVarMap<VariableWrapper>& ins,
                               const NameVarMap<VariableWrapper>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs,
                               pten::KernelContext* pt_kernel_context) {
  return PrepareImpl<VariableWrapper>(ins, outs, op, place, attrs,
                                      default_attrs, pt_kernel_context);
}

template <typename VarType>
static void BuildDygraphPtenKernelContext(
    const framework::KernelSignature& pt_kernel_signature,
    const pten::Kernel& pt_kernel, const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs, const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs,
    platform::DeviceContext* dev_ctx, pten::KernelContext* kernel_ctx) {
  // TODO(chenweihang): this now only works for very simple cases; many cases
  // still need to be dealt with later:
  // 1. the input and output are not tensors
  // 2. dispensable and duplicable inputs and outputs
  // 3. removing needless attributes
  // 4. use pt Tensor directly
  // 5. kernel input is not DenseTensor
  kernel_ctx->SetDeviceContext(dev_ctx);

  auto& input_names = std::get<0>(pt_kernel_signature.args);
  auto& attr_names = std::get<1>(pt_kernel_signature.args);
  auto& output_names = std::get<2>(pt_kernel_signature.args);

  auto& input_defs = pt_kernel.args_def().input_defs();
  auto& output_defs = pt_kernel.args_def().output_defs();
  auto& attr_defs = pt_kernel.args_def().attribute_defs();

  PADDLE_ENFORCE_EQ(input_names.size(), input_defs.size(),
                    platform::errors::InvalidArgument(
                        "the size of inputs_args names (%d) must be equal to "
                        "the size of kernel input_defs (%d).",
                        input_names.size(), input_defs.size()));

  PADDLE_ENFORCE_EQ(output_names.size(), output_defs.size(),
                    platform::errors::InvalidArgument(
                        "the size of outputs_args names (%d) must be equal to "
                        "the size of kernel output_defs (%d).",
                        output_names.size(), output_defs.size()));

  PADDLE_ENFORCE_EQ(attr_names.size(), attr_defs.size(),
                    platform::errors::InvalidArgument(
                        "the size of attribute_args names (%d) must be equal "
                        "to the size of kernel attribute_defs (%d).",
                        attr_names.size(), attr_defs.size()));

  for (size_t i = 0; i < input_names.size(); ++i) {
    auto& in_def = input_defs.at(i);
    auto& ins_vector = ins.at(input_names[i]);

    size_t start_idx = (i == 0 ? 0 : kernel_ctx->InputRangeAt(i - 1).second);
    size_t end_idx = start_idx + ins_vector.size();

    // The current number of inputs/outputs held by pt_kernel_context_ is at
    // least start_idx. Because the allocations of inputs/outputs in
    // pt_kernel_context_ are reused, their current size can be greater than
    // the index the tensor should be placed at; in that case
    // ReMakePtenDenseTensorFromVar rebuilds the pten tensor in place.
    if (kernel_ctx->InputsSize() == start_idx) {
      paddle::SmallVector<std::shared_ptr<pten::TensorBase>> tmp_inputs;
      for (const auto& var : ins_vector) {
        const auto& variable = var->Var();
        tmp_inputs.emplace_back(
            experimental::MakePtenTensorBaseFromVar(variable, in_def));
      }
      kernel_ctx->EmplaceBackInputs(std::move(tmp_inputs));
    } else if (kernel_ctx->InputsSize() > start_idx) {
      size_t input_size = kernel_ctx->InputsSize();
      for (size_t j = 0; j < ins_vector.size(); ++j) {
        if (input_size > start_idx + j) {
          experimental::ReMakePtenDenseTensorFromVar(
              ins_vector[j]->Var(), in_def,
              kernel_ctx->MutableInputAt<pten::DenseTensor>(start_idx + j));
          // TODO(chentianyu03): enable this once multi-input kernels exist
          /*
          } else {
            kernel_ctx->EmplaceBackInputWithoutSetRange(
                experimental::MakePtenTensorBaseFromVar(ins_vector[j]->Var(),
                                                        in_def));
          */
        }
      }
      kernel_ctx->MutableInputRangeAt(i) = std::make_pair(start_idx, end_idx);
    } else {
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "Error start index when trying to set new tensor to inputs, start "
          "index is `%d`, but current pt_kernel_context_.inputs.size() is "
          "`%d`.",
          start_idx, kernel_ctx->InputsSize()));
    }
  }

  for (size_t i = 0; i < output_names.size(); ++i) {
    auto& out_def = output_defs.at(i);
    auto& outs_vector = outs.at(output_names[i]);

    size_t start_idx = (i == 0 ? 0 : kernel_ctx->OutputRangeAt(i - 1).second);
    size_t end_idx = start_idx + outs_vector.size();

    // The current number of inputs/outputs held by pt_kernel_context_ is at
    // least start_idx. Because the allocations of inputs/outputs in
    // pt_kernel_context_ are reused, their current size can be greater than
    // the index the tensor should be placed at; in that case
    // ReMakePtenDenseTensorFromVar rebuilds the pten tensor in place.
    if (kernel_ctx->OutputsSize() == start_idx) {
      paddle::SmallVector<std::shared_ptr<pten::TensorBase>> tmp_outputs;
      for (auto& var : outs_vector) {
        auto* variable = var->MutableVar();
        tmp_outputs.emplace_back(
            experimental::MakePtenTensorBaseFromVar(variable, out_def));
      }
      kernel_ctx->EmplaceBackOutputs(std::move(tmp_outputs));
    } else if (kernel_ctx->OutputsSize() > start_idx) {
      size_t output_size = kernel_ctx->OutputsSize();
      for (size_t j = 0; j < outs_vector.size(); ++j) {
        if (output_size > start_idx + j) {
          experimental::ReMakePtenDenseTensorFromVar(
              outs_vector[j]->MutableVar(), out_def,
              kernel_ctx->MutableOutputAt<pten::DenseTensor>(start_idx + j));
          // TODO(chentianyu03): enable this once multi-output kernels exist
          /*
          } else {
            kernel_ctx->EmplaceBackOutputWithoutSetRange(
                experimental::MakePtenTensorBaseFromVar(
                    outs_vector[j]->MutableVar(), out_def));
          */
        }
      }
      kernel_ctx->MutableOutputRangeAt(i) = std::make_pair(start_idx, end_idx);
    } else {
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "Error start index when trying to set new tensor to inputs, start "
          "index is `%d`, but current pt_kernel_context_.outputs.size() is "
          "`%d`.",
          start_idx, kernel_ctx->OutputsSize()));
    }
  }

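  // Translate each attribute the pten kernel expects, either from the op's
  // attrs / default_attrs or, for ScalarArray and Scalar, from the inputs.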
  for (size_t i = 0; i < attr_names.size(); ++i) {
    if (attr_defs[i].type_index == std::type_index(typeid(pten::ScalarArray))) {
      if (attrs.find(attr_names[i]) !=
          attrs.end()) {  // shape is in the attribute
        auto& attr = GetAttr(attrs, default_attrs, attr_names[i]);
        if (std::type_index(attr.type()) ==
            std::type_index(typeid(std::vector<int64_t>))) {
          kernel_ctx->EmplaceBackAttr(std::move(
              pten::ScalarArray(BOOST_GET_CONST(std::vector<int64_t>, attr))));
        } else {
          PADDLE_THROW(platform::errors::Unimplemented(
              "Unsupported cast op attribute `%s` to VectorTensor when "
              "construct KernelContext.",
              attr_names[i]));
        }
      } else {  // shape is in the input
        auto& ins_vector = ins.at(attr_names[i]);
        if (ins_vector.size() == 1) {  // ShapeTensor
          kernel_ctx->EmplaceBackAttr(std::move(
              experimental::MakePtenScalarArrayFromVar(ins_vector[0]->Var())));
        } else {  // ShapeTensorList
          std::vector<framework::Variable*> variables;
          variables.reserve(ins_vector.size());
          for (const auto& var_base : ins_vector) {
            variables.push_back(var_base->MutableVar());
          }
          kernel_ctx->EmplaceBackAttr(std::move(
              experimental::MakePtenScalarArrayFromVarList(variables)));
        }
      }
    } else if (attr_defs[i].type_index ==
               std::type_index(typeid(pten::Scalar))) {
      // TODO(chenweihang): support other attrs later
      // TODO(zhangyunfei): Scalar should hold a scalar type, and we should
      // check the attribute type by attr_defs
      if (attrs.find(attr_names[i]) != attrs.end() ||
          default_attrs.find(attr_names[i]) !=
              default_attrs.end()) {  // scalar is in the attribute
        auto& attr = GetAttr(attrs, default_attrs, attr_names[i]);
        if (std::type_index(attr.type()) == std::type_index(typeid(float))) {
          kernel_ctx->EmplaceBackAttr(
              std::move(pten::Scalar(BOOST_GET_CONST(float, attr))));
        } else if (std::type_index(attr.type()) ==
                   std::type_index(typeid(std::string))) {
          kernel_ctx->EmplaceBackAttr(
              std::move(pten::Scalar(BOOST_GET_CONST(std::string, attr))));
        } else {
          PADDLE_THROW(platform::errors::Unimplemented(
              "Unsupported cast op attribute `%s` to Scalar when construct "
              "KernelContext in dygraph.",
              attr_names[i]));
        }
      } else {  // scalar is in the input
        auto& ins_vector = ins.at(attr_names[i]);
        kernel_ctx->EmplaceBackAttr(std::move(
            experimental::MakePtenScalarFromVar(ins_vector[0]->Var())));
      }

    } else {
      // TODO(chenweihang): support other attrs later
      auto& attr = GetAttr(attrs, default_attrs, attr_names[i]);
      if (attr_defs[i].type_index == std::type_index(typeid(int))) {
        kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(int, attr));
      } else if (attr_defs[i].type_index == std::type_index(typeid(float))) {
        kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(float, attr));
      } else if (attr_defs[i].type_index == std::type_index(typeid(bool))) {
        kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(bool, attr));
      } else if (attr_defs[i].type_index ==
                 std::type_index(typeid(pten::DataType))) {
        auto data_type = pten::TransToPtenDataType(
            static_cast<framework::proto::VarType::Type>(
                BOOST_GET_CONST(int, attr)));
        kernel_ctx->EmplaceBackAttr(data_type);
      } else if (attr_defs[i].type_index ==
                 std::type_index(typeid(std::vector<int64_t>))) {
        if (std::type_index(attr.type()) ==
            std::type_index(typeid(std::vector<int>))) {
          // Emplace Back Attr according to the type of Pten_Kernel args.
          const auto& vector_int_attr = BOOST_GET_CONST(std::vector<int>, attr);
          const std::vector<int64_t> vector_int64_attr(vector_int_attr.begin(),
                                                       vector_int_attr.end());
          kernel_ctx->EmplaceBackAttr(vector_int64_attr);
        }
        // TODO(YuanRisheng) Need support vector<int64_t> attr
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "Unsupported cast op attribute `%s` when constructing "
            "KernelContext in dygraph.",
            attr_names[i]));
      }
    }
  }
}

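// Copy the pten kernel's DenseTensor outputs back into the corresponding
// imperative output variables.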
template <typename VarType>
static void WriteBackToOutputs(
    const framework::KernelSignature& pt_kernel_signature,
    const NameVarMap<VarType>& outs, pten::KernelContext* kernel_ctx) {
  auto& output_names = std::get<2>(pt_kernel_signature.args);

  for (size_t i = 0; i < output_names.size(); ++i) {
    auto& outs_vector = outs.at(output_names[i]);

    auto& range_pair = kernel_ctx->OutputRangeAt(i);
    auto pten_outs = kernel_ctx->MutableOutputBetween<pten::DenseTensor>(
        range_pair.first, range_pair.second);

    for (size_t j = 0; j < pten_outs.size(); ++j) {
      experimental::MakeVariableFromPtenTensor(pten_outs[j],
                                               outs_vector[j]->MutableVar());
    }
  }
}

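// Execution path for ops that still run through the fluid OpKernelFunc.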
template <typename VarType>
static void PreparedOpRunImpl(
    const framework::OperatorBase& op, const framework::RuntimeContext& ctx,
    const framework::OpKernelType& kernel_type,
    const framework::OperatorWithKernel::OpKernelFunc& func,
    platform::DeviceContext* dev_ctx, const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs, const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs) {
  // TODO(zjl): remove scope in dygraph
  framework::Scope scope;

  DygraphInferShapeContext<VarType> infer_shape_ctx(&ins, &outs, &attrs,
                                                    &default_attrs, op.Type());
  static_cast<const framework::OperatorWithKernel&>(op).InferShape(
      &infer_shape_ctx);

  func(DygraphExecutionContext<VarType>(op, scope, *dev_ctx, ctx, ins, outs,
                                        attrs, default_attrs));

  if (FLAGS_check_nan_inf) {
    framework::details::CheckOpHasNanOrInfInDygraph<VarType>(
        op.Type(), outs, dev_ctx->GetPlace());
  }

  /**
   * [ Why do we need to convert a complex gradient to a real gradient? ]
   *
   * After the introduction of complex number calculations, ops that support
   * complex numbers generally support type promotion, e.g.
   * x(float32) + y(complex64) = out(complex64), so the grad tensors should be
   * dout(complex64), dx(float32), dy(complex64).
   *
   * But because dout is complex64, dx is also complex64 after the grad op
   * kernel is executed; we need to recognize this situation and convert dx
   * back to float32. HandleComplexGradToRealGrad does exactly that.
   */
  if (framework::IsComplexType(kernel_type.data_type_)) {
    HandleComplexGradToRealGrad<VarType>(outs);
  }
}

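// Execution path for ops dispatched to a pten kernel.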
template <typename VarType>
static void PreparedOpRunPtImpl(
    const framework::OperatorBase& op,
    const framework::KernelSignature& pt_kernel_signature,
    const pten::Kernel& pt_kernel, pten::KernelContext* pt_kernel_context,
    platform::DeviceContext* dev_ctx, const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs, const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs) {
  DygraphInferShapeContext<VarType> infer_shape_ctx(&ins, &outs, &attrs,
                                                    &default_attrs, op.Type());
  static_cast<const framework::OperatorWithKernel&>(op).InferShape(
      &infer_shape_ctx);

  BuildDygraphPtenKernelContext<VarType>(pt_kernel_signature, pt_kernel, ins,
                                         outs, attrs, default_attrs, dev_ctx,
                                         pt_kernel_context);

  pt_kernel(pt_kernel_context);

  if (FLAGS_benchmark) {
    dev_ctx->Wait();
#if defined(PADDLE_WITH_CUDA)
    PADDLE_ENFORCE_CUDA_SUCCESS(cudaGetLastError());
    VLOG(4) << "Operator(" << op.Type() << "): context wait and get last error";
#endif
#if defined(PADDLE_WITH_HIP)
    PADDLE_ENFORCE_CUDA_SUCCESS(hipGetLastError());
    VLOG(4) << "Operator(" << op.Type() << "): context wait and get last error";
#endif
  }

  WriteBackToOutputs<VarType>(pt_kernel_signature, outs, pt_kernel_context);

  // Ensure that it does not affect the VarBase life cycle management
  pt_kernel_context->ClearData();

  // TODO(chenweihang): add debug flags later
  // TODO(chenweihang): deal with complex cases later
}

void PreparedOp::Run(const NameVarMap<VarBase>& ins,
                     const NameVarMap<VarBase>& outs,
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
  if (run_pten_kernel_) {
    PreparedOpRunPtImpl<VarBase>(op_, pt_kernel_signature_, pt_kernel_,
                                 pt_kernel_context_, dev_ctx_, ins, outs, attrs,
                                 default_attrs);
  } else {
    PreparedOpRunImpl<VarBase>(op_, ctx_, kernel_type_, func_, dev_ctx_, ins,
                               outs, attrs, default_attrs);
  }
}

void PreparedOp::Run(const NameVarMap<VariableWrapper>& ins,
                     const NameVarMap<VariableWrapper>& outs,
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
  if (run_pten_kernel_) {
    PreparedOpRunPtImpl<VariableWrapper>(op_, pt_kernel_signature_, pt_kernel_,
                                         pt_kernel_context_, dev_ctx_, ins,
                                         outs, attrs, default_attrs);
  } else {
    PreparedOpRunImpl<VariableWrapper>(op_, ctx_, kernel_type_, func_, dev_ctx_,
                                       ins, outs, attrs, default_attrs);
  }
}

}  // namespace imperative
}  // namespace paddle