/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/custom_operator.h"

#include <algorithm>
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "paddle/fluid/eager/api/utils/global_utils.h"
#include "paddle/fluid/framework/attribute.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/op_meta_info_helper.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/phi_utils.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/dynload/dynamic_loader.h"
#include "paddle/fluid/string/string_helper.h"
#include "paddle/phi/api/all.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/utils/any.h"
#ifdef PADDLE_WITH_CUSTOM_DEVICE
#include "paddle/phi/backends/device_manager.h"
#endif

#include "gflags/gflags.h"
#include "paddle/phi/api/include/operants_manager.h"
#include "paddle/phi/api/include/tensor_operants.h"

DECLARE_string(tensor_operants_mode);

namespace paddle {
namespace framework {

namespace detail {

// dynamic lib load func
template <typename T>
static T* DynLoad(void* handle, std::string name) {
  T* func = reinterpret_cast<T*>(dlsym(handle, name.c_str()));
#if !defined(_WIN32)
  auto errorno = dlerror();
#else
  auto errorno = GetLastError();
#endif  // !_WIN32
  PADDLE_ENFORCE_NOT_NULL(
      func,
      platform::errors::NotFound(
          "Failed to load dynamic operator library, error message(%s).",
          errorno));
  return func;
}
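
// Example: resolving the meta info getter exported by a custom op library;
// this mirrors the lookup performed in LoadOpMetaInfoAndRegisterOp below:
//
//   typedef OpMetaInfoMap& get_op_meta_info_map_t();
//   auto* getter =
//       DynLoad<get_op_meta_info_map_t>(handle, "PD_GetOpMetaInfoMap");
//   OpMetaInfoMap& meta_info_map = getter();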

inline static bool IsDuplicableVar(const std::string& var_name) {
  std::string suffix = kTensorVectorSuffix;
  return var_name.rfind(suffix) != std::string::npos;
}
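
// Example: assuming kTensorVectorSuffix is "@VECTOR" (see op_meta_info.h),
// a name produced by paddle::Vec() is treated as duplicable:
//
//   detail::IsDuplicableVar("X@VECTOR");  // true
//   detail::IsDuplicableVar("X");         // false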

inline static std::string NoGrad(const std::string& var_name,
                                 bool is_double_grad = false) {
  std::string suffix = kGradVarSuffix;
  std::string new_out_suffix = kDoubleGradNewOutSuffix;
  std::string tmp_var_name(var_name);
  if (is_double_grad &&
      (tmp_var_name.rfind(new_out_suffix) != std::string::npos)) {
    tmp_var_name = tmp_var_name.substr(
        0, tmp_var_name.size() - /*kDoubleGradNewOutSuffix length*/ 4);
  }
  return tmp_var_name.substr(0, tmp_var_name.size() - kGradVarSuffixSize);
}
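
// Example: assuming kGradVarSuffix is "@GRAD" and kDoubleGradNewOutSuffix
// is "@NEW":
//
//   detail::NoGrad("Out@GRAD");                                  // "Out"
//   detail::NoGrad("Out@GRAD@GRAD@NEW", /*is_double_grad=*/true);
//                                                            // "Out@GRAD"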

inline static bool IsGradVar(const std::string& var_name, bool is_double_grad) {
  std::string suffix = kGradVarSuffix;
  if (!is_double_grad) {
    return var_name.rfind(suffix) != std::string::npos;
  } else {
    // for double grad cases, the X@GRAD is not a grad var, X@GRAD@GRAD is a
    // grad var, here we remove a @GRAD suffix
    return NoGrad(var_name).rfind(suffix) != std::string::npos;
  }
}
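
// Example: assuming kGradVarSuffix is "@GRAD":
//
//   detail::IsGradVar("X@GRAD", /*is_double_grad=*/false);      // true
//   detail::IsGradVar("X@GRAD", /*is_double_grad=*/true);       // false
//   detail::IsGradVar("X@GRAD@GRAD", /*is_double_grad=*/true);  // true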

inline static bool IsMemberOf(const std::vector<std::string>& vec,
                              const std::string& name) {
  return std::find(vec.cbegin(), vec.cend(), name) != vec.cend();
}

static std::vector<std::string> ParseAttrStr(const std::string& attr) {
  auto split_pos = attr.find_first_of(":");
  PADDLE_ENFORCE_NE(split_pos,
                    std::string::npos,
                    platform::errors::InvalidArgument(
                        "Invalid attribute string format. Attribute string "
                        "format is `<name>:<type>`."));

  std::vector<std::string> rlt;
  // 1. name
  rlt.emplace_back(string::trim_spaces(attr.substr(0, split_pos)));
  // 2. type
  rlt.emplace_back(string::trim_spaces(attr.substr(split_pos + 1)));

  VLOG(3) << "attr name: " << rlt[0] << ", attr type str: " << rlt[1];

  return rlt;
}
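
// Example: attribute strings follow the `<name>:<type>` convention used
// when declaring attributes, e.g. via PD_BUILD_OP(...).Attrs({"scale: float"}):
//
//   detail::ParseAttrStr("scale: float");  // {"scale", "float"}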

}  // namespace detail

////////////////// Kernel Define ////////////////////

// custom op kernel call function define
static void RunKernelFunc(const framework::ExecutionContext& ctx,
                          const paddle::KernelFunc& func,
                          const std::vector<std::string>& inputs,
                          const std::vector<std::string>& outputs,
                          const std::vector<std::string>& attrs) {
  VLOG(3) << "Custom Operator: Start run KernelFunc.";
  // prepare CustomOpKernelContext
  paddle::CustomOpKernelContext kernel_ctx;
  for (auto& in_name : inputs) {
    VLOG(3) << "Custom Operator: input name - " << in_name;
    if (detail::IsDuplicableVar(in_name)) {
      // return const std::vector<const phi::DenseTensor*>
      auto vec_x = ctx.MultiInput<phi::DenseTensor>(in_name);
      PADDLE_ENFORCE_NE(vec_x.empty(),
                        true,
                        platform::errors::NotFound(
                            "Input vector<tensor> (%s) is empty.", in_name));
      std::vector<paddle::experimental::Tensor> custom_vec_in;
      for (size_t i = 0; i < vec_x.size(); ++i) {
        auto* x = vec_x[i];
        PADDLE_ENFORCE_NOT_NULL(
            x,
            platform::errors::NotFound(
                "The %d-th tensor in input vector<tensor> (%s) is nullptr.",
                i,
                in_name));
        PADDLE_ENFORCE_EQ(x->IsInitialized(),
                          true,
                          platform::errors::InvalidArgument(
                              "The %d-th tensor in input vector<tensor> (%s) "
                              "is not initialized.",
                              i,
                              in_name));
        paddle::experimental::Tensor custom_t;
        custom_t.set_impl(std::make_shared<phi::DenseTensor>(*x));
        custom_vec_in.emplace_back(custom_t);
      }
      kernel_ctx.EmplaceBackInputs(std::move(custom_vec_in));
    } else {
      auto* x = ctx.Input<phi::DenseTensor>(in_name);
      PADDLE_ENFORCE_NOT_NULL(
          x,
          platform::errors::NotFound("Input tensor (%s) is nullptr.", in_name));
      PADDLE_ENFORCE_EQ(x->IsInitialized(),
                        true,
                        platform::errors::InvalidArgument(
                            "Input tensor (%s) is not initialized.", in_name));
      paddle::experimental::Tensor custom_in;
      custom_in.set_impl(std::make_shared<phi::DenseTensor>(*x));
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      if (custom_in.is_gpu_pinned()) {
        VLOG(3) << "Custom Operator: custom input is gpu pinned tensor";
        auto gpu_place = phi::GPUPlace(platform::GetCurrentDeviceId());
        auto custom_gpu_in = custom_in.copy_to(gpu_place, true);
        kernel_ctx.EmplaceBackInput(std::move(custom_gpu_in));
      } else {
        kernel_ctx.EmplaceBackInput(std::move(custom_in));
      }
#else
      kernel_ctx.EmplaceBackInput(std::move(custom_in));
#endif
    }
  }

  for (auto& attr_str : attrs) {
    auto attr_name_and_type = detail::ParseAttrStr(attr_str);
    auto attr_name = attr_name_and_type[0];
    auto attr_type_str = attr_name_and_type[1];
    if (attr_type_str == "bool") {
      kernel_ctx.EmplaceBackAttr(ctx.Attr<bool>(attr_name));
    } else if (attr_type_str == "int") {
      kernel_ctx.EmplaceBackAttr(ctx.Attr<int>(attr_name));
    } else if (attr_type_str == "float") {
      kernel_ctx.EmplaceBackAttr(ctx.Attr<float>(attr_name));
    } else if (attr_type_str == "int64_t") {
      kernel_ctx.EmplaceBackAttr(ctx.Attr<int64_t>(attr_name));
    } else if (attr_type_str == "std::string") {
      kernel_ctx.EmplaceBackAttr(ctx.Attr<std::string>(attr_name));
    } else if (attr_type_str == "std::vector<int>") {
      kernel_ctx.EmplaceBackAttr(ctx.Attr<std::vector<int>>(attr_name));
    } else if (attr_type_str == "std::vector<float>") {
      kernel_ctx.EmplaceBackAttr(ctx.Attr<std::vector<float>>(attr_name));
    } else if (attr_type_str == "std::vector<int64_t>") {
      kernel_ctx.EmplaceBackAttr(ctx.Attr<std::vector<int64_t>>(attr_name));
    } else if (attr_type_str == "std::vector<std::string>") {
      kernel_ctx.EmplaceBackAttr(ctx.Attr<std::vector<std::string>>(attr_name));
    } else {
      PADDLE_THROW(platform::errors::Unimplemented(
          "Unsupported `%s` type value as custom attribute now. "
          "Supported data types include `bool`, `int`, `float`, "
          "`int64_t`, `std::string`, `std::vector<int>`, "
          "`std::vector<float>`, `std::vector<int64_t>` and "
          "`std::vector<std::string>`. Please check whether "
          "the attribute data type and data type string are matched.",
          attr_type_str));
    }
  }

  VLOG(3) << "Custom Operator: push outputs into CustomOpKernelContext.";
  // cache the target tensor pointers
  std::vector<phi::DenseTensor*> true_out_ptrs;
  for (size_t i = 0; i < outputs.size(); ++i) {
    auto out_name = outputs[i];
    if (detail::IsDuplicableVar(out_name)) {
      PADDLE_ENFORCE(i == 0UL && outputs.size() == 1UL,
                     platform::errors::PreconditionNotMet(
                         "If a custom operator's outputs contain the "
                         "`paddle::Vec()` type, it can only hold one output."));
      auto vec_out = ctx.MultiOutput<phi::DenseTensor>(out_name);
      PADDLE_ENFORCE_NE(vec_out.empty(),
                        true,
                        platform::errors::NotFound(
                            "Output vector<tensor> (%s) is empty.", out_name));
      std::vector<paddle::experimental::Tensor> custom_vec_out;
      for (size_t j = 0; j < vec_out.size(); ++j) {
        auto* out = vec_out[j];
        PADDLE_ENFORCE_NOT_NULL(
            out,
            platform::errors::NotFound(
                "The %d-th tensor in output vector<tensor> (%s) is nullptr.",
                j,
                out_name));
        true_out_ptrs.emplace_back(out);
        paddle::experimental::Tensor custom_t;
        // here we can only copy the output tensor into the context
        custom_t.set_impl(std::make_shared<phi::DenseTensor>(*out));
        custom_vec_out.emplace_back(custom_t);
      }
      kernel_ctx.EmplaceBackOutputs(std::move(custom_vec_out));
    } else {
      auto* out = ctx.Output<phi::DenseTensor>(out_name);
      PADDLE_ENFORCE_NOT_NULL(out,
                              platform::errors::NotFound(
                                  "Output tensor (%s) is nullptr.", out_name));
      true_out_ptrs.emplace_back(out);
      paddle::experimental::Tensor custom_out;
      // here we can only copy the output tensor into the context
      custom_out.set_impl(std::make_shared<phi::DenseTensor>(*out));
      kernel_ctx.EmplaceBackOutput(std::move(custom_out));
    }
  }

  try {
    VLOG(3) << "Custom Operator: Run ComputeFunc.";

    FLAGS_tensor_operants_mode = "phi";
    if (paddle::OperantsManager::Instance().phi_operants.get() == nullptr) {
      paddle::OperantsManager::Instance().phi_operants.reset(
          new paddle::operants::PhiTensorOperants());
      VLOG(4) << "Initialize phi tensor operants successfully";
    }

    func(&kernel_ctx);

    // sync output tensor data into original output
    auto* calc_outs = kernel_ctx.AllMutableOutput();
    PADDLE_ENFORCE_EQ(
        true_out_ptrs.size(),
        calc_outs->size(),
        platform::errors::InvalidArgument(
            "The number of elements in the custom operator's outputs is "
            "wrong: expected %d Tensors, but actually got %d Tensors.",
            true_out_ptrs.size(),
            calc_outs->size()));
    for (size_t i = 0; i < true_out_ptrs.size(); ++i) {
      auto* true_out = true_out_ptrs.at(i);
      auto calc_out =
          std::dynamic_pointer_cast<phi::DenseTensor>(calc_outs->at(i).impl());
      // assign meta info
      auto* true_out_meta = phi::DenseTensorUtils::GetMutableMeta(true_out);
      true_out_meta->dims = calc_out->dims();
      true_out_meta->dtype = calc_out->dtype();
      true_out_meta->layout = calc_out->layout();
      true_out_meta->offset = calc_out->offset();
      // lod no need to be reset
      // reset holder if needed
      if (true_out->Holder() != calc_out->Holder()) {
        true_out->ResetHolder(calc_out->Holder());
      }
    }
  } catch (platform::EnforceNotMet& exception) {
    throw std::move(exception);
  } catch (std::exception& ex) {
    PADDLE_THROW(platform::errors::External("%s", ex.what()));
  } catch (...) {
    PADDLE_THROW(platform::errors::Fatal(
        "Custom operator raises an unknown exception in runtime."));
  }
}
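
// For reference, the paddle::KernelFunc driven above is generated from a
// user-side function registered through the public custom op API. A minimal
// sketch (hypothetical "custom_relu" op, function body elided):
//
//   std::vector<paddle::Tensor> ReluForward(const paddle::Tensor& x);
//
//   PD_BUILD_OP(custom_relu)
//       .Inputs({"X"})
//       .Outputs({"Out"})
//       .SetKernelFn(PD_KERNEL(ReluForward));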

static void RunInferShapeFunc(framework::InferShapeContext* ctx,
                              const paddle::InferShapeFunc& func,
                              const std::vector<std::string>& inputs,
                              const std::vector<std::string>& outputs,
                              const std::vector<std::string>& attrs) {
  std::vector<std::vector<int64_t>> input_shapes;
  std::vector<std::vector<std::vector<int64_t>>> vec_input_shapes;

  VLOG(3) << "Custom Operator: InferShape - get input ddim.";
  for (auto& in_name : inputs) {
    if (detail::IsDuplicableVar(in_name)) {
      OP_INOUT_CHECK(ctx->HasInputs(in_name), "Input", in_name, "Custom");
      auto vec_ddim = ctx->GetInputsDim(in_name);
      std::vector<std::vector<int64_t>> vec_shape;
      vec_shape.reserve(vec_ddim.size());
      std::transform(vec_ddim.begin(),
                     vec_ddim.end(),
                     std::back_inserter(vec_shape),
                     [&](const DDim& ddim) -> std::vector<int64_t> {
                       return phi::vectorize(ddim);
                     });
      vec_input_shapes.emplace_back(vec_shape);
    } else {
      OP_INOUT_CHECK(ctx->HasInput(in_name), "Input", in_name, "Custom");
      auto ddim = ctx->GetInputDim(in_name);
      input_shapes.emplace_back(phi::vectorize(ddim));
    }
  }

  std::vector<paddle::any> custom_attrs;
  for (auto& attr_str : attrs) {
    auto attr_name_and_type = detail::ParseAttrStr(attr_str);
    auto attr_name = attr_name_and_type[0];
    auto attr_type_str = attr_name_and_type[1];
    if (attr_type_str == "bool") {
      custom_attrs.emplace_back(ctx->Attrs().Get<bool>(attr_name));
    } else if (attr_type_str == "int") {
      custom_attrs.emplace_back(ctx->Attrs().Get<int>(attr_name));
    } else if (attr_type_str == "float") {
      custom_attrs.emplace_back(ctx->Attrs().Get<float>(attr_name));
    } else if (attr_type_str == "int64_t") {
      custom_attrs.emplace_back(ctx->Attrs().Get<int64_t>(attr_name));
    } else if (attr_type_str == "std::string") {
      custom_attrs.emplace_back(ctx->Attrs().Get<std::string>(attr_name));
    } else if (attr_type_str == "std::vector<int>") {
      custom_attrs.emplace_back(ctx->Attrs().Get<std::vector<int>>(attr_name));
    } else if (attr_type_str == "std::vector<float>") {
      custom_attrs.emplace_back(
          ctx->Attrs().Get<std::vector<float>>(attr_name));
    } else if (attr_type_str == "std::vector<int64_t>") {
      // NOTE(chenweihang): InferShape can't support the std::vector<int64_t>
      // attr type, because the input shapes are also passed as
      // std::vector<int64_t>, and only one rule can be used to parse a
      // std::vector<int64_t> parameter
      continue;
    } else if (attr_type_str == "std::vector<std::string>") {
      custom_attrs.emplace_back(
          ctx->Attrs().Get<std::vector<std::string>>(attr_name));
    } else {
      PADDLE_THROW(platform::errors::Unimplemented(
          "Unsupported `%s` type value as custom attribute now. "
          "Supported data types include `bool`, `int`, `float`, "
          "`int64_t`, `std::string`, `std::vector<int>`, "
          "`std::vector<float>` and `std::vector<std::string>`. "
          "Please check whether the attribute data type and "
          "data type string are matched.",
          attr_type_str));
    }
  }

  VLOG(3) << "Custom Operator: InferShape - calc output ddim.";
  auto output_shapes = func(input_shapes, vec_input_shapes, custom_attrs);

  VLOG(3) << "Custom Operator: InferShape - set output ddim.";
  for (size_t i = 0; i < outputs.size(); ++i) {
    auto out_name = outputs[i];
    if (detail::IsDuplicableVar(out_name)) {
      std::vector<DDim> vec_ddim;
      vec_ddim.reserve(output_shapes.size());
      std::transform(output_shapes.begin(),
                     output_shapes.end(),
                     std::back_inserter(vec_ddim),
                     [&](const std::vector<int64_t>& shape) -> DDim {
                       return phi::make_ddim(shape);
                     });
      ctx->SetOutputsDim(out_name, vec_ddim);
    } else {
      ctx->SetOutputDim(out_name, phi::make_ddim(output_shapes[i]));
    }
  }
}
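
// A matching user-side InferShapeFn takes the input shapes (and parsed
// attributes) by value and returns one shape per output. A minimal sketch
// for the hypothetical single-input op above:
//
//   std::vector<std::vector<int64_t>> ReluInferShape(
//       const std::vector<int64_t>& x_shape) {
//     return {x_shape};
//   }
//
//   // registered via .SetInferShapeFn(PD_INFER_SHAPE(ReluInferShape))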

//////////////////// Operator Define /////////////////

class CustomOperator : public OperatorWithKernel {
 public:
  using OperatorWithKernel::OperatorWithKernel;

  // Dummy infershape
  // Because it is a pure virtual function, it must be implemented
  void InferShape(framework::InferShapeContext* ctx) const override {
    VLOG(3) << "Custom Operator: Dummy infer shape of custom operator.";
  }

  /**
   * NOTE: [Skip the Kernel Selection]
   * A Custom Op registers only one Op kernel for each device, so the
   * data type selection and promotion that depend on GetExpectedKernelType,
   * as well as the adaptation to various other special situations, need to
   * be implemented by users themselves; this avoids requiring users to
   * implement the GetExpectedKernelType function when extending to other
   * cases. The RAW type is used here as the data type, indicating that
   * it can only be determined at runtime.
   */
  phi::KernelKey GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return phi::KernelKey(ctx.GetPlace());
  }

  /**
   * NOTE: [Skip Input Variable Cast for DataType]
   * Because the kernel data type is RAW, we should skip the cast for
   * data type difference when PrepareData.
   */
  phi::KernelKey GetKernelTypeForVar(
      const std::string& var_name,
      const phi::DenseTensor& tensor,
      const phi::KernelKey& expected_kernel_type) const override {
    return phi::KernelKey(phi::Backend::ALL_BACKEND,
                          tensor.layout(),
                          expected_kernel_type.dtype());
  }
};

class CustomOpMaker : public OpProtoAndCheckerMaker {
 public:
  explicit CustomOpMaker(const std::vector<std::string>& inputs,
                         const std::vector<std::string>& outputs,
                         const std::vector<std::string>& attrs)
      : inputs_(inputs), outputs_(outputs), attrs_(attrs) {}

  void Make() override {
    for (auto& in_name : inputs_) {
      if (detail::IsDuplicableVar(in_name)) {
        AddInput(in_name, "The input " + in_name + " of Custom operator.")
            .AsDuplicable();
      } else {
        AddInput(in_name, "The input " + in_name + " of Custom operator.");
      }
    }
    for (auto& out_name : outputs_) {
      if (detail::IsDuplicableVar(out_name)) {
        AddOutput(out_name, "The output " + out_name + " of Custom Operator.")
            .AsDuplicable();
      } else {
        AddOutput(out_name, "The output " + out_name + " of Custom Operator.");
      }
    }
    for (auto& attr : attrs_) {
      auto attr_name_and_type = detail::ParseAttrStr(attr);
      auto attr_name = attr_name_and_type[0];
      auto attr_type_str = attr_name_and_type[1];
      if (attr_type_str == "bool") {
        AddAttr<bool>(attr_name, "custom operator bool attribute.")
            .SetDefault(false);
      } else if (attr_type_str == "int") {
        AddAttr<int>(attr_name, "custom operator int attribute.").SetDefault(1);
      } else if (attr_type_str == "float") {
        AddAttr<float>(attr_name, "custom operator float attribute.")
            .SetDefault(1.0f);
      } else if (attr_type_str == "int64_t") {
        AddAttr<int64_t>(attr_name, "custom operator int64_t attribute.")
            .SetDefault(1);
      } else if (attr_type_str == "std::string") {
        AddAttr<std::string>(attr_name,
                             "custom operator std::string attribute.")
            .SetDefault("");
      } else if (attr_type_str == "std::vector<int>") {
        AddAttr<std::vector<int>>(attr_name,
                                  "custom operator std::vector<int> attribute.")
            .SetDefault({});
      } else if (attr_type_str == "std::vector<float>") {
        AddAttr<std::vector<float>>(
            attr_name, "custom operator std::vector<float> attribute.")
            .SetDefault({});
      } else if (attr_type_str == "std::vector<int64_t>") {
        AddAttr<std::vector<int64_t>>(
            attr_name, "custom operator std::vector<int64_t> attribute.")
            .SetDefault({});
      } else if (attr_type_str == "std::vector<std::string>") {
        AddAttr<std::vector<std::string>>(
            attr_name, "custom operator std::vector<std::string> attribute.")
            .SetDefault({});
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "Unsupported `%s` type value as custom attribute now. "
            "Supported data types include `bool`, `int`, `float`, "
            "`int64_t`, `std::string`, `std::vector<int>`, "
            "`std::vector<float>`, `std::vector<int64_t>` and "
            "`std::vector<std::string>`. Please check whether "
            "the attribute data type and data type string are matched.",
            attr_type_str));
      }
    }
    AddComment(R"DOC(
Custom Operator.

A custom operator encapsulates a phi::DenseTensor computation function,
implemented by the user independently of the framework, into a framework
operator so that it can run in execution scenarios such as dynamic graph
mode, static graph mode, and inference mode.

)DOC");
  }

 private:
  std::vector<std::string> inputs_;
  std::vector<std::string> outputs_;
  std::vector<std::string> attrs_;
};

template <typename T>
class CustomGradOpMaker;

template <>
class CustomGradOpMaker<OpDesc> : public SingleGradOpMaker<OpDesc> {
 public:
  explicit CustomGradOpMaker(
      const OpDesc& fwd_op,
      const std::unordered_set<std::string>& no_grad_set,
      std::unordered_map<std::string, std::string>* grad_to_var,
      const std::vector<BlockDesc*>& grad_block,
      const std::string& name,
      const std::vector<std::string>& inputs,
      const std::vector<std::string>& outputs,
      bool is_double_grad)
      : SingleGradOpMaker<OpDesc>(fwd_op, no_grad_set, grad_to_var, grad_block),
        name_(name),
        inputs_(inputs),
        outputs_(outputs),
        is_double_grad_(is_double_grad) {}

 protected:
  void Apply(GradOpPtr<OpDesc> grad_op) const override {
    grad_op->SetType(name_);

    auto fwd_op_inputs = this->InputNames();
    auto fwd_op_outputs = this->OutputNames();

    for (auto& in_name : inputs_) {
      VLOG(3) << "Custom Operator: GradOpDescMaker - input: " << in_name;
      if (!detail::IsGradVar(in_name, is_double_grad_)) {
        if (detail::IsMemberOf(fwd_op_inputs, in_name)) {
          grad_op->SetInput(in_name, this->Input(in_name));
        } else if (detail::IsMemberOf(fwd_op_outputs, in_name)) {
          grad_op->SetInput(in_name, this->Output(in_name));
        } else {
          PADDLE_THROW(platform::errors::InvalidArgument(
              "The input tensor name `%s` is invalid, expected it is the input "
              "or output of forward operator.",
              in_name));
        }
      } else {
        grad_op->SetInput(in_name, this->OutputGrad(detail::NoGrad(in_name)));
      }
    }
    for (auto& out_name : outputs_) {
      VLOG(3) << "Custom Operator: GradOpDescMaker - output: " << out_name;
      if (detail::IsDuplicableVar(out_name)) {
        grad_op->SetOutput(
            out_name,
            this->InputGrad(detail::NoGrad(out_name, is_double_grad_),
                            /*drop_empty_grad=*/false));
      } else {
        grad_op->SetOutput(
            out_name,
            this->InputGrad(detail::NoGrad(out_name, is_double_grad_)));
      }
    }
    grad_op->SetAttrMap(this->Attrs());
  }

 private:
  std::string name_;
  std::vector<std::string> inputs_;
  std::vector<std::string> outputs_;
  bool is_double_grad_{false};
};

template <>
class CustomGradOpMaker<imperative::OpBase>
    : public SingleGradOpMaker<imperative::OpBase> {
 public:
  explicit CustomGradOpMaker(
      const std::string& type,
      const imperative::NameVarBaseMap& var_base_map_in,
      const imperative::NameVarBaseMap& var_base_map_out,
      const AttributeMap& attrs,
      const std::map<std::string, std::string>& inplace_map,
      const std::string& name,
      const std::vector<std::string>& inputs,
      const std::vector<std::string>& outputs,
      bool is_double_grad)
      : SingleGradOpMaker<imperative::OpBase>(
            type, var_base_map_in, var_base_map_out, attrs, inplace_map),
        name_(name),
        inputs_(inputs),
        outputs_(outputs),
        is_double_grad_(is_double_grad) {}

 protected:
  // TODO(chenweihang): The code is duplicated with the previous maker,
  // because here OpMaker's Input, Output and other methods are protected.
  // Putting the function implementation outside the class would make the
  // methods uncallable, so it is still implemented inside the class for
  // the time being.
  void Apply(GradOpPtr<imperative::OpBase> grad_op) const override {
    grad_op->SetType(name_);

    auto fwd_op_inputs = this->InputNames();
    auto fwd_op_outputs = this->OutputNames();

    for (auto& in_name : inputs_) {
      VLOG(3) << "Custom Operator: GradOpBaseMaker - input: " << in_name;
      if (!detail::IsGradVar(in_name, is_double_grad_)) {
        if (detail::IsMemberOf(fwd_op_inputs, in_name)) {
          grad_op->SetInput(in_name, this->Input(in_name));
        } else if (detail::IsMemberOf(fwd_op_outputs, in_name)) {
          grad_op->SetInput(in_name, this->Output(in_name));
        } else {
          PADDLE_THROW(platform::errors::InvalidArgument(
              "The input tensor name `%s` is invalid, expected it is the input "
              "or output of forward operator.",
              in_name));
        }
      } else {
        grad_op->SetInput(in_name, this->OutputGrad(detail::NoGrad(in_name)));
      }
    }
    for (auto& out_name : outputs_) {
      VLOG(3) << "Custom Operator: GradOpBaseMaker - output: " << out_name;
      grad_op->SetOutput(
          out_name, this->InputGrad(detail::NoGrad(out_name, is_double_grad_)));
    }
    grad_op->SetAttrMap(this->Attrs());
  }

 private:
  std::string name_;
  std::vector<std::string> inputs_;
  std::vector<std::string> outputs_;
  bool is_double_grad_{false};
};

//////////// Operator and Kernel Register //////////////

static void RegisterOperatorKernelWithPlace(
    const std::string& name,
    const OperatorWithKernel::OpKernelFunc& op_kernel_func,
    const proto::VarType::Type type,
    const platform::Place& place) {
  OpKernelType key(type, place);
  VLOG(3) << "Custom Operator: op kernel key: " << key;
  OperatorWithKernel::AllOpKernels()[name][key] = op_kernel_func;
}

static void RegisterOperatorKernel(const std::string& name,
                                   const paddle::KernelFunc& kernel_func,
                                   const std::vector<std::string>& inputs,
                                   const std::vector<std::string>& outputs,
                                   const std::vector<std::string>& attrs,
                                   void* dso_handle) {
  VLOG(3) << "Custom Operator: op name in kernel: " << name;
  // NOTE [ Dummy Op Kernel Key ]
  // TODO(chenweihang): Because the execution engine needs to get the device
  // context based on op_kernel_key.place_, we have to register a kernel for
  // each device. But this is not entirely correct: if the user only provides
  // a CPU kernel but calls the API on a GPU device, it will cause an error.
  OperatorWithKernel::OpKernelFunc op_kernel_func;
  if (kernel_func) {
    VLOG(3) << "Register custom operator " << name << " with kernel func";
    op_kernel_func = [kernel_func, inputs, outputs, attrs](
                         const framework::ExecutionContext& ctx) {
      VLOG(3) << "Custom Operator: run custom kernel func in lambda.";
      RunKernelFunc(ctx, kernel_func, inputs, outputs, attrs);
    };
  } else {
    VLOG(3) << "Register custom operator " << name
            << " with raw op kernel func";
    PADDLE_ENFORCE_NOT_NULL(
        dso_handle,
        platform::errors::InvalidArgument(
            "The dso handle must be provided if kernel_func is nullptr."));
    using OpKernelFuncPtr = void(const framework::ExecutionContext&);
    auto symbol_name = "PD_" + name + "_raw_op_kernel_func";
    auto* func = detail::DynLoad<OpKernelFuncPtr>(dso_handle, symbol_name);
    op_kernel_func = func;
  }
  RegisterOperatorKernelWithPlace(
      name, op_kernel_func, proto::VarType::RAW, platform::CPUPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  RegisterOperatorKernelWithPlace(
      name, op_kernel_func, proto::VarType::RAW, platform::CUDAPlace());
#endif
#if defined(PADDLE_WITH_XPU)
  RegisterOperatorKernelWithPlace(
      name, op_kernel_func, proto::VarType::RAW, platform::XPUPlace());
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
  for (const auto& dev_type : device_types) {
    for (size_t dev_id = 0;
         dev_id < phi::DeviceManager::GetDeviceCount(dev_type);
         dev_id++) {
      RegisterOperatorKernelWithPlace(name,
                                      op_kernel_func,
                                      proto::VarType::RAW,
                                      platform::CustomPlace(dev_type, dev_id));
    }
  }
#endif
}

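// NOTE: the meta info vector registered for one op is ordered: index 0 holds
// the forward op, index 1 the grad op, and index 2 the double grad op, which
// is why `is_double_grad = (i == 2)` below.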
void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
                                  void* dso_handle) {
  /* Op register */
  OpInfo info;

  auto& base_op_meta = op_meta_infos.front();

  auto op_name = OpMetaInfoHelper::GetOpName(base_op_meta);

  if (OpInfoMap::Instance().Has(op_name)) {
    LOG(WARNING) << "Operator (" << op_name << ") has been registered.";
    return;
  }

  auto& op_inputs = OpMetaInfoHelper::GetInputs(base_op_meta);
  auto& op_outputs = OpMetaInfoHelper::GetOutputs(base_op_meta);
  auto& op_attrs = OpMetaInfoHelper::GetAttrs(base_op_meta);
  auto& kernel_fn = OpMetaInfoHelper::GetKernelFn(base_op_meta);
  auto& infer_shape_func = OpMetaInfoHelper::GetInferShapeFn(base_op_meta);
  auto& infer_dtype_func = OpMetaInfoHelper::GetInferDtypeFn(base_op_meta);

  VLOG(3) << "Custom Operator: forward, op name: " << op_name;
  VLOG(3) << "Custom Operator: forward, op inputs: "
          << string::join_strings(op_inputs, ',');
  VLOG(3) << "Custom Operator: forward, op outputs: "
          << string::join_strings(op_outputs, ',');
  VLOG(3) << "Custom Operator: forward, op attrs: "
          << string::join_strings(op_attrs, ',');

  // Op
  info.creator_ = [](const std::string& op_name,
                     const VariableNameMap& inputs,
                     const VariableNameMap& outputs,
                     const AttributeMap& attrs) {
    return new CustomOperator(op_name, inputs, outputs, attrs);
  };

  // OpMaker
  info.proto_ = new proto::OpProto;
  info.proto_->set_type(op_name);

  info.checker_ = new OpAttrChecker();
  CustomOpMaker custom_maker(op_inputs, op_outputs, op_attrs);
  custom_maker(info.proto_, info.checker_);
  PADDLE_ENFORCE_EQ(
      info.proto_->IsInitialized(),
      true,
      platform::errors::PreconditionNotMet(
          "Fail to initialize %s's OpProto, because %s is not initialized.",
          op_name,
          info.proto_->InitializationErrorString()));

  // InferShape
  if (infer_shape_func == nullptr) {
    // use default InferShape
    info.infer_shape_ = [op_inputs, op_outputs](InferShapeContext* ctx) {
      PADDLE_ENFORCE_EQ(
          op_inputs.size(),
          1UL,
          platform::errors::Unavailable(
              "Your custom operator contains multiple inputs. "
              "We only allow a custom operator that contains only one input "
              "and only one output without setting the InferShapeFn. "
              "At this time, the input shape will be directly set to "
              "the output shape.\n"
              "Please set the InferShapeFn of custom "
              "operator by .SetInferShapeFn(PD_INFER_SHAPE(...))"));
      PADDLE_ENFORCE_EQ(
          op_outputs.size(),
          1UL,
          platform::errors::Unavailable(
              "Your custom operator contains multiple outputs. "
              "We only allow a custom operator that contains only one input "
              "and only one output without setting the InferShapeFn. "
              "At this time, the input shape will be directly set to "
              "the output shape.\n"
              "Please set the InferShapeFn of custom "
              "operator by .SetInferShapeFn(PD_INFER_SHAPE(...))"));

      VLOG(3) << "Custom Operator: Default InferShape - share ddim.";
      ctx->ShareDim(op_inputs[0], op_outputs[0]);
    };
  } else {
    info.infer_shape_ = [op_inputs, op_outputs, op_attrs, infer_shape_func](
                            InferShapeContext* ctx) {
      RunInferShapeFunc(ctx, infer_shape_func, op_inputs, op_outputs, op_attrs);
    };
  }

  // Infer Dtype
  if (infer_dtype_func == nullptr) {
    // use default InferDtype
    info.infer_var_type_ = [op_inputs, op_outputs](InferVarTypeContext* ctx) {
      PADDLE_ENFORCE_EQ(
          op_inputs.size(),
          1UL,
          platform::errors::Unavailable(
              "Your custom operator contains multiple inputs. "
              "We only allow a custom operator that contains only one input "
              "and only one output without setting the InferDtypeFn. "
              "At this time, the input dtype will be directly set to "
              "the output dtype.\n"
              "Please set the InferDtypeFn of custom "
              "operator by .SetInferDtypeFn(PD_INFER_DTYPE(...))"));
      PADDLE_ENFORCE_EQ(
          op_outputs.size(),
          1UL,
          platform::errors::Unavailable(
              "Your custom operator contains multiple outputs. "
              "We only allow a custom operator that contains only one input "
              "and only one output without setting the InferDtypeFn. "
              "At this time, the input dtype will be directly set to "
              "the output dtype.\n"
              "Please set the InferDtypeFn of custom "
              "operator by .SetInferDtypeFn(PD_INFER_DTYPE(...))"));

      VLOG(3) << "Custom Operator: InferDtype - share dtype.";
      auto dtype = ctx->GetInputDataType(op_inputs[0]);
      ctx->SetOutputDataType(op_outputs[0], dtype);
    };
  } else {
    info.infer_var_type_ =
        [op_inputs, op_outputs, infer_dtype_func](InferVarTypeContext* ctx) {
          std::vector<DataType> input_dtypes;
          std::vector<std::vector<DataType>> vec_input_dtypes;

          VLOG(3) << "Custom Operator: InferDtype - get input dtype.";
          for (auto& in_name : op_inputs) {
            if (detail::IsDuplicableVar(in_name)) {
              std::vector<DataType> vec_custom_dtype;
              for (size_t i = 0; i < ctx->InputSize(in_name); ++i) {
                auto dtype = ctx->GetInputDataType(in_name, i);
                vec_custom_dtype.emplace_back(
                    paddle::framework::TransToPhiDataType(dtype));
              }
              vec_input_dtypes.emplace_back(vec_custom_dtype);
            } else {
              auto dtype = ctx->GetInputDataType(in_name);
              input_dtypes.emplace_back(
                  paddle::framework::TransToPhiDataType(dtype));
            }
          }

          VLOG(3) << "Custom Operator: InferDtype - infer output dtype.";
          auto output_dtypes = infer_dtype_func(input_dtypes, vec_input_dtypes);

          VLOG(3) << "Custom Operator: InferDtype - set output dtype.";
          for (size_t i = 0; i < op_outputs.size(); ++i) {
            auto out_name = op_outputs[i];
            if (detail::IsDuplicableVar(out_name)) {
              // a duplicable output holds one inferred dtype per element,
              // so index the inferred dtypes by the element index `j` here
              for (size_t j = 0; j < output_dtypes.size(); ++j) {
                auto dtype =
                    paddle::framework::TransToProtoVarType(output_dtypes[j]);
                ctx->SetOutputDataType(out_name, dtype, j);
              }
            } else {
              ctx->SetOutputDataType(
                  out_name,
                  paddle::framework::TransToProtoVarType(output_dtypes[i]));
            }
          }
        };
  }

  // Kernel func
  RegisterOperatorKernel(
      op_name, kernel_fn, op_inputs, op_outputs, op_attrs, dso_handle);

  // If grad op or double grad op exists
  std::string cur_op_name = op_name;
  for (size_t i = 1; i < op_meta_infos.size(); ++i) {
    auto& cur_grad_op = op_meta_infos[i];

    auto& grad_op_name = OpMetaInfoHelper::GetOpName(cur_grad_op);
    auto& grad_op_inputs = OpMetaInfoHelper::GetInputs(cur_grad_op);
    auto& grad_op_outputs = OpMetaInfoHelper::GetOutputs(cur_grad_op);
    auto& grad_op_attrs = OpMetaInfoHelper::GetAttrs(cur_grad_op);
    auto& grad_kernel_fn = OpMetaInfoHelper::GetKernelFn(cur_grad_op);
    auto& grad_infer_shape_fn = OpMetaInfoHelper::GetInferShapeFn(cur_grad_op);

    VLOG(3) << "Custom Operator: backward, op name: " << grad_op_name;
    VLOG(3) << "Custom Operator: backward, op inputs: "
            << string::join_strings(grad_op_inputs, ',');
    VLOG(3) << "Custom Operator: backward, op outputs: "
            << string::join_strings(grad_op_outputs, ',');

    bool is_double_grad = (i == 2);

    // GradOpDescMaker
    info.grad_op_maker_ =
        [grad_op_name, grad_op_inputs, grad_op_outputs, is_double_grad](
            const OpDesc& fwd_op,
            const std::unordered_set<std::string>& no_grad_set,
            std::unordered_map<std::string, std::string>* grad_to_var,
            const std::vector<BlockDesc*>& grad_block) {
          CustomGradOpMaker<paddle::framework::OpDesc> maker(fwd_op,
                                                             no_grad_set,
                                                             grad_to_var,
                                                             grad_block,
                                                             grad_op_name,
                                                             grad_op_inputs,
                                                             grad_op_outputs,
                                                             is_double_grad);
          return maker();
        };

    // GradOpBaseMaker
    info.dygraph_grad_op_maker_ =
        [grad_op_name, grad_op_inputs, grad_op_outputs, is_double_grad](
            const std::string& type,
            const imperative::NameVarBaseMap& var_base_map_in,
            const imperative::NameVarBaseMap& var_base_map_out,
            const framework::AttributeMap& attrs,
            const framework::AttributeMap& default_attrs,
            const std::map<std::string, std::string>& inplace_map) {
          CustomGradOpMaker<paddle::imperative::OpBase> maker(type,
                                                              var_base_map_in,
                                                              var_base_map_out,
                                                              attrs,
                                                              inplace_map,
                                                              grad_op_name,
                                                              grad_op_inputs,
                                                              grad_op_outputs,
                                                              is_double_grad);
          maker.SetDygraphDefaultAttrsMap(default_attrs);
          return maker();
        };

    /* Grad op register */
    OpInfo grad_info;

    // Grad Op
    grad_info.creator_ = [](const std::string& type,
                            const VariableNameMap& inputs,
                            const VariableNameMap& outputs,
                            const AttributeMap& attrs) {
      return new CustomOperator(type, inputs, outputs, attrs);
    };

    // Grad InferShape
    if (grad_infer_shape_fn == nullptr) {
      grad_info.infer_shape_ = [grad_op_inputs,
                                grad_op_outputs,
                                is_double_grad](InferShapeContext* ctx) {
        // 1. if the forward input exists, the gradient's shape is the same
        //    as the forward input's by default
        //    [suitable for most situations]
        // 2. if the forward input does not exist, and the op contains only
        //    one grad input and output, use the grad input shape as the
        //    grad output shape
        //    [suitable for the situation that the forward input is not used
        //    as backward input]
        for (auto& out_name : grad_op_outputs) {
          auto fwd_name = detail::NoGrad(out_name, is_double_grad);
          if (detail::IsDuplicableVar(fwd_name)) {
            // a duplicable forward var must be used as a backward input
            ctx->ShareDim(fwd_name, out_name);
          } else {
            if (ctx->HasInput(fwd_name)) {
              ctx->ShareDim(fwd_name, out_name);
            } else {
              PADDLE_ENFORCE_EQ(
                  grad_op_inputs.size() == 1UL && grad_op_outputs.size() == 1UL,
                  true,
                  platform::errors::Unavailable(
                      "Custom grad operator infershape error. "
                      "If a custom grad operator contains only one input and "
                      "only one output, the input shape will be directly set "
                      "to the output shape. Otherwise, please set the forward "
                      "input as the grad operator's input or set the "
                      "InferShapeFn of custom grad operator by "
                      ".SetInferShapeFn(PD_INFER_SHAPE(...))"));
              ctx->ShareDim(grad_op_inputs[0], out_name);
            }
          }
        }
      };
    } else {
      grad_info.infer_shape_ = [grad_op_inputs,
                                grad_op_outputs,
                                grad_op_attrs,
                                grad_infer_shape_fn](InferShapeContext* ctx) {
        RunInferShapeFunc(ctx,
                          grad_infer_shape_fn,
                          grad_op_inputs,
                          grad_op_outputs,
                          grad_op_attrs);
      };
    }

    // Kernel func
    RegisterOperatorKernel(grad_op_name,
                           grad_kernel_fn,
                           grad_op_inputs,
                           grad_op_outputs,
                           grad_op_attrs,
                           dso_handle);

    // update current info
    OpInfoMap::Instance().Insert(cur_op_name, info);
    cur_op_name = grad_op_name;
    info = grad_info;
  }
  // insert last info
  OpInfoMap::Instance().Insert(cur_op_name, info);
}

void RegisterOperatorWithMetaInfoMap(
    const paddle::OpMetaInfoMap& op_meta_info_map, void* dso_handle) {
  auto& meta_info_map = op_meta_info_map.GetMap();
  VLOG(3) << "Custom Operator: size of op meta info map - "
          << meta_info_map.size();
  // pair: {op_type, OpMetaInfo}
  for (auto& pair : meta_info_map) {
    VLOG(3) << "Custom Operator: pair first -> op name: " << pair.first;
    RegisterOperatorWithMetaInfo(pair.second, dso_handle);
  }
}

////////////////////// User APIs ///////////////////////

// load op api
const std::unordered_map<std::string, std::vector<OpMetaInfo>>&
LoadOpMetaInfoAndRegisterOp(const std::string& dso_name) {
  void* handle = paddle::platform::dynload::GetOpDsoHandle(dso_name);
  VLOG(3) << "load custom_op lib: " << dso_name;
  typedef OpMetaInfoMap& get_op_meta_info_map_t();
  auto* get_op_meta_info_map =
      detail::DynLoad<get_op_meta_info_map_t>(handle, "PD_GetOpMetaInfoMap");
  auto& op_meta_info_map = get_op_meta_info_map();
  RegisterOperatorWithMetaInfoMap(op_meta_info_map, handle);
  return op_meta_info_map.GetMap();
}
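
// Example: loading a compiled custom op library in-process (assuming
// "libcustom_relu.so" was built with the PD_BUILD_OP macros and therefore
// exports PD_GetOpMetaInfoMap):
//
//   auto& meta_map = paddle::framework::LoadOpMetaInfoAndRegisterOp(
//       "libcustom_relu.so");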

}  // namespace framework
}  // namespace paddle