Unverified commit 6913feb0, authored by jjyaoao, committed by GitHub

remove infrt V1.1 (#52672)

Parent 61fe2198
@@ -73,16 +73,7 @@ tools/nvcc_lazy
 # This file is automatically generated.
 # TODO(zhiqiang) Move this file to build directory.
-paddle/infrt/dialect/pd/ir/pd_ops.td
-paddle/infrt/dialect/phi/ir/phi_cpu_kernels.td
-paddle/infrt/dialect/phi/ir/phi_gpu_kernels.td
-tools/infrt/kernels.json
-tools/infrt/kernel_signature.json
-paddle/infrt/dialect/pd/common/pd_ops_info.h
 .lit_test_times.txt
-paddle/infrt/tests/dialect/Output
-paddle/infrt/tests/lit.cfg.py
-paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.cc
 paddle/fluid/pybind/eager_op_function.cc
 tools/nvcc_lazy
...
@@ -269,7 +269,6 @@ option(
   OFF)
 option(WITH_LITE "Compile Paddle Fluid with Lite Engine" OFF)
 option(WITH_CINN "Compile PaddlePaddle with CINN" OFF)
-option(WITH_INFRT "Compile PaddlePaddle with INFRT" OFF)
 option(WITH_NCCL "Compile PaddlePaddle with NCCL support" ON)
 option(WITH_RCCL "Compile PaddlePaddle with RCCL support" ON)
 option(WITH_XPU_BKCL "Compile PaddlePaddle with BAIDU KUNLUN XPU BKCL" OFF)
...
include(FetchContent)
set(LLVM_DOWNLOAD_URL
https://paddle-inference-dist.bj.bcebos.com/infrt/llvm_b5149f4e66a49a98b67e8e2de4e24a4af8e2781b.tar.gz
)
set(LLVM_MD5 022819bb5760817013cf4b8a37e97d5e)
set(FETCHCONTENT_BASE_DIR ${THIRD_PARTY_PATH}/llvm)
set(FETCHCONTENT_QUIET OFF)
FetchContent_Declare(
external_llvm
URL ${LLVM_DOWNLOAD_URL}
URL_MD5 ${LLVM_MD5}
PREFIX ${THIRD_PARTY_PATH}/llvm SOURCE_DIR ${THIRD_PARTY_PATH}/install/llvm)
if(NOT LLVM_PATH)
FetchContent_GetProperties(external_llvm)
if(NOT external_llvm_POPULATED)
FetchContent_Populate(external_llvm)
endif()
set(LLVM_PATH ${THIRD_PARTY_PATH}/install/llvm)
set(LLVM_DIR ${THIRD_PARTY_PATH}/install/llvm/lib/cmake/llvm)
set(MLIR_DIR ${THIRD_PARTY_PATH}/install/llvm/lib/cmake/mlir)
else()
set(LLVM_DIR ${LLVM_PATH}/lib/cmake/llvm)
set(MLIR_DIR ${LLVM_PATH}/lib/cmake/mlir)
endif()
if(${CMAKE_CXX_COMPILER} STREQUAL "clang++")
set(CMAKE_EXE_LINKER_FLAGS
"${CMAKE_EXE_LINKER_FLAGS} -stdlib=libc++ -lc++abi")
endif()
message(STATUS "set LLVM_DIR: ${LLVM_DIR}")
message(STATUS "set MLIR_DIR: ${MLIR_DIR}")
find_package(LLVM REQUIRED CONFIG HINTS ${LLVM_DIR})
find_package(MLIR REQUIRED CONFIG HINTS ${MLIR_DIR})
find_package(ZLIB REQUIRED)
list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_DIR}")
include(AddLLVM)
include_directories(${LLVM_INCLUDE_DIRS})
list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_DIR}")
list(APPEND CMAKE_MODULE_PATH "${MLIR_CMAKE_DIR}")
include(AddLLVM)
include(TableGen)
include(AddMLIR)
message(STATUS "Found MLIR: ${MLIR_DIR}")
message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}")
message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}")
# To build with MLIR, LLVM is built from source with the following flags:
#[==[
cmake ../llvm -G "Unix Makefiles" \
-DLLVM_ENABLE_PROJECTS="mlir;clang" \
-DLLVM_BUILD_EXAMPLES=OFF \
-DLLVM_TARGETS_TO_BUILD="X86" \
-DCMAKE_BUILD_TYPE=Release \
-DLLVM_ENABLE_ASSERTIONS=ON \
-DLLVM_ENABLE_ZLIB=OFF \
-DLLVM_ENABLE_RTTI=ON \
-DLLVM_INSTALL_UTILS=ON \
-DCMAKE_INSTALL_PREFIX=./install
#]==]
# The matched llvm-project version is b5149f4e66a49a98b67e8e2de4e24a4af8e2781b (currently a temporary commit)
add_definitions(${LLVM_DEFINITIONS})
llvm_map_components_to_libnames(
llvm_libs
Support
Core
irreader
X86
executionengine
orcjit
mcjit
all
codegen)
message(STATUS "LLVM libs: ${llvm_libs}")
get_property(mlir_libs GLOBAL PROPERTY MLIR_ALL_LIBS)
message(STATUS "MLIR libs: ${mlir_libs}")
add_definitions(${LLVM_DEFINITIONS})
# The minimum needed libraries for MLIR IR parse and transform.
set(MLIR_IR_LIBS MLIRAnalysis MLIRPass MLIRParser MLIRDialect MLIRIR MLIROptLib)
# td_base is the name of a xxx.td file (without the .td suffix)
function(mlir_tablegen_on td_base)
set(options)
set(oneValueArgs DIALECT)
cmake_parse_arguments(mlir_tablegen_on "${options}" "${oneValueArgs}"
"${multiValueArgs}" ${ARGN})
set(LLVM_TARGET_DEFINITIONS ${td_base}.td)
mlir_tablegen(${td_base}.hpp.inc -gen-op-decls)
mlir_tablegen(${td_base}.cpp.inc -gen-op-defs)
if(mlir_tablegen_on_DIALECT)
mlir_tablegen(${td_base}_dialect.hpp.inc --gen-dialect-decls
-dialect=${mlir_tablegen_on_DIALECT})
mlir_tablegen(${td_base}_dialect.cpp.inc --gen-dialect-defs
-dialect=${mlir_tablegen_on_DIALECT})
endif()
add_public_tablegen_target(${td_base}_IncGen)
add_custom_target(${td_base}_inc DEPENDS ${td_base}_IncGen)
endfunction()
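# Hypothetical usage sketch (illustration only, not part of the original file):
#   mlir_tablegen_on(pd_ops DIALECT pd)
# would set pd_ops.td as the tablegen input and emit pd_ops.hpp.inc,
# pd_ops.cpp.inc, and the pd dialect decl/def .inc files, reachable through
# the generated pd_ops_inc target.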
function(mlir_add_rewriter td_base)
set(LLVM_TARGET_DEFINITIONS ${td_base}.td)
set(LLVM_TARGET_DEPENDS
${LLVM_TARGET_DEPENDS}
${CMAKE_SOURCE_DIR}/paddle/infrt/dialect/infrt/ir/infrt_base.td)
mlir_tablegen(${td_base}.cpp.inc -gen-rewriters)
add_public_tablegen_target(MLIR${td_base}IncGen)
add_dependencies(mlir-headers MLIR${td_base}IncGen)
endfunction()
# Execute the mlir script with infrt-exec program.
# @name: name of the test
# @script: path to the mlir script file
function(infrt_exec_check name script)
add_test(
NAME ${name}
COMMAND
sh -c
"${CMAKE_BINARY_DIR}/paddle/infrt/host_context/infrt-exec -i ${CMAKE_CURRENT_SOURCE_DIR}/${script}| ${LLVM_PATH}/bin/FileCheck ${CMAKE_CURRENT_SOURCE_DIR}/${script}"
)
endfunction()
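# Hypothetical usage sketch (illustration only, not part of the original file):
#   infrt_exec_check(test_infrt_basic dialect/basic.mlir)
# registers a test that runs infrt-exec on the .mlir script and pipes the
# output through FileCheck against the CHECK lines embedded in that script.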
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set(INFRT_INSTALL_DIR
"${CMAKE_BINARY_DIR}/paddle_infrt_install_dir"
CACHE STRING "A path setting paddle infrt shared and static libraries")
function(copy TARGET)
set(options "")
set(oneValueArgs "")
set(multiValueArgs SRCS DSTS)
cmake_parse_arguments(copy_lib "${options}" "${oneValueArgs}"
"${multiValueArgs}" ${ARGN})
list(LENGTH copy_lib_SRCS copy_lib_SRCS_len)
list(LENGTH copy_lib_DSTS copy_lib_DSTS_len)
if(NOT ${copy_lib_SRCS_len} EQUAL ${copy_lib_DSTS_len})
message(
FATAL_ERROR
"${TARGET} source numbers are not equal to destination numbers")
endif()
math(EXPR len "${copy_lib_SRCS_len} - 1")
foreach(index RANGE ${len})
list(GET copy_lib_SRCS ${index} src)
list(GET copy_lib_DSTS ${index} dst)
add_custom_command(
TARGET ${TARGET}
POST_BUILD
COMMAND mkdir -p "${dst}"
COMMAND cp -r "${src}" "${dst}"
COMMENT "copying ${src} -> ${dst}")
endforeach()
endfunction()
function(copy_part_of_thrid_party TARGET DST)
set(dst_dir "${DST}/third_party/install/glog")
copy(
${TARGET}
SRCS ${GLOG_INCLUDE_DIR} ${GLOG_LIBRARIES}
DSTS ${dst_dir} ${dst_dir}/lib)
endfunction()
# inference library for only inference
set(infrt_lib_deps third_party infrt infrt_static)
add_custom_target(infrt_lib_dist DEPENDS ${infrt_lib_deps})
# CMakeCache Info
copy(
infrt_lib_dist
SRCS ${CMAKE_BINARY_DIR}/CMakeCache.txt
DSTS ${INFRT_INSTALL_DIR})
set(infrt_lib ${INFRT_BINARY_DIR}/libinfrt.*)
copy(
infrt_lib_dist
SRCS ${INFRT_SOURCE_DIR}/api/infrt_api.h ${infrt_lib}
DSTS ${INFRT_INSTALL_DIR}/infrt/include ${INFRT_INSTALL_DIR}/infrt/lib)
copy(
infrt_lib_dist
SRCS ${INFRT_BINARY_DIR}/paddle/framework.pb.h
DSTS ${INFRT_INSTALL_DIR}/infrt/include/internal)
# paddle fluid version
function(version version_file)
execute_process(
COMMAND ${GIT_EXECUTABLE} log --pretty=format:%H -1
WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}
OUTPUT_VARIABLE PADDLE_GIT_COMMIT)
file(WRITE ${version_file} "GIT COMMIT ID: ${PADDLE_GIT_COMMIT}\n")
file(APPEND ${version_file}
"CXX compiler version: ${CMAKE_CXX_COMPILER_VERSION}\n")
endfunction()
version(${INFRT_INSTALL_DIR}/version.txt)
@@ -493,11 +493,6 @@ if(WIN32)
   list(APPEND third_party_deps extern_dirent)
 endif()
-if(WITH_INFRT)
-  include(external/llvm)
-  list(APPEND third_party_deps ${llvm_libs})
-endif()
 if(WITH_IPU)
   include(external/poplar)
   list(APPEND third_party_deps extern_poplar)
...
@@ -54,11 +54,7 @@ For a long time, because the Paddle and Paddle-Lite operators are maintained sep
 Therefore, this functional operator library will be jointly constructed by training and inference team, and will serve as an independent compilation component and underlying infrastructure (not yet independently split), which can serve training, server-inference, and -inference execution systems at the same time.
-### 1.5 The adaptation of the new inference Runtime design 'infrt'
-Inference team designed a new runtime `infrt`. It is expected to unify the execution system of Paddle-Inference and Paddle-Lite. It is necessary to directly call the operators in the PHI operator library jointly built this time. Therefore, the adaptation to `infrt` needs to be considered in the design. (Currently the `infrt` project is temporarily on hold).
-### 1.6 Op and Kernel parameter normalization
+### 1.5 Op and Kernel parameter normalization
 The Python 2.0 API project in 2020 standardized the argument list of the Paddle Python-side API, making it concise, easy to use, and standard. However, due to cost considerations, the argument list at the Op level was not standardized, so there will be many early developed operators that differ greatly in arguments from the Python API. For example, `conv` op, the Python API has only 8 arguments, but the corresponding C++ `Conv` Op has 29 arguments. API and Op are essentially the same layer of concepts, both are descriptions of an operation, and the arguments should be consistent. In order to solve this problem, 'the operator definition enhancement project' was launched, and the declarations of 'AsExtra' and 'AsQuant' were added to some unnecessary arguments, but the problem was not fundamentally solved, which is what the construction of the PHI operator library hopes to solve.
@@ -68,7 +64,7 @@ We hope to be able to achieve the same three-layer arguments of Python API -> Op
 ### 2.1 Location
-The PHI code directory is inside the paddle directory, which is at the same level as fluid, rather than inside the fluid directory. PHI is a basic component that is called by various upper-layer runtime such as fluid, lite, and infrt, and it will be used later as a separately compiled dynamic library, therefore PHI is not suitable as the submodule of fluid.
+The PHI code directory is inside the paddle directory, which is at the same level as fluid, rather than inside the fluid directory. PHI is a basic component that is called by various upper-layer runtime such as fluid, lite, and it will be used later as a separately compiled dynamic library, therefore PHI is not suitable as the submodule of fluid.
 ### 2.2 Directory Structure
@@ -86,15 +82,19 @@ Training and inference require a clear operator library directory structure:
   - For example, a model uses `add` and `multiply` only, ideally it could be cropped to only 2 kernels.
 - In the long run, support the requirement of easily reusing kernel implementation.
   - Explanation: When reusing the kernel, the corresponding function implementation should be introduced through `include` easily, rather than cannot find the kernel because of the complex directory structure.
 - In the long run, support the requirement of the unified writing method among cross-device kernels, and the writing method is intuitive and easy to use, without introducing unnecessary template parameters.
   - Explanation: Kernel Primitive API module is at the lower layer of the operator library. Its long-term vision is that each operation uses only one kernel to adapt to various devices, the code that truly distinguishes the device is only in the implementation of the Kernel Primitive API. In the future, the template parameters should be limited to as concise as possible when passing complex parameters into the reused kernel.
 - In terms of ease of use, developers can accurately understand where the newly added kernel should be placed, without ambiguity.
   - Explanation: When developers add an API, they will not be confused about which directory they should put the corresponding kernel in. Moreover, different people should have no ambiguous understanding of where the same kernel should be placed.
 - Do not introduce a lot of duplicate directory design.
   - Explanation: Concept splitting is needed, but also with boundaries. Avoid subdirectories with the same name occurring in multiple directories. For example, if `eigen`, `funcs`, `math` directories are placed under the cpu directory, then they shouldn't be placed under the gpu directory. The directory design of the new operator library is mainly divided according to the device, and the directory splitting at other levels should be weakened as much as possible. For example, try not to split based on functions, try not to split based on fields, etc.
 - Do not introduce too deep directory design.
@@ -136,7 +136,6 @@ Some directory structure description:
 - `kernels`: Kernels related to each device.
   - `cpu, gpu, ...`
 ##### 2.2.2.2 Kernels directory
 ```
@@ -170,6 +169,7 @@ The directory structure is described as follows:
 - The auxiliary functions that are only used by the current kernel, they are always placed in the same backend folder as the kernel implementation, and the .h file is used to manage the code. Auxiliary function codes are no longer placed elsewhere, unless their implementations are used in multiple places.
   - Even if there are multiple calls, if it is still limited to the same device, directly build the header file and put it in the same directory.
 - The implementation of the backward kernel and the forward kernel are placed in different files, and the file suffix is `*_grad_kernel.*`, which is convenient for cmake to separate and compile.
   - No more directories are created for the backward kernel, otherwise directories such as cpu/gpu will also be created under the backward kernel directory.
   - The implementation of the second-order derivative and the third-order derivative is also placed in the grad kernel implementation file.
@@ -230,12 +230,14 @@ void FullKernel(const Context& dev_ctx,
 ##### 2.3.2.1 API Tensor interface
 - The top-layer is the API-level Tensor interface, which contains two pointer members, `TensorBase` and `AbstractAutogradMeta`.
   - Both members are designed as Interface and do not depend on real Tensor and `Autograd` implementations.
   - `AutogradMeta` is only meaningful in the dynamic graph API-level Tensor, it will not be used in the specific kernel calculation, so put it in the top-layer Tensor interface.
   - In addition, such a design facilitates data sharing and reduces copy overhead.
     - When a Tensor is assigned to another Tensor, or Tensor is used as a function return value, only the pointer is actually copied, and no real data copy is performed.
 - The top-layer C++ Tensor plays a similar role as the Python-side Tensor, and the interface design is as consistent as possible with the Python-side.
   - Contain basic property access and data access methods of Tensor.
     - `shape`, `place`, `dtype`, `data`.
   - Contain the `autograd` methods required by the dynamic graph Tensor.
@@ -277,6 +279,7 @@ Tensor ondnn() const;
 ```
 - This conversion process may be `cast` or `copy`:
   - `cast` if no data copy required.
   - `copy` if data copy required.
 - Transformations are implemented by functional kernels.
@@ -334,12 +337,12 @@ Inherit other Tensors with high degrees of freedom: directly inherit `TensorBase
 - `TensorBase` is an abstract class, which leaves a lot of room for the description of specific Tensor. If the description of traditional Tensor cannot meet the requirements, a specialized Tensor implementation can be designed.
 #### 2.3.3 C++ API
 ##### 2.3.3.1 C++ API form
 > Highlights of this section:
+>
 > 1. The C++ API corresponds to the Python 2.0 API: the function name, parameter name, parameter order, and return value are the same.
 After investigation, we found that very few framework products are designed with the ease of use of the C++ API in mind. For the long-term consideration, if we want to attract more developers to build the paddle ecology, it is also very important to provide a standardized and easy-to-use C++ API architecture. At the same time, the Python 2.0 API project has laid a good reference foundation for the C++ API, and we can directly inherit its achievements.
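For illustration, this correspondence means the `matmul` API configured in the YAML below is exposed in C++ with the same name, parameter order, and defaults as its Python counterpart. A sketch of the generated declaration (the namespace and exact argument types are assumptions here, not confirmed by this diff):

```cpp
// Sketch of a generated C++ API mirroring the Python 2.0 API: same function
// name, parameter names, parameter order, and default values.
Tensor matmul(const Tensor& x,
              const Tensor& y,
              bool transpose_x = false,
              bool transpose_y = false);
```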
@@ -386,24 +389,24 @@ The key to C++ API generation lies in the configuration of the YAML file. Taking
 ```yaml
 ## Forward API configuration
-- api : matmul
-  args : (Tensor x, Tensor y, bool transpose_x=false, bool transpose_y=false)
-  output : Tensor
-  infer_meta :
-    func : MatmulInferMeta
-  kernel :
-    func : matmul
-  backward : matmul_grad
+- api: matmul
+  args: (Tensor x, Tensor y, bool transpose_x=false, bool transpose_y=false)
+  output: Tensor
+  infer_meta:
+    func: MatmulInferMeta
+  kernel:
+    func: matmul
+  backward: matmul_grad
 ## Backward API configuration
-- backward_api : matmul_grad
-  forward : matmul (Tensor x, Tensor y, bool transpose_x, bool transpose_y) -> Tensor(out)
-  args : (Tensor x, Tensor y, Tensor out_grad, bool transpose_x=false, bool transpose_y=false)
-  output : Tensor(x_grad), Tensor(y_grad)
-  infer_meta :
-    func : MatmulGradInferMeta
-  kernel :
-    func : matmul_grad
+- backward_api: matmul_grad
+  forward: matmul (Tensor x, Tensor y, bool transpose_x, bool transpose_y) -> Tensor(out)
+  args: (Tensor x, Tensor y, Tensor out_grad, bool transpose_x=false, bool transpose_y=false)
+  output: Tensor(x_grad), Tensor(y_grad)
+  infer_meta:
+    func: MatmulGradInferMeta
+  kernel:
+    func: matmul_grad
 ```
 The meaning of each configuration parameter:
@@ -426,6 +429,7 @@ Due to the large number of C++ APIs and their various forms and functions, some
 ##### 2.3.4.1 Kernel form
 > Highlights of this section:
+>
 > 1. Notes on Kernel function form:
 > (1) Data type `T` and `DeviceContext` (abbreviated as `Context`) as template parameters;
 > (2) `Context` is the first parameter of Kernel;
@@ -470,14 +474,18 @@ Described as follows:
 > FAQ:
->- Why does the first parameter need to be `DeviceContext`? Why must this parameter be passed in?
+> - Why does the first parameter need to be `DeviceContext`? Why must this parameter be passed in?
 - The PHI kernel requires a pure function format. The variables used in the function are passed in through parameters or created inside the function, global singletons are not allowed inside the function. In order to adapt to various kernel requirements, the `DeviceContext` parameter that stores context information is necessary.
->- Why are two template parameters needed?
+> - Why are two template parameters needed?
 - In order to efficiently support the reusing of device-independent kernels. If we want to implement a Fourier transform `fft` kernel, assuming that the kernel can be derived by combining the basic kernels, the form of `Xxx<T, Device>()` can avoid dynamically redistributing devices.
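A minimal sketch of the kernel form these two answers describe (the argument list is simplified and assumed for illustration; real signatures carry more parameters):

```cpp
namespace phi {

// T (data type) and Context (device context) are template parameters; the
// DeviceContext is always the first argument, so the same functional kernel
// can be instantiated for different devices without any global singleton.
template <typename T, typename Context>
void ScaleKernel(const Context& dev_ctx,
                 const DenseTensor& x,
                 float scale,
                 DenseTensor* out);

}  // namespace phi
```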
 ##### 2.3.4.3 Kernel implementation
 > Highlights of this section:
+>
 > 1. Kernel focuses on computing logic without mixing scheduling logic.
 > 2. Kernel is fine-grained enough, with clear boundaries, no optional parameters, easy to reuse.
@@ -531,13 +539,14 @@ In addition to the change of kernel form from structure format to functional for
 2. In the PHI kernel, the memory application of the output Tensor is required to use the `ctx.Alloc` or `ctx.HostAlloc` method, and no longer use the original `mutable_data` to apply for memory.
 > FAQ
+>
 > 1. Why is `mutable_data` replaced by `ctx.Alloc`?
 > Answer: Because the global method `memory::AllocShared` called in the original `mutable_data` method uses a global singleton for memory allocation, which does not conform to the pure function design principle mentioned above. In terms of business requirements, if a single instance is used in the kernel to determine the way of memory allocation, in the multi-threaded environment of inference, different threads will not be able to flexibly specify different memory allocation ways.
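A sketch of the allocation pattern this rule produces (the signature is simplified and assumed; the real `full` kernel also takes a dtype argument):

```cpp
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void FullKernel(const Context& dev_ctx,
                const IntArray& shape,
                const Scalar& val,
                DenseTensor* out) {
  out->Resize(make_ddim(shape.GetData()));
  // Allocate through the passed-in context instead of the old
  // out->mutable_data<T>(place): no global singleton is touched, so the
  // kernel stays a pure function.
  T* data = dev_ctx.template Alloc<T>(out);
  T value = val.to<T>();
  for (int64_t i = 0; i < out->numel(); ++i) {
    data[i] = value;
  }
}

}  // namespace phi
```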
 ##### 2.3.4.4 Kernel registration
 > Highlights of this section:
+>
 > 1. Kernel needs to expose all its key information to the framework and record its input, output and attribute information, otherwise it will lead to unclear boundaries between framework scheduling and Kernel calculation.
 When fluid Kernel is registered, only the `place`, `layout`, `dtype`, `input` and `output` of the Kernel are recorded and managed by `ExecutionContext`, and there is no corresponding information record. Now the kernel needs to be changed to a functional type. The input, output and attributes of each function are clear. We hope to record the information of each input and output here, which is also compatible with paddle-lite scheduling.
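For reference, a registration sketch in the macro form PHI uses (the op name and dtype list here are illustrative; the trailing brace block can further customize the registered kernel):

```cpp
// Registers phi::FullKernel for the CPU backend and any layout, instantiated
// for the listed data types; input/output/attribute metadata is derived from
// the kernel's function signature and recorded in its KernelArgsDef.
PD_REGISTER_KERNEL(full,
                   CPU,
                   ALL_LAYOUT,
                   phi::FullKernel,
                   float,
                   double,
                   int,
                   int64_t) {}
```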
@@ -655,6 +664,7 @@ In addition, only basic template adaptation has been implemented at present, and
 ##### 2.3.4.4 Kernel management
 > Highlights of this section:
+>
 > 1. Introduce the design of the current Kernel management components
 For the management of the new form of Kernel, described as follows:
@@ -663,10 +673,10 @@ For the management of the new form of Kernel, described as follows:
 - `KernelKey` is similar to the original `OpKernelType`, but the `place` and `library_type` fields are combined into one and called `Backend`, because the original `LibraryType` is a limited enumeration class, which is strongly related to place, the splitting increases the cost of understanding instead.
 - `Kernel` holds more information than the original `OpKernel`. In addition to the Function during execution, it also holds information about specific parameters, namely `KernelArgsDef`. For Tensor type input and output, it saves Tensor type information, Device, data Type, data layout. For Attribute type input and output, it saves type information (see the lookup sketch after this list).
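A rough lookup sketch showing how these components fit together (API shapes assumed from `paddle/phi/core/kernel_factory.h`; details vary across versions):

```cpp
#include "paddle/phi/core/kernel_factory.h"

phi::Kernel LookupMatmulCpuKernel() {
  // Backend replaces the old place/library_type pair inside the key.
  phi::KernelKey key(phi::Backend::CPU,
                     phi::DataLayout::ALL_LAYOUT,
                     phi::DataType::FLOAT32);
  // The global factory maps kernel name + KernelKey to a registered Kernel;
  // kernel.args_def() then exposes the recorded parameter information.
  return phi::KernelFactory::Instance().SelectKernel("matmul", key);
}
```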
 #### 2.3.5 Kernel Compilation and Dependencies
 > Highlights of this section:
+>
 > 1. Introduce the compilation design of the kernel.
 > 2. Introduce the establishment of kernel dependencies.
@@ -714,6 +724,7 @@ The original `InferShape` of fluid Op is the same as `OpKernel`, has the problem
 We also rewrite `InferShape` into a functional form, which supports different Ops to call the same `InferShape` function, which improves ease of use and reduces maintenance costs.
 > FAQ:
+>
 > 1. Why call it `InferMeta` instead of continuing to call it `InferShape`?
 > Answer: The `Meta` of `InferMeta` comes from the `meta` member in `DenseTensor`. In PHI, an op has two components, `InferMeta` and `Kernel`. `InferMeta` covers the functions of `InferShape`, but it is not limited to `InferShape`. In addition to the inference of dims and lod, `InferMeta` also infers dtype and layout, which is different from the original.
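A minimal sketch of this functional form, modeled on the simplest unchanged-meta case (the body is illustrative; real implementations live under `paddle/phi/infermeta/`):

```cpp
#include "paddle/phi/core/meta_tensor.h"

namespace phi {

// Functional InferMeta: infers dims, dtype, and layout from the input's meta
// with no Op or runtime context involved, so any number of Ops can share it.
void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(x.dtype());
  out->set_layout(x.layout());
}

}  // namespace phi
```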
@@ -757,8 +768,8 @@ The purpose of using `MetaTensor` is to mask multiple Tensor types, and to be co
 For the basic design of `MetaTensor`, see `paddle/phi/core/meta_tensor.h`. There is a pointer member `TensorBase` in the base class `MetaTensor`, so it can be compatible with `DenseTensor`, `SelectedRows`, `SparseCsrTensor` and other types in PHI.
 > Note:
 > Only the content related to the design of PHI itself is in this README. If you want to know more about the design of how phi and fluid are compatible, please refer to:
+>
 > 1. [Paddle HIgh reusability operator library (PHI) Design Document (CN Version)](https://github.com/PaddlePaddle/docs/blob/develop/docs/design/phi/design_cn.md)
 > 2. [Paddle HIgh reusability operator library (PHI) Design Document (EN Version)](https://github.com/PaddlePaddle/docs/blob/develop/docs/design/phi/design_en.md)
@@ -330,9 +330,7 @@ class PRChecker:
             if filename.startswith(PADDLE_ROOT + 'python/'):
                 file_list.append(filename)
             elif filename.startswith(PADDLE_ROOT + 'paddle/'):
-                if filename.startswith(PADDLE_ROOT + 'paddle/infrt'):
-                    filterFiles.append(filename)
-                elif filename.startswith(PADDLE_ROOT + 'paddle/scripts'):
+                if filename.startswith(PADDLE_ROOT + 'paddle/scripts'):
                     if filename.startswith(
                         (
                             PADDLE_ROOT + 'paddle/scripts/paddle_build.sh',
...