Commit 0633d001 authored by Jiansong Wang

1. Rename the directory from nna to imagination_nna
2. Add the imagination/Imagination/IMAGINATION prefixes to symbol definitions
3. No build errors
Parent e861f860
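For context, a minimal configure-time sketch of how the renamed knobs introduced below fit together; the SDK path is a placeholder, not taken from this commit:

```cmake
# Hypothetical configuration; /opt/imgnna-sdk is a placeholder path.
# cmake/device/imagination_nna.cmake errors out unless the SDK root is
# provided either as a CMake variable or via the environment.
set(LITE_WITH_IMAGINATION_NNA ON)
set(IMAGINATION_NNA_SDK_ROOT "/opt/imgnna-sdk")
# Equivalent: export IMAGINATION_NNA_SDK_ROOT=/opt/imgnna-sdk before running cmake.
```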
...@@ -172,8 +172,8 @@ if(LITE_WITH_RKNPU) ...@@ -172,8 +172,8 @@ if(LITE_WITH_RKNPU)
include(device/rknpu) include(device/rknpu)
endif() endif()
if(LITE_WITH_NNA) if(LITE_WITH_IMAGINATION_NNA)
include(device/nna) include(device/imagination_nna)
endif() endif()
include(external/flatbuffers) include(external/flatbuffers)
......
...@@ -175,8 +175,8 @@ if (LITE_WITH_MLU) ...@@ -175,8 +175,8 @@ if (LITE_WITH_MLU)
add_definitions("-DLITE_WITH_MLU") add_definitions("-DLITE_WITH_MLU")
endif() endif()
if (LITE_WITH_NNA) if (LITE_WITH_IMAGINATION_NNA)
add_definitions("-DLITE_WITH_NNA") add_definitions("-DLITE_WITH_IMAGINATION_NNA")
endif() endif()
if (LITE_WITH_HUAWEI_ASCEND_NPU) if (LITE_WITH_HUAWEI_ASCEND_NPU)
......
...@@ -12,58 +12,50 @@ ...@@ -12,58 +12,50 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
if(NOT LITE_WITH_NNA) if(NOT LITE_WITH_IMAGINATION_NNA)
return() return()
endif() endif()
if(NOT DEFINED IMGNNA_DDK_ROOT) if(NOT DEFINED IMAGINATION_NNA_SDK_ROOT)
set(IMGNNA_DDK_ROOT $ENV{IMGNNA_DDK_ROOT}) set(IMAGINATION_NNA_SDK_ROOT $ENV{IMAGINATION_NNA_SDK_ROOT})
if(NOT IMGNNA_DDK_ROOT) if(NOT IMAGINATION_NNA_SDK_ROOT)
message(FATAL_ERROR "Must set IMGNNA_DDK_ROOT or env IMGNNA_DDK_ROOT when LITE_WITH_IMGNNA=ON") message(FATAL_ERROR "Must set IMAGINATION_NNA_SDK_ROOT or env IMAGINATION_NNA_SDK_ROOT when LITE_WITH_IMAGINATION_NNA=ON")
endif() endif()
endif() endif()
message(STATUS "IMGNNA_DDK_ROOT: ${IMGNNA_DDK_ROOT}") message(STATUS "IMAGINATION_NNA_SDK_ROOT: ${IMAGINATION_NNA_SDK_ROOT}")
find_path(IMGNNA_DDK_INC NAMES imgdnn.h find_path(IMGNNA_DDK_INC NAMES imgdnn.h
PATHS ${IMGNNA_DDK_ROOT}/include/imgdnn NO_DEFAULT_PATH) PATHS ${IMAGINATION_NNA_SDK_ROOT}/include/imgdnn NO_DEFAULT_PATH)
if(NOT IMGNNA_DDK_INC) if(NOT IMGNNA_DDK_INC)
message(FATAL_ERROR "Can not find imgdnn.h in ${IMGNNA_DDK_ROOT}/include") message(FATAL_ERROR "Can not find imgdnn.h in ${IMAGINATION_NNA_SDK_ROOT}/include")
endif() endif()
#include_directories("${IMGNNA_DDK_ROOT}/include")
include_directories(${IMGNNA_DDK_INC}) include_directories(${IMGNNA_DDK_INC})
#set(IMGNNA_SUB_LIB_PATH "lib64")
#if(ARM_TARGET_ARCH_ABI STREQUAL "armv8")
# set(IMGNNA_SUB_LIB_PATH "lib64")
#endif()
#if(ARM_TARGET_ARCH_ABI STREQUAL "armv7")
# set(IMGNNA_SUB_LIB_PATH "lib")
#endif()
set(IMGNNA_LIB_PATH "lib") set(IMGNNA_LIB_PATH "lib")
find_library(IMGNNA_DDK_IMGDNN_FILE NAMES imgdnn find_library(IMGNNA_DDK_IMGDNN_FILE NAMES imgdnn
PATHS ${IMGNNA_DDK_ROOT}/${IMGNNA_LIB_PATH}) PATHS ${IMAGINATION_NNA_SDK_ROOT}/${IMGNNA_LIB_PATH})
if(NOT IMGNNA_DDK_IMGDNN_FILE) if(NOT IMGNNA_DDK_IMGDNN_FILE)
message(FATAL_ERROR "Can not find IMGNNA_DDK_IMGDNN_FILE in ${IMGNNA_DDK_ROOT}") message(FATAL_ERROR "Can not find IMGNNA_DDK_IMGDNN_FILE in ${IMAGINATION_NNA_SDK_ROOT}")
else() else()
message(STATUS "Found IMGNNA_DDK IMGDNN Library: ${IMGNNA_DDK_IMGDNN_FILE}") message(STATUS "Found IMGNNA_DDK IMGDNN Library: ${IMGNNA_DDK_IMGDNN_FILE}")
add_library(nna_ddk_imgdnn SHARED IMPORTED GLOBAL) add_library(imagination_nna_ddk_imgdnn SHARED IMPORTED GLOBAL)
set_property(TARGET nna_ddk_imgdnn PROPERTY IMPORTED_LOCATION ${IMGNNA_DDK_IMGDNN_FILE}) set_property(TARGET imagination_nna_ddk_imgdnn PROPERTY IMPORTED_LOCATION ${IMGNNA_DDK_IMGDNN_FILE})
endif() endif()
find_library(IMGNNA_DDK_RUNTIME_FILE NAMES nnasession find_library(IMGNNA_DDK_RUNTIME_FILE NAMES nnasession
PATHS ${IMGNNA_DDK_ROOT}/${IMGNNA_LIB_PATH}) PATHS ${IMAGINATION_NNA_SDK_ROOT}/${IMGNNA_LIB_PATH})
if(NOT IMGNNA_DDK_RUNTIME_FILE) if(NOT IMGNNA_DDK_RUNTIME_FILE)
message(FATAL_ERROR "Can not find IMGNNA_DDK_RUNTIME_FILE in ${IMGNNA_DDK_ROOT}") message(FATAL_ERROR "Can not find IMGNNA_DDK_RUNTIME_FILE in ${IMAGINATION_NNA_SDK_ROOT}")
else() else()
message(STATUS "Found IMGNNA_DDK RUNTIME Library: ${IMGNNA_DDK_RUNTIME_FILE}") message(STATUS "Found IMGNNA_DDK RUNTIME Library: ${IMGNNA_DDK_RUNTIME_FILE}")
add_library(nna_ddk_runtime SHARED IMPORTED GLOBAL) add_library(imagination_nna_ddk_runtime SHARED IMPORTED GLOBAL)
set_property(TARGET nna_ddk_runtime PROPERTY IMPORTED_LOCATION ${IMGNNA_DDK_RUNTIME_FILE}) set_property(TARGET imagination_nna_ddk_runtime PROPERTY IMPORTED_LOCATION ${IMGNNA_DDK_RUNTIME_FILE})
endif() endif()
set(nna_runtime_libs nna_ddk_runtime CACHE INTERNAL "imgnna ddk runtime libs") set(imagination_nna_runtime_libs imagination_nna_ddk_runtime CACHE INTERNAL "imgnna ddk runtime libs")
set(nna_builder_libs nna_ddk_imgdnn CACHE INTERNAL "imgnna ddk builder libs") set(imagination_nna_builder_libs imagination_nna_ddk_imgdnn CACHE INTERNAL "imgnna ddk builder libs")
...@@ -22,7 +22,7 @@ endfunction() ...@@ -22,7 +22,7 @@ endfunction()
function (lite_deps TARGET) function (lite_deps TARGET)
set(options "") set(options "")
set(oneValueArgs "") set(oneValueArgs "")
set(multiValueArgs DEPS X86_DEPS CUDA_DEPS ARM_DEPS PROFILE_DEPS LIGHT_DEPS HVY_DEPS CL_DEPS FPGA_DEPS BM_DEPS RKNPU_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS NNA_DEPS APU_DEPS CV_DEPS ARGS) set(multiValueArgs DEPS X86_DEPS CUDA_DEPS ARM_DEPS PROFILE_DEPS LIGHT_DEPS HVY_DEPS CL_DEPS FPGA_DEPS BM_DEPS RKNPU_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS IMAGINATION_NNA_DEPS APU_DEPS CV_DEPS ARGS)
cmake_parse_arguments(lite_deps "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) cmake_parse_arguments(lite_deps "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
set(deps ${lite_deps_DEPS}) set(deps ${lite_deps_DEPS})
...@@ -118,8 +118,8 @@ function (lite_deps TARGET) ...@@ -118,8 +118,8 @@ function (lite_deps TARGET)
endforeach(var) endforeach(var)
endif() endif()
if (LITE_WITH_NNA) if (LITE_WITH_IMAGINATION_NNA)
foreach(var ${lite_deps_NNA_DEPS}) foreach(var ${lite_deps_IMAGINATION_NNA_DEPS})
set(deps ${deps} ${var}) set(deps ${deps} ${var})
endforeach(var) endforeach(var)
endif() endif()
...@@ -155,7 +155,7 @@ file(WRITE ${offline_lib_registry_file} "") # clean ...@@ -155,7 +155,7 @@ file(WRITE ${offline_lib_registry_file} "") # clean
function(lite_cc_library TARGET) function(lite_cc_library TARGET)
set(options SHARED shared STATIC static MODULE module) set(options SHARED shared STATIC static MODULE module)
set(oneValueArgs "") set(oneValueArgs "")
set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS BM_DEPS NNA_DEPS RKNPU_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS CV_DEPS PROFILE_DEPS LIGHT_DEPS set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS BM_DEPS IMAGINATION_NNA_DEPS RKNPU_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS CV_DEPS PROFILE_DEPS LIGHT_DEPS
HVY_DEPS EXCLUDE_COMPILE_DEPS ARGS) HVY_DEPS EXCLUDE_COMPILE_DEPS ARGS)
cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
...@@ -166,7 +166,7 @@ function(lite_cc_library TARGET) ...@@ -166,7 +166,7 @@ function(lite_cc_library TARGET)
CUDA_DEPS ${args_CUDA_DEPS} CUDA_DEPS ${args_CUDA_DEPS}
CL_DEPS ${args_CL_DEPS} CL_DEPS ${args_CL_DEPS}
BM_DEPS ${args_BM_DEPS} BM_DEPS ${args_BM_DEPS}
NNA_DEPS ${args_NNA_DEPS} IMAGINATION_NNA_DEPS ${args_IMAGINATION_NNA_DEPS}
RKNPU_DEPS ${args_RKNPU_DEPS} RKNPU_DEPS ${args_RKNPU_DEPS}
ARM_DEPS ${args_ARM_DEPS} ARM_DEPS ${args_ARM_DEPS}
CV_DEPS ${args_CV_DEPS} CV_DEPS ${args_CV_DEPS}
...@@ -207,7 +207,7 @@ function(lite_cc_binary TARGET) ...@@ -207,7 +207,7 @@ function(lite_cc_binary TARGET)
set(options " -g ") set(options " -g ")
endif() endif()
set(oneValueArgs "") set(oneValueArgs "")
set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS BM_DEPS NNA_DEPS RKNPU NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS PROFILE_DEPS set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS BM_DEPS IMAGINATION_NNA_DEPS RKNPU NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS PROFILE_DEPS
LIGHT_DEPS HVY_DEPS EXCLUDE_COMPILE_DEPS CV_DEPS ARGS) LIGHT_DEPS HVY_DEPS EXCLUDE_COMPILE_DEPS CV_DEPS ARGS)
cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
...@@ -224,7 +224,7 @@ function(lite_cc_binary TARGET) ...@@ -224,7 +224,7 @@ function(lite_cc_binary TARGET)
XPU_DEPS ${args_XPU_DEPS} XPU_DEPS ${args_XPU_DEPS}
RKNPU_DEPS ${args_RKNPU_DEPS} RKNPU_DEPS ${args_RKNPU_DEPS}
BM_DEPS ${args_BM_DEPS} BM_DEPS ${args_BM_DEPS}
NNA_DEPS ${args_NNA_DEPS} IMAGINATION_NNA_DEPS ${args_IMAGINATION_NNA_DEPS}
PROFILE_DEPS ${args_PROFILE_DEPS} PROFILE_DEPS ${args_PROFILE_DEPS}
LIGHT_DEPS ${args_LIGHT_DEPS} LIGHT_DEPS ${args_LIGHT_DEPS}
HVY_DEPS ${args_HVY_DEPS} HVY_DEPS ${args_HVY_DEPS}
...@@ -262,7 +262,7 @@ function(lite_cc_test TARGET) ...@@ -262,7 +262,7 @@ function(lite_cc_test TARGET)
endif() endif()
set(options "") set(options "")
set(oneValueArgs "") set(oneValueArgs "")
set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS BM_DEPS NNA_DEPS RKNPU_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS PROFILE_DEPS set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS BM_DEPS IMAGINATION_NNA_DEPS RKNPU_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS PROFILE_DEPS
LIGHT_DEPS HVY_DEPS EXCLUDE_COMPILE_DEPS CV_DEPS LIGHT_DEPS HVY_DEPS EXCLUDE_COMPILE_DEPS CV_DEPS
ARGS ARGS
COMPILE_LEVEL # (basic|extra) COMPILE_LEVEL # (basic|extra)
...@@ -287,7 +287,7 @@ function(lite_cc_test TARGET) ...@@ -287,7 +287,7 @@ function(lite_cc_test TARGET)
XPU_DEPS ${args_XPU_DEPS} XPU_DEPS ${args_XPU_DEPS}
RKNPU_DEPS ${args_RKNPU_DEPS} RKNPU_DEPS ${args_RKNPU_DEPS}
BM_DEPS ${args_BM_DEPS} BM_DEPS ${args_BM_DEPS}
NNA_DEPS ${args_NNA_DEPS} IMAGINATION_NNA_DEPS ${args_IMAGINATION_NNA_DEPS}
PROFILE_DEPS ${args_PROFILE_DEPS} PROFILE_DEPS ${args_PROFILE_DEPS}
LIGHT_DEPS ${args_LIGHT_DEPS} LIGHT_DEPS ${args_LIGHT_DEPS}
HVY_DEPS ${args_HVY_DEPS} HVY_DEPS ${args_HVY_DEPS}
...@@ -324,7 +324,7 @@ set(xpu_kernels CACHE INTERNAL "xpu kernels") ...@@ -324,7 +324,7 @@ set(xpu_kernels CACHE INTERNAL "xpu kernels")
set(mlu_kernels CACHE INTERNAL "mlu kernels") set(mlu_kernels CACHE INTERNAL "mlu kernels")
set(huawei_ascend_npu_kernels CACHE INTERNAL "huawei_ascend_npu kernels") set(huawei_ascend_npu_kernels CACHE INTERNAL "huawei_ascend_npu kernels")
set(bm_kernels CACHE INTERNAL "bm kernels") set(bm_kernels CACHE INTERNAL "bm kernels")
set(nna_kernels CACHE INTERNAL "nna kernels") set(imagination_nna_kernels CACHE INTERNAL "imagination_nna kernels")
set(rknpu_kernels CACHE INTERNAL "rknpu kernels") set(rknpu_kernels CACHE INTERNAL "rknpu kernels")
set(opencl_kernels CACHE INTERNAL "opencl kernels") set(opencl_kernels CACHE INTERNAL "opencl kernels")
set(host_kernels CACHE INTERNAL "host kernels") set(host_kernels CACHE INTERNAL "host kernels")
...@@ -341,12 +341,12 @@ if(LITE_BUILD_TAILOR) ...@@ -341,12 +341,12 @@ if(LITE_BUILD_TAILOR)
file(STRINGS ${tailored_kernels_list_path} tailored_kernels_list) file(STRINGS ${tailored_kernels_list_path} tailored_kernels_list)
endif() endif()
# add a kernel for some specific device # add a kernel for some specific device
# device: one of (Host, ARM, X86, NPU, MLU, HUAWEI_ASCEND_NPU, APU, FPGA, OPENCL, CUDA, BM, RKNPU NNA) # device: one of (Host, ARM, X86, NPU, MLU, HUAWEI_ASCEND_NPU, APU, FPGA, OPENCL, CUDA, BM, RKNPU IMAGINATION_NNA)
# level: one of (basic, extra) # level: one of (basic, extra)
function(add_kernel TARGET device level) function(add_kernel TARGET device level)
set(options "") set(options "")
set(oneValueArgs "") set(oneValueArgs "")
set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS BM_DEPS NNA_DEPS RKNPU_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS PROFILE_DEPS set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS BM_DEPS IMAGINATION_NNA_DEPS RKNPU_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS PROFILE_DEPS
LIGHT_DEPS HVY_DEPS EXCLUDE_COMPILE_DEPS LIGHT_DEPS HVY_DEPS EXCLUDE_COMPILE_DEPS
ARGS) ARGS)
cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
...@@ -458,14 +458,14 @@ function(add_kernel TARGET device level) ...@@ -458,14 +458,14 @@ function(add_kernel TARGET device level)
endif() endif()
set(mlu_kernels "${mlu_kernels};${TARGET}" CACHE INTERNAL "") set(mlu_kernels "${mlu_kernels};${TARGET}" CACHE INTERNAL "")
endif() endif()
if ("${device}" STREQUAL "NNA") if ("${device}" STREQUAL "IMAGINATION_NNA")
if (NOT LITE_WITH_NNA) if (NOT LITE_WITH_IMAGINATION_NNA)
foreach(src ${args_SRCS}) foreach(src ${args_SRCS})
file(APPEND ${fake_kernels_src_list} "${CMAKE_CURRENT_SOURCE_DIR}/${src}\n") file(APPEND ${fake_kernels_src_list} "${CMAKE_CURRENT_SOURCE_DIR}/${src}\n")
endforeach() endforeach()
return() return()
endif() endif()
set(nna_kernels "${nna_kernels};${TARGET}" CACHE INTERNAL "") set(imagination_nna_kernels "${imagination_nna_kernels};${TARGET}" CACHE INTERNAL "")
endif() endif()
if ("${device}" STREQUAL "HUAWEI_ASCEND_NPU") if ("${device}" STREQUAL "HUAWEI_ASCEND_NPU")
...@@ -520,7 +520,7 @@ function(add_kernel TARGET device level) ...@@ -520,7 +520,7 @@ function(add_kernel TARGET device level)
RKNPU_DEPS ${args_RKNPU_DEPS} RKNPU_DEPS ${args_RKNPU_DEPS}
BM_DEPS ${args_BM_DEPS} BM_DEPS ${args_BM_DEPS}
MLU_DEPS ${args_MLU_DEPS} MLU_DEPS ${args_MLU_DEPS}
NNA_DEPS ${args_NNA_DEPS} IMAGINATION_NNA_DEPS ${args_IMAGINATION_NNA_DEPS}
HUAWEI_ASCEND_NPU_DEPS ${args_HUAWEI_ASCEND_NPU_DEPS} HUAWEI_ASCEND_NPU_DEPS ${args_HUAWEI_ASCEND_NPU_DEPS}
PROFILE_DEPS ${args_PROFILE_DEPS} PROFILE_DEPS ${args_PROFILE_DEPS}
LIGHT_DEPS ${args_LIGHT_DEPS} LIGHT_DEPS ${args_LIGHT_DEPS}
...@@ -540,7 +540,7 @@ endif() ...@@ -540,7 +540,7 @@ endif()
function(add_operator TARGET level) function(add_operator TARGET level)
set(options "") set(options "")
set(oneValueArgs "") set(oneValueArgs "")
set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS BM_DEPS NNA_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS PROFILE_DEPS set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS BM_DEPS IMAGINATION_NNA_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS PROFILE_DEPS
LIGHT_DEPS HVY_DEPS EXCLUDE_COMPILE_DEPS LIGHT_DEPS HVY_DEPS EXCLUDE_COMPILE_DEPS
ARGS) ARGS)
cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
...@@ -578,7 +578,7 @@ function(add_operator TARGET level) ...@@ -578,7 +578,7 @@ function(add_operator TARGET level)
RKNPU_DEPS ${args_RKNPU_DEPS} RKNPU_DEPS ${args_RKNPU_DEPS}
BM_DEPS ${args_BM_DEPS} BM_DEPS ${args_BM_DEPS}
MLU_DEPS ${args_MLU_DEPS} MLU_DEPS ${args_MLU_DEPS}
NNA_DEPS ${args_NNA_DEPS} IMAGINATION_NNA_DEPS ${args_IMAGINATION_NNA_DEPS}
HUAWEI_ASCEND_NPU_DEPS ${args_HUAWEI_ASCEND_NPU_DEPS} HUAWEI_ASCEND_NPU_DEPS ${args_HUAWEI_ASCEND_NPU_DEPS}
PROFILE_DEPS ${args_PROFILE_DEPS} PROFILE_DEPS ${args_PROFILE_DEPS}
LIGHT_DEPS ${args_LIGHT_DEPS} LIGHT_DEPS ${args_LIGHT_DEPS}
......
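Taken together, a kernel registered under the renamed device uses the new IMAGINATION_NNA keyword; the call below is the one this commit adds for lite/kernels/imagination_nna, reproduced here for reference:

```cmake
add_kernel(subgraph_compute_imagination_nna IMAGINATION_NNA basic
    SRCS subgraph_compute.cc
    DEPS ${lite_kernel_deps} device_imagination_nna subgraph_bridge_engine
         ${imagination_nna_subgraph_bridges})
```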
...@@ -14,7 +14,7 @@ message(STATUS "LITE_WITH_FPGA:\t${LITE_WITH_FPGA}") ...@@ -14,7 +14,7 @@ message(STATUS "LITE_WITH_FPGA:\t${LITE_WITH_FPGA}")
message(STATUS "LITE_WITH_MLU:\t${LITE_WITH_MLU}") message(STATUS "LITE_WITH_MLU:\t${LITE_WITH_MLU}")
message(STATUS "LITE_WITH_HUAWEI_ASCEND_NPU:\t${LITE_WITH_HUAWEI_ASCEND_NPU}") message(STATUS "LITE_WITH_HUAWEI_ASCEND_NPU:\t${LITE_WITH_HUAWEI_ASCEND_NPU}")
message(STATUS "LITE_WITH_BM:\t${LITE_WITH_BM}") message(STATUS "LITE_WITH_BM:\t${LITE_WITH_BM}")
message(STATUS "LITE_WITH_NNA:\t${LITE_WITH_NNA}") message(STATUS "LITE_WITH_IMAGINATION_NNA:\t${LITE_WITH_IMAGINATION_NNA}")
message(STATUS "LITE_WITH_PROFILE:\t${LITE_WITH_PROFILE}") message(STATUS "LITE_WITH_PROFILE:\t${LITE_WITH_PROFILE}")
message(STATUS "LITE_WITH_CV:\t${LITE_WITH_CV}") message(STATUS "LITE_WITH_CV:\t${LITE_WITH_CV}")
...@@ -94,9 +94,9 @@ if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND LITE_WITH_ARM) ...@@ -94,9 +94,9 @@ if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND LITE_WITH_ARM)
if (LITE_WITH_RKNPU) if (LITE_WITH_RKNPU)
set(INFER_LITE_PUBLISH_ROOT "${INFER_LITE_PUBLISH_ROOT}.rknpu") set(INFER_LITE_PUBLISH_ROOT "${INFER_LITE_PUBLISH_ROOT}.rknpu")
endif(LITE_WITH_RKNPU) endif(LITE_WITH_RKNPU)
if (LITE_WITH_NNA) if (LITE_WITH_IMAGINATION_NNA)
set(INFER_LITE_PUBLISH_ROOT "${INFER_LITE_PUBLISH_ROOT}.nna") set(INFER_LITE_PUBLISH_ROOT "${INFER_LITE_PUBLISH_ROOT}.nna")
endif(LITE_WITH_NNA) endif(LITE_WITH_IMAGINATION_NNA)
else() else()
set(INFER_LITE_PUBLISH_ROOT "${CMAKE_BINARY_DIR}/inference_lite_lib") set(INFER_LITE_PUBLISH_ROOT "${CMAKE_BINARY_DIR}/inference_lite_lib")
endif() endif()
......
...@@ -40,7 +40,7 @@ if ((NOT LITE_ON_TINY_PUBLISH) AND (LITE_WITH_CUDA OR LITE_WITH_X86 OR LITE_WITH ...@@ -40,7 +40,7 @@ if ((NOT LITE_ON_TINY_PUBLISH) AND (LITE_WITH_CUDA OR LITE_WITH_X86 OR LITE_WITH
NPU_DEPS ${npu_kernels} NPU_DEPS ${npu_kernels}
APU_DEPS ${apu_kernels} APU_DEPS ${apu_kernels}
RKNPU_DEPS ${rknpu_kernels} RKNPU_DEPS ${rknpu_kernels}
NNA_DEPS ${nna_kernels} IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels} HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels}
) )
...@@ -86,8 +86,8 @@ else() ...@@ -86,8 +86,8 @@ else()
# Need to add RKNPU runtime libs dependency # Need to add RKNPU runtime libs dependency
target_link_libraries(paddle_light_api_shared ${rknpu_builder_libs} ${rknpu_runtime_libs}) target_link_libraries(paddle_light_api_shared ${rknpu_builder_libs} ${rknpu_runtime_libs})
endif() endif()
if (LITE_WITH_NNA) if (LITE_WITH_IMAGINATION_NNA)
# Need to add IMG NNA runtime libs (libhiai.so) dependency # Need to add Imagination NNA runtime libs dependency
#target_link_libraries(paddle_light_api_shared ${nna_builder_libs} ${nna_runtime_libs}) #target_link_libraries(paddle_light_api_shared ${nna_builder_libs} ${nna_runtime_libs})
endif() endif()
endif() endif()
...@@ -122,7 +122,7 @@ if(LITE_WITH_RKNPU) ...@@ -122,7 +122,7 @@ if(LITE_WITH_RKNPU)
set(cxx_api_deps ${cxx_api_deps} ${rknpu_deps}) set(cxx_api_deps ${cxx_api_deps} ${rknpu_deps})
endif() endif()
if(LITE_WITH_NNA) if(LITE_WITH_IMAGINATION_NNA)
set(light_api_deps ${light_api_deps} ${nna_deps}) set(light_api_deps ${light_api_deps} ${nna_deps})
set(cxx_api_deps ${cxx_api_deps} ${nna_deps}) set(cxx_api_deps ${cxx_api_deps} ${nna_deps})
endif() endif()
...@@ -180,7 +180,7 @@ if (NOT LITE_ON_TINY_PUBLISH) ...@@ -180,7 +180,7 @@ if (NOT LITE_ON_TINY_PUBLISH)
APU_DEPS ${apu_kernels} APU_DEPS ${apu_kernels}
RKNPU_DEPS ${rknpu_kernels} RKNPU_DEPS ${rknpu_kernels}
BM_DEPS ${bm_kernels} BM_DEPS ${bm_kernels}
NNA_DEPS ${nna_kernels} IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
CL_DEPS ${opencl_kernels} CL_DEPS ${opencl_kernels}
FPGA_DEPS ${fpga_kernels} FPGA_DEPS ${fpga_kernels}
HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels}) HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels})
...@@ -207,7 +207,7 @@ lite_cc_library(light_api SRCS light_api.cc ...@@ -207,7 +207,7 @@ lite_cc_library(light_api SRCS light_api.cc
CL_DEPS ${opencl_kernels} CL_DEPS ${opencl_kernels}
FPGA_DEPS ${fpga_kernels} FPGA_DEPS ${fpga_kernels}
BM_DEPS ${bm_kernels} BM_DEPS ${bm_kernels}
NNA_DEPS ${nna_kernels} IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
MLU_DEPS ${mlu_kernels} MLU_DEPS ${mlu_kernels}
HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels}) HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels})
...@@ -232,7 +232,7 @@ if(WITH_TESTING) ...@@ -232,7 +232,7 @@ if(WITH_TESTING)
FPGA_DEPS ${fpga_kernels} FPGA_DEPS ${fpga_kernels}
BM_DEPS ${bm_kernels} BM_DEPS ${bm_kernels}
MLU_DEPS ${mlu_kernels} MLU_DEPS ${mlu_kernels}
NNA_DEPS ${nna_kernels} IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels} HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels}
EXCLUDE_COMPILE_DEPS "ON" EXCLUDE_COMPILE_DEPS "ON"
ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model
...@@ -365,7 +365,7 @@ if (NOT LITE_ON_TINY_PUBLISH) ...@@ -365,7 +365,7 @@ if (NOT LITE_ON_TINY_PUBLISH)
APU_DEPS ${apu_kernels} APU_DEPS ${apu_kernels}
CL_DEPS ${opencl_kernels} CL_DEPS ${opencl_kernels}
FPGA_DEPS ${fpga_kernels} FPGA_DEPS ${fpga_kernels}
NNA_DEPS ${nna_kernels} IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
BM_DEPS ${bm_kernels} BM_DEPS ${bm_kernels}
HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels}) HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels})
# The final inference library for just MobileConfig. # The final inference library for just MobileConfig.
...@@ -397,7 +397,7 @@ if(NOT WITH_COVERAGE) ...@@ -397,7 +397,7 @@ if(NOT WITH_COVERAGE)
RKNPU_DEPS ${rknpu_kernels} RKNPU_DEPS ${rknpu_kernels}
BM_DEPS ${bm_kernels} BM_DEPS ${bm_kernels}
MLU_DEPS ${mlu_kernels} MLU_DEPS ${mlu_kernels}
NNA_DEPS ${nna_kernels} IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model
--optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt SERIAL) --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt SERIAL)
endif() endif()
...@@ -440,7 +440,7 @@ if(NOT WITH_COVERAGE) ...@@ -440,7 +440,7 @@ if(NOT WITH_COVERAGE)
FPGA_DEPS ${fpga_kernels} FPGA_DEPS ${fpga_kernels}
BM_DEPS ${bm_kernels} BM_DEPS ${bm_kernels}
MLU_DEPS ${mlu_kernels} MLU_DEPS ${mlu_kernels}
NNA_DEPS ${nna_kernels} IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels} HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels}
ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model SERIAL) ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model SERIAL)
if (WITH_TESTING) if (WITH_TESTING)
...@@ -461,7 +461,7 @@ if(NOT IOS) ...@@ -461,7 +461,7 @@ if(NOT IOS)
CL_DEPS ${opencl_kernels} CL_DEPS ${opencl_kernels}
BM_DEPS ${bm_kernels} BM_DEPS ${bm_kernels}
RKNPU_DEPS ${rknpu_kernels} RKNPU_DEPS ${rknpu_kernels}
NNA_DEPS ${nna_kernels} IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
FPGA_DEPS ${fpga_kernels} FPGA_DEPS ${fpga_kernels}
X86_DEPS ${x86_kernels} X86_DEPS ${x86_kernels}
CUDA_DEPS ${cuda_kernels} CUDA_DEPS ${cuda_kernels}
...@@ -478,7 +478,7 @@ if(NOT IOS) ...@@ -478,7 +478,7 @@ if(NOT IOS)
CL_DEPS ${opencl_kernels} CL_DEPS ${opencl_kernels}
BM_DEPS ${bm_kernels} BM_DEPS ${bm_kernels}
RKNPU_DEPS ${rknpu_kernels} RKNPU_DEPS ${rknpu_kernels}
NNA_DEPS ${nna_kernels} IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
FPGA_DEPS ${fpga_kernels} FPGA_DEPS ${fpga_kernels}
X86_DEPS ${x86_kernels} X86_DEPS ${x86_kernels}
CUDA_DEPS ${cuda_kernels} CUDA_DEPS ${cuda_kernels}
...@@ -495,7 +495,7 @@ if(NOT IOS) ...@@ -495,7 +495,7 @@ if(NOT IOS)
CL_DEPS ${opencl_kernels} CL_DEPS ${opencl_kernels}
BM_DEPS ${bm_kernels} BM_DEPS ${bm_kernels}
RKNPU_DEPS ${rknpu_kernels} RKNPU_DEPS ${rknpu_kernels}
NNA_DEPS ${nna_kernels} IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
FPGA_DEPS ${fpga_kernels} FPGA_DEPS ${fpga_kernels}
X86_DEPS ${x86_kernels} X86_DEPS ${x86_kernels}
CUDA_DEPS ${cuda_kernels} CUDA_DEPS ${cuda_kernels}
...@@ -506,7 +506,7 @@ if(NOT IOS) ...@@ -506,7 +506,7 @@ if(NOT IOS)
ARM_DEPS ${arm_kernels} ARM_DEPS ${arm_kernels}
CV_DEPS paddle_cv_arm CV_DEPS paddle_cv_arm
NPU_DEPS ${npu_kernels} NPU_DEPS ${npu_kernels}
NNA_DEPS ${nna_kernels} IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
XPU_DEPS ${xpu_kernels} XPU_DEPS ${xpu_kernels}
RKNPU_DEPS ${rknpu_kernels} RKNPU_DEPS ${rknpu_kernels}
MLU_DEPS ${mlu_kernels} MLU_DEPS ${mlu_kernels}
...@@ -525,7 +525,7 @@ if(NOT IOS) ...@@ -525,7 +525,7 @@ if(NOT IOS)
APU_DEPS ${apu_kernels} APU_DEPS ${apu_kernels}
XPU_DEPS ${xpu_kernels} XPU_DEPS ${xpu_kernels}
RKNPU_DEPS ${rknpu_kernels} RKNPU_DEPS ${rknpu_kernels}
NNA_DEPS ${nna_kernels} IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
MLU_DEPS ${mlu_kernels} MLU_DEPS ${mlu_kernels}
HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels} HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels}
CL_DEPS ${opencl_kernels} CL_DEPS ${opencl_kernels}
...@@ -540,7 +540,7 @@ if(NOT IOS) ...@@ -540,7 +540,7 @@ if(NOT IOS)
CV_DEPS paddle_cv_arm CV_DEPS paddle_cv_arm
NPU_DEPS ${npu_kernels} NPU_DEPS ${npu_kernels}
RKNPU_DEPS ${npu_kernels} RKNPU_DEPS ${npu_kernels}
NNA_DEPS ${nna_kernels} IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
XPU_DEPS ${xpu_kernels} XPU_DEPS ${xpu_kernels}
APU_DEPS ${apu_kernels} APU_DEPS ${apu_kernels}
CL_DEPS ${opencl_kernels} CL_DEPS ${opencl_kernels}
......
...@@ -125,10 +125,10 @@ std::vector<Place> ParserValidPlaces() { ...@@ -125,10 +125,10 @@ std::vector<Place> ParserValidPlaces() {
} else if (target_repr == "apu") { } else if (target_repr == "apu") {
valid_places.emplace_back( valid_places.emplace_back(
Place{TARGET(kAPU), PRECISION(kInt8), DATALAYOUT(kNCHW)}); Place{TARGET(kAPU), PRECISION(kInt8), DATALAYOUT(kNCHW)});
} else if (target_repr == "nna") { } else if (target_repr == "imagination_nna") {
valid_places.emplace_back(TARGET(kNNA)); valid_places.emplace_back(TARGET(kImaginationNNA));
valid_places.emplace_back( valid_places.emplace_back(
Place{TARGET(kNNA), PRECISION(kInt8), DATALAYOUT(kNCHW)}); Place{TARGET(kImaginationNNA), PRECISION(kInt8), DATALAYOUT(kNCHW)});
} else { } else {
LOG(FATAL) << lite::string_format( LOG(FATAL) << lite::string_format(
"Wrong target '%s' found, please check the command flag " "Wrong target '%s' found, please check the command flag "
...@@ -208,7 +208,7 @@ void PrintOpsInfo(std::set<std::string> valid_ops = {}) { ...@@ -208,7 +208,7 @@ void PrintOpsInfo(std::set<std::string> valid_ops = {}) {
"kRKNPU", "kRKNPU",
"kAPU", "kAPU",
"kHuaweiAscendNPU", "kHuaweiAscendNPU",
"kNNA", "kImaginationNNA",
"kAny", "kAny",
"kUnk"}; "kUnk"};
int maximum_optype_length = 0; int maximum_optype_length = 0;
...@@ -275,17 +275,19 @@ void PrintHelpInfo() { ...@@ -275,17 +275,19 @@ void PrintHelpInfo() {
" `--optimize_out=<output_optimize_model_dir>`\n" " `--optimize_out=<output_optimize_model_dir>`\n"
" " " "
"`--valid_targets=(arm|opencl|x86|npu|xpu|rknpu|apu|huawei_ascend_npu|" "`--valid_targets=(arm|opencl|x86|npu|xpu|rknpu|apu|huawei_ascend_npu|"
"nna)`\n" "imagination_nna)`\n"
" `--record_tailoring_info=(true|false)`\n" " `--record_tailoring_info=(true|false)`\n"
" Arguments of model checking and ops information:\n" " Arguments of model checking and ops information:\n"
" `--print_all_ops=true` Display all the valid operators of " " `--print_all_ops=true` Display all the valid operators of "
"Paddle-Lite\n" "Paddle-Lite\n"
" `--print_supported_ops=true " " `--print_supported_ops=true "
"--valid_targets=(arm|opencl|x86|npu|xpu|rknpu|apu|huawei_ascend_npu|nna)" "--valid_targets=(arm|opencl|x86|npu|xpu|rknpu|apu|huawei_ascend_npu|"
"imagination_nna)"
"`" "`"
" Display valid operators of input targets\n" " Display valid operators of input targets\n"
" `--print_model_ops=true --model_dir=<model_param_dir> " " `--print_model_ops=true --model_dir=<model_param_dir> "
"--valid_targets=(arm|opencl|x86|npu|xpu|rknpu|apu|huawei_ascend_npu|nna)" "--valid_targets=(arm|opencl|x86|npu|xpu|rknpu|apu|huawei_ascend_npu|"
"imagination_nna)"
"`" "`"
" Display operators in the input model\n"; " Display operators in the input model\n";
std::cout << "opt version:" << opt_version << std::endl std::cout << "opt version:" << opt_version << std::endl
......
...@@ -84,10 +84,10 @@ void OptBase::SetValidPlaces(const std::string& valid_places) { ...@@ -84,10 +84,10 @@ void OptBase::SetValidPlaces(const std::string& valid_places) {
} else if (target_repr == "apu") { } else if (target_repr == "apu") {
valid_places_.emplace_back( valid_places_.emplace_back(
Place{TARGET(kAPU), PRECISION(kInt8), DATALAYOUT(kNCHW)}); Place{TARGET(kAPU), PRECISION(kInt8), DATALAYOUT(kNCHW)});
} else if (target_repr == "nna") { } else if (target_repr == "imagination_nna") {
valid_places.emplace_back(TARGET(kNNA)); valid_places.emplace_back(TARGET(kImaginationNNA));
valid_places.emplace_back( valid_places.emplace_back(
Place{TARGET(kNNA), PRECISION(kInt8), DATALAYOUT(kNCHW)}); Place{TARGET(kImaginationNNA), PRECISION(kInt8), DATALAYOUT(kNCHW)});
} else { } else {
LOG(FATAL) << lite::string_format( LOG(FATAL) << lite::string_format(
"Wrong target '%s' found, please check the command flag " "Wrong target '%s' found, please check the command flag "
...@@ -245,7 +245,7 @@ void OptBase::PrintHelpInfo() { ...@@ -245,7 +245,7 @@ void OptBase::PrintHelpInfo() {
" `set_lite_out(output_optimize_model_dir)`\n" " `set_lite_out(output_optimize_model_dir)`\n"
" " " "
"`set_valid_places(arm|opencl|x86|npu|xpu|rknpu|apu|huawei_ascend_npu|" "`set_valid_places(arm|opencl|x86|npu|xpu|rknpu|apu|huawei_ascend_npu|"
"nna)`\n" "imagination_nna)`\n"
" `record_model_info(false|true)`: refer to whether to record ops " " `record_model_info(false|true)`: refer to whether to record ops "
"info for striping lib, false by default`\n" "info for striping lib, false by default`\n"
" `run() : start model transformation`\n" " `run() : start model transformation`\n"
...@@ -283,16 +283,19 @@ void OptBase::PrintExecutableBinHelpInfo() { ...@@ -283,16 +283,19 @@ void OptBase::PrintExecutableBinHelpInfo() {
" `--optimize_out_type=(protobuf|naive_buffer)`\n" " `--optimize_out_type=(protobuf|naive_buffer)`\n"
" `--optimize_out=<output_optimize_model_dir>`\n" " `--optimize_out=<output_optimize_model_dir>`\n"
" " " "
"`--valid_targets=(arm|opencl|x86|npu|xpu|huawei_ascend_npu|nna)`\n" "`--valid_targets=(arm|opencl|x86|npu|xpu|huawei_ascend_npu|imagination_"
"nna)`\n"
" `--record_tailoring_info=(true|false)`\n" " `--record_tailoring_info=(true|false)`\n"
" Arguments of model checking and ops information:\n" " Arguments of model checking and ops information:\n"
" `--print_all_ops=true` Display all the valid operators of " " `--print_all_ops=true` Display all the valid operators of "
"Paddle-Lite\n" "Paddle-Lite\n"
" `--print_supported_ops=true " " `--print_supported_ops=true "
"--valid_targets=(arm|opencl|x86|npu|xpu|huawei_ascend_npu|nna)`" "--valid_targets=(arm|opencl|x86|npu|xpu|huawei_ascend_npu|imagination_"
"nna)`"
" Display valid operators of input targets\n" " Display valid operators of input targets\n"
" `--print_model_ops=true --model_dir=<model_param_dir> " " `--print_model_ops=true --model_dir=<model_param_dir> "
"--valid_targets=(arm|opencl|x86|npu|xpu|huawei_ascend_npu|nna)`" "--valid_targets=(arm|opencl|x86|npu|xpu|huawei_ascend_npu|imagination_"
"nna)`"
" Display operators in the input model\n"; " Display operators in the input model\n";
std::cout << "paddlelite opt version:" << opt_version << std::endl std::cout << "paddlelite opt version:" << opt_version << std::endl
<< help_info << std::endl; << help_info << std::endl;
...@@ -311,7 +314,7 @@ void OptBase::PrintOpsInfo(const std::set<std::string>& valid_ops) { ...@@ -311,7 +314,7 @@ void OptBase::PrintOpsInfo(const std::set<std::string>& valid_ops) {
"kRKNPU", "kRKNPU",
"kAPU", "kAPU",
"kHuaweiAscendNPU", "kHuaweiAscendNPU",
"kNNA", "kImaginationNNA",
"kAny", "kAny",
"kUnk"}; "kUnk"};
// Get the length of the first column: maximum length of the op_type // Get the length of the first column: maximum length of the op_type
......
...@@ -82,7 +82,7 @@ const std::string& TargetToStr(TargetType target) { ...@@ -82,7 +82,7 @@ const std::string& TargetToStr(TargetType target) {
"rknpu", "rknpu",
"apu", "apu",
"huawei_ascend_npu", "huawei_ascend_npu",
"nna"}; "imagination_nna"};
auto x = static_cast<int>(target); auto x = static_cast<int>(target);
CHECK_LT(x, static_cast<int>(TARGET(NUM))); CHECK_LT(x, static_cast<int>(TARGET(NUM)));
return target2string[x]; return target2string[x];
...@@ -127,7 +127,7 @@ const std::string& TargetRepr(TargetType target) { ...@@ -127,7 +127,7 @@ const std::string& TargetRepr(TargetType target) {
"kRKNPU", "kRKNPU",
"kAPU", "kAPU",
"kHuaweiAscendNPU", "kHuaweiAscendNPU",
"kNNA"}; "kImaginationNNA"};
auto x = static_cast<int>(target); auto x = static_cast<int>(target);
CHECK_LT(x, static_cast<int>(TARGET(NUM))); CHECK_LT(x, static_cast<int>(TARGET(NUM)));
return target2string[x]; return target2string[x];
...@@ -174,7 +174,7 @@ std::set<TargetType> ExpandValidTargets(TargetType target) { ...@@ -174,7 +174,7 @@ std::set<TargetType> ExpandValidTargets(TargetType target) {
TARGET(kRKNPU), TARGET(kRKNPU),
TARGET(kFPGA), TARGET(kFPGA),
TARGET(kHuaweiAscendNPU), TARGET(kHuaweiAscendNPU),
TARGET(kNNA)}); TARGET(kImaginationNNA)});
if (target == TARGET(kAny)) { if (target == TARGET(kAny)) {
return valid_set; return valid_set;
} }
......
...@@ -58,7 +58,7 @@ enum class TargetType : int { ...@@ -58,7 +58,7 @@ enum class TargetType : int {
kRKNPU = 12, kRKNPU = 12,
kAPU = 13, kAPU = 13,
kHuaweiAscendNPU = 14, kHuaweiAscendNPU = 14,
kNNA = 15, kImaginationNNA = 15,
NUM = 16, // number of fields. NUM = 16, // number of fields.
}; };
enum class PrecisionType : int { enum class PrecisionType : int {
......
...@@ -53,7 +53,7 @@ USE_MIR_PASS(multi_stream_analysis_pass); ...@@ -53,7 +53,7 @@ USE_MIR_PASS(multi_stream_analysis_pass);
USE_MIR_PASS(elementwise_mul_constant_eliminate_pass) USE_MIR_PASS(elementwise_mul_constant_eliminate_pass)
USE_MIR_PASS(npu_subgraph_pass); USE_MIR_PASS(npu_subgraph_pass);
USE_MIR_PASS(huawei_ascend_npu_subgraph_pass); USE_MIR_PASS(huawei_ascend_npu_subgraph_pass);
USE_MIR_PASS(nna_subgraph_pass); USE_MIR_PASS(imagination_nna_subgraph_pass);
USE_MIR_PASS(xpu_subgraph_pass); USE_MIR_PASS(xpu_subgraph_pass);
USE_MIR_PASS(mlu_subgraph_pass); USE_MIR_PASS(mlu_subgraph_pass);
USE_MIR_PASS(mlu_postprocess_pass); USE_MIR_PASS(mlu_postprocess_pass);
......
...@@ -192,7 +192,7 @@ void BindLitePlace(py::module *m) { ...@@ -192,7 +192,7 @@ void BindLitePlace(py::module *m) {
.value("RKNPU", TargetType::kRKNPU) .value("RKNPU", TargetType::kRKNPU)
.value("APU", TargetType::kAPU) .value("APU", TargetType::kAPU)
.value("HUAWEI_ASCEND_NPU", TargetType::kHuaweiAscendNPU) .value("HUAWEI_ASCEND_NPU", TargetType::kHuaweiAscendNPU)
.value("NNA", TargetType::kNNA) .value("IMAGINATION_NNA", TargetType::kImaginationNNA)
.value("Any", TargetType::kAny); .value("Any", TargetType::kAny);
// PrecisionType // PrecisionType
......
...@@ -11,4 +11,4 @@ add_subdirectory(bm) ...@@ -11,4 +11,4 @@ add_subdirectory(bm)
add_subdirectory(apu) add_subdirectory(apu)
add_subdirectory(rknpu) add_subdirectory(rknpu)
add_subdirectory(huawei_ascend_npu) add_subdirectory(huawei_ascend_npu)
add_subdirectory(nna) add_subdirectory(imagination_nna)
if(NOT LITE_WITH_IMAGINATION_NNA)
return()
endif()
lite_cc_library(device_imagination_nna SRCS imgdnn_manager.cc DEPS ${imagination_nna_builder_libs} ${imagination_nna_runtime_libs})
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
namespace paddle { namespace paddle {
namespace lite { namespace lite {
namespace nna { namespace imagination_nna {
static void err_callback(imgdnn_report_flags flags, static void err_callback(imgdnn_report_flags flags,
const char **tensor_names, const char **tensor_names,
...@@ -390,6 +390,6 @@ imgdnn_network_object ImgdnnManager::createNetworkObject( ...@@ -390,6 +390,6 @@ imgdnn_network_object ImgdnnManager::createNetworkObject(
return net_obj_; return net_obj_;
} }
} // namespace nna } // namespace imagination_nna
} // namespace lite } // namespace lite
} // namespace paddle } // namespace paddle
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
namespace paddle { namespace paddle {
namespace lite { namespace lite {
namespace nna { namespace imagination_nna {
static inline void CheckAndPrint(bool cond, static inline void CheckAndPrint(bool cond,
const char *msg, const char *msg,
...@@ -42,7 +42,7 @@ static inline void CheckAndPrint(bool cond, ...@@ -42,7 +42,7 @@ static inline void CheckAndPrint(bool cond,
} }
#define ASSERT(statement, msg) \ #define ASSERT(statement, msg) \
lite::nna::CheckAndPrint(statement, msg, __LINE__, __FILE__) lite::imagination_nna::CheckAndPrint(statement, msg, __LINE__, __FILE__)
class ImgdnnManager { class ImgdnnManager {
imgdnn_err_code err_; imgdnn_err_code err_;
...@@ -252,6 +252,6 @@ class ImgdnnManager { ...@@ -252,6 +252,6 @@ class ImgdnnManager {
} }
}; };
} // namespace nna } // namespace imagination_nna
} // namespace lite } // namespace lite
} // namespace paddle } // namespace paddle
if(NOT LITE_WITH_NNA)
return()
endif()
lite_cc_library(device_nna SRCS imgdnn_manager.cc DEPS ${nna_builder_libs} ${nna_runtime_libs})
...@@ -6,5 +6,5 @@ endif() ...@@ -6,5 +6,5 @@ endif()
lite_cc_library(arena_framework SRCS framework.cc DEPS program gtest) lite_cc_library(arena_framework SRCS framework.cc DEPS program gtest)
if((NOT LITE_WITH_OPENCL) AND (LITE_WITH_X86 OR LITE_WITH_ARM)) if((NOT LITE_WITH_OPENCL) AND (LITE_WITH_X86 OR LITE_WITH_ARM))
lite_cc_test(test_arena_framework SRCS framework_test.cc DEPS arena_framework ${rknpu_kernels} ${mlu_kernels} ${bm_kernels} ${npu_kernels} ${huawei_ascend_npu_kernels} ${nna_kernels} ${xpu_kernels} ${x86_kernels} ${cuda_kernels} ${fpga_kernels} ${arm_kernels} ${lite_ops} ${host_kernels}) lite_cc_test(test_arena_framework SRCS framework_test.cc DEPS arena_framework ${rknpu_kernels} ${mlu_kernels} ${bm_kernels} ${npu_kernels} ${huawei_ascend_npu_kernels} ${imagination_nna_kernels} ${xpu_kernels} ${x86_kernels} ${cuda_kernels} ${fpga_kernels} ${arm_kernels} ${lite_ops} ${host_kernels})
endif() endif()
...@@ -64,7 +64,7 @@ using BMContext = Context<TargetType::kBM>; ...@@ -64,7 +64,7 @@ using BMContext = Context<TargetType::kBM>;
using MLUContext = Context<TargetType::kMLU>; using MLUContext = Context<TargetType::kMLU>;
using RKNPUContext = Context<TargetType::kRKNPU>; using RKNPUContext = Context<TargetType::kRKNPU>;
using HuaweiAscendNPUContext = Context<TargetType::kHuaweiAscendNPU>; using HuaweiAscendNPUContext = Context<TargetType::kHuaweiAscendNPU>;
using NNAContext = Context<TargetType::kNNA>; using ImaginationNNAContext = Context<TargetType::kImaginationNNA>;
template <> template <>
class Context<TargetType::kHost> { class Context<TargetType::kHost> {
...@@ -174,18 +174,17 @@ class Context<TargetType::kRKNPU> { ...@@ -174,18 +174,17 @@ class Context<TargetType::kRKNPU> {
}; };
#endif #endif
#ifdef LITE_WITH_NNA #ifdef LITE_WITH_IMAGINATION_NNA
template <> template <>
class Context<TargetType::kNNA> { class Context<TargetType::kImaginationNNA> {
public: public:
Context() {} Context() {}
// explicit Context(const NNAContext& ctx);
// NOTE: InitOnce should only be used by ContextScheduler // NOTE: InitOnce should only be used by ContextScheduler
void InitOnce() {} void InitOnce() {}
void CopySharedTo(NNAContext* ctx) {} void CopySharedTo(ImaginationNNAContext* ctx) {}
// NNAContext& operator=(const NNAContext& ctx) {} // NNAContext& operator=(const NNAContext& ctx) {}
std::string name() const { return "NNAContext"; } std::string name() const { return "ImaginationNNAContext"; }
}; };
#endif #endif
...@@ -487,10 +486,11 @@ class ContextScheduler { ...@@ -487,10 +486,11 @@ class ContextScheduler {
&ctx->As<BMContext>()); &ctx->As<BMContext>());
break; break;
#endif #endif
#ifdef LITE_WITH_NNA #ifdef LITE_WITH_IMAGINATION_NNA
case TARGET(kNNA): case TARGET(kImaginationNNA):
kernel_contexts_[TargetType::kNNA].As<NNAContext>().CopySharedTo( kernel_contexts_[TargetType::kImaginationNNA]
&ctx->As<NNAContext>()); .As<ImaginationNNAContext>()
.CopySharedTo(&ctx->As<ImaginationNNAContext>());
break; break;
#endif #endif
#ifdef LITE_WITH_MLU #ifdef LITE_WITH_MLU
...@@ -556,8 +556,8 @@ class ContextScheduler { ...@@ -556,8 +556,8 @@ class ContextScheduler {
#ifdef LITE_WITH_MLU #ifdef LITE_WITH_MLU
InitContext<TargetType::kMLU, MLUContext>(); InitContext<TargetType::kMLU, MLUContext>();
#endif #endif
#ifdef LITE_WITH_NNA #ifdef LITE_WITH_IMAGINATION_NNA
InitContext<TargetType::kNNA, NNAContext>(); InitContext<TargetType::kImaginationNNA, ImaginationNNAContext>();
#endif #endif
} }
......
...@@ -316,4 +316,4 @@ REGISTER_MIR_PASS(memory_optimize_pass, paddle::lite::mir::MemoryOptimizePass) ...@@ -316,4 +316,4 @@ REGISTER_MIR_PASS(memory_optimize_pass, paddle::lite::mir::MemoryOptimizePass)
TARGET(kAPU), TARGET(kAPU),
TARGET(kMLU), TARGET(kMLU),
TARGET(kHuaweiAscendNPU), TARGET(kHuaweiAscendNPU),
TARGET(kNNA)}); TARGET(kImaginationNNA)});
...@@ -128,10 +128,10 @@ void MLUSubgraphPass::Apply(const std::unique_ptr<SSAGraph>& graph) { ...@@ -128,10 +128,10 @@ void MLUSubgraphPass::Apply(const std::unique_ptr<SSAGraph>& graph) {
fuser(); fuser();
} }
void NNASubgraphPass::Apply(const std::unique_ptr<SSAGraph>& graph) { void ImaginationNNASubgraphPass::Apply(const std::unique_ptr<SSAGraph>& graph) {
std::set<std::string> supported_lists; std::set<std::string> supported_lists;
#define USE_SUBGRAPH_BRIDGE(op_type, target) supported_lists.insert(#op_type); #define USE_SUBGRAPH_BRIDGE(op_type, target) supported_lists.insert(#op_type);
#include "lite/kernels/nna/bridges/paddle_use_bridges.h" #include "lite/kernels/imagination_nna/bridges/paddle_use_bridges.h"
#undef USE_SUBGRAPH_BRIDGE #undef USE_SUBGRAPH_BRIDGE
auto teller = [&](Node* node) { auto teller = [&](Node* node) {
if (!node->IsStmt()) return false; if (!node->IsStmt()) return false;
...@@ -161,5 +161,6 @@ REGISTER_MIR_PASS(rknpu_subgraph_pass, paddle::lite::mir::RKNPUSubgraphPass) ...@@ -161,5 +161,6 @@ REGISTER_MIR_PASS(rknpu_subgraph_pass, paddle::lite::mir::RKNPUSubgraphPass)
.BindTargets({TARGET(kRKNPU)}); .BindTargets({TARGET(kRKNPU)});
REGISTER_MIR_PASS(mlu_subgraph_pass, paddle::lite::mir::MLUSubgraphPass) REGISTER_MIR_PASS(mlu_subgraph_pass, paddle::lite::mir::MLUSubgraphPass)
.BindTargets({TARGET(kMLU)}); .BindTargets({TARGET(kMLU)});
REGISTER_MIR_PASS(nna_subgraph_pass, paddle::lite::mir::NNASubgraphPass) REGISTER_MIR_PASS(imagination_nna_subgraph_pass,
.BindTargets({TARGET(kNNA)}); paddle::lite::mir::ImaginationNNASubgraphPass)
.BindTargets({TARGET(kImaginationNNA)});
...@@ -57,7 +57,7 @@ class MLUSubgraphPass : public ProgramPass { ...@@ -57,7 +57,7 @@ class MLUSubgraphPass : public ProgramPass {
void Apply(const std::unique_ptr<SSAGraph>& graph) override; void Apply(const std::unique_ptr<SSAGraph>& graph) override;
}; };
class NNASubgraphPass : public ProgramPass { class ImaginationNNASubgraphPass : public ProgramPass {
public: public:
void Apply(const std::unique_ptr<SSAGraph>& graph) override; void Apply(const std::unique_ptr<SSAGraph>& graph) override;
}; };
......
...@@ -17,7 +17,7 @@ lite_cc_test(test_gen_code SRCS gen_code_test.cc ...@@ -17,7 +17,7 @@ lite_cc_test(test_gen_code SRCS gen_code_test.cc
NPU_DEPS ${npu_kernels} NPU_DEPS ${npu_kernels}
HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels} HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels}
RKNPU_DEPS ${rknpu_kernels} RKNPU_DEPS ${rknpu_kernels}
NNA_DEPS ${nna_kernels} IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
XPU_DEPS ${xpu_kernels} XPU_DEPS ${xpu_kernels}
CL_DEPS ${opencl_kernels} CL_DEPS ${opencl_kernels}
FPGA_DEPS ${fpga_kernels} FPGA_DEPS ${fpga_kernels}
...@@ -48,7 +48,7 @@ lite_cc_test(test_generated_code SRCS generated_code_test.cc DEPS __generated_co ...@@ -48,7 +48,7 @@ lite_cc_test(test_generated_code SRCS generated_code_test.cc DEPS __generated_co
NPU_DEPS ${npu_kernels} NPU_DEPS ${npu_kernels}
HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels} HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels}
RKNPU_DEPS ${rknpu_kernels} RKNPU_DEPS ${rknpu_kernels}
NNA_DEPS ${nna_kernels} IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
XPU_DEPS ${xpu_kernels} XPU_DEPS ${xpu_kernels}
CL_DEPS ${opencl_kernels} CL_DEPS ${opencl_kernels}
FPGA_DEPS ${fpga_kernels} FPGA_DEPS ${fpga_kernels}
......
...@@ -15,4 +15,4 @@ add_subdirectory(apu) ...@@ -15,4 +15,4 @@ add_subdirectory(apu)
add_subdirectory(bm) add_subdirectory(bm)
add_subdirectory(rknpu) add_subdirectory(rknpu)
add_subdirectory(huawei_ascend_npu) add_subdirectory(huawei_ascend_npu)
add_subdirectory(nna) add_subdirectory(imagination_nna)
add_subdirectory(bridges)
add_kernel(subgraph_compute_imagination_nna IMAGINATION_NNA basic SRCS subgraph_compute.cc DEPS ${lite_kernel_deps} device_imagination_nna subgraph_bridge_engine ${imagination_nna_subgraph_bridges})
if(NOT LITE_WITH_IMAGINATION_NNA)
return()
endif()
lite_cc_library(subgraph_bridge_utility_imagination_nna SRCS utility.cc DEPS ${imagination_nna_builder_libs} ${imagination_nna_runtime_libs} tensor)
lite_cc_library(subgraph_bridge_graph_imagination_nna SRCS graph.cc DEPS subgraph_bridge_utility_imagination_nna)
set(imagination_nna_subgraph_bridge_deps subgraph_bridge_registry subgraph_bridge_utility_imagination_nna subgraph_bridge_graph_imagination_nna)
lite_cc_library(subgraph_bridge_fc_op_imagination_nna SRCS fc_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
lite_cc_library(subgraph_bridge_conv_op_imagination_nna SRCS conv_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_matmul_op_imagination_nna SRCS matmul_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_mul_op_imagination_nna SRCS mul_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
lite_cc_library(subgraph_bridge_act_op_imagination_nna SRCS act_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_scale_op_imagination_nna SRCS scale_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_softmax_op_imagination_nna SRCS softmax_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
lite_cc_library(subgraph_bridge_pool_op_imagination_nna SRCS pool_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_batch_norm_op_imagination_nna SRCS batch_norm_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_elementwise_ops_imagination_nna SRCS elementwise_ops.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_reshape_op_imagination_nna SRCS reshape_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_conv_transpose_op_imagination_nna SRCS conv_transpose_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_interpolate_op_imagination_nna SRCS interpolate_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_transpose_op_imagination_nna SRCS transpose_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_split_op_imagination_nna SRCS split_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_concat_op_imagination_nna SRCS concat_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_shuffle_channel_op_imagination_nna SRCS shuffle_channel_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_pad2d_op_imagination_nna SRCS pad2d_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_reduce_mean_op_imagination_nna SRCS reduce_mean_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_unsqueeze_op_imagination_nna SRCS unsqueeze_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_gather_op_imagination_nna SRCS gather_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_lookup_table_op_imagination_nna SRCS lookup_table_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_argmax_op_imagination_nna SRCS argmax_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_instance_norm_op_imagination_nna SRCS instance_norm_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_dropout_op_imagination_nna SRCS dropout_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_topk_op_imagination_nna SRCS topk_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_layer_norm_op_imagination_nna SRCS layer_norm_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_fill_constant_op_imagination_nna SRCS fill_constant_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_fill_constant_batch_size_like_op_imagination_nna SRCS fill_constant_batch_size_like_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_increment_op_imagination_nna SRCS increment_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_compare_op_imagination_nna SRCS compare_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_shape_op_imagination_nna SRCS shape_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
set(imagination_nna_subgraph_bridges
subgraph_bridge_registry
subgraph_bridge_utility_imagination_nna
subgraph_bridge_graph_imagination_nna
subgraph_bridge_fc_op_imagination_nna
subgraph_bridge_conv_op_imagination_nna
#subgraph_bridge_matmul_op_imagination_nna
#subgraph_bridge_mul_op_imagination_nna
subgraph_bridge_act_op_imagination_nna
#subgraph_bridge_scale_op_imagination_nna
#subgraph_bridge_softmax_op_imagination_nna
subgraph_bridge_pool_op_imagination_nna
#subgraph_bridge_batch_norm_op_imagination_nna
#subgraph_bridge_elementwise_ops_imagination_nna
#subgraph_bridge_reshape_op_imagination_nna
#subgraph_bridge_conv_transpose_op_imagination_nna
#subgraph_bridge_interpolate_op_imagination_nna
#subgraph_bridge_transpose_op_imagination_nna
#subgraph_bridge_split_op_imagination_nna
#subgraph_bridge_concat_op_imagination_nna
#subgraph_bridge_shuffle_channel_op_imagination_nna
#subgraph_bridge_pad2d_op_imagination_nna
#subgraph_bridge_reduce_mean_op_imagination_nna
#subgraph_bridge_unsqueeze_op_imagination_nna
#subgraph_bridge_gather_op_imagination_nna
#subgraph_bridge_lookup_table_op_imagination_nna
#subgraph_bridge_argmax_op_imagination_nna
#subgraph_bridge_instance_norm_op_imagination_nna
#subgraph_bridge_dropout_op_imagination_nna
#subgraph_bridge_topk_op_imagination_nna
#subgraph_bridge_layer_norm_op_imagination_nna
#subgraph_bridge_fill_constant_op_imagination_nna
#subgraph_bridge_fill_constant_batch_size_like_op_imagination_nna
#subgraph_bridge_increment_op_imagination_nna
#subgraph_bridge_compare_op_imagination_nna
CACHE INTERNAL "imagination_nna_subgraph_bridges")
message(STATUS "+++++ imagination_nna_subgraph_bridges: ${imagination_nna_subgraph_bridges}")
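Re-enabling one of the commented-out bridges would follow the same pattern as the active ones; a sketch for softmax, which this commit keeps disabled:

```cmake
# Hypothetical: enable the softmax bridge the same way as fc/conv/act/pool.
lite_cc_library(subgraph_bridge_softmax_op_imagination_nna SRCS softmax_op.cc
    DEPS ${imagination_nna_subgraph_bridge_deps})
# ...then add subgraph_bridge_softmax_op_imagination_nna to the
# imagination_nna_subgraph_bridges list above.
```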
...@@ -12,14 +12,14 @@ ...@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "lite/kernels/nna/bridges/graph.h" #include "lite/kernels/imagination_nna/bridges/graph.h"
#include "lite/kernels/nna/bridges/utility.h" #include "lite/kernels/imagination_nna/bridges/utility.h"
#include "lite/kernels/npu/bridges/registry.h" #include "lite/kernels/npu/bridges/registry.h"
namespace paddle { namespace paddle {
namespace lite { namespace lite {
namespace subgraph { namespace subgraph {
namespace nna { namespace imagination_nna {
// template <typename ActType> // template <typename ActType>
int ActConverter(void* ctx, OpLite* op, KernelBase* kernel) { int ActConverter(void* ctx, OpLite* op, KernelBase* kernel) {
...@@ -64,49 +64,51 @@ int ActConverter(void* ctx, OpLite* op, KernelBase* kernel) { ...@@ -64,49 +64,51 @@ int ActConverter(void* ctx, OpLite* op, KernelBase* kernel) {
return SUCCESS; return SUCCESS;
} }
} // namespace nna } // namespace imagination_nna
} // namespace subgraph } // namespace subgraph
} // namespace lite } // namespace lite
} // namespace paddle } // namespace paddle
#if 0 #if 0
REGISTER_SUBGRAPH_BRIDGE( REGISTER_SUBGRAPH_BRIDGE(
sigmoid, sigmoid,
kNNA, kImaginationNNA,
paddle::lite::subgraph::nna::ActConverter<ge::op::Activation>); paddle::lite::subgraph::imagination_nna::ActConverter<ge::op::Activation>);
#endif #endif
REGISTER_SUBGRAPH_BRIDGE(relu, kNNA, paddle::lite::subgraph::nna::ActConverter); REGISTER_SUBGRAPH_BRIDGE(relu,
kImaginationNNA,
paddle::lite::subgraph::imagination_nna::ActConverter);
#if 0 #if 0
REGISTER_SUBGRAPH_BRIDGE( REGISTER_SUBGRAPH_BRIDGE(
tanh, kNNA, paddle::lite::subgraph::nna::ActConverter<ge::op::Activation>); tanh, kImaginationNNA, paddle::lite::subgraph::imagination_nna::ActConverter<ge::op::Activation>);
REGISTER_SUBGRAPH_BRIDGE( REGISTER_SUBGRAPH_BRIDGE(
relu_clipped, relu_clipped,
kNNA, kImaginationNNA,
paddle::lite::subgraph::nna::ActConverter<ge::op::Activation>); paddle::lite::subgraph::imagination_nna::ActConverter<ge::op::Activation>);
REGISTER_SUBGRAPH_BRIDGE( REGISTER_SUBGRAPH_BRIDGE(
relu6, kNNA, paddle::lite::subgraph::nna::ActConverter<ge::op::Activation>); relu6, kImaginationNNA, paddle::lite::subgraph::imagination_nna::ActConverter<ge::op::Activation>);
REGISTER_SUBGRAPH_BRIDGE( REGISTER_SUBGRAPH_BRIDGE(
leaky_relu, leaky_relu,
kNNA, kImaginationNNA,
paddle::lite::subgraph::nna::ActConverter<ge::op::Activation>); paddle::lite::subgraph::imagination_nna::ActConverter<ge::op::Activation>);
REGISTER_SUBGRAPH_BRIDGE( REGISTER_SUBGRAPH_BRIDGE(
abs, kNNA, paddle::lite::subgraph::nna::ActConverter<ge::op::Activation>); abs, kImaginationNNA, paddle::lite::subgraph::imagination_nna::ActConverter<ge::op::Activation>);
REGISTER_SUBGRAPH_BRIDGE( REGISTER_SUBGRAPH_BRIDGE(
softsign, softsign,
kNNA, kImaginationNNA,
paddle::lite::subgraph::nna::ActConverter<ge::op::Activation>); paddle::lite::subgraph::imagination_nna::ActConverter<ge::op::Activation>);
REGISTER_SUBGRAPH_BRIDGE( REGISTER_SUBGRAPH_BRIDGE(
softplus, softplus,
kNNA, kImaginationNNA,
paddle::lite::subgraph::nna::ActConverter<ge::op::Activation>); paddle::lite::subgraph::imagination_nna::ActConverter<ge::op::Activation>);
REGISTER_SUBGRAPH_BRIDGE( REGISTER_SUBGRAPH_BRIDGE(
hard_sigmoid, hard_sigmoid,
kNNA, kImaginationNNA,
paddle::lite::subgraph::nna::ActConverter<ge::op::Activation>); paddle::lite::subgraph::imagination_nna::ActConverter<ge::op::Activation>);
REGISTER_SUBGRAPH_BRIDGE( REGISTER_SUBGRAPH_BRIDGE(
log, kNNA, paddle::lite::subgraph::nna::ActConverter<ge::op::Log>); log, kImaginationNNA, paddle::lite::subgraph::imagination_nna::ActConverter<ge::op::Log>);
REGISTER_SUBGRAPH_BRIDGE( REGISTER_SUBGRAPH_BRIDGE(
square, kNNA, paddle::lite::subgraph::nna::ActConverter<ge::op::Square>); square, kImaginationNNA, paddle::lite::subgraph::imagination_nna::ActConverter<ge::op::Square>);
REGISTER_SUBGRAPH_BRIDGE( REGISTER_SUBGRAPH_BRIDGE(
sqrt, kNNA, paddle::lite::subgraph::nna::ActConverter<ge::op::Sqrt>); sqrt, kImaginationNNA, paddle::lite::subgraph::imagination_nna::ActConverter<ge::op::Sqrt>);
#endif #endif
...@@ -12,14 +12,14 @@ ...@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "lite/kernels/nna/bridges/graph.h" #include "lite/kernels/imagination_nna/bridges/graph.h"
#include "lite/kernels/nna/bridges/registry.h" #include "lite/kernels/imagination_nna/bridges/registry.h"
#include "lite/kernels/nna/bridges/utility.h" #include "lite/kernels/imagination_nna/bridges/utility.h"
namespace paddle { namespace paddle {
namespace lite { namespace lite {
namespace subgraph { namespace subgraph {
namespace nna { namespace imagination_nna {
int BatchNormConverter(void* ctx, OpLite* op, KernelBase* kernel) { int BatchNormConverter(void* ctx, OpLite* op, KernelBase* kernel) {
CHECK(ctx != nullptr); CHECK(ctx != nullptr);
...@@ -30,7 +30,7 @@ int BatchNormConverter(void* ctx, OpLite* op, KernelBase* kernel) { ...@@ -30,7 +30,7 @@ int BatchNormConverter(void* ctx, OpLite* op, KernelBase* kernel) {
auto scope = op->scope(); auto scope = op->scope();
VLOG(3) << "[NNA] Converting " + op_type + "..."; VLOG(3) << "[NNA] Converting " + op_type + "...";
// Get input and output vars and op attributes // Get input and output vars and op attributes
auto x_name = op_info->Input("X").front(); auto x_name = op_info->Input("X").front();
auto x = scope->FindMutableTensor(x_name); auto x = scope->FindMutableTensor(x_name);
auto x_dims = x->dims(); auto x_dims = x->dims();
...@@ -81,11 +81,12 @@ int BatchNormConverter(void* ctx, OpLite* op, KernelBase* kernel) { ...@@ -81,11 +81,12 @@ int BatchNormConverter(void* ctx, OpLite* op, KernelBase* kernel) {
return SUCCESS; return SUCCESS;
} }
} // namespace nna } // namespace imagination_nna
} // namespace subgraph } // namespace subgraph
} // namespace lite } // namespace lite
} // namespace paddle } // namespace paddle
REGISTER_SUBGRAPH_BRIDGE(batch_norm, REGISTER_SUBGRAPH_BRIDGE(
kNNA, batch_norm,
paddle::lite::subgraph::nna::BatchNormConverter); kImaginationNNA,
paddle::lite::subgraph::imagination_nna::BatchNormConverter);
...@@ -13,14 +13,14 @@ ...@@ -13,14 +13,14 @@
// limitations under the License. // limitations under the License.
#include "lite/operators/conv_op.h" #include "lite/operators/conv_op.h"
#include "lite/kernels/nna/bridges/graph.h" #include "lite/kernels/imagination_nna/bridges/graph.h"
#include "lite/kernels/nna/bridges/utility.h" #include "lite/kernels/imagination_nna/bridges/utility.h"
#include "lite/kernels/npu/bridges/registry.h" #include "lite/kernels/npu/bridges/registry.h"
namespace paddle { namespace paddle {
namespace lite { namespace lite {
namespace subgraph { namespace subgraph {
namespace nna { namespace imagination_nna {
int ConvConverter(void *ctx, OpLite *op, KernelBase *kernel) { int ConvConverter(void *ctx, OpLite *op, KernelBase *kernel) {
CHECK(ctx != nullptr); CHECK(ctx != nullptr);
...@@ -282,15 +282,17 @@ int ConvConverter(void *ctx, OpLite *op, KernelBase *kernel) { ...@@ -282,15 +282,17 @@ int ConvConverter(void *ctx, OpLite *op, KernelBase *kernel) {
return REBUILD_WHEN_SHAPE_CHANGED; return REBUILD_WHEN_SHAPE_CHANGED;
} }
} // namespace nna } // namespace imagination_nna
} // namespace subgraph } // namespace subgraph
} // namespace lite } // namespace lite
} // namespace paddle } // namespace paddle
REGISTER_SUBGRAPH_BRIDGE(conv2d, REGISTER_SUBGRAPH_BRIDGE(
kNNA, conv2d,
paddle::lite::subgraph::nna::ConvConverter); kImaginationNNA,
paddle::lite::subgraph::imagination_nna::ConvConverter);
REGISTER_SUBGRAPH_BRIDGE(depthwise_conv2d, REGISTER_SUBGRAPH_BRIDGE(
kNNA, depthwise_conv2d,
paddle::lite::subgraph::nna::ConvConverter); kImaginationNNA,
paddle::lite::subgraph::imagination_nna::ConvConverter);
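Both conv2d and depthwise_conv2d map to the single ConvConverter above, and every bridge file in this commit follows the same converter shape. A hedged skeleton of that shape (SomeOp is a placeholder; the IMG IR emission through graph->GetBuilder() is elided):

#include "lite/kernels/imagination_nna/bridges/graph.h"
#include "lite/kernels/npu/bridges/registry.h"

namespace paddle {
namespace lite {
namespace subgraph {
namespace imagination_nna {

int SomeOpConverter(void* ctx, OpLite* op, KernelBase* kernel) {
  CHECK(ctx != nullptr);
  CHECK(op != nullptr);
  auto graph = static_cast<Graph*>(ctx);  // IMG IR graph under construction
  auto op_info = op->op_info();
  auto op_type = op_info->Type();
  auto scope = op->scope();
  VLOG(3) << "[NNA] Converting " + op_type + "...";
  // ... read inputs/weights from `scope`, emit IMG IR ops through
  // graph->GetBuilder() ...
  return REBUILD_WHEN_SHAPE_CHANGED;  // or SUCCESS for shape-stable ops
}

}  // namespace imagination_nna
}  // namespace subgraph
}  // namespace lite
}  // namespace paddle

REGISTER_SUBGRAPH_BRIDGE(
    some_op,
    kImaginationNNA,
    paddle::lite::subgraph::imagination_nna::SomeOpConverter);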
...@@ -13,13 +13,13 @@ ...@@ -13,13 +13,13 @@
// limitations under the License. // limitations under the License.
#include "imgdnn.h" // NOLINT #include "imgdnn.h" // NOLINT
#include "lite/kernels/nna/bridges/graph.h" #include "lite/kernels/imagination_nna/bridges/graph.h"
#include "lite/kernels/npu/bridges/registry.h" #include "lite/kernels/npu/bridges/registry.h"
namespace paddle { namespace paddle {
namespace lite { namespace lite {
namespace subgraph { namespace subgraph {
namespace nna { namespace imagination_nna {
int FCConverter(void* ctx, OpLite* op, KernelBase* kernel) { int FCConverter(void* ctx, OpLite* op, KernelBase* kernel) {
CHECK(ctx != nullptr); CHECK(ctx != nullptr);
...@@ -174,9 +174,11 @@ int FCConverter(void* ctx, OpLite* op, KernelBase* kernel) { ...@@ -174,9 +174,11 @@ int FCConverter(void* ctx, OpLite* op, KernelBase* kernel) {
return REBUILD_WHEN_SHAPE_CHANGED; return REBUILD_WHEN_SHAPE_CHANGED;
} }
} // namespace nna } // namespace imagination_nna
} // namespace subgraph } // namespace subgraph
} // namespace lite } // namespace lite
} // namespace paddle } // namespace paddle
REGISTER_SUBGRAPH_BRIDGE(fc, kNNA, paddle::lite::subgraph::nna::FCConverter); REGISTER_SUBGRAPH_BRIDGE(fc,
kImaginationNNA,
paddle::lite::subgraph::imagination_nna::FCConverter);
...@@ -12,14 +12,14 @@ ...@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "lite/kernels/nna/bridges/graph.h" #include "lite/kernels/imagination_nna/bridges/graph.h"
#include <utility> #include <utility>
#include "lite/kernels/nna/bridges/utility.h" #include "lite/kernels/imagination_nna/bridges/utility.h"
namespace paddle { namespace paddle {
namespace lite { namespace lite {
namespace subgraph { namespace subgraph {
namespace nna { namespace imagination_nna {
// Add 1 // Add 1
int Graph::Add(const std::string& name, std::shared_ptr<Node> node) { int Graph::Add(const std::string& name, std::shared_ptr<Node> node) {
...@@ -145,7 +145,7 @@ std::shared_ptr<Node> Graph::Add(const std::string& name, ...@@ -145,7 +145,7 @@ std::shared_ptr<Node> Graph::Add(const std::string& name,
return node; return node;
} }
} // namespace nna } // namespace imagination_nna
} // namespace subgraph } // namespace subgraph
} // namespace lite } // namespace lite
} // namespace paddle } // namespace paddle
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
#include <utility> #include <utility>
#include <vector> #include <vector>
#include "imgdnn.h" // NOLINT #include "imgdnn.h" // NOLINT
#include "lite/backends/nna/imgdnn_manager.h" #include "lite/backends/imagination_nna/imgdnn_manager.h"
#include "lite/core/op_lite.h" #include "lite/core/op_lite.h"
#include "lite/core/tensor.h" #include "lite/core/tensor.h"
#include "utility.h" // NOLINT #include "utility.h" // NOLINT
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
namespace paddle { namespace paddle {
namespace lite { namespace lite {
namespace subgraph { namespace subgraph {
namespace nna { namespace imagination_nna {
#define NNA_UNUSED(var) \ #define NNA_UNUSED(var) \
do { \ do { \
...@@ -77,7 +77,7 @@ class Node { ...@@ -77,7 +77,7 @@ class Node {
class Graph { class Graph {
public: public:
explicit Graph(lite::nna::ImgdnnManager* pMgr) { explicit Graph(lite::imagination_nna::ImgdnnManager* pMgr) {
pImgdnnMgr = pMgr; pImgdnnMgr = pMgr;
std::cout << "graph construct" << std::endl; std::cout << "graph construct" << std::endl;
} }
...@@ -129,16 +129,16 @@ class Graph { ...@@ -129,16 +129,16 @@ class Graph {
return nodes_.find(name) != nodes_.end(); return nodes_.find(name) != nodes_.end();
} }
lite::nna::ImgdnnManager* GetBuilder() { lite::imagination_nna::ImgdnnManager* GetBuilder() {
ASSERT(pImgdnnMgr != nullptr, "pImgdnnMgr used before initialization"); ASSERT(pImgdnnMgr != nullptr, "pImgdnnMgr used before initialization");
return pImgdnnMgr; return pImgdnnMgr;
} }
private: private:
std::unordered_map<std::string, std::vector<std::shared_ptr<Node>>> nodes_; std::unordered_map<std::string, std::vector<std::shared_ptr<Node>>> nodes_;
lite::nna::ImgdnnManager* pImgdnnMgr{nullptr}; lite::imagination_nna::ImgdnnManager* pImgdnnMgr{nullptr};
}; };
} // namespace nna } // namespace imagination_nna
} // namespace subgraph } // namespace subgraph
} // namespace lite } // namespace lite
......
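The Graph wrapper indexes Nodes by name and delegates all IMG IR construction to a caller-owned ImgdnnManager, as SubgraphEngine does below. A usage sketch under those assumptions (x_name stands in for a tensor name):

lite::imagination_nna::ImgdnnManager imgdnn_mgr;
subgraph::imagination_nna::Graph graph{&imgdnn_mgr};
if (!graph.Has(x_name)) {
  // One of the Add() overloads in graph.cc creates a Node and indexes it
  // under x_name; the exact arguments are elided in this hunk.
}
auto* builder = graph.GetBuilder();  // the underlying ImgdnnManager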
...@@ -14,9 +14,9 @@ ...@@ -14,9 +14,9 @@
#pragma once #pragma once
USE_SUBGRAPH_BRIDGE(relu, kNNA); USE_SUBGRAPH_BRIDGE(relu, kImaginationNNA);
USE_SUBGRAPH_BRIDGE(conv2d, kNNA); USE_SUBGRAPH_BRIDGE(conv2d, kImaginationNNA);
USE_SUBGRAPH_BRIDGE(depthwise_conv2d, kNNA); USE_SUBGRAPH_BRIDGE(depthwise_conv2d, kImaginationNNA);
USE_SUBGRAPH_BRIDGE(fc, kNNA); USE_SUBGRAPH_BRIDGE(fc, kImaginationNNA);
USE_SUBGRAPH_BRIDGE(pool2d, kNNA); USE_SUBGRAPH_BRIDGE(pool2d, kImaginationNNA);
// USE_SUBGRAPH_BRIDGE(softmax, kNNA); // USE_SUBGRAPH_BRIDGE(softmax, kImaginationNNA);
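These USE_SUBGRAPH_BRIDGE declarations pair with the REGISTER_SUBGRAPH_BRIDGE definitions in the bridge files: referencing one symbol per (op, target) from the kernel's translation unit keeps the linker from stripping the otherwise-unreferenced static registrars. Illustrative only — the usual touch-symbol pattern such macros implement; Paddle-Lite's actual spelling may differ:

#define USE_SUBGRAPH_BRIDGE(op_type__, target__)                \
  extern int touch_subgraph_bridge_##op_type__##_##target__();  \
  static int use_subgraph_bridge_##op_type__##_##target__ =     \
      touch_subgraph_bridge_##op_type__##_##target__();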
...@@ -14,14 +14,14 @@ ...@@ -14,14 +14,14 @@
#include "lite/operators/pool_op.h" #include "lite/operators/pool_op.h"
#include "imgdnn.h" // NOLINT #include "imgdnn.h" // NOLINT
#include "lite/kernels/nna/bridges/graph.h" #include "lite/kernels/imagination_nna/bridges/graph.h"
#include "lite/kernels/nna/bridges/utility.h" #include "lite/kernels/imagination_nna/bridges/utility.h"
#include "lite/kernels/npu/bridges/registry.h" #include "lite/kernels/npu/bridges/registry.h"
namespace paddle { namespace paddle {
namespace lite { namespace lite {
namespace subgraph { namespace subgraph {
namespace nna { namespace imagination_nna {
int PoolConverter(void* ctx, OpLite* op, KernelBase* kernel) { int PoolConverter(void* ctx, OpLite* op, KernelBase* kernel) {
CHECK(ctx != nullptr); CHECK(ctx != nullptr);
...@@ -137,11 +137,12 @@ int PoolConverter(void* ctx, OpLite* op, KernelBase* kernel) { ...@@ -137,11 +137,12 @@ int PoolConverter(void* ctx, OpLite* op, KernelBase* kernel) {
return REBUILD_WHEN_SHAPE_CHANGED; return REBUILD_WHEN_SHAPE_CHANGED;
} }
} // namespace nna } // namespace imagination_nna
} // namespace subgraph } // namespace subgraph
} // namespace lite } // namespace lite
} // namespace paddle } // namespace paddle
REGISTER_SUBGRAPH_BRIDGE(pool2d, REGISTER_SUBGRAPH_BRIDGE(
kNNA, pool2d,
paddle::lite::subgraph::nna::PoolConverter); kImaginationNNA,
paddle::lite::subgraph::imagination_nna::PoolConverter);
...@@ -12,14 +12,14 @@ ...@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "lite/kernels/nna/bridges/graph.h" #include "lite/kernels/imagination_nna/bridges/graph.h"
#include "lite/kernels/nna/bridges/registry.h" #include "lite/kernels/imagination_nna/bridges/registry.h"
#include "lite/kernels/npu/bridges/utility.h" #include "lite/kernels/npu/bridges/utility.h"
namespace paddle { namespace paddle {
namespace lite { namespace lite {
namespace subgraph { namespace subgraph {
namespace nna { namespace imagination_nna {
int SoftmaxConverter(void* ctx, OpLite* op, KernelBase* kernel) { int SoftmaxConverter(void* ctx, OpLite* op, KernelBase* kernel) {
CHECK(ctx != nullptr); CHECK(ctx != nullptr);
...@@ -69,11 +69,12 @@ int SoftmaxConverter(void* ctx, OpLite* op, KernelBase* kernel) { ...@@ -69,11 +69,12 @@ int SoftmaxConverter(void* ctx, OpLite* op, KernelBase* kernel) {
return REBUILD_WHEN_SHAPE_CHANGED; return REBUILD_WHEN_SHAPE_CHANGED;
} }
} // namespace nna } // namespace imagination_nna
} // namespace subgraph } // namespace subgraph
} // namespace lite } // namespace lite
} // namespace paddle } // namespace paddle
REGISTER_SUBGRAPH_BRIDGE(softmax, REGISTER_SUBGRAPH_BRIDGE(
kNNA, softmax,
paddle::lite::subgraph::nna::SoftmaxConverter); kImaginationNNA,
paddle::lite::subgraph::imagination_nna::SoftmaxConverter);
...@@ -12,13 +12,13 @@ ...@@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "lite/kernels/nna/bridges/utility.h" #include "lite/kernels/imagination_nna/bridges/utility.h"
#include <utility> #include <utility>
namespace paddle { namespace paddle {
namespace lite { namespace lite {
namespace subgraph { namespace subgraph {
namespace nna { namespace imagination_nna {
bool HasInputArg(const OpInfo* op_info, bool HasInputArg(const OpInfo* op_info,
const Scope* scope, const Scope* scope,
...@@ -61,7 +61,7 @@ void TensorInfoReset(TensorInfo* qnt) { ...@@ -61,7 +61,7 @@ void TensorInfoReset(TensorInfo* qnt) {
qnt->layout = DATALAYOUT(kNCHW); qnt->layout = DATALAYOUT(kNCHW);
} }
} // namespace nna } // namespace imagination_nna
} // namespace subgraph } // namespace subgraph
} // namespace lite } // namespace lite
} // namespace paddle } // namespace paddle
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
namespace paddle { namespace paddle {
namespace lite { namespace lite {
namespace subgraph { namespace subgraph {
namespace nna { namespace imagination_nna {
struct TensorInfo { struct TensorInfo {
imgdnn_type type; imgdnn_type type;
...@@ -44,7 +44,7 @@ bool isScalesPerChannel(std::vector<float> scales); ...@@ -44,7 +44,7 @@ bool isScalesPerChannel(std::vector<float> scales);
void TensorInfoReset(TensorInfo* qnt); void TensorInfoReset(TensorInfo* qnt);
} // namespace nna } // namespace imagination_nna
} // namespace subgraph } // namespace subgraph
} // namespace lite } // namespace lite
} // namespace paddle } // namespace paddle
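TensorInfo carries a tensor's imgdnn type plus its quantization and layout description between converters, and TensorInfoReset restores the defaults (layout back to kNCHW, per the hunk above). A small sketch:

subgraph::imagination_nna::TensorInfo qnt;
subgraph::imagination_nna::TensorInfoReset(&qnt);  // layout = DATALAYOUT(kNCHW)
// qnt.type is an imgdnn_type from imgdnn.h; fill it (and the quantization
// fields elided in this hunk) before handing the record to a converter.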
...@@ -12,26 +12,26 @@ ...@@ -12,26 +12,26 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "lite/kernels/nna/subgraph_compute.h" #include "lite/kernels/imagination_nna/subgraph_compute.h"
#include <sys/time.h> #include <sys/time.h>
#include <time.h> #include <time.h>
#include <limits> #include <limits>
#include <utility> #include <utility>
#include "lite/core/op_registry.h" #include "lite/core/op_registry.h"
#include "lite/kernels/nna/bridges/graph.h" #include "lite/kernels/imagination_nna/bridges/graph.h"
#include "lite/kernels/nna/bridges/paddle_use_bridges.h" #include "lite/kernels/imagination_nna/bridges/paddle_use_bridges.h"
#include "lite/kernels/nna/bridges/utility.h" #include "lite/kernels/imagination_nna/bridges/utility.h"
namespace paddle { namespace paddle {
namespace lite { namespace lite {
namespace kernels { namespace kernels {
namespace nna { namespace imagination_nna {
bool SubgraphEngine::BuildDeviceProgram() { bool SubgraphEngine::BuildDeviceProgram() {
int status = 0; int status = 0;
// Convert all of the ops with their input vars and weights, and add them // Convert all of the ops with their input vars and weights, and add them
// into the NNA IMG IR graph // into the NNA IMG IR graph
subgraph::nna::Graph graph{&imgdnn_mgr_}; subgraph::imagination_nna::Graph graph{&imgdnn_mgr_};
const auto& bridges = subgraph::Registry::Instance(); const auto& bridges = subgraph::Registry::Instance();
if (!origin_program_) { if (!origin_program_) {
BuildOriginProgram(); BuildOriginProgram();
...@@ -43,13 +43,13 @@ bool SubgraphEngine::BuildDeviceProgram() { ...@@ -43,13 +43,13 @@ bool SubgraphEngine::BuildDeviceProgram() {
op->CheckShape(); op->CheckShape();
op->InferShape(); op->InferShape();
std::string op_type = op->op_info()->Type(); std::string op_type = op->op_info()->Type();
if (!bridges.Exists(op_type, TARGET(kNNA))) { if (!bridges.Exists(op_type, TARGET(kImaginationNNA))) {
// return subgraph::FAILED; // return subgraph::FAILED;
return false; return false;
} }
auto kernel = inst.kernel(); auto kernel = inst.kernel();
status |= status |= bridges.Select(op_type, TARGET(kImaginationNNA))(
bridges.Select(op_type, TARGET(kNNA))(reinterpret_cast<void*>(&graph), reinterpret_cast<void*>(&graph),
const_cast<OpLite*>(op), const_cast<OpLite*>(op),
const_cast<KernelBase*>(kernel)); const_cast<KernelBase*>(kernel));
if (subgraph::CHECK_FAILED(status)) { if (subgraph::CHECK_FAILED(status)) {
...@@ -231,16 +231,16 @@ void SubgraphCompute::Run() { ...@@ -231,16 +231,16 @@ void SubgraphCompute::Run() {
engine_->Run(); engine_->Run();
} }
} // namespace nna } // namespace imagination_nna
} // namespace kernels } // namespace kernels
} // namespace lite } // namespace lite
} // namespace paddle } // namespace paddle
REGISTER_LITE_KERNEL(subgraph, REGISTER_LITE_KERNEL(subgraph,
kNNA, kImaginationNNA,
kInt8, kInt8,
kNCHW, kNCHW,
paddle::lite::kernels::nna::SubgraphCompute, paddle::lite::kernels::imagination_nna::SubgraphCompute,
def) def)
.BindInput("Inputs", .BindInput("Inputs",
{LiteType::GetTensorTy(TARGET(kHost), PRECISION(kInt8))}) {LiteType::GetTensorTy(TARGET(kHost), PRECISION(kInt8))})
......
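With the kernel registered under (kImaginationNNA, kInt8, kNCHW), an application opts in through its valid places. A hedged sketch of the public lite_api side — this usage is an assumption, not part of this diff (model_dir is a caller-supplied path):

#include "paddle_api.h"  // paddle::lite_api; not shown in this commit

paddle::lite_api::CxxConfig config;
config.set_model_dir(model_dir);
config.set_valid_places({
    paddle::lite_api::Place{TARGET(kImaginationNNA), PRECISION(kInt8)},
    paddle::lite_api::Place{TARGET(kARM), PRECISION(kFloat)},  // CPU fallback
});
auto predictor = paddle::lite_api::CreatePaddlePredictor(config);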
...@@ -18,16 +18,16 @@ ...@@ -18,16 +18,16 @@
#include <string> #include <string>
#include <vector> #include <vector>
#include "imgdnn.h" // NOLINT #include "imgdnn.h" // NOLINT
#include "lite/backends/nna/imgdnn_manager.h" #include "lite/backends/imagination_nna/imgdnn_manager.h"
#include "lite/core/kernel.h" #include "lite/core/kernel.h"
#include "lite/kernels/nna/bridges/graph.h" #include "lite/kernels/imagination_nna/bridges/graph.h"
#include "lite/kernels/npu/bridges/engine.h" #include "lite/kernels/npu/bridges/engine.h"
#include "lite/kernels/npu/bridges/registry.h" #include "lite/kernels/npu/bridges/registry.h"
namespace paddle { namespace paddle {
namespace lite { namespace lite {
namespace kernels { namespace kernels {
namespace nna { namespace imagination_nna {
class SubgraphEngine : public subgraph::Engine { class SubgraphEngine : public subgraph::Engine {
public: public:
...@@ -54,11 +54,12 @@ class SubgraphEngine : public subgraph::Engine { ...@@ -54,11 +54,12 @@ class SubgraphEngine : public subgraph::Engine {
std::vector<std::string> device_onames_; std::vector<std::string> device_onames_;
std::vector<imgdnn_input> device_itensors_; std::vector<imgdnn_input> device_itensors_;
std::vector<imgdnn_output> device_otensors_; std::vector<imgdnn_output> device_otensors_;
lite::nna::ImgdnnManager imgdnn_mgr_; lite::imagination_nna::ImgdnnManager imgdnn_mgr_;
}; };
class SubgraphCompute class SubgraphCompute : public KernelLite<TARGET(kImaginationNNA),
: public KernelLite<TARGET(kNNA), PRECISION(kInt8), DATALAYOUT(kNCHW)> { PRECISION(kInt8),
DATALAYOUT(kNCHW)> {
public: public:
using param_t = operators::SubgraphParam; using param_t = operators::SubgraphParam;
...@@ -75,7 +76,7 @@ class SubgraphCompute ...@@ -75,7 +76,7 @@ class SubgraphCompute
std::unique_ptr<SubgraphEngine> engine_; std::unique_ptr<SubgraphEngine> engine_;
}; };
} // namespace nna } // namespace imagination_nna
} // namespace kernels } // namespace kernels
} // namespace lite } // namespace lite
} // namespace paddle } // namespace paddle
add_subdirectory(bridges)
add_kernel(subgraph_compute_nna NNA basic SRCS subgraph_compute.cc DEPS ${lite_kernel_deps} device_nna subgraph_bridge_engine ${nna_subgraph_bridges})
if(NOT LITE_WITH_IMAGINATION_NNA)
return()
endif()
lite_cc_library(subgraph_bridge_utility_nna SRCS utility.cc DEPS ${nna_builder_libs} ${nna_runtime_libs} tensor)
lite_cc_library(subgraph_bridge_graph_nna SRCS graph.cc DEPS subgraph_bridge_utility_nna)
set(nna_subgraph_bridge_deps subgraph_bridge_registry subgraph_bridge_utility_nna subgraph_bridge_graph_nna)
lite_cc_library(subgraph_bridge_fc_op_nna SRCS fc_op.cc DEPS ${nna_subgraph_bridge_deps})
lite_cc_library(subgraph_bridge_conv_op_nna SRCS conv_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_matmul_op_nna SRCS matmul_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_mul_op_nna SRCS mul_op.cc DEPS ${nna_subgraph_bridge_deps})
lite_cc_library(subgraph_bridge_act_op_nna SRCS act_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_scale_op_nna SRCS scale_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_softmax_op_nna SRCS softmax_op.cc DEPS ${nna_subgraph_bridge_deps})
lite_cc_library(subgraph_bridge_pool_op_nna SRCS pool_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_batch_norm_op_nna SRCS batch_norm_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_elementwise_ops_nna SRCS elementwise_ops.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_reshape_op_nna SRCS reshape_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_conv_transpose_op_nna SRCS conv_transpose_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_interpolate_op_nna SRCS interpolate_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_transpose_op_nna SRCS transpose_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_split_op_nna SRCS split_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_concat_op_nna SRCS concat_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_shuffle_channel_op_nna SRCS shuffle_channel_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_pad2d_op_nna SRCS pad2d_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_reduce_mean_op_nna SRCS reduce_mean_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_unsqueeze_op_nna SRCS unsqueeze_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_gather_op_nna SRCS gather_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_lookup_table_op_nna SRCS lookup_table_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_argmax_op_nna SRCS argmax_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_instance_norm_op_nna SRCS instance_norm_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_dropout_op_nna SRCS dropout_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_topk_op_nna SRCS topk_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_layer_norm_op_nna SRCS layer_norm_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_fill_constant_op_nna SRCS fill_constant_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_fill_constant_batch_size_like_op_nna SRCS fill_constant_batch_size_like_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_increment_op_nna SRCS increment_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_compare_op_nna SRCS compare_op.cc DEPS ${nna_subgraph_bridge_deps})
#lite_cc_library(subgraph_bridge_shape_op_nna SRCS shape_op.cc DEPS ${nna_subgraph_bridge_deps})
set(nna_subgraph_bridges
subgraph_bridge_registry
subgraph_bridge_utility_nna
subgraph_bridge_graph_nna
subgraph_bridge_fc_op_nna
subgraph_bridge_conv_op_nna
#subgraph_bridge_matmul_op_nna
#subgraph_bridge_mul_op_nna
subgraph_bridge_act_op_nna
#subgraph_bridge_scale_op_nna
#subgraph_bridge_softmax_op_nna
subgraph_bridge_pool_op_nna
#subgraph_bridge_batch_norm_op_nna
#subgraph_bridge_elementwise_ops_nna
#subgraph_bridge_reshape_op_nna
#subgraph_bridge_conv_transpose_op_nna
#subgraph_bridge_interpolate_op_nna
#subgraph_bridge_transpose_op_nna
#subgraph_bridge_split_op_nna
#subgraph_bridge_concat_op_nna
#subgraph_bridge_shuffle_channel_op_nna
#subgraph_bridge_pad2d_op_nna
#subgraph_bridge_reduce_mean_op_nna
#subgraph_bridge_unsqueeze_op_nna
#subgraph_bridge_gather_op_nna
#subgraph_bridge_lookup_table_op_nna
#subgraph_bridge_argmax_op_nna
#subgraph_bridge_instance_norm_op_nna
#subgraph_bridge_dropout_op_nna
#subgraph_bridge_topk_op_nna
#subgraph_bridge_layer_norm_op_nna
#subgraph_bridge_fill_constant_op_nna
#subgraph_bridge_fill_constant_batch_size_like_op_nna
#subgraph_bridge_increment_op_nna
#subgraph_bridge_compare_op_nna
CACHE INTERNAL "nna_subgraph_bridges")
message(STATUS "+++++ nna_subgraph_bridges: ${nna_subgraph_bridges}")
...@@ -56,7 +56,7 @@ const std::vector<std::vector<std::string>> supported_ops_target = { ...@@ -56,7 +56,7 @@ const std::vector<std::vector<std::string>> supported_ops_target = {
ops_lines = [] ops_lines = []
# valid targets and valid_ops # valid targets and valid_ops
valid_targets = ["kUnk", "kHost", "kX86", "kCUDA", "kARM", "kOpenCL", "kAny", "kFPGA", "kNPU", "kXPU", "kBM", "kMLU", "kRKNPU", "kAPU", "kHuaweiAscendNPU", "kNNA"] valid_targets = ["kUnk", "kHost", "kX86", "kCUDA", "kARM", "kOpenCL", "kAny", "kFPGA", "kNPU", "kXPU", "kBM", "kMLU", "kRKNPU", "kAPU", "kHuaweiAscendNPU", "kImaginationNNA"]
valid_ops = [[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]] valid_ops = [[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]
class TargetType: class TargetType:
kUnk = 0 kUnk = 0
...@@ -74,7 +74,7 @@ class TargetType: ...@@ -74,7 +74,7 @@ class TargetType:
kRKNPU = 12 kRKNPU = 12
kAPU = 13 kAPU = 13
kHuaweiAscendNPU = 14 kHuaweiAscendNPU = 14
kNNA = 15 kImaginationNNA = 15
# record op_info of valid kernels into `valid_ops` according to different target type # record op_info of valid kernels into `valid_ops` according to different target type
......