Commit 91007fe9 authored by: yuyang18

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into feature/refine_parallel_executor
@@ -116,13 +116,14 @@ else()
 endif()
 set(WITH_MKLML ${WITH_MKL})
-if (WITH_MKL AND AVX2_FOUND)
-  set(WITH_MKLDNN ON)
-else()
-  message(STATUS "Do not have AVX2 intrinsics and disabled MKL-DNN")
-  set(WITH_MKLDNN OFF)
+if (NOT DEFINED WITH_MKLDNN)
+  if (WITH_MKL AND AVX2_FOUND)
+    set(WITH_MKLDNN ON)
+  else()
+    message(STATUS "Do not have AVX2 intrinsics and disabled MKL-DNN")
+    set(WITH_MKLDNN OFF)
+  endif()
 endif()
 ########################################################################################
 include(external/mklml)         # download mklml package
......
@@ -23,8 +23,12 @@ set(BOOST_PROJECT       "extern_boost")
 # checked that the devtools package of CentOS 6 installs boost 1.41.0.
 # So we use 1.41.0 here.
 set(BOOST_VER   "1.41.0")
-set(BOOST_TAR   "boost_1_41_0")
-set(BOOST_URL   "http://paddlepaddledeps.cdn.bcebos.com/${BOOST_TAR}.tar.gz")
+if((NOT DEFINED BOOST_TAR) OR (NOT DEFINED BOOST_URL))
+  message(STATUS "use pre defined download url")
+  set(BOOST_TAR "boost_1_41_0" CACHE STRING "" FORCE)
+  set(BOOST_URL "http://paddlepaddledeps.cdn.bcebos.com/${BOOST_TAR}.tar.gz" CACHE STRING "" FORCE)
+endif()
+MESSAGE(STATUS "BOOST_TAR: ${BOOST_TAR}, BOOST_URL: ${BOOST_URL}")
 set(BOOST_SOURCES_DIR ${THIRD_PARTY_PATH}/boost)
 set(BOOST_DOWNLOAD_DIR  "${BOOST_SOURCES_DIR}/src/${BOOST_PROJECT}")
 set(BOOST_INCLUDE_DIR "${BOOST_DOWNLOAD_DIR}/${BOOST_TAR}" CACHE PATH "boost include directory." FORCE)
......
@@ -27,8 +27,12 @@ ENDIF()
 INCLUDE(ExternalProject)

 SET(MKLML_PROJECT       "extern_mklml")
-SET(MKLML_VER           "mklml_lnx_2018.0.3.20180406")
-SET(MKLML_URL           "http://paddlepaddledeps.cdn.bcebos.com/${MKLML_VER}.tgz")
+IF((NOT DEFINED MKLML_VER) OR (NOT DEFINED MKLML_URL))
+  MESSAGE(STATUS "use pre defined download url")
+  SET(MKLML_VER "mklml_lnx_2018.0.3.20180406" CACHE STRING "" FORCE)
+  SET(MKLML_URL "http://paddlepaddledeps.cdn.bcebos.com/${MKLML_VER}.tgz" CACHE STRING "" FORCE)
+ENDIF()
+MESSAGE(STATUS "MKLML_VER: ${MKLML_VER}, MKLML_URL: ${MKLML_URL}")
 SET(MKLML_SOURCE_DIR    "${THIRD_PARTY_PATH}/mklml")
 SET(MKLML_DOWNLOAD_DIR  "${MKLML_SOURCE_DIR}/src/${MKLML_PROJECT}")
 SET(MKLML_DST_DIR       "mklml")
......
@@ -162,4 +162,24 @@ copy(pybind_lib
         DSTS ${dst_dir}/${module}
 )

+# CMakeCache Info
+copy(cmake_cache
+  SRCS ${CMAKE_CURRENT_BINARY_DIR}/CMakeCache.txt
+  DSTS ${CMAKE_INSTALL_PREFIX})
+
 add_custom_target(inference_lib_dist DEPENDS ${inference_lib_dist_dep})
+
+# paddle fluid version
+execute_process(
+  COMMAND ${GIT_EXECUTABLE} log --pretty=format:%H -1
+  OUTPUT_VARIABLE PADDLE_GIT_COMMIT)
+set(version_file ${CMAKE_INSTALL_PREFIX}/version.txt)
+file(WRITE ${version_file}
+  "GIT COMMIT ID: ${PADDLE_GIT_COMMIT}\n"
+  "WITH_MKL: ${WITH_MKL}\n"
+  "WITH_GPU: ${WITH_GPU}\n")
+if(WITH_GPU)
+  file(APPEND ${version_file}
+    "CUDA version: ${CUDA_VERSION}\n"
+    "CUDNN version: v${CUDNN_MAJOR_VERSION}\n")
+endif()
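For illustration, the version.txt written by the block above for a GPU build would contain lines of the following shape (the values here are placeholders, not actual output):

    GIT COMMIT ID: <full 40-character hash reported by git log -1>
    WITH_MKL: ON
    WITH_GPU: ON
    CUDA version: 8.0
    CUDNN version: v5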
+# CPU Performance Tuning
+
 This tutorial introduces how to use Python's cProfile package, the Python library yep, and Google perftools for profiling and performance tuning.

 Profiling means finding performance bottlenecks. The real bottlenecks in a system can be far from what the developer imagined while writing the code. Tuning means removing those bottlenecks. Performance optimization is usually an iterative cycle of profiling and tuning.
@@ -8,7 +10,7 @@ PaddlePaddle users generally write deep learning programs by calling the Python API.
 * Profiling mixed Python and C++ code

-# Profiling Python code
+## Profiling Python code

 ### Generating the profiling file
......
+# Tune CPU performance
+
 This tutorial introduces techniques we use to profile and tune the
 CPU performance of PaddlePaddle. We will use Python packages
 `cProfile` and `yep`, and Google's `perftools`.
@@ -14,7 +16,7 @@ the profiling and tuning of
 1. the Python code and
 1. the mixture of Python and C++ code.

-# Profiling the Python Code
+## Profiling the Python Code

 ### Generate the Performance Profiling File
......
@@ -10,20 +10,38 @@ PaddlePaddle can be installed with common Python package management tools
 Install using pip
 ------------------------------

-Running the following command installs the PaddlePaddle runtime environment on the current machine and automatically downloads and installs its dependencies; the version is cpu_avx_openblas.
+Running the following command installs the PaddlePaddle runtime environment on the current machine and automatically downloads and installs its dependencies.

 .. code-block:: bash

    pip install paddlepaddle

+The current default version is 0.12.0 (cpu_avx_openblas). You can install other versions by specifying the version number, for example:
+
+.. code-block:: bash
+
+   pip install paddlepaddle==0.11.0
+
-If you need to install the GPU-enabled version (cuda7.5_cudnn5_avx_openblas), run:
+If you need to install the GPU-enabled version (cuda8.0_cudnn5_avx_openblas), run:

 .. code-block:: bash

    pip install paddlepaddle-gpu

+The current default version is also 0.12.0. PaddlePaddle provides more package versions for different needs; a partial list follows:
+
+=================================  ========================================
+Version                            Description
+=================================  ========================================
+paddlepaddle-gpu==0.12.0           0.12.0 built with CUDA 8.0 and cuDNN 5
+paddlepaddle-gpu==0.11.0.post87    0.11.0 built with CUDA 8.0 and cuDNN 7
+paddlepaddle-gpu==0.11.0.post8     0.11.0 built with CUDA 8.0 and cuDNN 5
+paddlepaddle-gpu==0.11.0           0.11.0 built with CUDA 7.5 and cuDNN 5
+=================================  ========================================
+
+You can find every released version of paddlepaddle-gpu in the `Release History <https://pypi.org/project/paddlepaddle-gpu/#history>`_.
+
 If you need to get and install the latest (develop branch) PaddlePaddle, you can download the latest whl package and the C-API development package from our CI system and install them;
 you can find the version you need in the table below:

@@ -37,12 +55,11 @@ PaddlePaddle can be installed with common Python package management tools
    :header: "version description", "cp27-cp27mu", "cp27-cp27m"
    :widths: 1, 3, 3

-   "cpu_avx_mkl", "`paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuAvxCp27cp27mu/.lastSuccessful/paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuAvxCp27cp27mu/.lastSuccessful/paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl>`_"
-   "cpu_avx_openblas", "`paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuAvxOpenblas/.lastSuccessful/paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuAvxOpenblas/.lastSuccessful/paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl>`_"
-   "cpu_noavx_openblas", "`paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuNoavxOpenblas/.lastSuccessful/paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuNoavxOpenblas/.lastSuccessful/paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl>`_"
-   "cuda7.5_cudnn5_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda75cudnn5cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda75cudnn5cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl>`_"
-   "cuda8.0_cudnn5_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda80cudnn5cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda80cudnn5cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl>`_"
-   "cuda8.0_cudnn7_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda8cudnn7cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda8cudnn7cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl>`_"
+   "cpu_avx_mkl", "`paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuAvxCp27cp27mu/.lastSuccessful/paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuAvxCp27cp27mu/.lastSuccessful/paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl>`_"
+   "cpu_avx_openblas", "`paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuAvxOpenblas/.lastSuccessful/paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuAvxOpenblas/.lastSuccessful/paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl>`_"
+   "cpu_noavx_openblas", "`paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuNoavxOpenblas/.lastSuccessful/paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuNoavxOpenblas/.lastSuccessful/paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl>`_"
+   "cuda8.0_cudnn5_avx_mkl", "`paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda80cudnn5cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda80cudnn5cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl>`_"
+   "cuda8.0_cudnn7_avx_mkl", "`paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda8cudnn7cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda8cudnn7cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl>`_"

 .. _pip_dependency:

@@ -69,7 +86,7 @@ PaddlePaddle release packages try to align with the `manylinux1 <https://www.python.or
 ------------------------------

 - paddlepaddle*.whl is not a supported wheel on this platform.

 The main reason for this problem is that no paddlepaddle package matching the current system was found. Please check that the Python version is in the 2.7 series. Also, packages on the latest official pip source follow the manylinux1 standard by default, and a recent pip (>9.0.0) is required to install them. You can update your pip with the following command:

 .. code-block:: bash
......
@@ -12,20 +12,38 @@ Install using pip
 ------------------------------

 Run the following command to install PaddlePaddle on the current
-machine, it will also download requirements, the version is cpu_avx_openblas.
+machine; it will also download the requirements.

 .. code-block:: bash

    pip install paddlepaddle

+The default version is 0.12.0 (cpu_avx_openblas); you can specify a version to suit your needs, for example:
+
+.. code-block:: bash
+
+   pip install paddlepaddle==0.11.0
+
-If you wish to install GPU version (cuda7.5_cudnn5_avx_openblas), just run:
+If you need to install a GPU-enabled version (cuda8.0_cudnn5_avx_openblas), run:

 .. code-block:: bash

    pip install paddlepaddle-gpu

+The default version is also 0.12.0. PaddlePaddle provides several package versions for different needs, as shown in the table:
+
+=================================  ========================================
+Version                            Description
+=================================  ========================================
+paddlepaddle-gpu==0.12.0           0.12.0 built with CUDA 8.0 and cuDNN 5
+paddlepaddle-gpu==0.11.0.post87    0.11.0 built with CUDA 8.0 and cuDNN 7
+paddlepaddle-gpu==0.11.0.post8     0.11.0 built with CUDA 8.0 and cuDNN 5
+paddlepaddle-gpu==0.11.0           0.11.0 built with CUDA 7.5 and cuDNN 5
+=================================  ========================================
+
+You can find every released version of paddlepaddle-gpu in the `Release History <https://pypi.org/project/paddlepaddle-gpu/#history>`_.
+
 If you wish to install the latest develop branch PaddlePaddle,
 you can download the latest whl package from our CI system. Access
 the below links, log in as guest, then click at the "Artifact"
 tab, you'll find the download link of whl packages.

@@ -40,12 +58,11 @@ If the links below shows up the login form, just click "Log in as guest" to star
    :header: "version", "cp27-cp27mu", "cp27-cp27m"
    :widths: 1, 3, 3

-   "cpu_avx_mkl", "`paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuAvxCp27cp27mu/.lastSuccessful/paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuAvxCp27cp27mu/.lastSuccessful/paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl>`_"
-   "cpu_avx_openblas", "`paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuAvxOpenblas/.lastSuccessful/paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuAvxOpenblas/.lastSuccessful/paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl>`_"
-   "cpu_noavx_openblas", "`paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuNoavxOpenblas/.lastSuccessful/paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuNoavxOpenblas/.lastSuccessful/paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl>`_"
-   "cuda7.5_cudnn5_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda75cudnn5cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda75cudnn5cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl>`_"
-   "cuda8.0_cudnn5_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda80cudnn5cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda80cudnn5cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl>`_"
-   "cuda8.0_cudnn7_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda8cudnn7cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda8cudnn7cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl>`_"
+   "cpu_avx_mkl", "`paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuAvxCp27cp27mu/.lastSuccessful/paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuAvxCp27cp27mu/.lastSuccessful/paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl>`_"
+   "cpu_avx_openblas", "`paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuAvxOpenblas/.lastSuccessful/paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuAvxOpenblas/.lastSuccessful/paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl>`_"
+   "cpu_noavx_openblas", "`paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuNoavxOpenblas/.lastSuccessful/paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_CpuNoavxOpenblas/.lastSuccessful/paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl>`_"
+   "cuda8.0_cudnn5_avx_mkl", "`paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda80cudnn5cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda80cudnn5cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl>`_"
+   "cuda8.0_cudnn7_avx_mkl", "`paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda8cudnn7cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl>`_", "`paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl <https://guest:@paddleci.ngrok.io/repository/download/Manylinux1_Cuda8cudnn7cp27cp27mu/.lastSuccessful/paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl>`_"

 .. _pip_dependency:

@@ -79,7 +96,7 @@ FAQ
 ------------------------------

 - paddlepaddle*.whl is not a supported wheel on this platform.

 The main cause of this issue is that your current platform is
 not supported. Please check that you are using Python 2.7 series.
 Besides, pypi only supports manylinux1 standard, you'll need to
......
@@ -91,6 +91,12 @@ void TransDataType(const OpKernelType& kernel_type_for_var,
     case proto::VarType::BOOL:
       framework::VisitDataType(dst_type, CastDataType<bool>(in, out, ctx));
       break;
+    case proto::VarType::INT16:
+      framework::VisitDataType(dst_type, CastDataType<int16_t>(in, out, ctx));
+      break;
+    case proto::VarType::UINT8:
+      framework::VisitDataType(dst_type, CastDataType<uint8_t>(in, out, ctx));
+      break;
     default:
       PADDLE_THROW("Not support type %d", src_type);
   }
......
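The switch above is the first half of a double dispatch: the case on the runtime source type fixes the source C++ type statically, and VisitDataType then resolves the destination type, so every (src, dst) pair reaches a statically typed cast loop. A minimal self-contained sketch of the pattern, with simplified names that are not Paddle's actual API:

    #include <cstddef>
    #include <cstdint>

    enum class VarType { BOOL, INT16, UINT8, FP32 };

    // Statically typed cast functor: the source element type is a template
    // parameter; the destination type is supplied later by VisitDataType.
    template <typename SrcT>
    struct CastFunctor {
      const void* in;
      void* out;
      std::size_t n;
      template <typename DstT>
      void apply() const {
        const SrcT* src = static_cast<const SrcT*>(in);
        DstT* dst = static_cast<DstT*>(out);
        for (std::size_t i = 0; i < n; ++i) dst[i] = static_cast<DstT>(src[i]);
      }
    };

    // Second dispatch level: resolve the destination type at runtime.
    template <typename Visitor>
    void VisitDataType(VarType dst_type, const Visitor& v) {
      switch (dst_type) {
        case VarType::BOOL:  v.template apply<bool>(); break;
        case VarType::INT16: v.template apply<int16_t>(); break;
        case VarType::UINT8: v.template apply<uint8_t>(); break;
        case VarType::FP32:  v.template apply<float>(); break;
      }
    }

    // First dispatch level: resolve the source type at runtime. Each newly
    // supported source type needs its own case with a matching template
    // argument, which is what the INT16/UINT8 cases in the diff add.
    void TransDataType(VarType src_type, VarType dst_type, const void* in,
                       void* out, std::size_t n) {
      switch (src_type) {
        case VarType::BOOL:
          VisitDataType(dst_type, CastFunctor<bool>{in, out, n});
          break;
        case VarType::INT16:
          VisitDataType(dst_type, CastFunctor<int16_t>{in, out, n});
          break;
        case VarType::UINT8:
          VisitDataType(dst_type, CastFunctor<uint8_t>{in, out, n});
          break;
        case VarType::FP32:
          VisitDataType(dst_type, CastFunctor<float>{in, out, n});
          break;
      }
    }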
@@ -98,7 +98,7 @@ bool MultiDevSSAGraphBuilder::IsDistTrainOp(const OpDesc &op,
     return false;
   };

-  if (op.Type() == "split") {
+  if (op.Type() == "split" || op.Type() == "split_byref") {
     return checker(op.OutputArgumentNames(), send_op->InputArgumentNames());
   } else if (op.Type() == "concat") {
     return checker(op.InputArgumentNames(), send_op->OutputArgumentNames());
......
@@ -21,6 +21,7 @@ DEFINE_string(fp16_dirname, "", "Directory of the float16 inference model.");
 DEFINE_int32(batch_size, 1, "Batch size of input data");
 DEFINE_int32(repeat, 1, "Running the inference program repeat times");
 DEFINE_bool(skip_cpu, false, "Skip the cpu test");
+DEFINE_bool(use_mkldnn, false, "Use MKLDNN to run inference");

 TEST(inference, image_classification) {
   if (FLAGS_dirname.empty() || FLAGS_batch_size < 1 || FLAGS_repeat < 1) {
@@ -58,8 +59,10 @@ TEST(inference, image_classification) {
     // Run inference on CPU
     LOG(INFO) << "--- CPU Runs: ---";
     LOG(INFO) << "Batch size is " << FLAGS_batch_size;
+    LOG(INFO) << "FLAGS_use_mkldnn: " << FLAGS_use_mkldnn;
     TestInference<paddle::platform::CPUPlace, false, true>(
-        dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat, is_combined);
+        dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat, is_combined,
+        FLAGS_use_mkldnn);
     LOG(INFO) << output1.dims();
   }
......
@@ -133,11 +133,24 @@ std::vector<std::vector<int64_t>> GetFeedTargetShapes(
   return feed_target_shapes;
 }

+void EnableMKLDNN(
+    const std::unique_ptr<paddle::framework::ProgramDesc>& program) {
+  for (size_t bid = 0; bid < program->Size(); ++bid) {
+    auto* block = program->MutableBlock(bid);
+    for (auto* op : block->AllOps()) {
+      if (op->HasAttr("use_mkldnn")) {
+        op->SetAttr("use_mkldnn", true);
+      }
+    }
+  }
+}
+
 template <typename Place, bool CreateVars = true, bool PrepareContext = false>
 void TestInference(const std::string& dirname,
                    const std::vector<paddle::framework::LoDTensor*>& cpu_feeds,
                    const std::vector<paddle::framework::LoDTensor*>& cpu_fetchs,
-                   const int repeat = 1, const bool is_combined = false) {
+                   const int repeat = 1, const bool is_combined = false,
+                   const bool use_mkldnn = false) {
   // 1. Define place, executor, scope
   auto place = Place();
   auto executor = paddle::framework::Executor(place);
@@ -169,6 +182,9 @@ void TestInference(const std::string& dirname,
         "init_program",
         paddle::platform::DeviceContextPool::Instance().Get(place));
     inference_program = InitProgram(&executor, scope, dirname, is_combined);
+    if (use_mkldnn) {
+      EnableMKLDNN(inference_program);
+    }
   }
   // Disable the profiler and print the timing information
   paddle::platform::DisableProfiler(paddle::platform::EventSortingKey::kDefault,
......
@@ -204,6 +204,7 @@ if(WITH_DISTRIBUTE)
   set_source_files_properties(send_recv_op_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS})
   cc_test(test_send_recv SRCS send_recv_op_test.cc DEPS prefetch_op send_op listen_and_serv_op sum_op executor)
   if(WITH_GPU)
+    set_source_files_properties(test_send_nccl_id.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS})
     cc_test(test_send_nccl_id SRCS test_send_nccl_id.cc DEPS send_op listen_and_serv_op executor)
     op_library(gen_nccl_id_op DEPS nccl_common sendrecvop_grpc)
     set_source_files_properties(gen_nccl_id_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS})
......
@@ -15,6 +15,7 @@
 #include "mkldnn.hpp"
 #include "paddle/fluid/operators/activation_op.h"
 #include "paddle/fluid/operators/mkldnn_activation_op.h"
+#include "paddle/fluid/platform/mkldnn_helper.h"

 namespace paddle {
 namespace operators {
@@ -23,6 +24,18 @@ using paddle::framework::Tensor;
 using paddle::platform::MKLDNNDeviceContext;

 namespace {
+std::string gethash(const mkldnn::memory::dims &operand_dims,
+                    const mkldnn::algorithm algorithm) {
+  auto dim2str = [](const mkldnn::memory::dims &operand_dims) {
+    std::string dstr = "";
+    for (size_t i = 0; i < operand_dims.size(); ++i) {
+      dstr += std::to_string(operand_dims[i]) + "-";
+    }
+    return dstr;
+  };
+  return dim2str(operand_dims) + std::to_string(algorithm);
+}
+
 template <typename T, typename ExecContext>
 void eltwise_forward(const ExecContext &ctx, mkldnn::algorithm algorithm,
                      const T alpha = 0, const T beta = 0) {
@@ -37,42 +50,70 @@ void eltwise_forward(const ExecContext &ctx, mkldnn::algorithm algorithm,
   const auto *src_data = src->template data<T>();

   auto *dst = ctx.template Output<Tensor>("Out");
-  const T *dst_data = dst->template mutable_data<T>(ctx.GetPlace());
+  T *dst_data = dst->template mutable_data<T>(ctx.GetPlace());

   // get memory dim
   PADDLE_ENFORCE(src->dims().size() == 2 || src->dims().size() == 4,
                  "Input dim must be with 2 or 4");
   std::vector<int> src_tz = framework::vectorize2int(src->dims());

-  // create memory description
-  auto data_md = src_tz.size() == 2
-                     ? platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
-                                               mkldnn::memory::format::nc)
-                     : platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
-                                               mkldnn::memory::format::nchw);
-
-  // create memory primitives
-  auto src_memory =
-      mkldnn::memory({data_md, mkldnn_engine},
-                     static_cast<void *>(const_cast<float *>(src_data)));
-  auto dst_memory =
-      mkldnn::memory({data_md, mkldnn_engine},
-                     static_cast<void *>(const_cast<float *>(dst_data)));
-
-  auto forward_desc = mkldnn::eltwise_forward::desc(
-      mkldnn::prop_kind::forward_training, algorithm, data_md, alpha, beta);
-
-  // save prim desc into global device context to be referred in backward path
-  const std::string key = ctx.op().Output("Out");
-  const std::string key_eltwise_pd = key + "@eltwise_pd";
-  auto forward_pd = std::make_shared<mkldnn::eltwise_forward::primitive_desc>(
-      forward_desc, mkldnn_engine);
-  dev_ctx.SetBlob(key_eltwise_pd, forward_pd);
-
-  auto eltwise = mkldnn::eltwise_forward(*forward_pd, src_memory, dst_memory);
+  const std::string key = gethash(src_tz, algorithm);
+  const std::string key_src_data =
+      key + ctx.op().Output("Out") + "@eltwise_fwd_src_data";
+  const std::string key_src_mem = key + "@eltwise_fwd_src_mem";
+  const std::string key_dst_mem = key + "@eltwise_fwd_dst_mem";
+  const std::string key_fwd = key + "@eltwise_fwd";
+
+  auto p_fwd = std::static_pointer_cast<mkldnn::eltwise_forward>(
+      dev_ctx.GetBlob(key_fwd));
+
+  // save input data to be referred in backward path
+  auto p_src_data = std::make_shared<const T *>(src_data);
+  dev_ctx.SetBlob(key_src_data, p_src_data);
+
+  if (p_fwd == nullptr) {
+    // create memory description
+    auto data_md = src_tz.size() == 2
+                       ? platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
+                                                 mkldnn::memory::format::nc)
+                       : platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
+                                                 mkldnn::memory::format::nchw);
+
+    // create memory primitives
+    auto p_src_mem = std::make_shared<mkldnn::memory>(mkldnn::memory(
+        {data_md, mkldnn_engine}, platform::to_void_cast(src_data)));
+    dev_ctx.SetBlob(key_src_mem, p_src_mem);
+
+    auto p_dst_mem = std::make_shared<mkldnn::memory>(mkldnn::memory(
+        {data_md, mkldnn_engine}, platform::to_void_cast(dst_data)));
+    dev_ctx.SetBlob(key_dst_mem, p_dst_mem);
+
+    auto fwd_desc = mkldnn::eltwise_forward::desc(
+        mkldnn::prop_kind::forward_training, algorithm, data_md, alpha, beta);
+    auto p_fwd_pd = std::make_shared<mkldnn::eltwise_forward::primitive_desc>(
+        fwd_desc, mkldnn_engine);
+    const std::string key_fwd_pd = key + "eltwise_fwd_pd";
+    dev_ctx.SetBlob(key_fwd_pd, p_fwd_pd);
+
+    p_fwd = std::make_shared<mkldnn::eltwise_forward>(
+        *p_fwd_pd, *(p_src_mem.get()), *(p_dst_mem.get()));
+    dev_ctx.SetBlob(key_fwd, p_fwd);
+  } else {
+    // primitives already exist
+    auto p_src_mem =
+        std::static_pointer_cast<mkldnn::memory>(dev_ctx.GetBlob(key_src_mem));
+    PADDLE_ENFORCE(p_src_mem != nullptr,
+                   "Fail to find eltwise p_src_mem in device context.");
+    auto p_dst_mem =
+        std::static_pointer_cast<mkldnn::memory>(dev_ctx.GetBlob(key_dst_mem));
+    PADDLE_ENFORCE(p_dst_mem != nullptr,
+                   "Fail to find eltwise p_dst_mem in device context.");
+
+    p_src_mem->set_data_handle(platform::to_void_reinterpret_cast(src_data));
+    p_dst_mem->set_data_handle(dst_data);
+  }

   // push primitive to stream and wait until it's executed
-  std::vector<mkldnn::primitive> pipeline = {eltwise};
+  std::vector<mkldnn::primitive> pipeline = {*(p_fwd.get())};
   mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();
 }
@@ -83,8 +124,7 @@ void eltwise_grad(const ExecContext &ctx, mkldnn::algorithm algorithm,
   const auto &mkldnn_engine = dev_ctx.GetEngine();

   // get buffers
-  const auto *x = ctx.template Input<Tensor>("X");
-  const auto *src = x->template data<T>();
+  const auto *out = ctx.template Input<Tensor>("Out");

   auto *dout = ctx.template Input<Tensor>(framework::GradVarName("Out"));
   const auto *diff_dst = dout->template data<T>();
@@ -94,45 +134,73 @@ void eltwise_grad(const ExecContext &ctx, mkldnn::algorithm algorithm,
   const T *diff_src = dx->template mutable_data<T>(ctx.GetPlace());

   // get memory dim
-  std::vector<int> src_tz = framework::vectorize2int(x->dims());
+  std::vector<int> src_tz = framework::vectorize2int(out->dims());

-  // create memory description
-  auto data_md = src_tz.size() == 2
-                     ? platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
-                                               mkldnn::memory::format::nc)
-                     : platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
-                                               mkldnn::memory::format::nchw);
-
-  // create memory primitives
-  auto src_memory = mkldnn::memory(
-      {data_md, mkldnn_engine}, static_cast<void *>(const_cast<float *>(src)));
-  auto diff_src_memory =
-      mkldnn::memory({data_md, mkldnn_engine},
-                     static_cast<void *>(const_cast<float *>(diff_src)));
-  auto diff_dst_memory =
-      mkldnn::memory({data_md, mkldnn_engine},
-                     static_cast<void *>(const_cast<float *>(diff_dst)));
-
-  auto backward_desc =
-      mkldnn::eltwise_backward::desc(algorithm, data_md, data_md, alpha, beta);
-
-  // retrieve eltwise primitive desc from device context
-  const std::string key = ctx.op().Input("Out");
-  const std::string key_eltwise_pd = key + "@eltwise_pd";
-  const std::shared_ptr<void> forward_pd = dev_ctx.GetBlob(key_eltwise_pd);
-  PADDLE_ENFORCE(forward_pd != nullptr,
-                 "Fail to find eltwise_pd in device context");
-  auto *p_forward_pd =
-      static_cast<mkldnn::eltwise_forward::primitive_desc *>(forward_pd.get());
-
-  auto eltwise_bwd_prim_desc = mkldnn::eltwise_backward::primitive_desc(
-      backward_desc, mkldnn_engine, *p_forward_pd);
-
-  auto eltwise_bwd = mkldnn::eltwise_backward(eltwise_bwd_prim_desc, src_memory,
-                                              diff_dst_memory, diff_src_memory);
+  const std::string key = gethash(src_tz, algorithm);
+  const std::string key_diff_src_mem = key + "@eltwise_diff_src_mem";
+  const std::string key_diff_dst_mem = key + "@eltwise_diff_dst_mem";
+  const std::string key_grad = key + "@eltwise_grad";
+
+  const std::string key_src_data =
+      key + ctx.op().Input("Out") + "@eltwise_fwd_src_data";
+  const auto p_src_data =
+      std::static_pointer_cast<T *>(dev_ctx.GetBlob(key_src_data));
+
+  const std::string key_src_mem = key + "@eltwise_fwd_src_mem";
+  auto p_src_mem =
+      std::static_pointer_cast<mkldnn::memory>(dev_ctx.GetBlob(key_src_mem));
+  p_src_mem->set_data_handle(*p_src_data.get());
+
+  auto p_grad = std::static_pointer_cast<mkldnn::eltwise_forward::primitive>(
+      dev_ctx.GetBlob(key_grad));
+
+  if (p_grad == nullptr) {
+    // create memory description
+    auto data_md = src_tz.size() == 2
+                       ? platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
+                                                 mkldnn::memory::format::nc)
+                       : platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
+                                                 mkldnn::memory::format::nchw);
+
+    // create memory primitives
+    std::shared_ptr<void> p_diff_src_mem =
+        std::make_shared<mkldnn::memory>(mkldnn::memory(
+            {data_md, mkldnn_engine}, platform::to_void_cast(diff_src)));
+    dev_ctx.SetBlob(key_diff_src_mem, p_diff_src_mem);
+    std::shared_ptr<void> p_diff_dst_mem =
+        std::make_shared<mkldnn::memory>(mkldnn::memory(
+            {data_md, mkldnn_engine}, platform::to_void_cast(diff_dst)));
+    dev_ctx.SetBlob(key_diff_dst_mem, p_diff_dst_mem);
+
+    auto bwd_desc = mkldnn::eltwise_backward::desc(algorithm, data_md, data_md,
+                                                   alpha, beta);
+
+    const std::string key_fwd_pd = key + "eltwise_fwd_pd";
+    auto *p_fwd_pd = static_cast<mkldnn::eltwise_forward::primitive_desc *>(
+        dev_ctx.GetBlob(key_fwd_pd).get());
+
+    auto eltwise_bwd_prim_desc = mkldnn::eltwise_backward::primitive_desc(
+        bwd_desc, mkldnn_engine, *p_fwd_pd);
+
+    p_grad = std::make_shared<mkldnn::eltwise_backward>(
+        eltwise_bwd_prim_desc, *static_cast<mkldnn::memory *>(p_src_mem.get()),
+        *(static_cast<mkldnn::memory *>(p_diff_dst_mem.get())),
+        *(static_cast<mkldnn::memory *>(p_diff_src_mem.get())));
+  } else {
+    // primitives already exist
+    auto p_diff_src_mem = std::static_pointer_cast<mkldnn::memory>(
+        dev_ctx.GetBlob(key_diff_src_mem));
+    auto p_diff_dst_mem = std::static_pointer_cast<mkldnn::memory>(
+        dev_ctx.GetBlob(key_diff_dst_mem));
+
+    p_diff_src_mem->set_data_handle(
+        platform::to_void_reinterpret_cast(diff_src));
+    p_diff_dst_mem->set_data_handle(
+        platform::to_void_reinterpret_cast(diff_dst));
+  }

   // push primitive to stream and wait until it's executed
-  std::vector<mkldnn::primitive> pipeline = {eltwise_bwd};
+  std::vector<mkldnn::primitive> pipeline = {*(p_grad.get())};
   mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();
 }

 }  // anonymous namespace
......
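Both kernels above follow the same create-or-reuse discipline: a primitive is built once per (shape, algorithm) key, stored in the device context's blob map, and later iterations only repoint the cached memory objects at the current buffers. A minimal sketch of that discipline, assuming a plain keyed map in place of Paddle's actual MKLDNNDeviceContext:

    #include <memory>
    #include <string>
    #include <unordered_map>

    // Hypothetical stand-in for the device context's blob store.
    class BlobStore {
     public:
      void SetBlob(const std::string& key, std::shared_ptr<void> blob) {
        blobs_[key] = std::move(blob);
      }
      std::shared_ptr<void> GetBlob(const std::string& key) const {
        auto it = blobs_.find(key);
        return it == blobs_.end() ? nullptr : it->second;
      }

     private:
      std::unordered_map<std::string, std::shared_ptr<void>> blobs_;
    };

    // Stand-in for an expensive-to-construct MKL-DNN primitive.
    struct Primitive {
      explicit Primitive(void* data) : data_handle(data) {}
      void* data_handle;
    };

    // Create-or-reuse: construct only on the first call for a given key;
    // afterwards, fetch the cached object and just repoint its data handle.
    std::shared_ptr<Primitive> GetOrCreate(BlobStore* ctx,
                                           const std::string& key, void* data) {
      auto p = std::static_pointer_cast<Primitive>(ctx->GetBlob(key));
      if (p == nullptr) {
        p = std::make_shared<Primitive>(data);  // expensive construction
        ctx->SetBlob(key, p);
      } else {
        p->data_handle = data;  // cheap reuse on later iterations
      }
      return p;
    }

The point of the pattern is that constructing a primitive is far more expensive than repointing a data handle, so the construction cost is amortized across iterations, while the hash over dimensions and algorithm keeps primitives for different shapes from colliding.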
@@ -41,7 +41,7 @@ namespace operators {
                                                                    \
    protected:                                                      \
     std::unique_ptr<::paddle::framework::OpDesc> Apply() const override { \
-      auto *op = new ::paddle::framework::OpDesc();                \
+      auto* op = new ::paddle::framework::OpDesc();                \
       op->SetType(#KERNEL_TYPE "_grad");                           \
       op->SetInput("Out", Output("Out"));                          \
       op->SetInput(::paddle::framework::GradVarName("Out"),        \
@@ -54,23 +54,50 @@ namespace operators {
     }                                                              \
   }

+framework::OpKernelType GetKernelType(const framework::ExecutionContext& ctx,
+                                      const framework::OperatorWithKernel& oper,
+                                      const std::string& name) {
+  framework::LibraryType library{framework::LibraryType::kPlain};
+#ifdef PADDLE_WITH_MKLDNN
+  auto it = oper.Attrs().find("use_mkldnn");
+  if (library == framework::LibraryType::kPlain && it != oper.Attrs().end() &&
+      platform::CanMKLDNNBeUsed(ctx)) {
+    library = framework::LibraryType::kMKLDNN;
+  }
+#endif
+  framework::DataLayout layout = framework::DataLayout::kAnyLayout;
+  return framework::OpKernelType(
+      framework::ToDataType(ctx.Input<framework::Tensor>(name)->type()),
+      ctx.GetPlace(), layout, library);
+}
+
 class ActivationOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;

-  void InferShape(framework::InferShapeContext *ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
     ctx->ShareLoD("X", /*->*/ "Out");
   }
+
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return GetKernelType(ctx, *this, "X");
+  }
 };

 class ActivationOpGrad : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;

-  void InferShape(framework::InferShapeContext *ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("Out"));
   }
+
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return GetKernelType(ctx, *this, "Out");
+  }
 };

 __attribute__((unused)) constexpr char SigmoidDoc[] = R"DOC(
......
@@ -14,10 +14,6 @@ limitations under the License. */

 #pragma once

-#ifdef PADDLE_WITH_TESTING
-#include "gtest/gtest.h"
-#endif
-
 #include <string>
 #include <vector>
 #include "paddle/fluid/framework/lod_tensor.h"
......
@@ -51,7 +51,8 @@ class DetectionMAPOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(label_dims.size(), 2,
                       "The rank of Input(Label) must be 2, "
                       "the shape is [N, 6].");
-    PADDLE_ENFORCE_EQ(label_dims[1], 6, "The shape is of Input(Label) [N, 6].");
+    PADDLE_ENFORCE(label_dims[1] == 6 || label_dims[1] == 5,
+                   "The shape of Input(Label) is [N, 6] or [N, 5].");

     if (ctx->HasInput("PosCount")) {
       PADDLE_ENFORCE(ctx->HasInput("TruePos"),
@@ -88,9 +89,10 @@ class DetectionMAPOpMaker : public framework::OpProtoAndCheckerMaker {
              "offset is N + 1, if LoD[i + 1] - LoD[i] == 0, means there is "
              "no detected data.");
     AddInput("Label",
-             "(LoDTensor) A 2-D LoDTensor with shape[N, 6] represents the"
+             "(LoDTensor) A 2-D LoDTensor represents the "
              "Labeled ground-truth data. Each row has 6 values: "
-             "[label, is_difficult, xmin, ymin, xmax, ymax], N is the total "
+             "[label, xmin, ymin, xmax, ymax, is_difficult] or 5 values: "
+             "[label, xmin, ymin, xmax, ymax], where N is the total "
              "number of ground-truth data in this mini-batch. For each "
              "instance, the offsets in first dimension are called LoD, "
              "the number of offset is N + 1, if LoD[i + 1] - LoD[i] == 0, "
......
@@ -72,7 +72,7 @@ class DetectionMAPOpKernel : public framework::OpKernel<T> {
     auto* out_false_pos = ctx.Output<framework::LoDTensor>("AccumFalsePos");

     float overlap_threshold = ctx.Attr<float>("overlap_threshold");
-    float evaluate_difficult = ctx.Attr<bool>("evaluate_difficult");
+    bool evaluate_difficult = ctx.Attr<bool>("evaluate_difficult");
     auto ap_type = GetAPType(ctx.Attr<std::string>("ap_type"));

     int class_num = ctx.Attr<int>("class_num");
@@ -175,14 +175,20 @@ class DetectionMAPOpKernel : public framework::OpKernel<T> {
     for (int n = 0; n < batch_size; ++n) {
       std::map<int, std::vector<Box>> boxes;
       for (size_t i = label_index[n]; i < label_index[n + 1]; ++i) {
-        Box box(labels(i, 2), labels(i, 3), labels(i, 4), labels(i, 5));
         int label = labels(i, 0);
-        auto is_difficult = labels(i, 1);
-        if (std::abs(is_difficult - 0.0) < 1e-6)
-          box.is_difficult = false;
-        else
-          box.is_difficult = true;
-        boxes[label].push_back(box);
+        if (input_label.dims()[1] == 6) {
+          Box box(labels(i, 2), labels(i, 3), labels(i, 4), labels(i, 5));
+          auto is_difficult = labels(i, 1);
+          if (std::abs(is_difficult - 0.0) < 1e-6)
+            box.is_difficult = false;
+          else
+            box.is_difficult = true;
+          boxes[label].push_back(box);
+        } else {
+          PADDLE_ENFORCE_EQ(input_label.dims()[1], 5);
+          Box box(labels(i, 1), labels(i, 2), labels(i, 3), labels(i, 4));
+          boxes[label].push_back(box);
+        }
       }
       gt_boxes->push_back(boxes);
     }
......
@@ -12,45 +12,41 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

+#include "paddle/fluid/operators/is_empty_op.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"

 namespace paddle {
 namespace operators {

-constexpr char kInput[] = "X";
-constexpr char kOutput[] = "Out";
-
-class IsEmptyOp : public framework::OperatorBase {
+class IsEmptyOp : public framework::OperatorWithKernel {
  public:
-  IsEmptyOp(const std::string &type, const framework::VariableNameMap &inputs,
-            const framework::VariableNameMap &outputs,
-            const framework::AttributeMap &attrs)
-      : OperatorBase(type, inputs, outputs, attrs) {}
+  using framework::OperatorWithKernel::OperatorWithKernel;

- private:
-  void RunImpl(const framework::Scope &scope,
-               const platform::Place &place) const override {
-    // get input
-    auto *var = scope.FindVar(Input(kInput));
-    PADDLE_ENFORCE_NOT_NULL(var);
-    auto &tensor = var->Get<framework::LoDTensor>();
-    // get output
-    auto *out = scope.FindVar(Output(kOutput));
-    PADDLE_ENFORCE_NOT_NULL(out);
-    auto *out_tensor = out->GetMutable<framework::LoDTensor>();
-
-    out_tensor->Resize({1});
-    out_tensor->mutable_data<bool>(platform::CPUPlace())[0] =
-        framework::product(tensor.dims()) == 0;
+ protected:
+  void InferShape(framework::InferShapeContext *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"),
+                   "Input(X) of IsEmptyOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Out"),
+                   "Output(Out) of IsEmptyOp should not be null.");
+    ctx->SetOutputDim("Out", {1});
+  }
+
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext &ctx) const override {
+    framework::OpKernelType kt = framework::OpKernelType(
+        framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()),
+        platform::CPUPlace());
+    return kt;
   }
 };

-class IsEmptyOpProtoMaker : public framework::OpProtoAndCheckerMaker {
+class IsEmptyOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddInput(kInput, "(Tensor) Tensor which is to be checked.");
-    AddOutput(kOutput, "(Tensor) a boolean Tensor that indicates empty or not.");
+    AddInput("X", "(LoDTensor) Tensor which is to be checked.");
+    AddOutput("Out",
+              "(LoDTensor) a boolean Tensor that indicates empty or not.");
     AddComment(R"DOC(
 IsEmpty Operator which checks whether a tensor is empty.
@@ -62,5 +58,12 @@ It will just return product(tensor.ddims()) > 0;
 }  // namespace operators
 }  // namespace paddle

-REGISTER_OP_WITHOUT_GRADIENT(is_empty, paddle::operators::IsEmptyOp,
-                             paddle::operators::IsEmptyOpProtoMaker);
+namespace ops = paddle::operators;
+
+REGISTER_OPERATOR(is_empty, ops::IsEmptyOp, ops::IsEmptyOpMaker,
+                  paddle::framework::EmptyGradOpMaker);
+REGISTER_OP_CPU_KERNEL(
+    is_empty, ops::IsEmptyOpKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::IsEmptyOpKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::IsEmptyOpKernel<paddle::platform::CPUDeviceContext, int>,
+    ops::IsEmptyOpKernel<paddle::platform::CPUDeviceContext, int64_t>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
namespace paddle {
namespace operators {
template <typename DeviceContext, typename T>
class IsEmptyOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
// get input
auto* input_tensor = context.Input<framework::LoDTensor>("X");
// get output
auto* output_tensor = context.Output<framework::LoDTensor>("Out");
output_tensor->mutable_data<bool>(platform::CPUPlace())[0] =
framework::product(input_tensor->dims()) == 0;
}
};
} // namespace operators
} // namespace paddle
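The kernel's emptiness test reduces to a product over the dimension sizes; a tiny standalone illustration of that semantics (hypothetical helper, not part of Paddle):

    #include <cassert>
    #include <cstdint>
    #include <functional>
    #include <numeric>
    #include <vector>

    // A tensor is "empty" exactly when the product of its dimension sizes is
    // zero, mirroring framework::product(dims) == 0 in the kernel above.
    bool IsEmpty(const std::vector<int64_t>& dims) {
      return std::accumulate(dims.begin(), dims.end(), int64_t{1},
                             std::multiplies<int64_t>()) == 0;
    }

    int main() {
      assert(!IsEmpty({2, 3}));  // 6 elements -> not empty
      assert(IsEmpty({0, 3}));   // any zero dimension -> empty
      return 0;
    }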
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #pragma once
+#include <string>
+
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/detail/safe_ref.h"
@@ -60,52 +62,5 @@ class MKLDNNActivationGradKernel
   }
 };

-namespace {  // NOLINT
-framework::OpKernelType GetKernelType(
-    const framework::ExecutionContext& ctx,
-    const framework::OperatorWithKernel& oper) {
-  framework::LibraryType library{framework::LibraryType::kPlain};
-#ifdef PADDLE_WITH_MKLDNN
-  if (library == framework::LibraryType::kPlain &&
-      platform::CanMKLDNNBeUsed(ctx)) {
-    library = framework::LibraryType::kMKLDNN;
-  }
-#endif
-  framework::DataLayout layout = framework::DataLayout::kAnyLayout;
-  return framework::OpKernelType(
-      framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
-      ctx.GetPlace(), layout, library);
-}
-}  // anonymous namespace
-
-class ActivationWithMKLDNNOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
-    ctx->ShareLoD("X", /*->*/ "Out");
-  }
-
-  framework::OpKernelType GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const override {
-    return GetKernelType(ctx, *this);
-  }
-};
-
-class ActivationWithMKLDNNOpGrad : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("Out"));
-  }
-
-  framework::OpKernelType GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const override {
-    return GetKernelType(ctx, *this);
-  }
-};
-
 }  // namespace operators
 }  // namespace paddle
...@@ -18,6 +18,26 @@ limitations under the License. */ ...@@ -18,6 +18,26 @@ limitations under the License. */
namespace paddle { namespace paddle {
namespace operators { namespace operators {
using mkldnn::memory; // Note: paddle has also "memory" namespace
using mkldnn::pooling_forward;
using mkldnn::pooling_backward;
// Generate keys for storing/retriving primitives for this operator
// TODO(jczaja): Make hashing function more optimial
static std::string gethash(memory::dims& input_dims, std::string& pooling_type,
std::vector<int>& ksize, std::vector<int>& strides,
std::vector<int>& paddings, std::string suffix) {
auto dims2str = [](memory::dims& operand_dims) {
std::string dstr = "";
for (size_t i = 0; i < operand_dims.size(); ++i) {
dstr += std::to_string(operand_dims[i]) + "-";
}
return dstr;
};
return dims2str(input_dims) + dims2str(ksize) + dims2str(strides) +
dims2str(paddings) + pooling_type + suffix;
}
template <typename T> template <typename T>
class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> { class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
public: public:
...
@@ -34,10 +54,6 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     // Get an unique name from "argument" name of "Out" variable
     // This name will be used as key when saving info into device context
-    const std::string key = ctx.op().Output("Out");
-    const std::string key_pool_pd = key + "@pool_pd";
-    const std::string key_pool_workspace_memory =
-        key + "@pool_workspace_memory";
     std::string pooling_type = ctx.Attr<std::string>("pooling_type");
     std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
@@ -63,37 +79,71 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     std::vector<int> src_tz = paddle::framework::vectorize2int(input->dims());
     std::vector<int> dst_tz = paddle::framework::vectorize2int(output->dims());
-    // TODO(pzelazko-intel): support more formats
-    auto src_md = platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
-                                          mkldnn::memory::format::nchw);
-    auto dst_md = platform::MKLDNNMemDesc(dst_tz, mkldnn::memory::f32,
-                                          mkldnn::memory::format::nchw);
-    std::shared_ptr<mkldnn::pooling_forward::primitive_desc> pool_pd =
-        CreatePrimitiveDesc(src_md, dst_md, strides, paddings, ksize,
-                            pooling_type, mkldnn_engine);
-    // save pool_pd into global device context to be referred in backward path
-    dev_ctx.SetBlob(key_pool_pd, pool_pd);
-    std::shared_ptr<mkldnn::memory> workspace_memory =
-        CreateWorkspaceMemory(pool_pd, pooling_type, mkldnn_engine);
-    // save pool_workspace_memory to be referred in backward path
-    dev_ctx.SetBlob(key_pool_workspace_memory, workspace_memory);
-    auto src_memory =
-        mkldnn::memory({src_md, mkldnn_engine},
-                       static_cast<void*>(const_cast<T*>(input_data)));
-    auto dst_memory =
-        mkldnn::memory({dst_md, mkldnn_engine},
-                       static_cast<void*>(const_cast<T*>(output_data)));
-    auto pool_prim = mkldnn::pooling_forward(*pool_pd, src_memory, dst_memory,
-                                             *workspace_memory);
+    const std::string key = gethash(src_tz, pooling_type, ksize, strides,
+                                    paddings, ctx.op().Output("Out"));
+    const std::string key_pool_p = key + "@pool_p";
+    const std::string key_pool_pd = key + "@pool_pd";
+    const std::string key_pool_src_mem_p = key + "@pool_src_mem_p";
+    const std::string key_pool_dst_mem_p = key + "@pool_dst_mem_p";
+    const std::string key_pool_workspace_memory =
+        key + "@pool_workspace_memory";
+    auto pool_p =
+        std::static_pointer_cast<pooling_forward>(dev_ctx.GetBlob(key_pool_p));
+    if (pool_p == nullptr) {
+      // TODO(pzelazko-intel): support more formats
+      auto src_md =
+          platform::MKLDNNMemDesc(src_tz, platform::MKLDNNGetDataType<T>(),
+                                  mkldnn::memory::format::nchw);
+      auto dst_md =
+          platform::MKLDNNMemDesc(dst_tz, platform::MKLDNNGetDataType<T>(),
+                                  mkldnn::memory::format::nchw);
+      std::shared_ptr<pooling_forward::primitive_desc> pool_pd =
+          CreatePrimitiveDesc(src_md, dst_md, strides, paddings, ksize,
+                              pooling_type, mkldnn_engine);
+      // save pool_pd into global device context to be referred in backward path
+      dev_ctx.SetBlob(key_pool_pd, pool_pd);
+      std::shared_ptr<mkldnn::memory> workspace_memory =
+          CreateWorkspaceMemory(pool_pd, pooling_type, mkldnn_engine);
+      // save pool_workspace_memory to be referred in backward path
+      dev_ctx.SetBlob(key_pool_workspace_memory, workspace_memory);
+      auto pool_src_memory_p = std::make_shared<memory>(
+          memory::primitive_desc{src_md, mkldnn_engine},
+          static_cast<void*>(const_cast<T*>(input_data)));
+      dev_ctx.SetBlob(key_pool_src_mem_p, pool_src_memory_p);
+      auto pool_dst_memory_p = std::make_shared<memory>(
+          memory::primitive_desc{dst_md, mkldnn_engine},
+          static_cast<void*>(output_data));
+      dev_ctx.SetBlob(key_pool_dst_mem_p, pool_dst_memory_p);
+      pool_p = std::make_shared<pooling_forward>(
+          *pool_pd, *(pool_src_memory_p.get()), *(pool_dst_memory_p.get()),
+          *workspace_memory);
+      dev_ctx.SetBlob(key_pool_p, pool_p);
+    } else {
+      // Primitives already exist
+      auto pool_src_memory_p =
+          std::static_pointer_cast<memory>(dev_ctx.GetBlob(key_pool_src_mem_p));
+      PADDLE_ENFORCE(pool_src_memory_p != nullptr,
+                     "Fail to find pooling src mem_p in device context");
+      auto pool_dst_memory_p =
+          std::static_pointer_cast<memory>(dev_ctx.GetBlob(key_pool_dst_mem_p));
+      PADDLE_ENFORCE(pool_dst_memory_p != nullptr,
+                     "Fail to find pooling dst mem_p in device context");
+      pool_src_memory_p->set_data_handle(
+          reinterpret_cast<void*>(const_cast<T*>(input_data)));
+      pool_dst_memory_p->set_data_handle(output_data);
+    }
     // push primitive to stream and wait until it's executed
-    std::vector<mkldnn::primitive> pipeline{pool_prim};
+    std::vector<mkldnn::primitive> pipeline{*(pool_p.get())};
     mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();
   }
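The forward kernel above (and the backward kernel later in this diff) keys its cached primitives with a `gethash` helper whose definition sits outside the hunks shown here. For orientation only — the signature matches the calls above, but the body is an assumption, not the patch's actual code — such a key builder concatenates the shape, attributes, and output name so that distinct configurations never share a cached primitive:

```cpp
// Hypothetical sketch of a cache-key builder like the gethash() called above;
// the actual helper lives elsewhere in this patch and may differ in detail.
#include <string>
#include <vector>

static std::string gethash_sketch(const std::vector<int>& input_dims,
                                  const std::string& pooling_type,
                                  const std::vector<int>& ksize,
                                  const std::vector<int>& strides,
                                  const std::vector<int>& paddings,
                                  const std::string& suffix) {
  // Serialize each vector<int> as "d1,d2,..." so that different
  // shapes/attributes cannot collide once concatenated.
  auto dims2str = [](const std::vector<int>& dims) {
    std::string s;
    for (int d : dims) s += std::to_string(d) + ",";
    return s;
  };
  return dims2str(input_dims) + dims2str(ksize) + dims2str(strides) +
         dims2str(paddings) + pooling_type + suffix;
}
```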
...
@@ -120,9 +170,10 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     mkldnn::memory::primitive_desc workspace_md =
         pooling_type == "max"
             ? pool_pd->workspace_primitive_desc()
-            : mkldnn::memory::primitive_desc(
-                  {{}, mkldnn::memory::f32, mkldnn::memory::format::nchw},
-                  engine);
+            : mkldnn::memory::primitive_desc({{},
+                                              platform::MKLDNNGetDataType<T>(),
+                                              mkldnn::memory::format::nchw},
+                                             engine);
     auto p_workspace_memory = new mkldnn::memory(workspace_md);
     return std::unique_ptr<mkldnn::memory>(p_workspace_memory);
...
@@ -140,13 +191,6 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     const Tensor* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
     Tensor* in_x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
-    // Get an unique name from "argument" name of "Out" variable
-    // This name will be used as key when referring info from device context
-    const std::string key = ctx.op().Input("Out");
-    const std::string key_pool_pd = key + "@pool_pd";
-    const std::string key_pool_workspace_memory =
-        key + "@pool_workspace_memory";
     std::string pooling_type = ctx.Attr<std::string>("pooling_type");
     std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
...
@@ -171,43 +215,76 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     std::vector<int> diff_dst_tz =
         paddle::framework::vectorize2int(out_grad->dims());
-    auto diff_src_md = platform::MKLDNNMemDesc(diff_src_tz, mkldnn::memory::f32,
-                                               mkldnn::memory::format::nchw);
-    auto diff_dst_md = platform::MKLDNNMemDesc(diff_dst_tz, mkldnn::memory::f32,
-                                               mkldnn::memory::format::nchw);
-    // Retrieve pool_pd/pool_workspace_memory from device context
-    auto pool_pd =
-        std::static_pointer_cast<mkldnn::pooling_forward::primitive_desc>(
-            dev_ctx.GetBlob(key_pool_pd));
-    PADDLE_ENFORCE(pool_pd != nullptr,
-                   "Fail to find pool_pd in device context");
-    auto workspace_memory = std::static_pointer_cast<mkldnn::memory>(
-        dev_ctx.GetBlob(key_pool_workspace_memory));
-    PADDLE_ENFORCE(workspace_memory != nullptr,
-                   "Fail to find workspace_memory in device context");
-    auto pool_bwd_desc = mkldnn::pooling_backward::desc(
-        pooling_type == "max" ? mkldnn::algorithm::pooling_max
-                              : mkldnn::algorithm::pooling_avg,
-        diff_src_md, diff_dst_md, strides, ksize, paddings, paddings,
-        mkldnn::padding_kind::zero);
-    auto pool_bwd_pd = mkldnn::pooling_backward::primitive_desc(
-        pool_bwd_desc, mkldnn_engine, *pool_pd);
-    auto diff_src_memory =
-        mkldnn::memory({diff_src_md, mkldnn_engine},
-                       static_cast<void*>(const_cast<T*>(in_x_grad_data)));
-    auto diff_dst_memory =
-        mkldnn::memory({diff_dst_md, mkldnn_engine},
-                       static_cast<void*>(const_cast<T*>(out_grad_data)));
-    auto bwd_prim = mkldnn::pooling_backward(
-        pool_bwd_pd, diff_dst_memory, *workspace_memory, diff_src_memory);
+    // Get an unique name from "argument" name of "Out" variable
+    // This name will be used as key when referring info from device context
+    const std::string key = gethash(diff_src_tz, pooling_type, ksize, strides,
+                                    paddings, ctx.op().Input("Out"));
+    const std::string key_pool_bwd_p = key + "@pool_bwd_p";
+    const std::string key_pool_diff_src_mem_p = key + "@pool_diff_src_mem_p";
+    const std::string key_pool_diff_dst_mem_p = key + "@pool_diff_dst_mem_p";
+    const std::string key_pool_pd = key + "@pool_pd";
+    const std::string key_pool_workspace_memory =
+        key + "@pool_workspace_memory";
+    auto pool_bwd_p = std::static_pointer_cast<pooling_backward>(
+        dev_ctx.GetBlob(key_pool_bwd_p));
+    if (pool_bwd_p == nullptr) {
+      auto diff_src_md =
+          platform::MKLDNNMemDesc(diff_src_tz, platform::MKLDNNGetDataType<T>(),
+                                  mkldnn::memory::format::nchw);
+      auto diff_dst_md =
+          platform::MKLDNNMemDesc(diff_dst_tz, platform::MKLDNNGetDataType<T>(),
+                                  mkldnn::memory::format::nchw);
+      // Retrieve pool_pd/pool_workspace_memory from device context
+      auto pool_pd =
+          std::static_pointer_cast<mkldnn::pooling_forward::primitive_desc>(
+              dev_ctx.GetBlob(key_pool_pd));
+      PADDLE_ENFORCE(pool_pd != nullptr,
+                     "Fail to find pool_pd in device context");
+      auto workspace_memory = std::static_pointer_cast<mkldnn::memory>(
+          dev_ctx.GetBlob(key_pool_workspace_memory));
+      PADDLE_ENFORCE(workspace_memory != nullptr,
+                     "Fail to find workspace_memory in device context");
+      auto pool_diff_src_memory_p = std::make_shared<memory>(memory(
+          {diff_src_md, mkldnn_engine}, static_cast<void*>(in_x_grad_data)));
+      dev_ctx.SetBlob(key_pool_diff_src_mem_p, pool_diff_src_memory_p);
+      auto pool_diff_dst_memory_p = std::make_shared<memory>(
+          memory({diff_dst_md, mkldnn_engine},
+                 static_cast<void*>(const_cast<T*>(out_grad_data))));
+      dev_ctx.SetBlob(key_pool_diff_dst_mem_p, pool_diff_dst_memory_p);
+      auto pool_bwd_desc = mkldnn::pooling_backward::desc(
+          pooling_type == "max" ? mkldnn::algorithm::pooling_max
+                                : mkldnn::algorithm::pooling_avg,
+          diff_src_md, diff_dst_md, strides, ksize, paddings, paddings,
+          mkldnn::padding_kind::zero);
+      auto pool_bwd_pd = mkldnn::pooling_backward::primitive_desc(
+          pool_bwd_desc, mkldnn_engine, *pool_pd);
+      pool_bwd_p = std::make_shared<pooling_backward>(
+          pool_bwd_pd, *(pool_diff_dst_memory_p.get()), *workspace_memory,
+          *(pool_diff_src_memory_p));
+      dev_ctx.SetBlob(key_pool_bwd_p, pool_bwd_p);
+    } else {
+      // Primitives already exist
+      auto pool_diff_src_memory_p = std::static_pointer_cast<memory>(
+          dev_ctx.GetBlob(key_pool_diff_src_mem_p));
+      PADDLE_ENFORCE(pool_diff_src_memory_p != nullptr,
+                     "Fail to find pooling src mem_p in device context");
+      auto pool_diff_dst_memory_p = std::static_pointer_cast<memory>(
+          dev_ctx.GetBlob(key_pool_diff_dst_mem_p));
+      PADDLE_ENFORCE(pool_diff_dst_memory_p != nullptr,
+                     "Fail to find pooling dst mem_p in device context");
+      pool_diff_src_memory_p->set_data_handle(
+          reinterpret_cast<void*>(in_x_grad_data));
+      pool_diff_dst_memory_p->set_data_handle(const_cast<T*>(out_grad_data));
+    }
     // push primitive to stream and wait until it's executed
-    std::vector<mkldnn::primitive> pipeline{bwd_prim};
+    std::vector<mkldnn::primitive> pipeline{*(pool_bwd_p.get())};
     mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();
   }  // Compute()
 };
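Both Compute() bodies above follow the same get-or-create idiom: probe the device context's blob map by key, build and register the primitive plus its memories on a miss, and on a hit merely repoint the cached memories at the current iteration's buffers with set_data_handle. A minimal standalone sketch of the idiom, with a plain std::unordered_map standing in for the device context (the names here are illustrative, not Paddle's API):

```cpp
// Minimal illustration of the get-or-create caching pattern used above.
// A std::unordered_map stands in for the device context's blob storage;
// the names are illustrative only.
#include <memory>
#include <string>
#include <unordered_map>

std::unordered_map<std::string, std::shared_ptr<void>> blob_cache;

template <typename T, typename Factory>
std::shared_ptr<T> GetOrCreate(const std::string& key, Factory make) {
  auto it = blob_cache.find(key);
  if (it != blob_cache.end()) {
    // Cache hit: reuse the object built on a previous iteration.
    return std::static_pointer_cast<T>(it->second);
  }
  // Cache miss: build once, store for later iterations.
  std::shared_ptr<T> obj = make();
  blob_cache[key] = obj;
  return obj;
}
```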
...
...
@@ -186,8 +186,7 @@ class WarpCTCKernel : public framework::OpKernel<T> {
     // warpctc accesses labels in CPU memory
     Tensor warpctc_label;
-    TensorCopy(*label, platform::CPUPlace(), ctx.device_context(),
-               &warpctc_label);
+    TensorCopySync(*label, platform::CPUPlace(), &warpctc_label);
     const int* warpctc_label_data = warpctc_label.data<int>();
     // warpctc stores loss in CPU memory
     Tensor warpctc_loss;
...
...
@@ -49,7 +49,7 @@ nv_test(device_context_test SRCS device_context_test.cu DEPS device_context gpu_
nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda)
nv_test(transform_test SRCS transform_test.cu DEPS memory place device_context)
-cc_library(device_tracer SRCS device_tracer.cc DEPS boost profiler_proto ${GPU_CTX_DEPS})
+cc_library(device_tracer SRCS device_tracer.cc DEPS boost profiler_proto framework_proto ${GPU_CTX_DEPS})
cc_library(profiler SRCS profiler.cc DEPS device_context device_tracer)
cc_test(profiler_test SRCS profiler_test.cc DEPS profiler)
...
...
@@ -38,6 +38,11 @@ void* to_void_cast(const Type* t) {
   return static_cast<void*>(const_cast<Type*>(t));
 }
+template <typename Type>
+void* to_void_reinterpret_cast(const Type* t) {
+  return reinterpret_cast<void*>(const_cast<Type*>(t));
+}
+
 template <class Type>
 using tf_desc = typename Type::desc;
@@ -71,5 +76,15 @@ inline bool CanMKLDNNBeUsed(const framework::ExecutionContext& ctx) {
   return use_mkldnn && platform::is_cpu_place(ctx.GetPlace());
 }
+template <typename Type>
+mkldnn::memory::data_type MKLDNNGetDataType() {
+  return mkldnn::memory::data_undef;
+}
+
+template <>
+inline mkldnn::memory::data_type MKLDNNGetDataType<float>() {
+  return mkldnn::memory::f32;
+}
+
 }  // namespace platform
 }  // namespace paddle
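`MKLDNNGetDataType<T>()` resolves the MKL-DNN element type at compile time through template specialization: the primary template answers `data_undef`, and only explicitly specialized types (currently `float`) map to a concrete type, which lets the pooling kernels handle unsupported element types uniformly. A small usage sketch, under the assumption that the header above is reachable as paddle/fluid/platform/mkldnn_helper.h:

```cpp
// Illustration of the compile-time dispatch added above: only float is
// specialized, so every other element type falls back to data_undef.
// Assumes the header path below matches this patch's source layout.
#include <iostream>
#include "paddle/fluid/platform/mkldnn_helper.h"

template <typename T>
void ReportSupport(const char* name) {
  bool supported =
      paddle::platform::MKLDNNGetDataType<T>() != mkldnn::memory::data_undef;
  std::cout << name << (supported ? " maps to a concrete MKL-DNN type\n"
                                  : " falls back to data_undef\n");
}

int main() {
  ReportSupport<float>("float");    // f32
  ReportSupport<double>("double");  // data_undef
  return 0;
}
```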
...
@@ -238,6 +238,7 @@ void BindVarDsec(pybind11::module *m) {
   pybind11::enum_<pd::proto::VarType::Type>(var_desc, "VarType", "")
       .value("BOOL", pd::proto::VarType::BOOL)
+      .value("UINT8", pd::proto::VarType::UINT8)
       .value("INT16", pd::proto::VarType::INT16)
       .value("INT32", pd::proto::VarType::INT32)
       .value("INT64", pd::proto::VarType::INT64)
...
cmake_minimum_required(VERSION 3.0)
project(cpp_train_demo CXX C)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
if(NOT DEFINED PADDLE_LIB)
message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/paddle/lib/dir")
endif()
option(WITH_MKLDNN "Compile PaddlePaddle with MKLDNN" OFF)
option(WITH_MKL "Compile PaddlePaddle with MKL support, default use openblas." OFF)
include_directories("${PADDLE_LIB}")
include_directories("${PADDLE_LIB}/third_party/install/protobuf/include")
include_directories("${PADDLE_LIB}/third_party/install/glog/include")
include_directories("${PADDLE_LIB}/third_party/install/gflags/include")
include_directories("${PADDLE_LIB}/third_party/install/snappy/include")
include_directories("${PADDLE_LIB}/third_party/install/snappystream/include")
include_directories("${PADDLE_LIB}/third_party/install/zlib/include")
include_directories("${PADDLE_LIB}/third_party/boost")
include_directories("${PADDLE_LIB}/third_party/eigen3")
link_directories("${PADDLE_LIB}/third_party/install/snappy/lib")
link_directories("${PADDLE_LIB}/third_party/install/snappystream/lib")
link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib")
link_directories("${PADDLE_LIB}/third_party/install/glog/lib")
link_directories("${PADDLE_LIB}/third_party/install/gflags/lib")
link_directories("${PADDLE_LIB}/third_party/install/zlib/lib")
add_executable(demo_trainer demo_trainer.cc)
if(WITH_MKLDNN)
include_directories("${PADDLE_LIB}/third_party/install/mkldnn/include")
set(MKLDNN_LIB ${PADDLE_LIB}/third_party/install/mkldnn/lib/libmkldnn.so.0)
endif()
if(WITH_MKL)
include_directories("${PADDLE_LIB}/third_party/install/mklml/include")
set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel.so)
else()
if(APPLE)
set(MATH_LIB cblas)
else(APPLE)
set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas.a)
endif(APPLE)
endif()
if(APPLE)
set(MACOS_LD_FLAGS "-undefined dynamic_lookup -Wl,-all_load -framework CoreFoundation -framework Security")
else(APPLE)
set(ARCHIVE_START "-Wl,--whole-archive")
set(ARCHIVE_END "-Wl,--no-whole-archive")
set(EXTERNAL_LIB "-lrt -ldl -lpthread")
endif(APPLE)
target_link_libraries(demo_trainer
${MACOS_LD_FLAGS}
${ARCHIVE_START}
${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a
${ARCHIVE_END}
${MATH_LIB}
${MKLDNN_LIB}
glog gflags protobuf snappystream snappy z
${EXTERNAL_LIB})
### step 1. build paddle lib
```
# WITH_MKL=ON|OFF
# WITH_MKLDNN=ON|OFF
PADDLE_LIB=/paddle/lib/dir
cmake .. -DCMAKE_INSTALL_PREFIX=$PADDLE_LIB \
-DCMAKE_BUILD_TYPE=Release \
-DWITH_FLUID_ONLY=ON \
-DWITH_GPU=OFF \
-DWITH_STYLE_CHECK=OFF \
-DWITH_MKL=OFF \
-DWITH_MKLDNN=OFF
make -j8
make -j8 inference_lib_dist
```
### step 2. generate program desc
```
# please install paddle before running this script
pip install --upgrade paddlepaddle-*.whl
python demo_network.py
```
This will generate two program desc files:
- startup_program: used to init all parameters
- main_program: main logic of the network
### step 3. build demo_trainer and run it.
```
# Make a build dir in the same dir as this README.md document.
# The demo dir can be put anywhere.
mkdir build
cd build
# WITH_MKL=ON|OFF
# WITH_MKLDNN=ON|OFF
PADDLE_LIB=/paddle/lib/dir
# PADDLE_LIB is the same with CMAKE_INSTALL_PREFIX when building the lib
cmake .. -DPADDLE_LIB=$PADDLE_LIB \
-DWITH_MKLDNN=OFF \
-DWITH_MKL=OFF
make
# copy startup_program and main_program to this dir
cp ../startup_program .
cp ../main_program .
# run demo cpp trainer
./demo_trainer
```
The output will be:
```
step: 0 loss: 1069.02
step: 1 loss: 1069.02
step: 2 loss: 1069.02
....
```
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import paddle.fluid.framework as framework
def train_network(with_optimize):
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = fluid.layers.mean(cost)
if with_optimize:
sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.00001)
sgd_optimizer.minimize(avg_cost)
else:
fluid.backward.append_backward(avg_cost)
def save_program_desc(network_func):
startup_program = framework.Program()
train_program = framework.Program()
with framework.program_guard(train_program, startup_program):
network_func(with_optimize=False)
with open("startup_program", "w") as f:
f.write(startup_program.desc.serialize_to_string())
with open("main_program", "w") as f:
f.write(train_program.desc.serialize_to_string())
save_program_desc(train_network)
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <fstream>
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/init.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/place.h"
namespace paddle {
namespace train {
void ReadBinaryFile(const std::string& filename, std::string* contents) {
std::ifstream fin(filename, std::ios::in | std::ios::binary);
PADDLE_ENFORCE(static_cast<bool>(fin), "Cannot open file %s", filename);
fin.seekg(0, std::ios::end);
contents->clear();
contents->resize(fin.tellg());
fin.seekg(0, std::ios::beg);
fin.read(&(contents->at(0)), contents->size());
fin.close();
}
std::unique_ptr<paddle::framework::ProgramDesc> Load(
paddle::framework::Executor* executor, const std::string& model_filename) {
VLOG(3) << "loading model from " << model_filename;
std::string program_desc_str;
ReadBinaryFile(model_filename, &program_desc_str);
std::unique_ptr<paddle::framework::ProgramDesc> main_program(
new paddle::framework::ProgramDesc(program_desc_str));
return main_program;
}
} // namespace train
} // namespace paddle
int main() {
paddle::framework::InitDevices(false);
const auto cpu_place = paddle::platform::CPUPlace();
paddle::framework::Executor executor(cpu_place);
paddle::framework::Scope scope;
auto startup_program = paddle::train::Load(&executor, "startup_program");
auto train_program = paddle::train::Load(&executor, "main_program");
std::string loss_name = "";
for (auto op_desc : train_program->Block(0).AllOps()) {
if (op_desc->Type() == "mean") {
loss_name = op_desc->Output("Out")[0];
break;
}
}
PADDLE_ENFORCE_NE(loss_name, "", "loss not found");
// init all parameters
executor.Run(*startup_program.get(), &scope, 0);
// prepare data
auto x_var = scope.Var("x");
auto x_tensor = x_var->GetMutable<paddle::framework::LoDTensor>();
x_tensor->Resize({2, 13});
auto x_data = x_tensor->mutable_data<float>(cpu_place);
for (int i = 0; i < 2 * 13; ++i) {
x_data[i] = static_cast<float>(i);
}
auto y_var = scope.Var("y");
auto y_tensor = y_var->GetMutable<paddle::framework::LoDTensor>();
y_tensor->Resize({2, 1});
auto y_data = y_tensor->mutable_data<float>(cpu_place);
for (int i = 0; i < 2 * 1; ++i) {
y_data[i] = static_cast<float>(i);
}
auto loss_var = scope.Var(loss_name);
for (int i = 0; i < 10; ++i) {
executor.Run(*train_program.get(), &scope, 0, false, true);
std::cout << "step: " << i << " loss: "
<< loss_var->Get<paddle::framework::LoDTensor>().data<float>()[0]
<< std::endl;
}
return 0;
}
...
@@ -14,20 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-function container_running() {
-    name=$1
-    docker ps -a --format "{{.Names}}" | grep "${name}" > /dev/null
-    return $?
-}
-
 function start_build_docker() {
     docker pull $IMG
-    if container_running "${CONTAINER_ID}"; then
-        docker stop "${CONTAINER_ID}" 1>/dev/null
-        docker rm -f "${CONTAINER_ID}" 1>/dev/null
-    fi
-
     apt_mirror='s#http://archive.ubuntu.com/ubuntu#mirror://mirrors.ubuntu.com/mirrors.txt#g'
     DOCKER_ENV=$(cat <<EOL
         -e FLAGS_fraction_of_gpu_memory_to_use=0.15 \
@@ -61,7 +50,6 @@ EOL
     fi
     set -ex
     ${DOCKER_CMD} run -it \
-        --name $CONTAINER_ID \
         ${DOCKER_ENV} \
         -e SCRIPT_NAME=$0 \
         -v $PADDLE_ROOT:/paddle \
@@ -75,10 +63,8 @@ EOL
 function main() {
     DOCKER_REPO="paddlepaddle/paddle"
     VERSION="latest-dev"
-    CONTAINER_ID="${USER}_paddle_dev"
     PADDLE_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/../../" && pwd )"
     if [ "$1" == "build_android" ]; then
-        CONTAINER_ID="${USER}_paddle_dev_android"
         VERSION="latest-dev-android"
     fi
     IMG=${DOCKER_REPO}:${VERSION}
...
...
@@ -273,10 +273,11 @@ class DetectionMAP(Evaluator):
             [M, 6]. The layout is [label, confidence, xmin, ymin, xmax, ymax].
         gt_label (Variable): The ground truth label index, which is a LoDTensor
             with shape [N, 1].
-        gt_difficult (Variable): Whether this ground truth is a difficult
-            bounding box (bbox), which is a LoDTensor [N, 1].
         gt_box (Variable): The ground truth bounding box (bbox), which is a
             LoDTensor with shape [N, 6]. The layout is [xmin, ymin, xmax, ymax].
+        gt_difficult (Variable|None): Whether this ground truth is a difficult
+            bounding box (bbox), which can be a LoDTensor [N, 1] or not set. If
+            None, it means all the ground truth labels are not difficult bboxes.
         class_num (int): The class number.
         background_label (int): The index of background label, the background
             label will be ignored. If set to -1, then all categories will be
@@ -284,7 +285,8 @@ class DetectionMAP(Evaluator):
         overlap_threshold (float): The threshold for deciding true/false
             positive, 0.5 by default.
         evaluate_difficult (bool): Whether to consider difficult ground truth
-            for evaluation, True by default.
+            for evaluation, True by default. This argument does not work when
+            gt_difficult is None.
         ap_version (string): The average precision calculation ways, it must be
             'integral' or '11point'. Please check
             https://sanchom.wordpress.com/tag/average-precision/ for details.
@@ -295,7 +297,7 @@ class DetectionMAP(Evaluator):
         exe = fluid.executor(place)
         map_evaluator = fluid.Evaluator.DetectionMAP(input,
-            gt_label, gt_difficult, gt_box)
+            gt_label, gt_box, gt_difficult)
         cur_map, accum_map = map_evaluator.get_map_var()
         fetch = [cost, cur_map, accum_map]
         for epoch in PASS_NUM:
@@ -313,8 +315,8 @@ class DetectionMAP(Evaluator):
                  input,
                  gt_label,
                  gt_box,
-                 gt_difficult,
-                 class_num,
+                 gt_difficult=None,
+                 class_num=None,
                  background_label=0,
                  overlap_threshold=0.5,
                  evaluate_difficult=True,
@@ -322,8 +324,11 @@ class DetectionMAP(Evaluator):
         super(DetectionMAP, self).__init__("map_eval")
         gt_label = layers.cast(x=gt_label, dtype=gt_box.dtype)
-        gt_difficult = layers.cast(x=gt_difficult, dtype=gt_box.dtype)
-        label = layers.concat([gt_label, gt_difficult, gt_box], axis=1)
+        if gt_difficult:
+            gt_difficult = layers.cast(x=gt_difficult, dtype=gt_box.dtype)
+            label = layers.concat([gt_label, gt_difficult, gt_box], axis=1)
+        else:
+            label = layers.concat([gt_label, gt_box], axis=1)
         # calculate mean average precision (mAP) of current mini-batch
         map = layers.detection_map(
...
...
@@ -72,6 +72,8 @@ def convert_np_dtype_to_dtype_(np_dtype):
         return core.VarDesc.VarType.INT64
     elif dtype == np.bool:
         return core.VarDesc.VarType.BOOL
+    elif dtype == np.uint8:
+        return core.VarDesc.VarType.UINT8
     else:
         raise ValueError("Not supported numpy dtype " + str(dtype))
...
...
@@ -49,6 +49,7 @@ __all__ = [
     'reorder_lod_tensor_by_rank',
     'ParallelDo',
     'Print',
+    'is_empty',
 ]
@@ -1097,7 +1098,7 @@ class ConditionalBlock(object):
         input_set = set([ipt.name for ipt in self.inputs])
         param_list = [
-            parent_block.var(each_name) for each_name in params
+            parent_block.var_recursive(each_name) for each_name in params
             if each_name not in input_set
         ]
@@ -1562,3 +1563,40 @@ def reorder_lod_tensor_by_rank(x, rank_table):
                 'RankTable': [rank_table]},
         outputs={'Out': [out]})
     return out
def is_empty(x, cond=None, **ignored):
"""
**Is Empty**
This layer returns the truth value of whether the variable is empty.
Args:
x(Variable): Operand of *is_empty*
cond(Variable|None): Optional output variable to store the result
of *is_empty*
Returns:
Variable: The tensor variable storing the output of *is_empty*.
Raises:
TypeError: If input cond is not a variable, or cond's dtype is
not bool
Examples:
.. code-block:: python
less = fluid.layers.is_empty(x=input)
"""
helper = LayerHelper("is_empty", **locals())
if cond is None:
cond = helper.create_tmp_variable(dtype='bool')
cond.stop_gradient = True
elif not isinstance(cond, Variable):
raise TypeError("cond takes a variable")
elif cond.dtype != 'bool':
raise TypeError("The data type of cond must be bool")
helper.append_op(
type='is_empty', inputs={'X': [x]}, outputs={'Out': [cond]})
return cond
...
@@ -23,6 +23,7 @@ import nn
 import math
 __all__ = [
+    'prior_box',
     'multi_box_head',
     'bipartite_match',
     'target_assign',
@@ -564,6 +565,115 @@ def ssd_loss(location,
     return loss
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None):
"""
**Prior box operator**
    Generate prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
    Each position of the input produces N prior boxes, where N is determined
    by the count of min_sizes, max_sizes and aspect_ratios. The size of each
    box lies in the (min_size, max_size) interval, and the boxes are generated
    in sequence according to the aspect_ratios.
Args:
input(Variable): The Input Variables, the format is NCHW.
image(Variable): The input image data of PriorBoxOp,
the layout is NCHW.
min_sizes(list|tuple|float value): min sizes of generated prior boxes.
max_sizes(list|tuple|None): max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float value): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
        steps(list|tuple): Prior box steps across width and height. If
            steps[0] == 0.0 or steps[1] == 0.0, the prior box step across
            the width/height of the input will be calculated automatically.
            Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
name(str): Name of the prior box op. Default: None.
    Returns:
        boxes(Variable): the output prior boxes of PriorBox.
            The layout is [H, W, num_priors, 4]. H is the height of input,
            W is the width of input, and num_priors is the total box count
            of each position of input.
        Variances(Variable): the expanded variances of PriorBox.
            The layout is [H, W, num_priors, 4]. H is the height of input,
            W is the width of input, and num_priors is the total box count
            of each position of input.
Examples:
.. code-block:: python
box, var = prior_box(
input=conv1,
image=images,
min_sizes=[100.],
flip=True,
clip=True)
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset
}
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_tmp_variable(dtype)
var = helper.create_tmp_variable(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
                   image,
                   base_size,
@@ -660,47 +770,6 @@ def multi_box_head(inputs,
             clip=True)
     """
-    def _prior_box_(input,
-                    image,
-                    min_sizes,
-                    max_sizes,
-                    aspect_ratios,
-                    variance,
-                    flip=False,
-                    clip=False,
-                    step_w=0.0,
-                    step_h=0.0,
-                    offset=0.5,
-                    name=None):
-        helper = LayerHelper("prior_box", **locals())
-        dtype = helper.input_dtype()
-        attrs = {
-            'min_sizes': min_sizes,
-            'aspect_ratios': aspect_ratios,
-            'variances': variance,
-            'flip': flip,
-            'clip': clip,
-            'step_w': step_w,
-            'step_h': step_h,
-            'offset': offset
-        }
-        if len(max_sizes) > 0 and max_sizes[0] > 0:
-            attrs['max_sizes'] = max_sizes
-        box = helper.create_tmp_variable(dtype)
-        var = helper.create_tmp_variable(dtype)
-        helper.append_op(
-            type="prior_box",
-            inputs={"Input": input,
-                    "Image": image},
-            outputs={"Boxes": box,
-                     "Variances": var},
-            attrs=attrs, )
-        box.stop_gradient = True
-        var.stop_gradient = True
-        return box, var
-
     def _reshape_with_axis_(input, axis=1):
         if not (axis > 0 and axis < len(input.shape)):
             raise ValueError("The axis should be smaller than "
@@ -777,11 +846,10 @@ def multi_box_head(inputs,
             aspect_ratio = aspect_ratios[i]
             if not _is_list_or_tuple_(aspect_ratio):
                 aspect_ratio = [aspect_ratio]
-            box, var = _prior_box_(input, image, min_size, max_size, aspect_ratio,
-                                   variance, flip, clip, step_w[i]
-                                   if step_w else 0.0, step_h[i]
-                                   if step_w else 0.0, offset)
+            step = [step_w[i] if step_w else 0.0, step_h[i] if step_w else 0.0]
+            box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
+                                 variance, flip, clip, step, offset)
             box_results.append(box)
             var_results.append(var)
...
...
@@ -699,8 +699,8 @@ def dynamic_gru(input,
 def gru_unit(input,
              hidden,
              size,
-             weight=None,
-             bias=None,
+             param_attr=None,
+             bias_attr=None,
              activation='tanh',
              gate_activation='sigmoid'):
     """
@@ -731,8 +731,8 @@ def gru_unit(input,
         input (Variable): The fc transformed input value of current step.
         hidden (Variable): The hidden value of lstm unit from previous step.
         size (integer): The input dimension value.
-        weight (ParamAttr): The weight parameters for gru unit. Default: None
-        bias (ParamAttr): The bias parameters for gru unit. Default: None
+        param_attr (ParamAttr): The weight parameters for gru unit. Default: None
+        bias_attr (ParamAttr): The bias parameters for gru unit. Default: None
         activation (string): The activation type for cell (actNode).
             Default: 'tanh'
         gate_activation (string): The activation type for gates (actGate).
@@ -764,34 +764,31 @@ def gru_unit(input,
     size = size / 3
     # create weight
-    if weight is None:
-        weight = helper.create_parameter(
-            attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype)
+    weight = helper.create_parameter(
+        attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype)
+    gate = helper.create_tmp_variable(dtype)
+    reset_hidden_pre = helper.create_tmp_variable(dtype)
+    updated_hidden = helper.create_tmp_variable(dtype)
+    inputs = {'Input': input, 'HiddenPrev': hidden, 'Weight': weight}
     # create bias
-    if bias is None:
+    if helper.bias_attr:
         bias_size = [1, 3 * size]
         bias = helper.create_parameter(
             attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True)
+        inputs['Bias'] = bias
-    gate = helper.create_tmp_variable(dtype)
-    reset_hidden_pre = helper.create_tmp_variable(dtype)
-    updated_hidden = helper.create_tmp_variable(dtype)
     helper.append_op(
         type='gru_unit',
-        inputs={'Input': input,
-                'HiddenPrev': hidden,
-                'Weight': weight},
+        inputs=inputs,
         outputs={
             'Gate': gate,
             'ResetHiddenPrev': reset_hidden_pre,
             'Hidden': updated_hidden,
         },
         attrs={
-            'activation': 0,
-            'gate_activation': 1,
+            'activation': 2,  # tanh
+            'gate_activation': 1,  # sigmoid
         })
     return updated_hidden, reset_hidden_pre, gate
...
...
@@ -9,3 +9,4 @@ endforeach()
 add_subdirectory(fit_a_line)
 add_subdirectory(recognize_digits)
 add_subdirectory(image_classification)
+add_subdirectory(understand_sentiment)
file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py")
string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
# default test
foreach(src ${TEST_OPS})
py_test(${src} SRCS ${src}.py)
endforeach()
...
@@ -17,11 +17,13 @@ from __future__ import print_function
 import paddle
 import paddle.fluid as fluid
 from functools import partial
+import numpy as np
 CLASS_DIM = 2
 EMB_DIM = 128
 HID_DIM = 512
 STACKED_NUM = 3
+BATCH_SIZE = 128
 def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num):
@@ -50,7 +52,7 @@ def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num):
     return prediction
-def inference_network(word_dict):
+def inference_program(word_dict):
     data = fluid.layers.data(
         name="words", shape=[1], dtype="int64", lod_level=1)
@@ -60,57 +62,71 @@ def inference_network(word_dict):
     return net
-def train_network(word_dict):
-    prediction = inference_network(word_dict)
+def train_program(word_dict):
+    prediction = inference_program(word_dict)
     label = fluid.layers.data(name="label", shape=[1], dtype="int64")
     cost = fluid.layers.cross_entropy(input=prediction, label=label)
     avg_cost = fluid.layers.mean(cost)
     accuracy = fluid.layers.accuracy(input=prediction, label=label)
-    return avg_cost, accuracy
+    return [avg_cost, accuracy]
-def train(use_cuda, save_path):
-    BATCH_SIZE = 128
-    EPOCH_NUM = 5
+def train(use_cuda, train_program, save_dirname):
+    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+    optimizer = fluid.optimizer.Adagrad(learning_rate=0.002)
     word_dict = paddle.dataset.imdb.word_dict()
+    trainer = fluid.Trainer(
+        train_func=partial(train_program, word_dict),
+        place=place,
+        optimizer=optimizer)
+    def event_handler(event):
+        if isinstance(event, fluid.EndEpochEvent):
+            test_reader = paddle.batch(
+                paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE)
+            avg_cost, acc = trainer.test(
+                reader=test_reader, feed_order=['words', 'label'])
+            print("avg_cost: %s" % avg_cost)
+            print("acc     : %s" % acc)
+            if acc > 0.2:  # Smaller value to increase CI speed
+                trainer.save_params(save_dirname)
+                trainer.stop()
+            else:
+                print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
+                    event.epoch + 1, avg_cost, acc))
+                if math.isnan(avg_cost):
+                    sys.exit("got NaN loss, training failed.")
+        elif isinstance(event, fluid.EndStepEvent):
+            print("Step {0}, Epoch {1} Metrics {2}".format(
+                event.step, event.epoch, map(np.array, event.metrics)))
+            if event.step == 1:  # Run 2 iterations to speed CI
+                trainer.save_params(save_dirname)
+                trainer.stop()
-    train_data = paddle.batch(
+    train_reader = paddle.batch(
         paddle.reader.shuffle(
-            paddle.dataset.imdb.train(word_dict), buf_size=1000),
+            paddle.dataset.imdb.train(word_dict), buf_size=25000),
         batch_size=BATCH_SIZE)
-    test_data = paddle.batch(
-        paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE)
-    def event_handler(event):
-        if isinstance(event, fluid.EndIteration):
-            if (event.batch_id % 10) == 0:
-                avg_cost, accuracy = trainer.test(reader=test_data)
-                print('BatchID {1:04}, Loss {2:2.2}, Acc {3:2.2}'.format(
-                    event.batch_id + 1, avg_cost, accuracy))
-                if accuracy > 0.01:  # Low threshold for speeding up CI
-                    trainer.params.save(save_path)
-                    return
-    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-    trainer = fluid.Trainer(
-        partial(train_network, word_dict),
-        optimizer=fluid.optimizer.Adagrad(learning_rate=0.002),
-        place=place,
-        event_handler=event_handler)
-    trainer.train(train_data, EPOCH_NUM, event_handler=event_handler)
+    trainer.train(
+        num_epochs=1,
+        event_handler=event_handler,
+        reader=train_reader,
+        feed_order=['words', 'label'])
-def infer(use_cuda, save_path):
-    params = fluid.Params(save_path)
+def infer(use_cuda, inference_program, save_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
     word_dict = paddle.dataset.imdb.word_dict()
     inferencer = fluid.Inferencer(
-        partial(inference_network, word_dict), params, place=place)
+        infer_func=partial(inference_program, word_dict),
+        param_path=save_dirname,
+        place=place)
     def create_random_lodtensor(lod, place, low, high):
         data = np.random.random_integers(low, high,
@@ -131,8 +147,8 @@ def main(use_cuda):
     if use_cuda and not fluid.core.is_compiled_with_cuda():
         return
     save_path = "understand_sentiment_stacked_lstm.inference.model"
-    train(use_cuda, save_path)
-    infer(use_cuda, save_path)
+    train(use_cuda, train_program, save_path)
+    infer(use_cuda, inference_program, save_path)
 if __name__ == '__main__':
...
...
@@ -109,6 +109,24 @@ class TestDetection(unittest.TestCase):
        print(str(program))
class TestPriorBox(unittest.TestCase):
def test_prior_box(self):
data_shape = [3, 224, 224]
images = fluid.layers.data(
name='pixel', shape=data_shape, dtype='float32')
conv1 = fluid.layers.conv2d(images, 3, 3, 2)
box, var = layers.prior_box(
input=conv1,
image=images,
min_sizes=[100.0],
aspect_ratios=[1.],
flip=True,
clip=True)
assert len(box.shape) == 4
assert box.shape == var.shape
assert box.shape[3] == 4
class TestMultiBoxHead(unittest.TestCase):
    def test_multi_box_head(self):
        data_shape = [3, 224, 224]
...
...
@@ -160,7 +160,9 @@ class TestDetectionMAPOp(OpTest):
         label_count, true_pos, false_pos = get_input_pos(
             self.class_pos_count, self.true_pos, self.true_pos_lod,
             self.false_pos, self.false_pos_lod)
-        for (label, difficult, xmin, ymin, xmax, ymax) in self.label:
+        for v in self.label:
+            label = v[0]
+            difficult = False if len(v) == 5 else v[1]
             if self.evaluate_difficult:
                 label_count[label] += 1
             elif not difficult:
@@ -245,6 +247,15 @@ class TestDetectionMAPOpSkipDiff(TestDetectionMAPOp):
                             [2, 0.8, 0, 1], [2, 0.1, 1, 0], [3, 0.2, 0, 1]]
class TestDetectionMAPOpWithoutDiff(TestDetectionMAPOp):
def init_test_case(self):
super(TestDetectionMAPOpWithoutDiff, self).init_test_case()
# label xmin ymin xmax ymax
self.label = [[1, 0.1, 0.1, 0.3, 0.3], [1, 0.6, 0.6, 0.8, 0.8],
[2, 0.3, 0.3, 0.6, 0.5], [1, 0.7, 0.1, 0.9, 0.3]]
class TestDetectionMAPOp11Point(TestDetectionMAPOp):
    def init_test_case(self):
        super(TestDetectionMAPOp11Point, self).init_test_case()
...
...
@@ -14,42 +14,24 @@
 import unittest
 import numpy as np
-from paddle.fluid.op import Operator
-import paddle.fluid.core as core
+from op_test import OpTest
-def create_tensor(scope, name, np_data):
-    tensor = scope.var(name).get_tensor()
-    tensor.set_dims(np_data.shape)
-    tensor.set(np_data, core.CPUPlace())
-    return tensor
-
-class TestIsEmptyOp(unittest.TestCase):
+class TestEmpty(OpTest):
     def setUp(self):
-        self.scope = core.Scope()
-        # create input variables
-        np_data0 = np.array([0, 1, 2])
-        create_tensor(self.scope, "X0", np_data0)
-        np_data1 = np.array([1])
-        t = create_tensor(self.scope, "X1", np_data1)
-        t.set_dims([0])
-
-        # create output variables
-        self.scope.var("out")
+        self.op_type = "is_empty"
+        self.inputs = {'X': np.array([1, 2, 3])}
+        self.outputs = {'Out': np.array([False])}
-    def test_no_empty(self):
-        self.one_case("X0", False)
+    def test_check_output(self):
+        self.check_output()
-    def test_empty(self):
-        self.one_case("X1", True)
-
-    def one_case(self, input, target):
-        op = Operator(type="is_empty", X=input, Out="out")
-        op.run(self.scope, core.CPUPlace())
-        out = self.scope.var("out").get_tensor()
-        self.assertEqual(np.array(out)[0], target)
+class TestNotEmpty(TestEmpty):
+    def setUp(self):
+        self.op_type = "is_empty"
+        self.inputs = {'X': np.array([])}
+        self.outputs = {'Out': np.array([True])}
 if __name__ == "__main__":
...
...
@@ -24,7 +24,8 @@ dtype_to_size = {
     core.VarDesc.VarType.INT16: 2,
     core.VarDesc.VarType.INT32: 4,
     core.VarDesc.VarType.INT64: 8,
-    core.VarDesc.VarType.BOOL: 1
+    core.VarDesc.VarType.BOOL: 1,
+    core.VarDesc.VarType.UINT8: 1,
 }
 SUB_BLOCK_OPS = [
...