diff --git a/CMakeLists.txt b/CMakeLists.txt index 264420ad830ed39b38f1918951d8d66c84fd5ee9..fd3582a1bca199d62d19550ffdd1efe9db520fa7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -126,7 +126,7 @@ include(external/swig) # download, build, install swig include(external/warpctc) # download, build, install warpctc include(external/any) # download libn::any include(external/eigen) # download eigen3 -include(external/pybind11) # download pybind11 +include(external/pybind11) # download pybind11 include(external/nccl) include(cudnn) # set cudnn libraries, must before configure diff --git a/cmake/cross_compiling/ios.cmake b/cmake/cross_compiling/ios.cmake index 0b38943952f7fb9052368fe95eb31dd7592d8a47..310450f7d009dc0cdae9c0079a96445af8ec8f95 100644 --- a/cmake/cross_compiling/ios.cmake +++ b/cmake/cross_compiling/ios.cmake @@ -79,9 +79,8 @@ if(NOT DEFINED IOS_ARCH) # FIXME(liuyiqun): support "armv7;armv7s;arm64" future set(IOS_ARCH "arm64") elseif(IOS_PLATFORM STREQUAL "SIMULATOR") - set(IOS_ARCH "i386;x86_64") - elseif(IOS_PLATFORM STREQUAL "WATCHOS") - set(IOS_ARCH armv7k) + # FIXME(liuyiqun): support "i386;x86_64" future + set(IOS_ARCH "x86_64") endif() endif() set(CMAKE_OSX_ARCHITECTURES ${IOS_ARCH} CACHE string "Build architecture for iOS") diff --git a/cmake/external/nccl.cmake b/cmake/external/nccl.cmake index 57d2c0a352507afd01d1cbf2c7b23c00ff7ad81b..fc43766efafc3d3e16f2906ce7f9a3d692c8e4ff 100644 --- a/cmake/external/nccl.cmake +++ b/cmake/external/nccl.cmake @@ -1,3 +1,21 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if(NOT WITH_GPU) + return() +endif() + include(ExternalProject) set(NCCL_SOURCE_DIR ${THIRD_PARTY_PATH}/nccl) diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake index 143b57a954e4e6b2bf273535ebdf0fa8e3dab768..3f86e456cfbe55fe47e5b18e755e34829ebe9930 100644 --- a/cmake/external/openblas.cmake +++ b/cmake/external/openblas.cmake @@ -1,11 +1,11 @@ # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. diff --git a/cmake/external/pybind11.cmake b/cmake/external/pybind11.cmake index 9391c285c7544669a5b1a078b7473d7a656c1bb4..4e87dc49d8956d1fa6dec777efc5a63c6b0f79a5 100644 --- a/cmake/external/pybind11.cmake +++ b/cmake/external/pybind11.cmake @@ -1,8 +1,26 @@ -INCLUDE(ExternalProject) +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -SET(PYBIND_SOURCE_DIR ${THIRD_PARTY_PATH}/pybind) +if(NOT WITH_PYTHON) + return() +endif() + +include(ExternalProject) -INCLUDE_DIRECTORIES(${PYBIND_SOURCE_DIR}/src/extern_pybind/include) +set(PYBIND_SOURCE_DIR ${THIRD_PARTY_PATH}/pybind) + +include_directories(${PYBIND_SOURCE_DIR}/src/extern_pybind/include) ExternalProject_Add( extern_pybind @@ -17,14 +35,12 @@ ExternalProject_Add( TEST_COMMAND "" ) -if (${CMAKE_VERSION} VERSION_LESS "3.3.0") +if(${CMAKE_VERSION} VERSION_LESS "3.3.0") set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/pybind_dummy.c) - file(WRITE ${dummyfile} "const char * dummy_any = \"${dummyfile}\";") + file(WRITE ${dummyfile} "const char * dummy_pybind = \"${dummyfile}\";") add_library(pybind STATIC ${dummyfile}) else() add_library(pybind INTERFACE) endif() add_dependencies(pybind extern_pybind) - -LIST(APPEND external_project_dependencies pybind) diff --git a/cmake/external/swig.cmake b/cmake/external/swig.cmake index ce088ae7eaa3355f2f9761e8c421da0d7ef89fa7..9db457c7b2d61228e5d5af6827c4cda11a20a463 100644 --- a/cmake/external/swig.cmake +++ b/cmake/external/swig.cmake @@ -1,11 +1,11 @@ # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. diff --git a/cmake/external/zlib.cmake b/cmake/external/zlib.cmake index e2c9fe56f335ae5b627b4d8d4bb17e4a2a466677..a98e069b7cd1654ddd5868560d0905eab6d9c692 100644 --- a/cmake/external/zlib.cmake +++ b/cmake/external/zlib.cmake @@ -1,11 +1,11 @@ # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. diff --git a/cmake/simd.cmake b/cmake/simd.cmake index 46035a908b588861607a25d3a21cf34b7b6fd4b8..53c2de332ea74b06d1bd6e5bb119cad6af27ed01 100644 --- a/cmake/simd.cmake +++ b/cmake/simd.cmake @@ -1,27 +1,28 @@ # This file is use to check all support level of AVX on your machine # so that PaddlePaddle can unleash the vectorization power of muticore. 
-INCLUDE(CheckCXXSourceRuns) -INCLUDE(CheckCXXSourceCompiles) +include(CheckCXXSourceRuns) +include(CheckCXXSourceCompiles) -IF(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") +if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") set(MMX_FLAG "-mmmx") set(SSE2_FLAG "-msse2") set(SSE3_FLAG "-msse3") - SET(AVX_FLAG "-mavx") - SET(AVX2_FLAG "-mavx2") -ELSEIF(MSVC) + set(AVX_FLAG "-mavx") + set(AVX2_FLAG "-mavx2") +elseif(MSVC) set(MMX_FLAG "/arch:MMX") set(SSE2_FLAG "/arch:SSE2") set(SSE3_FLAG "/arch:SSE3") SET(AVX_FLAG "/arch:AVX") SET(AVX2_FLAG "/arch:AVX2") -ENDIF() +endif() set(CMAKE_REQUIRED_FLAGS_RETAINED ${CMAKE_REQUIRED_FLAGS}) # Check MMX set(CMAKE_REQUIRED_FLAGS ${MMX_FLAG}) +set(MMX_FOUND_EXITCODE 1 CACHE STRING "Result from TRY_RUN" FORCE) CHECK_CXX_SOURCE_RUNS(" #include int main() @@ -32,6 +33,7 @@ int main() # Check SSE2 set(CMAKE_REQUIRED_FLAGS ${SSE2_FLAG}) +set(SSE2_FOUND_EXITCODE 1 CACHE STRING "Result from TRY_RUN" FORCE) CHECK_CXX_SOURCE_RUNS(" #include int main() @@ -42,6 +44,7 @@ int main() # Check SSE3 set(CMAKE_REQUIRED_FLAGS ${SSE3_FLAG}) +set(SSE3_FOUND_EXITCODE 1 CACHE STRING "Result from TRY_RUN" FORCE) CHECK_CXX_SOURCE_RUNS(" #include int main() @@ -55,6 +58,7 @@ int main() # Check AVX set(CMAKE_REQUIRED_FLAGS ${AVX_FLAG}) +set(AVX_FOUND_EXITCODE 1 CACHE STRING "Result from TRY_RUN" FORCE) CHECK_CXX_SOURCE_RUNS(" #include int main() @@ -67,6 +71,7 @@ int main() # Check AVX 2 set(CMAKE_REQUIRED_FLAGS ${AVX2_FLAG}) +set(AVX2_FOUND_EXITCODE 1 CACHE STRING "Result from TRY_RUN" FORCE) CHECK_CXX_SOURCE_RUNS(" #include int main() diff --git a/doc/faq/parameter/index_cn.rst b/doc/faq/parameter/index_cn.rst index c721b623183cc7d8d17e2c9fb1635ea07b8970cc..6fa0c64413be1616a435640b0347904a49873349 100644 --- a/doc/faq/parameter/index_cn.rst +++ b/doc/faq/parameter/index_cn.rst @@ -75,7 +75,7 @@ PaddlePaddle目前支持8种learning_rate_schedule,这8种learning_rate_schedu optimizer = paddle.optimizer.Adam( learning_rate=1e-3, - learning_rate_schedule="manual", + learning_rate_schedule="pass_manual", learning_rate_args="1:1.0,2:0.9,3:0.8",) 在该示例中,当已训练pass数小于等于1时,学习率为 :code:`1e-3 * 1.0`;当已训练pass数大于1小于等于2时,学习率为 :code:`1e-3 * 0.9`;当已训练pass数大于2时,学习率为 :code:`1e-3 * 0.8`。 diff --git a/doc/getstarted/build_and_install/docker_install_cn.rst b/doc/getstarted/build_and_install/docker_install_cn.rst index 30b144d849bec367cd0197b6082889e011193a9a..0d34dec8e908c5e61001500725187a2233797f46 100644 --- a/doc/getstarted/build_and_install/docker_install_cn.rst +++ b/doc/getstarted/build_and_install/docker_install_cn.rst @@ -145,7 +145,7 @@ PaddlePaddle发布新版本的时候都会发布对应版本的生产镜像以 Jupyter Notebook是一个开源的web程序,大家可以通过它制作和分享带有代码、公式、图表、文字的交互式文档。用户可以通过网页浏览文档。 -PaddlePaddle Book是为用户和开发者制作的一个交互式的Jupyter Nodebook。 +PaddlePaddle Book是为用户和开发者制作的一个交互式的Jupyter Notebook。 如果您想要更深入了解deep learning,PaddlePaddle Book一定是您最好的选择。 我们提供可以直接运行PaddlePaddle Book的Docker镜像,直接运行: diff --git a/doc/howto/usage/cmd_parameter/arguments_cn.md b/doc/howto/usage/cmd_parameter/arguments_cn.md index f7aa525054468670f59309ddf9206af55bb77869..2dea231ca5487978d59a4d0a570431722ed6b3bf 100644 --- a/doc/howto/usage/cmd_parameter/arguments_cn.md +++ b/doc/howto/usage/cmd_parameter/arguments_cn.md @@ -63,7 +63,7 @@ -训练dot_period +训练dot_period √√ diff --git a/doc/index_cn.rst b/doc/index_cn.rst index 9279bac7f4b2898c18979630a8d6dfcb2dba70e0..ada51c2d73263898b2c748437f8eb0f30b537073 100644 --- a/doc/index_cn.rst +++ b/doc/index_cn.rst @@ -8,3 +8,4 @@ PaddlePaddle 文档 
howto/index_cn.rst api/index_cn.rst faq/index_cn.rst + mobile/index_cn.rst diff --git a/doc/index_en.rst b/doc/index_en.rst index 64684b8b9b27e245c6b32ea28809d3bbce22fab9..23b64b6cadf776d44c4d0aa5a550ffe24be13b18 100644 --- a/doc/index_en.rst +++ b/doc/index_en.rst @@ -7,3 +7,4 @@ PaddlePaddle Documentation getstarted/index_en.rst howto/index_en.rst api/index_en.rst + mobile/index_en.rst diff --git a/doc/howto/cross_compiling/cross_compiling_for_android_cn.md b/doc/mobile/cross_compiling_for_android_cn.md similarity index 95% rename from doc/howto/cross_compiling/cross_compiling_for_android_cn.md rename to doc/mobile/cross_compiling_for_android_cn.md index 58e4dd9c3fe43f963d00152aa4f456fadbb12bf3..882066f23714f7ab3bba9199b5fa5ff2325ce849 100644 --- a/doc/howto/cross_compiling/cross_compiling_for_android_cn.md +++ b/doc/mobile/cross_compiling_for_android_cn.md @@ -20,10 +20,32 @@ $ docker build -t username/paddle-android:dev . -f Dockerfile.android 构建好开发镜像后,即可使用开发镜像来编译Android版PaddlePaddle C-API库。 Android的Docker开发镜像向用户提供两个可配置的参数: -| Argument | Optional Values | Default | -|-----------------|-------------------------|---------| -|`ANDROID_ABI` |`armeabi-v7a, arm64-v8a` | `armeabi-v7a` | -|`ANDROID_API` |`>= 21` | `21` | + ++ + + + + + + + + + + + + + + + + + + + + + + +
+<tr><th>Argument</th><th>Optional Values</th><th>Default</th></tr>
+<tr><td>ANDROID_ABI</td><td>armeabi-v7a, arm64-v8a</td><td>armeabi-v7a</td></tr>
+<tr><td>ANDROID_API</td><td>>= 21</td><td>21</td></tr>
- 编译`armeabi-v7a`,`Android API 21`的PaddlePaddle库 ```bash diff --git a/doc/howto/cross_compiling/cross_compiling_for_android.md b/doc/mobile/cross_compiling_for_android_en.md similarity index 93% rename from doc/howto/cross_compiling/cross_compiling_for_android.md rename to doc/mobile/cross_compiling_for_android_en.md index 161863e5c0a2c002af7d7611dad53c2c19148722..26858581fc1d77a9391520ac0dfd80fbd98f508c 100644 --- a/doc/howto/cross_compiling/cross_compiling_for_android.md +++ b/doc/mobile/cross_compiling_for_android_en.md @@ -26,10 +26,32 @@ $ docker run -it --rm -v $PWD:/paddle -e "ANDROID_ABI=armeabi-v7a" -e "ANDROID_A The Docker image accepts two arguments `ANDROID_ABI` and `ANDROID_API`: -| Argument | Optional Values | Default | -|-----------------|-------------------------|---------| -|`ANDROID_ABI` |`armeabi-v7a, arm64-v8a` | `armeabi-v7a` | -|`ANDROID_API` |`>= 21` | `21` | + ++ + + + + + + + + + + + + + + + + + + + + + + +
+<tr><th>Argument</th><th>Optional Values</th><th>Default</th></tr>
+<tr><td>ANDROID_ABI</td><td>armeabi-v7a, arm64-v8a</td><td>armeabi-v7a</td></tr>
+<tr><td>ANDROID_API</td><td>>= 21</td><td>21</td></tr>
The ARM-64 architecture (`arm64-v8a`) requires at least level 21 of Android API. diff --git a/doc/howto/cross_compiling/cross_compiling_for_ios_cn.md b/doc/mobile/cross_compiling_for_ios_cn.md similarity index 91% rename from doc/howto/cross_compiling/cross_compiling_for_ios_cn.md rename to doc/mobile/cross_compiling_for_ios_cn.md index 32c490d9aa4202e17aa1784a45a317c5307b98ea..cda636a67de712e072f4cc7ad859dda75211eaa8 100644 --- a/doc/howto/cross_compiling/cross_compiling_for_ios_cn.md +++ b/doc/mobile/cross_compiling_for_ios_cn.md @@ -27,10 +27,28 @@ iOS平台可选配置参数: - `SIMULATOR`,构建目标为`x86`架构的模拟器平台。 - `IOS_ARCH`,目标架构。针对不同的`IOS_PLATFORM`,可设置的目标架构如下表所示: - | IOS_PLATFORM | IOS_ARCH | - |--------------|----------------------| - | OS | armv7, armv7s, arm64 (默认) | - | SIMULATOR | i386, x86_64 (默认) | + + + + + + + + + + + + + + + + + + + + + +
+<tr><th>IOS_PLATFORM</th><th>IOS_ARCH</th></tr>
+<tr><td>OS</td><td>armv7, armv7s, arm64 (默认)</td></tr>
+<tr><td>SIMULATOR</td><td>i386, x86_64 (默认)</td></tr>
- `IOS_DEPLOYMENT_TARGET`,最小的iOS部署版本,默认值为`7.0`。 - `IOS_ENABLE_BITCODE`,是否使能[Bitcode](https://developer.apple.com/library/content/documentation/IDEs/Conceptual/AppDistributionGuide/AppThinning/AppThinning.html#//apple_ref/doc/uid/TP40012582-CH35-SW3),可设置`ON/OFF`,默认值为`ON`。 diff --git a/doc/howto/cross_compiling/cross_compiling_for_raspberry_cn.md b/doc/mobile/cross_compiling_for_raspberry_cn.md similarity index 100% rename from doc/howto/cross_compiling/cross_compiling_for_raspberry_cn.md rename to doc/mobile/cross_compiling_for_raspberry_cn.md diff --git a/doc/howto/cross_compiling/cross_compiling_for_raspberry_en.md b/doc/mobile/cross_compiling_for_raspberry_en.md similarity index 100% rename from doc/howto/cross_compiling/cross_compiling_for_raspberry_en.md rename to doc/mobile/cross_compiling_for_raspberry_en.md diff --git a/doc/mobile/index_cn.rst b/doc/mobile/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1d99666e58b7043b85b0203ee0dfcd1957710161 --- /dev/null +++ b/doc/mobile/index_cn.rst @@ -0,0 +1,9 @@ +MOBILE +====== + +.. toctree:: + :maxdepth: 1 + + cross_compiling_for_android_cn.md + cross_compiling_for_ios_cn.md + cross_compiling_for_raspberry_cn.md diff --git a/doc/mobile/index_en.rst b/doc/mobile/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..3c08d736717cfe8d5fdf449dc58015086befbe60 --- /dev/null +++ b/doc/mobile/index_en.rst @@ -0,0 +1,8 @@ +MOBILE +====== + +.. toctree:: + :maxdepth: 1 + + cross_compiling_for_android_en.md + cross_compiling_for_raspberry_en.md diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index e767856d5012fd205f6b57f9721d0cbca8dc46ed..d267b14657be2a773d1dacfd9ac3767cddc47415 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -29,32 +29,32 @@ add_style_check_target(paddle_capi ${CAPI_SOURCES} ${CAPI_HEADER} add_dependencies(paddle_capi paddle_proto) # TODO: paddle_capi_whole will be removed. 
+set(PADDLE_CAPI_LAYERS_LIBS + paddle_function + paddle_gserver) if(MOBILE_INFERENCE) - set(PADDLE_CAPI_INFER_LIBS - paddle_utils - paddle_parameter - paddle_math - paddle_cuda - paddle_function - paddle_gserver - paddle_proto) + set(PADDLE_CAPI_ENGINE_LIBS + paddle_utils + paddle_parameter + paddle_math + paddle_cuda + paddle_proto) else() - set(PADDLE_CAPI_INFER_LIBS - paddle_utils - paddle_parameter - paddle_math - paddle_cuda - paddle_function - paddle_gserver - paddle_proto - paddle_pserver - paddle_network) + set(PADDLE_CAPI_ENGINE_LIBS + paddle_utils + paddle_parameter + paddle_math + paddle_cuda + paddle_proto + paddle_pserver + paddle_network) endif() +set(PADDLE_CAPI_INFER_LIBS ${PADDLE_CAPI_LAYERS_LIBS} ${PADDLE_CAPI_ENGINE_LIBS}) cc_library(paddle_capi_whole DEPS paddle_capi ${PADDLE_CAPI_INFER_LIBS}) # Link the static library for inference -cc_library(paddle_capi_engine DEPS paddle_capi paddle_utils paddle_parameter paddle_math paddle_cuda paddle_proto) -cc_library(paddle_capi_layers DEPS paddle_function paddle_gserver) +cc_library(paddle_capi_engine DEPS paddle_capi ${PADDLE_CAPI_ENGINE_LIBS}) +cc_library(paddle_capi_layers DEPS ${PADDLE_CAPI_LAYERS_LIBS}) # Link the shared library for inference if(NOT IOS) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index f4fef055daf39e9be0645deaafdad4132fc7e35f..1afc5242081e7f7b12527a15d29421cebeb3d3b8 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -20,7 +20,8 @@ cc_test(scope_test SRCS scope_test.cc DEPS scope) cc_library(attribute SRCS attribute.cc DEPS framework_proto) -cc_test(program_desc_test SRCS program_desc_test.cc DEPS proto_desc) +cc_test(program_desc_test SRCS program_desc_test.cc DEPS proto_desc +device_context) cc_library(op_proto_maker SRCS op_proto_maker.cc DEPS framework_proto attribute) cc_test(op_proto_maker_test SRCS op_proto_maker_test.cc DEPS op_proto_maker) cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto) @@ -44,8 +45,9 @@ add_custom_command(TARGET framework_py_proto POST_BUILD cc_library(backward SRCS backward.cc DEPS net_op) cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context fill_constant_op) +cc_library(lod_rank_table SRCS lod_rank_table.cc DEPS lod_tensor) -cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto backward glog) +cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto backward glog lod_rank_table) cc_library(prune SRCS prune.cc DEPS framework_proto) cc_test(prune_test SRCS prune_test.cc DEPS op_info prune recurrent_op device_context) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index f8d32de5df222e0ba9f143c3f7a3d34d07fed6b4..2fcf41d69f0011b0d9a3d89c97fcebacb0703e97 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -21,7 +21,9 @@ limitations under the License. 
*/ #include #include "paddle/framework/feed_fetch_type.h" +#include "paddle/framework/lod_rank_table.h" #include "paddle/framework/lod_tensor.h" +#include "paddle/framework/lod_tensor_array.h" #include "paddle/framework/op_registry.h" #include "paddle/framework/scope.h" @@ -70,10 +72,14 @@ static void CreateTensor(Variable* var, VarDesc::VarType var_type) { var->GetMutable(); } else if (var_type == VarDesc::STEP_SCOPES) { var->GetMutable>(); + } else if (var_type == VarDesc::LOD_RANK_TABLE) { + var->GetMutable(); + } else if (var_type == VarDesc::LOD_TENSOR_ARRAY) { + var->GetMutable(); } else { PADDLE_THROW( "Variable type %d is not in " - "[LoDTensor, SelectedRows, FEED_MINIBATCH, FETCH_LIST]", + "[LoDTensor, SelectedRows, FEED_MINIBATCH, FETCH_LIST, LOD_RANK_TABLE]", var_type); } } @@ -83,7 +89,7 @@ void Executor::Run(const ProgramDescBind& pdesc, Scope* scope, int block_id, // TODO(tonyyang-svail): // - only runs on the first device (i.e. no interdevice communication) // - will change to use multiple blocks for RNN op and Cond Op - PADDLE_ENFORCE_LT(block_id, pdesc.Size()); + PADDLE_ENFORCE_LT(static_cast(block_id), pdesc.Size()); auto& block = pdesc.Block(block_id); auto& device = device_contexts_[0]; diff --git a/paddle/framework/framework.proto b/paddle/framework/framework.proto index 8f2df3dc0e29f96b3aea58b6761d1ccb4cd7c624..f1fc4529e15502927560eefd74110f6ca7eab4a9 100644 --- a/paddle/framework/framework.proto +++ b/paddle/framework/framework.proto @@ -109,6 +109,11 @@ message LoDTensorDesc { optional int32 lod_level = 2 [ default = 0 ]; } +message LoDTensorArrayDesc { + required TensorDesc tensor = 1; + optional int32 lod_level = 2 [ default = 0 ]; +} + message VarDesc { enum VarType { LOD_TENSOR = 1; @@ -116,11 +121,14 @@ message VarDesc { FEED_MINIBATCH = 3; FETCH_LIST = 4; STEP_SCOPES = 5; + LOD_RANK_TABLE = 6; + LOD_TENSOR_ARRAY = 7; } required string name = 1; required VarType type = 2; optional LoDTensorDesc lod_tensor = 3; optional TensorDesc selected_rows = 4; + optional LoDTensorArrayDesc tensor_array = 6; optional bool persistable = 5 [ default = false ]; } diff --git a/paddle/framework/lod_rank_table.cc b/paddle/framework/lod_rank_table.cc new file mode 100644 index 0000000000000000000000000000000000000000..68a83def7e5a12fe3261be9e27cbb9bb54e1f8ad --- /dev/null +++ b/paddle/framework/lod_rank_table.cc @@ -0,0 +1,48 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#include "paddle/framework/lod_rank_table.h" + +namespace paddle { +namespace framework { +void LoDRankTable::Reset(const LoD& lod, size_t level) { + this->coarse_lod_.clear(); + this->items_.clear(); + PADDLE_ENFORCE(level < lod.size(), + "Cannot rank lod since the level %d is less than lod size %d", + level, lod.size()); + coarse_lod_.reserve(level); + for (size_t i = 0; i < level; ++i) { + coarse_lod_.push_back(lod[i]); + } + auto& vec = lod[level]; + for (size_t i = 0; i < vec.size() - 1; ++i) { + TableItem item; + item.index = i; + item.length = vec[i + 1] - vec[i]; + items_.emplace_back(item); + } + // NOTE(yuyang18): + // + // The time complexity of stable_sort is O(N*log(N)) if additional memory is + // available. It is easy to debug and unit test when using `stable_sort` + // instead of `sort`. Also, the items of a rank table will not be too large. + std::stable_sort(items_.begin(), items_.end(), + [](const TableItem& a, const TableItem& b) { + return a.length > b.length; + }); +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/lod_rank_table.h b/paddle/framework/lod_rank_table.h new file mode 100644 index 0000000000000000000000000000000000000000..9faa3a4d7bdc55ab7b24e31f5e5434dacc0a4b36 --- /dev/null +++ b/paddle/framework/lod_rank_table.h @@ -0,0 +1,55 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once +#include "paddle/framework/lod_tensor.h" + +namespace paddle { +namespace framework { + +// LoD Rank Table stores the `level` of `lod` which is ordered by sequence +// length in descending order. It is useful when implement dynamic RNN and is +// shared by dynamic RNN memory, dynamic RNN slice input and dynamic RNN slice +// output operators. +// +// The table item contains two element. The length of sequence and the index of +// sequence in that level. +// +// LoDRankTable also stores the coarse_lod, which is the lod information whose +// level is less than input level, in order to restore the output LoD +// information. 
+class LoDRankTable { + public: + struct TableItem { + size_t index; + size_t length; + }; + + LoDRankTable() {} + + void Reset(const LoD& lod, size_t level); + + const std::vector& items() const { return this->items_; } + + const LoD& coarse_lod() const { return this->coarse_lod_; } + + size_t level() const { return coarse_lod_.size(); } + + private: + LoD coarse_lod_; + std::vector items_; +}; + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/lod_tensor.cc b/paddle/framework/lod_tensor.cc index 584308a5388da0d02d29f71a28097b02b6ea825f..2bcfffb134f46416301b28043e1875e822fbc3e4 100644 --- a/paddle/framework/lod_tensor.cc +++ b/paddle/framework/lod_tensor.cc @@ -135,5 +135,43 @@ void LoDTensor::ShrinkInLevel(size_t level, size_t elem_begin, PADDLE_ENFORCE_LT(begin, end, "Cannot shrink, the result tensor is empty."); ShareDataWith(Slice(begin, end)); } + +void GetFineGrainedLoDLength(const LoD& lod, size_t start_idx, size_t end_idx, + std::vector>* lod_length, + size_t* start_offset) { + lod_length->clear(); + PADDLE_ENFORCE(start_idx < lod.size() - 1, + "start_idx should be >= 0 and < lod.size() - 1."); + PADDLE_ENFORCE(end_idx < lod.size(), + "end_idx should be >= 0 and < lod.size()."); + PADDLE_ENFORCE_LE(start_idx, end_idx, + "start_idx should be less than end_idx."); + for (size_t level_idx = 0; level_idx < lod.size(); ++level_idx) { + std::vector level_lens; + for (size_t i = start_idx; i < end_idx; ++i) { + level_lens.push_back(lod[level_idx][i + 1] - lod[level_idx][i]); + } + lod_length->emplace_back(level_lens); + start_idx = lod[level_idx][start_idx]; + end_idx = lod[level_idx][end_idx]; + } + *start_offset = start_idx; +} + +void AppendLoD(LoD* lod, const std::vector>& lod_length) { + PADDLE_ENFORCE_EQ( + lod->size(), lod_length.size(), + "The lod_length should has the same size with the appended lod."); + for (size_t i = 0; i < lod->size(); ++i) { + auto& level = (*lod)[i]; + if (level.empty()) { + level.push_back(0); + } + for (size_t len : lod_length[i]) { + level.push_back(level.back() + len); + } + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index f4fe4cdac6019a1899fd3db8e1b6ca588be0d436..1437da399a28288429527f9672ace0482825159f 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -181,5 +181,11 @@ LoDTensor LodExpand(const LoDTensor& source, const LoD& lod, size_t level, return tensor; } +void GetFineGrainedLoDLength(const LoD& lod, size_t start_idx, size_t end_idx, + std::vector>* lod_length, + size_t* start_offset); + +void AppendLoD(LoD* lod, const std::vector>& lod_length); + } // namespace framework } // namespace paddle diff --git a/paddle/framework/lod_tensor.md b/paddle/framework/lod_tensor.md index d147f1c4257eec14664301edab8d1fe2f128d2b0..10a8a7867fbf072f585fe3bfb1243e4e6bef4ec8 100644 --- a/paddle/framework/lod_tensor.md +++ b/paddle/framework/lod_tensor.md @@ -140,19 +140,9 @@ Similarly, the lengths in the top level LoD are transformed into offsets of elements/words as follows: ``` -0 9 10 15 - = = = - 3+2+4 1+9 2+3+10 -``` - -so we can tell that the first article is from word 0 to word 9, and the second article is from word 9 to word 10. 
- -The complete offset representation is as follows: - -``` -0 9 10 15 -0 3 5 9 10 12 15 - ||| || |||| | || ||| +0 3 4 6 + = = = + 3 3+1 4+2 ``` ## Slicing of LoD Tensors diff --git a/paddle/framework/lod_tensor_array.h b/paddle/framework/lod_tensor_array.h new file mode 100644 index 0000000000000000000000000000000000000000..13f0608d24be97d8bba149b74f1a4deb57deeb48 --- /dev/null +++ b/paddle/framework/lod_tensor_array.h @@ -0,0 +1,23 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once +#include +#include "paddle/framework/lod_tensor.h" + +namespace paddle { +namespace framework { +using LoDTensorArray = std::vector; +} +} // namespace paddle diff --git a/paddle/framework/lod_tensor_test.cc b/paddle/framework/lod_tensor_test.cc index aa2f6c993d41ae98e0769d470dccad3b410da53e..bf61c9ee7aa99b06e78b9b27dff72d216c74a86e 100644 --- a/paddle/framework/lod_tensor_test.cc +++ b/paddle/framework/lod_tensor_test.cc @@ -144,5 +144,47 @@ TEST(LodExpand, test) { } } +TEST(LoD, GetFineGrainedLoDLength) { + LoD lod; + lod.push_back(std::vector{0, 2, 4, 5}); + lod.push_back(std::vector{0, 1, 6, 8, 10, 11}); + lod.push_back( + std::vector{0, 2, 5, 7, 10, 12, 15, 17, 20, 24, 26, 29}); + + std::vector> lod_length; + size_t start_offset; + paddle::framework::GetFineGrainedLoDLength(lod, 1, 2, &lod_length, + &start_offset); + + std::vector> expected; + expected.push_back(std::vector{2}); + expected.push_back(std::vector{2, 2}); + expected.push_back(std::vector{2, 3, 4, 2}); + EXPECT_EQ(lod_length, expected); + EXPECT_EQ(start_offset, 15UL); +} + +TEST(LoD, AppendLoD) { + std::vector> lod_lens; + lod_lens.push_back(std::vector{2}); + lod_lens.push_back(std::vector{2, 2}); + lod_lens.push_back(std::vector{2, 3, 4, 2}); + + LoD origin; + origin.push_back(std::vector{0, 2}); + origin.push_back(std::vector{0, 1, 6}); + origin.push_back(std::vector{0, 2, 5, 7, 10, 12, 15}); + + paddle::framework::AppendLoD(&origin, lod_lens); + + LoD expected; + expected.push_back(std::vector{0, 2, 4}); + expected.push_back(std::vector{0, 1, 6, 8, 10}); + expected.push_back( + std::vector{0, 2, 5, 7, 10, 12, 15, 17, 20, 24, 26}); + + EXPECT_EQ(origin, expected); +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index c96166f35d1425218a4a74f50dc5ed542d677b68..e7cba9e702ce0f96a9680169f0593130df2fd096 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -67,8 +67,11 @@ class CompileTimeInferShapeContext : public InferShapeContext { out); in_var->SetLoDLevel(out_var->GetLodLevel()); } + bool IsRuntime() const override; + + protected: + VarDesc::VarType GetVarType(const std::string &name) const override; - private: DDim GetDim(const std::string &name) const override; void SetDim(const std::string &name, const DDim &dim) override; @@ -349,6 +352,9 @@ void OpDescBind::InferVarType(BlockDescBind *block) const { info.infer_var_type_(*this, block); } else { // all output type is 
LoDTensor by default + VLOG(10) << this->Type() + << " has not registered InferVarType. Set output variables to " + "LOD_TENSOR"; for (auto &out_pair : this->outputs_) { for (auto &out_var_name : out_pair.second) { block->Var(out_var_name)->SetType(VarDesc::LOD_TENSOR); @@ -448,6 +454,12 @@ void CompileTimeInferShapeContext::SetDim(const std::string &name, const DDim &dim) { block_.FindVarRecursive(name)->SetShape(framework::vectorize(dim)); } +bool CompileTimeInferShapeContext::IsRuntime() const { return false; } + +VarDesc::VarType CompileTimeInferShapeContext::GetVarType( + const std::string &name) const { + return block_.FindVarRecursive(name)->GetType(); +} } // namespace framework } // namespace paddle diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 9295d36c2b2e66130ad273ebd3a40de739efeea7..22a7d9728a05950d66a1acd23d6fb18263f4ff6b 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -15,7 +15,9 @@ limitations under the License. */ #include "paddle/framework/operator.h" #include #include +#include "paddle/framework/lod_tensor_array.h" #include "paddle/framework/shape_inference.h" +#include "paddle/framework/var_type.h" namespace paddle { namespace framework { @@ -365,7 +367,9 @@ class RuntimeInferShapeContext : public InferShapeContext { out_tensor->set_lod(in_tensor.lod()); } - private: + bool IsRuntime() const override { return true; } + + protected: DDim GetDim(const std::string& name) const override { Variable* var = scope_.FindVar(name); if (var->IsType()) { @@ -388,6 +392,12 @@ class RuntimeInferShapeContext : public InferShapeContext { } } + VarDesc::VarType GetVarType(const std::string& name) const override { + auto* var = scope_.FindVar(name); + return ToVarType(var->Type()); + } + + private: const OperatorBase& op_; const Scope& scope_; }; diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index b8a7040ed024fc7b19980beef3d8b367dfdd7f50..a1303a90980b40ff03bce1ab1a6f67bbbf952bcf 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -298,11 +298,10 @@ class ExecutionContext { } #ifdef PADDLE_WITH_CUDA - const platform::CUDADeviceContext& cuda_device_context() const { + const inline platform::CUDADeviceContext& cuda_device_context() const { PADDLE_ENFORCE(platform::is_gpu_place(device_context_.GetPlace())); - auto cuda_ctx = - reinterpret_cast(&device_context_); - return *cuda_ctx; + return *reinterpret_cast( + &device_context_); } #endif @@ -408,7 +407,6 @@ class OperatorWithKernel : public OperatorBase { // indicate kernel DataType by input data. Defaultly all input data must be // same. 
virtual DataType IndicateDataType(const ExecutionContext& ctx) const { - VLOG(3) << "Default IndicateDataType " << this->Type(); auto& scope = ctx.scope(); int data_type = -1; for (auto& input : this->inputs_) { @@ -425,7 +423,6 @@ class OperatorWithKernel : public OperatorBase { } if (t != nullptr) { int tmp = static_cast(ToDataType(t->type())); - VLOG(3) << "Input " << ipt_name << " with data_type " << tmp; PADDLE_ENFORCE(tmp == data_type || data_type == -1, "DataType of Paddle Op %s must be the same.", Type()); diff --git a/paddle/framework/shape_inference.cc b/paddle/framework/shape_inference.cc index 8169df8e4629e2d02d3dabcd6a8a102ad0077a81..0af41b164f5894db17b2f86d4eba371cf05e3b41 100644 --- a/paddle/framework/shape_inference.cc +++ b/paddle/framework/shape_inference.cc @@ -46,6 +46,23 @@ void InferShapeContext::SetDims(const std::vector &names, SetDim(names[i], dims[i]); } } +std::vector InferShapeContext::GetInputsVarType( + const std::string &name) const { + return GetVarTypes(Inputs(name)); +} +std::vector InferShapeContext::GetOutputsVarType( + const std::string &name) const { + return GetVarTypes(Outputs(name)); +} +std::vector InferShapeContext::GetVarTypes( + const std::vector &names) const { + std::vector retv; + retv.resize(names.size()); + std::transform(names.begin(), names.end(), retv.begin(), + std::bind(std::mem_fn(&InferShapeContext::GetVarType), this, + std::placeholders::_1)); + return retv; +} } // namespace framework } // namespace paddle diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h index 6f19900ef1a3e88fe78d457a03c344ea586ab551..7d36ead2ca85328c7843b3b5d423cf8e921d1c93 100644 --- a/paddle/framework/shape_inference.h +++ b/paddle/framework/shape_inference.h @@ -16,6 +16,7 @@ limitations under the License. 
*/ #include "paddle/framework/attribute.h" #include "paddle/framework/ddim.h" +#include "paddle/framework/framework.pb.h" namespace paddle { namespace framework { @@ -26,6 +27,10 @@ class InferShapeContext { virtual bool HasInput(const std::string &name) const = 0; virtual bool HasOutput(const std::string &name) const = 0; + std::vector GetInputsVarType(const std::string &name) const; + std::vector GetOutputsVarType( + const std::string &name) const; + virtual bool HasInputs(const std::string &name) const = 0; virtual bool HasOutputs(const std::string &name) const = 0; @@ -46,6 +51,8 @@ class InferShapeContext { virtual void ShareLoD(const std::string &in, const std::string &out, size_t i = 0, size_t j = 0) const = 0; + virtual bool IsRuntime() const = 0; + protected: virtual framework::DDim GetDim(const std::string &name) const = 0; virtual void SetDim(const std::string &name, const framework::DDim &dim) = 0; @@ -55,6 +62,11 @@ class InferShapeContext { void SetDims(const std::vector &names, const std::vector &dims); + + std::vector GetVarTypes( + const std::vector &names) const; + + virtual VarDesc::VarType GetVarType(const std::string &name) const = 0; }; } // namespace framework diff --git a/paddle/framework/var_desc.cc b/paddle/framework/var_desc.cc index 8e92c81d1137472737230be79d71824593d3256f..16aca192d41a64003de85ce45f7697bf45c556ed 100644 --- a/paddle/framework/var_desc.cc +++ b/paddle/framework/var_desc.cc @@ -37,13 +37,27 @@ std::vector VarDescBind::Shape() const { DataType VarDescBind::GetDataType() const { return tensor_desc().data_type(); } void VarDescBind::SetLoDLevel(int32_t lod_level) { - PADDLE_ENFORCE(desc_.type() == VarDesc::LOD_TENSOR); - desc_.mutable_lod_tensor()->set_lod_level(lod_level); + switch (desc_.type()) { + case VarDesc::LOD_TENSOR: + desc_.mutable_lod_tensor()->set_lod_level(lod_level); + break; + case VarDesc::LOD_TENSOR_ARRAY: + desc_.mutable_tensor_array()->set_lod_level(lod_level); + break; + default: + PADDLE_THROW("Tensor type=%d does not support LoDLevel", desc_.type()); + } } int32_t VarDescBind::GetLodLevel() const { - PADDLE_ENFORCE(desc_.type() == VarDesc::LOD_TENSOR); - return desc_.lod_tensor().lod_level(); + switch (desc_.type()) { + case VarDesc::LOD_TENSOR: + return desc_.lod_tensor().lod_level(); + case VarDesc::LOD_TENSOR_ARRAY: + return desc_.tensor_array().lod_level(); + default: + PADDLE_THROW("Tensor type=%d does not support LoDLevel", desc_.type()); + } } const TensorDesc &VarDescBind::tensor_desc() const { @@ -53,6 +67,8 @@ const TensorDesc &VarDescBind::tensor_desc() const { return desc_.selected_rows(); case VarDesc::LOD_TENSOR: return desc_.lod_tensor().tensor(); + case VarDesc::LOD_TENSOR_ARRAY: + return desc_.tensor_array().tensor(); default: PADDLE_THROW("Unexpected branch."); } @@ -66,6 +82,8 @@ TensorDesc *VarDescBind::mutable_tensor_desc() { return desc_.mutable_selected_rows(); case VarDesc::LOD_TENSOR: return desc_.mutable_lod_tensor()->mutable_tensor(); + case VarDesc::LOD_TENSOR_ARRAY: + return desc_.mutable_tensor_array()->mutable_tensor(); default: PADDLE_THROW("Unexpected branch."); } diff --git a/paddle/framework/var_desc.h b/paddle/framework/var_desc.h index 70daa20e8d99abc5759655adf538a8c197e9ec6a..5cf4608944c5011d798fbde060002a57be8f6102 100644 --- a/paddle/framework/var_desc.h +++ b/paddle/framework/var_desc.h @@ -15,6 +15,7 @@ limitations under the License. 
*/ #pragma once #include +#include "glog/logging.h" #include "paddle/framework/framework.pb.h" namespace paddle { diff --git a/paddle/framework/var_type.h b/paddle/framework/var_type.h new file mode 100644 index 0000000000000000000000000000000000000000..d060196bb2c478b776851288cb71a1880d60660d --- /dev/null +++ b/paddle/framework/var_type.h @@ -0,0 +1,36 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once +#include "paddle/framework/framework.pb.h" +#include "paddle/framework/lod_rank_table.h" +#include "paddle/framework/lod_tensor.h" +#include "paddle/framework/lod_tensor_array.h" + +namespace paddle { +namespace framework { +inline VarDesc::VarType ToVarType(std::type_index type) { + if (type.hash_code() == typeid(LoDTensor).hash_code()) { + return VarDesc_VarType_LOD_TENSOR; + } else if (type.hash_code() == typeid(LoDRankTable).hash_code()) { + return VarDesc_VarType_LOD_RANK_TABLE; + } else if (type.hash_code() == typeid(LoDTensorArray).hash_code()) { + return VarDesc_VarType_LOD_TENSOR_ARRAY; + } else { + PADDLE_THROW("ToVarType:Unsupported type %s", type.name()); + } +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/variable.h b/paddle/framework/variable.h index cde5ec2413ad01a0396e19fa617688af0eafbc75..e5a94759f9230ab4ce9d2cc24849a2debb8a5e2f 100644 --- a/paddle/framework/variable.h +++ b/paddle/framework/variable.h @@ -48,6 +48,11 @@ class Variable { void Clear() { holder_.reset(); } + std::type_index Type() const { + PADDLE_ENFORCE(holder_ != nullptr, "Must hold memory"); + return holder_->Type(); + } + private: struct Placeholder { virtual ~Placeholder() {} diff --git a/paddle/gserver/evaluators/Evaluator.cpp b/paddle/gserver/evaluators/Evaluator.cpp index 87cb2d280866ac2be2d6f85e872e547e12548feb..8e66b1f0db5d8a365a5aa9b98d2fb3f867458411 100644 --- a/paddle/gserver/evaluators/Evaluator.cpp +++ b/paddle/gserver/evaluators/Evaluator.cpp @@ -407,7 +407,7 @@ real AucEvaluator::evalImp(std::vector& arguments) { // Copy label from value to a vector. if (nullptr == label && nullptr != labelval) { // label width is 1 - CHECK_EQ(1, labelval->getWidth()); + CHECK_EQ(1U, labelval->getWidth()); VectorPtr vec = Vector::create(labelval->getData(), insNum, output->useGpu()); label = vec->castToInt(); diff --git a/paddle/gserver/layers/ConvBaseProjection.cpp b/paddle/gserver/layers/ConvBaseProjection.cpp index 08f36c516cfdadd42e9333c1c5a7a247df1f263e..19efed7b52ee07a5c509d069c286ccc3b21602f4 100644 --- a/paddle/gserver/layers/ConvBaseProjection.cpp +++ b/paddle/gserver/layers/ConvBaseProjection.cpp @@ -17,7 +17,7 @@ limitations under the License. 
*/ namespace paddle { -ThreadLocalD> ConvBaseProjection::convMem_; +ThreadLocalD> ConvBaseProjection::convMem_; ConvBaseProjection::ConvBaseProjection(const ProjectionConfig &config, ParameterPtr parameter, @@ -175,18 +175,18 @@ void ConvBaseProjection::reshape(int batchSize) { } void *ConvBaseProjection::getSpaceBytes(size_t size) { - std::vector &convMem = *convMem_; + std::vector &convMem = *convMem_; if (convMem.empty()) { int numDevices = hl_get_device_count(); convMem.resize(numDevices); } int devId = hl_get_device(); - MemoryHandle **localMem = &(convMem[devId]); - if (NULL == *localMem || size > (*localMem)->getAllocSize()) { - *localMem = new GpuMemoryHandle(size); + MemoryHandlePtr localMem = convMem[devId]; + if (NULL == localMem || size > localMem->getAllocSize()) { + localMem = std::make_shared(size); } - return (*localMem)->getBuf(); + return localMem->getBuf(); } ConvBaseProjection::~ConvBaseProjection() { diff --git a/paddle/gserver/layers/ConvBaseProjection.h b/paddle/gserver/layers/ConvBaseProjection.h index ebdb57845bb36ac607b1e4c8e02f9d20b6e82a36..bb7ffa627b745f45b0f210cdb58ef87d6990af73 100644 --- a/paddle/gserver/layers/ConvBaseProjection.h +++ b/paddle/gserver/layers/ConvBaseProjection.h @@ -105,7 +105,7 @@ protected: bool bias_; std::unique_ptr weight_; - static ThreadLocalD> convMem_; + static ThreadLocalD> convMem_; }; } // namespace paddle diff --git a/paddle/gserver/layers/MKLDNNAddtoLayer.cpp b/paddle/gserver/layers/MKLDNNAddtoLayer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8eb700723f2cf7dda969739bb5e3d48358d278a0 --- /dev/null +++ b/paddle/gserver/layers/MKLDNNAddtoLayer.cpp @@ -0,0 +1,154 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "MKLDNNAddtoLayer.h" + +using namespace mkldnn; // NOLINT + +namespace paddle { + +REGISTER_LAYER(mkldnn_addto, MKLDNNAddtoLayer); + +bool MKLDNNAddtoLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + if (!MKLDNNLayer::init(layerMap, parameterMap)) { + return false; + } + + layerSize_ = getSize(); + for (size_t i = 0; i < inputLayers_.size(); i++) { + CHECK_EQ(layerSize_, inputLayers_[i]->getSize()) << "input size must equal"; + } + if (biasParameter_.get() != NULL) { + biases_ = + std::unique_ptr(new Weight(1, layerSize_, biasParameter_, 0)); + } + return true; +} + +void MKLDNNAddtoLayer::reshape( + int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) { + CHECK_EQ(layerSize_, getSize()) << "this layer size can not be changed"; + reshapeInput(bs, ih, iw); + ic = inputLayers_[0]->getSize() / ih / iw; + CHECK_EQ((size_t)ic * ih * iw, inputLayers_[0]->getSize()); + CHECK_EQ(inputElemenCnt_, (size_t)bs * ic * ih * iw); + for (size_t i = 0; i < inputLayers_.size(); i++) { + CHECK_EQ(int64_t(bs), inputLayers_[i]->getOutput().getBatchSize()); + CHECK_EQ(layerSize_, inputLayers_[i]->getSize()); + } + + oc = ic; + oh = ih; + ow = iw; + reshapeOutput(oh, ow); + resizeOutput(bs, oc * oh * ow); + printSizeInfo(); +} + +void MKLDNNAddtoLayer::resetFwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + if (biases_) { + LOG(FATAL) << "not implemented yet"; + } + resetFwdBuffers(inVals_, out); + in = inVals_[0]; + + std::shared_ptr fwdPD; + resetFwdPD(fwdPD, inVals_, out); + + resetFwdPipeline(pipeline, fwdPD, inVals_, out); +} + +void MKLDNNAddtoLayer::resetBwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + resetBwdBuffers(inGrads_, out); + in = inGrads_[0]; + + // backward only need share output grad to input grad + for (size_t i = 0; i < inGrads_.size(); i++) { + if (inGrads_[i] != nullptr) { + inGrads_[i] = out; + inputLayers_[i]->getOutputGrad()->setData(inGrads_[i]->getData()); + } + } +} + +void MKLDNNAddtoLayer::updateWeights(const UpdateCallback& callback) { + if (biases_ && biases_->getWGrad()) { + biases_->getParameterPtr()->incUpdate(callback); + } +} + +void MKLDNNAddtoLayer::resetFwdBuffers(std::vector& inputs, + MKLDNNMatrixPtr& out) { + inputs.resize(inputLayers_.size()); + for (size_t i = 0; i < inputs.size(); i++) { + resetInValue(inputs[i], nullptr, i); + CHECK(inputs[i]); + inputs[i]->downSpatial(); + } + for (size_t i = 1; i < inputs.size(); i++) { + CHECK_PRIMITIVE_DESC_EQ(inputs[i], inputs[0]->getPrimitiveDesc()); + } + + resetOutValue(out, inputs[0]->getPrimitiveDesc()); +} + +void MKLDNNAddtoLayer::resetFwdPD(std::shared_ptr& pd, + std::vector& inputs, + MKLDNNMatrixPtr out) { + std::vector scales(inputs.size(), 1.0); + std::vector srcPDs; + for (size_t i = 0; i < inputs.size(); i++) { + srcPDs.push_back(inputs[i]->getPrimitiveDesc()); + } + CHECK(out); + pd.reset(new sum::primitive_desc(out->getMemoryDesc(), scales, srcPDs)); + CHECK_PRIMITIVE_DESC_EQ(out, pd->dst_primitive_desc()); +} + +void MKLDNNAddtoLayer::resetFwdPipeline( + std::vector& pipeline, + std::shared_ptr& pd, + std::vector& inputs, + MKLDNNMatrixPtr& out) { + std::vector srcs; + for (size_t i = 0; i < inputs.size(); i++) { + srcs.push_back(*(inputs[i])); + } + fwd_.reset(new sum(*pd, srcs, *out)); + pipeline.push_back(*fwd_); +} + +void MKLDNNAddtoLayer::resetBwdBuffers(std::vector& inputs, + MKLDNNMatrixPtr& out) { 
+ CHECK(outVal_); + resetOutGrad(out, outVal_->getPrimitiveDesc()); + CHECK(out); + + inputs.resize(inputLayers_.size()); + for (size_t i = 0; i < inputs.size(); i++) { + resetInGrad(inputs[i], inVal_->getPrimitiveDesc(), i); + CHECK_PRIMITIVE_DESC_EQ(inputs[i], out->getPrimitiveDesc()); + } +} + +} // namespace paddle diff --git a/paddle/gserver/layers/MKLDNNAddtoLayer.h b/paddle/gserver/layers/MKLDNNAddtoLayer.h new file mode 100644 index 0000000000000000000000000000000000000000..15f74ec5bdf3d1e4ae5e09051be6be418590a67a --- /dev/null +++ b/paddle/gserver/layers/MKLDNNAddtoLayer.h @@ -0,0 +1,110 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "MKLDNNLayer.h" +#include "mkldnn.hpp" + +namespace paddle { + +/** + * @brief A subclass of MKLDNNLayer Addto layer. + * + * The config file api is mkldnn_addto + */ +class MKLDNNAddtoLayer : public MKLDNNLayer { +protected: + std::vector inVals_; + std::vector inGrads_; + + // layer size == ic * ih * iw == oc * oh *ow, and can not be changed + size_t layerSize_; + + // TODO(TJ): this part has not been optimized by MKL-DNN + std::unique_ptr biases_; + +public: + explicit MKLDNNAddtoLayer(const LayerConfig& config) : MKLDNNLayer(config) {} + + ~MKLDNNAddtoLayer() {} + + bool init(const LayerMap& layerMap, + const ParameterMap& parameterMap) override; + + void reshape( + int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override; + + void resetFwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) override; + + void resetBwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) override; + + void updateWeights(const UpdateCallback& callback) override; + + void printValueFormat() override { + for (size_t i = 0; i < inVals_.size(); ++i) { + VLOG(MKLDNN_FMTS) << i << " input: " << inVals_[i]->getFormat() << " >>>"; + } + if (outVal_) { + VLOG(MKLDNN_FMTS) << outVal_->getFormat() << " >>> "; + } + if (extOutVal_) { + VLOG(MKLDNN_FMTS) << extOutVal_->getFormat(); + } + } + + void printGradFormat() override { + if (extOutGrad_) { + VLOG(MKLDNN_FMTS) << extOutGrad_->getFormat(); + } + if (outGrad_) { + VLOG(MKLDNN_FMTS) << outGrad_->getFormat() << " <<< "; + } + for (size_t i = 0; i < inGrads_.size(); ++i) { + VLOG(MKLDNN_FMTS) << i << " input: " << inGrads_[i]->getFormat() << "<<<"; + } + } + +protected: + /** + * Forward functions: reset buffers(inputs, output, bias), + * reset primitive descriptor, + * reset pipeline. 
+ */ + void resetFwdBuffers(std::vector& inputs, + MKLDNNMatrixPtr& out); + void resetFwdPD(std::shared_ptr& pd, + std::vector& inputs, + MKLDNNMatrixPtr out); + void resetFwdPipeline(std::vector& pipeline, + std::shared_ptr& pd, + std::vector& inputs, + MKLDNNMatrixPtr& out); + + /** + * Backward functions: reset buffers(inputs, output, bias) + */ + void resetBwdBuffers(std::vector& inputs, + MKLDNNMatrixPtr& out); +}; + +} // namespace paddle diff --git a/paddle/gserver/layers/MKLDNNLayer.cpp b/paddle/gserver/layers/MKLDNNLayer.cpp index 663a10509857ec9fb487c1cda1621bdfac1250ac..5fd62f4f73b18df683ccf74143e45054c3631c22 100644 --- a/paddle/gserver/layers/MKLDNNLayer.cpp +++ b/paddle/gserver/layers/MKLDNNLayer.cpp @@ -77,7 +77,7 @@ void MKLDNNLayer::forward(PassType passType) { needResetBwd_ = true; } - if (inputLayers_[0]->getType() == "data") { + if (inputLayers_[0]->getType() == "data" && inputLayers_.size() == 1) { // Update input value data when input layer is "data" type, // since the input value data address might be changed. CHECK(extInVal_); @@ -171,14 +171,16 @@ void MKLDNNLayer::resetWithMatrix(MKLDNNMatrixPtr& dnn, } void MKLDNNLayer::resetInValue( - MKLDNNMatrixPtr& in, const std::shared_ptr& intPD) { + MKLDNNMatrixPtr& in, + const std::shared_ptr& intPD, + size_t inputIdx) { cvtInVal_ = nullptr; extInVal_ = nullptr; in = nullptr; CHECK_GT(bs_ * ic_ * ih_ * iw_, 0); auto extPD = MKLDNNMatrix::createPrimitiveDesc( {bs_, ic_, ih_, iw_}, format::nchw, engine_); - const MatrixPtr& inMat = inputLayers_[0]->getOutputValue(); + const MatrixPtr& inMat = inputLayers_[inputIdx]->getOutputValue(); in = std::dynamic_pointer_cast(inMat); CHECK_EQ(inputIsOnlyMKLDNN(), in != nullptr); if (in == nullptr || in->getFormat() == format::nc) { @@ -216,11 +218,12 @@ void MKLDNNLayer::resetOutValue(MKLDNNMatrixPtr& out, } void MKLDNNLayer::resetInGrad(MKLDNNMatrixPtr& in, - memory::primitive_desc intPD) { + memory::primitive_desc intPD, + size_t inputIdx) { cvtInGrad_ = nullptr; extInGrad_ = nullptr; in = nullptr; - LayerPtr& input = inputLayers_[0]; + LayerPtr& input = inputLayers_[inputIdx]; if (input->getOutputGrad() == nullptr) { // no need input grad return; @@ -245,7 +248,6 @@ void MKLDNNLayer::resetInGrad(MKLDNNMatrixPtr& in, return; } // need create reorder - // TODO(TJ): add macro definition to simplify it CHECK(extInVal_ != nullptr && isPaddleFormat(extInVal_->getFormat())) << "should have external input value and the format must be nchw(nc)"; extInGrad_ = MKLDNNMatrix::create(extInVal_->getPrimitiveDesc(), inMat); diff --git a/paddle/gserver/layers/MKLDNNLayer.h b/paddle/gserver/layers/MKLDNNLayer.h index 2c21a5b2aaecb17a52a5de9a98664068f2255d83..7479c34c92b5231b2521493bc631474d4efd4224 100644 --- a/paddle/gserver/layers/MKLDNNLayer.h +++ b/paddle/gserver/layers/MKLDNNLayer.h @@ -199,7 +199,8 @@ protected: */ void resetInValue( MKLDNNMatrixPtr& in, - const std::shared_ptr& intPD = nullptr); + const std::shared_ptr& intPD = nullptr, + size_t inputIdx = 0); /** * reset output value from internal primitive desc. @@ -212,7 +213,9 @@ protected: * reset input grad from internal primitive desc. * reset both internal and external buffer and create reorder if necessary. */ - void resetInGrad(MKLDNNMatrixPtr& in, mkldnn::memory::primitive_desc intPD); + void resetInGrad(MKLDNNMatrixPtr& in, + mkldnn::memory::primitive_desc intPD, + size_t inputIdx = 0); /** * reset output grad from internal primitive desc. 
diff --git a/paddle/gserver/layers/SubSequenceLayer.cpp b/paddle/gserver/layers/SubSequenceLayer.cpp index 19b7ad1869af98e6313fe85a40203fd1e84f31d6..00d8ce017aa0121217688a1afc1fe31b4c3619ec 100644 --- a/paddle/gserver/layers/SubSequenceLayer.cpp +++ b/paddle/gserver/layers/SubSequenceLayer.cpp @@ -98,8 +98,19 @@ void SubSequenceLayer::forward(PassType passType) { CHECK_EQ(numSequences2, numSequences3); MatrixPtr inputValue = input.value; - IVectorPtr offsetValue = offsetSeq.ids; - IVectorPtr sizeValue = sizeSeq.ids; + IVectorPtr offsetValue; + IVectorPtr sizeValue; + + if (useGpu_) { + // copy to cpu + IVector::resizeOrCreate(offsetValue, offsetSeq.ids->getSize(), false); + IVector::resizeOrCreate(sizeValue, sizeSeq.ids->getSize(), false); + offsetValue->copyFrom(*offsetSeq.ids); + sizeValue->copyFrom(*sizeSeq.ids); + } else { + offsetValue = offsetSeq.ids; + sizeValue = sizeSeq.ids; + } CHECK_EQ(offsetValue->getSize(), numSequences1); CHECK_EQ(sizeValue->getSize(), numSequences1); @@ -176,8 +187,21 @@ void SubSequenceLayer::backward(const UpdateCallback& callback) { size_t numSequences1 = startPositions1->getSize() - 1; const int* starts1 = startPositions1->getData(); - IVectorPtr offsetValue = getInput(1).ids; - IVectorPtr sizeValue = getInput(2).ids; + const Argument& offsetSeq = getInput(1); + const Argument& sizeSeq = getInput(2); + IVectorPtr offsetValue; + IVectorPtr sizeValue; + + if (useGpu_) { + // copy to cpu + IVector::resizeOrCreate(offsetValue, offsetSeq.ids->getSize(), false); + IVector::resizeOrCreate(sizeValue, sizeSeq.ids->getSize(), false); + offsetValue->copyFrom(*offsetSeq.ids); + sizeValue->copyFrom(*sizeSeq.ids); + } else { + offsetValue = offsetSeq.ids; + sizeValue = sizeSeq.ids; + } int* offsets = offsetValue->getData(); int* sizes = sizeValue->getData(); diff --git a/paddle/gserver/tests/MKLDNNTester.cpp b/paddle/gserver/tests/MKLDNNTester.cpp index 7670cb88fb67dec0ab1d170458d102da166dc7b6..afe1608eab8eaf1217a7a0c8a2774e37c5ea83f4 100644 --- a/paddle/gserver/tests/MKLDNNTester.cpp +++ b/paddle/gserver/tests/MKLDNNTester.cpp @@ -132,7 +132,7 @@ void MKLDNNTester::checkForward() { VLOG(MKLDNN_TESTS) << "Check Forward"; printTopDatas(); double delta = - compareMatrix(dnnLayer_->getOutputValue(), refLayer_->getOutputValue()); + compareMatrix(refLayer_->getOutputValue(), dnnLayer_->getOutputValue()); EXPECT_LE(fabs(delta), eps_); } @@ -147,7 +147,7 @@ void MKLDNNTester::checkBackwardData() { VLOG(MKLDNN_ALL) << "Reference Backward Result: InputGrad " << i; printMatrix(refDiff); - double delta = compareMatrix(dnnDiff, refDiff); + double delta = compareMatrix(refDiff, dnnDiff); EXPECT_LE(fabs(delta), eps_); if (isBN) { // the other two inputs in batch norm are for moving mean and var @@ -177,7 +177,7 @@ void MKLDNNTester::checkBackwardWgts() { << parameters_[REF][i]->getName(); printVector(ref); - double delta = compareVector(dnn, ref); + double delta = compareVector(ref, dnn); EXPECT_LE(fabs(delta), eps_); } diff --git a/paddle/gserver/tests/test_MKLDNN.cpp b/paddle/gserver/tests/test_MKLDNN.cpp index d60b0f04a1613acc3711e711cfe18ced5f0f924d..2e8d9f3333b36005c9b3b28449c76a4a44c74cc6 100644 --- a/paddle/gserver/tests/test_MKLDNN.cpp +++ b/paddle/gserver/tests/test_MKLDNN.cpp @@ -271,20 +271,53 @@ TEST(MKLDNNLayer, BatchNormLayer) { testBatchNormLayer({16, 32, 16, 16}); } -struct testActDesc { +struct testImageDesc { int bs, ic, ih, iw; }; -static void getAddtoConfig(TestConfig& cfg, const testActDesc& pm) { +static void getAddtoConfig(TestConfig& cfg, + const 
testImageDesc& pm, + const size_t nInputs = 1) { cfg.biasSize = 0; cfg.layerConfig.set_type("addto"); size_t layerSize = pm.ic * pm.ih * pm.iw; cfg.layerConfig.set_size(layerSize); - cfg.inputDefs.push_back({INPUT_DATA, "layer_0", layerSize, 0}); - cfg.layerConfig.add_inputs(); + cfg.layerConfig.set_active_type("relu"); + for (size_t i = 0; i < nInputs; ++i) { + std::stringstream ss; + ss << "layer_" << i; + cfg.inputDefs.push_back({INPUT_DATA, ss.str(), layerSize, 0}); + LayerInputConfig* input = cfg.layerConfig.add_inputs(); + ImageConfig* img_conf = input->mutable_image_conf(); + img_conf->set_channels(pm.ic); + img_conf->set_img_size_y(pm.ih); + img_conf->set_img_size(pm.iw); + } +} + +void testAddtoLayer(const testImageDesc& pm, const size_t nInputs) { + CHECK_GE(nInputs, 1); + TestConfig dnnConfig; + getAddtoConfig(dnnConfig, pm, nInputs); + dnnConfig.layerConfig.set_type("mkldnn_addto"); + // TODO(TJ): test with bias + for (auto withBias : {false}) { + if (withBias) { + dnnConfig.biasSize = pm.ic * pm.ih * pm.iw; + } else { + dnnConfig.biasSize = 0; + } + RUN_MKLDNN_TEST_LAYER(dnnConfig, "addto", pm) + } +} + +TEST(MKLDNNLayer, AddtoLayer) { + testAddtoLayer({16, 5, 14, 14}, 1); + testAddtoLayer({8, 10, 8, 8}, 2); + testAddtoLayer({4, 12, 1, 1}, 3); } -void testActivation(std::string actType, const testActDesc& pm) { +void testActivation(std::string actType, const testImageDesc& pm) { // TODO(TJ): remove me when paddle support elu activation if (actType == "mkldnn_elu") { return; diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index 81d92ec6f4f8c94e08d3b86b6319a9bf06f76a22..f22f86468db7be6c87193f85c3f87aef4a9d3c68 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -69,6 +69,20 @@ function(op_library TARGET) file(APPEND ${pybind_file} "USE_OP(max_pool2d_with_index);\n") endif() + # conv_op contains several operators + if ("${TARGET}" STREQUAL "conv_op") + set(pybind_flag 1) + # It's enough to just adding one operator to pybind + file(APPEND ${pybind_file} "USE_OP(conv2d);\n") + endif() + + # conv_transpose_op contains several operators + if ("${TARGET}" STREQUAL "conv_transpose_op") + set(pybind_flag 1) + # It's enough to just adding one operator to pybind + file(APPEND ${pybind_file} "USE_OP(conv2d_transpose);\n") + endif() + # pool_cudnn_op contains several operators if ("${TARGET}" STREQUAL "pool_cudnn_op") set(pybind_flag 1) @@ -96,7 +110,7 @@ function(op_library TARGET) # It's enough to just adding one operator to pybind file(APPEND ${pybind_file} "USE_GPU_ONLY_OP(ncclAllReduce);\n") endif() - + # reduce_op contains several operators if ("${TARGET}" STREQUAL "reduce_op") set(pybind_flag 1) @@ -104,6 +118,11 @@ function(op_library TARGET) file(APPEND ${pybind_file} "USE_OP(reduce_sum);\n") endif() + if ("${TARGET}" STREQUAL "tensor_array_read_write_op") + set(pybind_flag 1) + file(APPEND ${pybind_file} "USE_NO_KERNEL_OP(read_from_array);\nUSE_NO_KERNEL_OP(write_to_array);\n") + endif() + # pybind USE_NO_KERNEL_OP # HACK: if REGISTER_OP_CPU_KERNEL presents the operator must have kernel file(READ ${TARGET}.cc TARGET_CONTENT) @@ -139,24 +158,38 @@ set(DEPS_OPS sum_op pool_op pool_with_index_op + conv_op + lstm_op + conv_transpose_op nccl_op sequence_conv_op - lstm_op) + sequence_pool_op + lod_rank_table_op + lstm_op + tensor_array_read_write_op + gru_op) op_library(cond_op SRCS cond_op.cc DEPS framework_proto tensor operator net_op) op_library(cross_entropy_op DEPS cross_entropy) 
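As context for the new mkldnn_addto test above: the reference addto layer it is compared against accumulates its inputs elementwise (the configured activation, relu in this config, is then applied on top, and bias stays disabled). A minimal standalone sketch of that reference accumulation, using an illustrative helper name rather than the real layer code:

    #include <cassert>
    #include <vector>

    // Elementwise sum of equally sized input buffers; this is the quantity the
    // CPU addto layer produces before its activation is applied.
    std::vector<float> addtoReference(
        const std::vector<std::vector<float>>& inputs) {
      assert(!inputs.empty());
      std::vector<float> out(inputs[0].size(), 0.0f);
      for (const auto& in : inputs) {
        assert(in.size() == out.size());
        for (size_t i = 0; i < out.size(); ++i) {
          out[i] += in[i];
        }
      }
      return out;
    }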
op_library(softmax_with_cross_entropy_op DEPS cross_entropy softmax) +op_library(conv_op DEPS vol2col) op_library(sum_op DEPS net_op selected_rows_functor) op_library(pool_op DEPS pooling) op_library(pool_with_index_op DEPS pooling) +op_library(lod_rank_table_op SRCS lod_rank_table_op.cc DEPS lod_rank_table) +op_library(tensor_array_read_write_op SRCS tensor_array_read_write_op.cc) if(WITH_GPU) op_library(nccl_op DEPS nccl_common) endif() op_library(sequence_conv_op DEPS context_project) +op_library(sequence_pool_op DEPS sequence_pooling) op_library(lstm_op DEPS sequence2batch lstm_compute) +op_library(conv_transpose_op DEPS vol2col) +op_library(gru_op DEPS sequence2batch gru_compute) op_library(dynamic_recurrent_op SRCS dynamic_recurrent_op.cc rnn/recurrent_op_utils.cc DEPS net_op tensor_array) op_library(recurrent_op SRCS recurrent_op.cc DEPS executor) + list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS}) foreach(src ${GENERAL_OPS}) op_library(${src}) diff --git a/paddle/operators/accuracy_op.cc b/paddle/operators/accuracy_op.cc index f50e41bc410abd4c2f5690ef2ea3e112814c875a..e34754f925a2519e0c2ee346f3b593898c354ef7 100644 --- a/paddle/operators/accuracy_op.cc +++ b/paddle/operators/accuracy_op.cc @@ -37,7 +37,7 @@ class AccuracyOp : public framework::OperatorWithKernel { auto inference_dim = ctx->GetInputDim("Out"); auto label_dim = ctx->GetInputDim("Label"); - // Assume indices has same shape with infernece, because + // Assume indices has same shape as inference, because // it's the output of topk. PADDLE_ENFORCE_EQ(label_dim.size(), 2, "label's rank must be 2."); @@ -66,8 +66,8 @@ class AccuracyOpMaker : public framework::OpProtoAndCheckerMaker { framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { // TODO(typhoonzero): support both inference value and indices. - AddInput("Out", "topk (inferences) the network output"); - AddInput("Indices", "topk (indices) the network output"); + AddInput("Out", "The network output of topk (inferences)"); + AddInput("Indices", "The the network output of topk (indices)"); AddInput("Label", "Label of the training data"); // TODO(typhoonzero): AddInput("Weight", ... AddOutput("Accuracy", "The accuracy of current batch"); @@ -75,13 +75,17 @@ class AccuracyOpMaker : public framework::OpProtoAndCheckerMaker { AddOutput("Total", "The samples count of current batch"); AddComment(R"DOC( -Accuracy. It will print accuracy rate for classification. -The accuracy is: -.. math:: -accuracy = \\frac{NumOfCorrectPredicts}{NumOfAllSamples}) +Accuracy Operator. + +It will print accuracy rate for classification. +The accuracy is calculated as follows: + +$$accuracy = \frac{NumOfCorrectPredicts}{NumOfAllSamples}$$ + +Both the input Out and Label can carry the LoD (Level of Details) +information, or not. But the output only shares the LoD information +with the input Out(Inference). -Both the input `Out` and `Label` can carry the LoD (Level of Details) -information, or not. But the output only shares the LoD with input `Inference`. 
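To make the documented accuracy formula concrete: Out and Indices come from topk, and a sample is counted as correct when its label appears among its top-k indices. A standalone sketch under that assumption (illustrative helper, not the operator kernel):

    #include <cstdint>
    #include <vector>

    // accuracy = NumOfCorrectPredicts / NumOfAllSamples
    float topkAccuracy(const std::vector<std::vector<int64_t>>& topkIndices,
                       const std::vector<int64_t>& labels) {
      if (labels.empty()) return 0.0f;
      size_t correct = 0;
      for (size_t i = 0; i < labels.size(); ++i) {
        for (int64_t idx : topkIndices[i]) {
          if (idx == labels[i]) {
            ++correct;
            break;
          }
        }
      }
      return static_cast<float>(correct) / labels.size();
    }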
)DOC"); } }; diff --git a/paddle/operators/accuracy_op.cu b/paddle/operators/accuracy_op.cu index a0483f367e1e3bcf4ad50ee9155f25461c1b4ed2..d0c4c0d25d6f4e3ab7acd72d62a8a17fa102637b 100644 --- a/paddle/operators/accuracy_op.cu +++ b/paddle/operators/accuracy_op.cu @@ -72,11 +72,8 @@ class AccuracyOpCUDAKernel : public framework::OpKernel { } AccuracyCudaKernel<<< - 1, PADDLE_CUDA_NUM_THREADS, 0, - reinterpret_cast( - ctx.device_context()) - .stream()>>>(num_samples, infer_width, indices_data, label_data, - accuracy_data); + 1, PADDLE_CUDA_NUM_THREADS, 0, ctx.cuda_device_context().stream()>>>( + num_samples, infer_width, indices_data, label_data, accuracy_data); } }; diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index 90f1535fcd387c34ea39d84d9c2ec78fcbc3c764..83d35a450d0e8ebf5311cdfd948b066642ccec8c 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -43,7 +43,12 @@ class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Sigmoid operator"); AddOutput("Y", "Output of Sigmoid operator"); - AddComment("Sigmoid activation operator, sigmoid = 1 / (1 + exp(-x))"); + AddComment(R"DOC( +Sigmoid Activation Operator. + +$y = 1 / (1 + e^{-x})$ + +)DOC"); } }; @@ -54,8 +59,12 @@ class LogSigmoidOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of LogSigmoid operator"); AddOutput("Y", "Output of LogSigmoid operator"); - AddComment( - "Logsigmoid activation operator, logsigmoid = log (1 / (1 + exp(-x)))"); + AddComment(R"DOC( +Logsigmoid Activation Operator. + +$y = \log(1 / (1 + e^{-x}))$ + +)DOC"); } }; @@ -65,7 +74,12 @@ class ExpOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Exp operator"); AddOutput("Y", "Output of Exp operator"); - AddComment("Exp activation operator, exp(x) = e^x"); + AddComment(R"DOC( +Exp Activation Operator. + +$y = e^x$ + +)DOC"); } }; @@ -75,7 +89,12 @@ class ReluOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Relu operator"); AddOutput("Y", "Output of Relu operator"); - AddComment("Relu activation operator, relu(x) = max(x, 0)"); + AddComment(R"DOC( +Relu Activation Operator. + +$y = \max(x, 0)$ + +)DOC"); } }; @@ -87,11 +106,14 @@ class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of LeakyRelu operator"); AddOutput("Y", "Output of LeakyRelu operator"); - AddComment( - "LeakyRelu activation operator, " - "leaky_relu = max(x, alpha * x)"); AddAttr("alpha", "The small negative slope") .SetDefault(static_cast(0.02f)); + AddComment(R"DOC( +LeakyRelu Activation Operator. + +$y = \max(x, \alpha * x)$ + +)DOC"); } }; @@ -103,12 +125,20 @@ class SoftShrinkOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Softshrink operator"); AddOutput("Y", "Output of Softshrink operator"); - AddComment( - "Softshrink activation operator, " - "softshrink = x - lambda, if x > lambda;" - " x + lambda, if x < lambda; 0 otherwise"); AddAttr("lambda", "non-negative offset") .SetDefault(static_cast(0.5f)); + AddComment(R"DOC( +Softshrink Activation Operator. 
+ +$$ +y = \begin{cases} + x - \lambda, \text{if } x > \lambda \\ + x + \lambda, \text{if } x < -\lambda \\ + 0, \text{otherwise} + \end{cases} +$$ + +)DOC"); } }; @@ -118,9 +148,12 @@ class TanhOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Tanh operator"); AddOutput("Y", "Output of Tanh operator"); - AddComment( - "Tanh activation operator, tanh = (exp(x) - exp(-x)) / (exp(x) + " - "exp(-x))"); + AddComment(R"DOC( +Tanh Activation Operator. + +$$y = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$ + +)DOC"); } }; @@ -131,7 +164,12 @@ class TanhShrinkOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of TanhShrink operator"); AddOutput("Y", "Output of TanhShrink operator"); - AddComment("TanhShrink activation operator, tanhshrink(x) = x - tanh(x)"); + AddComment(R"DOC( +TanhShrink Activation Operator. + +$$y = x - \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$ + +)DOC"); } }; @@ -143,13 +181,20 @@ class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of HardShrink operator"); AddOutput("Y", "Output of HardShrink operator"); - AddComment( - "HardShrink activation operator, " - "hard_shrink(x) = x if x > lambda" - "hard_shrink(x) = x if x < -lambda" - "hard_shrink(x) = 0 otherwise"); AddAttr("threshold", "The value of threshold for HardShrink") .SetDefault(static_cast(0.5)); + AddComment(R"DOC( +HardShrink Activation Operator. + +$$ +y = \begin{cases} + x, \text{if } x > \lambda \\ + x, \text{if } x < -\lambda \\ + 0, \text{otherwise} + \end{cases} +$$ + +)DOC"); } }; @@ -159,7 +204,12 @@ class SqrtOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Sqrt operator"); AddOutput("Y", "Output of Sqrt operator"); - AddComment("Sqrt activation operator, sqrt(x) = x^(1/2)"); + AddComment(R"DOC( +Sqrt Activation Operator. + +$y = \sqrt{x}$ + +)DOC"); } }; @@ -169,7 +219,12 @@ class AbsOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Abs operator"); AddOutput("Y", "Output of Abs operator"); - AddComment("Abs activation operator, abs(x) = |x|"); + AddComment(R"DOC( +Abs Activation Operator. + +$y = |x|$ + +)DOC"); } }; @@ -180,7 +235,12 @@ class ReciprocalOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Reciprocal operator"); AddOutput("Y", "Output of Reciprocal operator"); - AddComment("Reciprocal activation operator, reciprocal(x) = 1 / x"); + AddComment(R"DOC( +Reciprocal Activation Operator. + +$$y = \frac{1}{x}$$ + +)DOC"); } }; @@ -190,7 +250,14 @@ class LogOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Log operator"); AddOutput("Y", "Output of Log operator"); - AddComment("Log activation operator, log(x) = natural logarithm of x"); + AddComment(R"DOC( +Log Activation Operator. + +$y = \ln(x)$ + +Natural logarithm of x. + +)DOC"); } }; @@ -200,7 +267,12 @@ class SquareOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Square operator"); AddOutput("Y", "Output of Square operator"); - AddComment("Square activation operator, square(x) = x^2"); + AddComment(R"DOC( +Square Activation Operator. 
+ +$y = x^2$ + +)DOC"); } }; @@ -211,7 +283,12 @@ class SoftplusOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Softplus operator"); AddOutput("Y", "Output of Softplus operator"); - AddComment("Softplus activation operator, softplus(x) = log(1 + exp(x))"); + AddComment(R"DOC( +Softplus Activation Operator. + +$y = \ln(1 + e^{x})$ + +)DOC"); } }; @@ -222,7 +299,12 @@ class SoftsignOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Softsign operator"); AddOutput("Y", "Output of Softsign operator"); - AddComment("Softsign activation operator, softsign(x) = x / (1 + |x|)"); + AddComment(R"DOC( +Softsign Activation Operator. + +$$y = \frac{x}{1 + |x|}$$ + +)DOC"); } }; @@ -233,11 +315,16 @@ class BReluOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of BRelu operator"); AddOutput("Y", "Output of BRelu operator"); - AddComment("BRelu activation operator, brelu = max(min(x, t_min), t_max)"); AddAttr("t_min", "The min marginal value of BRelu") .SetDefault(static_cast(0)); AddAttr("t_max", "The max marginal value of BRelu") .SetDefault(static_cast(24)); + AddComment(R"DOC( +BRelu Activation Operator. + +$y = \max(\min(x, t_{min}), t_{max})$ + +)DOC"); } }; @@ -249,11 +336,14 @@ class SoftReluOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of SoftRelu operator"); AddOutput("Y", "Output of SoftRelu operator"); - AddComment( - "SoftRelu activation operator, soft_relu = log(1 + exp(max(min(x, " - "threshold), threshold)))"); AddAttr("threshold", "The threshold value of SoftRelu") .SetDefault(static_cast(40)); + AddComment(R"DOC( +SoftRelu Activation Operator. + +$y = \ln(1 + \exp(\max(\min(x, threshold), threshold))$ + +)DOC"); } }; @@ -262,19 +352,19 @@ class ELUOpMaker : public framework::OpProtoAndCheckerMaker { public: ELUOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", - "(Tensor) The input of ELU operator, it shouldn't be empty. Input " - "is flattened and treated as a 1D array."); - AddOutput("Y", - "(Tensor) The output of ELU operator. It has the same shape as " - "the input."); - AddAttr( - "alpha", "(float, default 1.0) Alpha value in the elu formulation.") - .SetDefault(static_cast(1.)); + AddInput("X", "Input of ELU operator"); + AddOutput("Y", "Output of ELU operator"); + AddAttr("alpha", "The alpha value of ELU") + .SetDefault(static_cast(1.0f)); AddComment(R"DOC( - ELU activation operator. It applies this element-wise computation on - the input: f(x) = max(0, x) + min(0, alpha * (exp(x) - 1)). - Check .. _Link: https://arxiv.org/abs/1511.07289 for more details.)DOC"); +ELU Activation Operator. + +Applies the following element-wise computation on the input according to +https://arxiv.org/abs/1511.07289. + +$y = \max(0, x) + \min(0, \alpha * (e^x - 1))$ + +)DOC"); } }; @@ -285,9 +375,14 @@ class Relu6OpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Relu6 operator"); AddOutput("Y", "Output of Relu6 operator"); - AddComment("Relu6 activation operator, relu6 = min(max(0, x), 6)"); AddAttr("threshold", "The threshold value of Relu6") .SetDefault(static_cast(6)); + AddComment(R"DOC( +Relu6 Activation Operator. 
+ +$y = \min(\max(0, x), 6)$ + +)DOC"); } }; @@ -298,9 +393,14 @@ class PowOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Pow operator"); AddOutput("Y", "Output of Pow operator"); - AddComment("Pow activation operator, pow(x, factor) = x^factor"); AddAttr("factor", "The exponential factor of Pow") .SetDefault(static_cast(1)); + AddComment(R"DOC( +Pow Activation Operator. + +$y = x^{factor}$ + +)DOC"); } }; @@ -311,11 +411,16 @@ class STanhOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of STanh operator"); AddOutput("Y", "Output of STanh operator"); - AddComment("STanh activation operator, stanh = b * tanh(a * x)"); AddAttr("scale_a", "The scale parameter of a for the input") .SetDefault(static_cast(2 / 3)); AddAttr("scale_b", "The scale parameter of b for the input") .SetDefault(static_cast(1.7159)); + AddComment(R"DOC( +STanh Activation Operator. + +$$y = b * \frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}$$ + +)DOC"); } }; @@ -327,12 +432,19 @@ class ThresholdedReluOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of ThresholdedRelu operator"); AddOutput("Y", "Output of ThresholdedRelu operator"); - AddComment( - "ThresholdedRelu activation operator, " - "thresholded_relu = x for x > threshold, " - "thresholded_relu = 0 otherwise."); AddAttr("threshold", "The threshold location of activation") .SetDefault(static_cast(1.0)); + AddComment(R"DOC( +ThresholdedRelu Activation Operator. + +$$ +y = \begin{cases} + x, \text{if } x > threshold \\ + 0, \text{otherwise} + \end{cases} +$$ + +)DOC"); } }; @@ -344,27 +456,23 @@ class HardSigmoidOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of HardSigmoid operator"); AddOutput("Y", "Output of HardSigmoid operator"); + AddAttr("slope", "Slope for linear approximation of sigmoid") + .SetDefault(static_cast(0.2)); + AddAttr("offset", "Offset for linear approximation of sigmoid") + .SetDefault(static_cast(0.5)); AddComment(R"DOC( -Hard Sigmoid activation operator. +HardSigmoid Activation Operator. -Segment-wise linear approximation of sigmoid[1]. -This is much faster than sigmoid. +Segment-wise linear approximation of sigmoid(https://arxiv.org/abs/1603.00391), +which is much faster than sigmoid. -hard_sigmoid = max(0, min(1, slope * x + shift)) +$y = \max(0, \min(1, slope * x + shift))$ The slope should be positive. The offset can be either positive or negative. -The default slope and shift are set from [1]. +The default slope and shift are set according to the above reference. It is recommended to use the defaults for this activation. 
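The piecewise activations documented above are easy to misread in prose, so here is a standalone scalar sketch of a few of them, with default attribute values taken from the makers (illustrative helpers, not the operator functors; offset plays the role of shift in the hard sigmoid formula):

    #include <algorithm>

    float softshrink(float x, float lambda = 0.5f) {
      if (x > lambda) return x - lambda;
      if (x < -lambda) return x + lambda;
      return 0.0f;
    }

    float hardshrink(float x, float threshold = 0.5f) {
      return (x > threshold || x < -threshold) ? x : 0.0f;
    }

    float relu6(float x, float threshold = 6.0f) {
      return std::min(std::max(0.0f, x), threshold);
    }

    float thresholdedRelu(float x, float threshold = 1.0f) {
      return x > threshold ? x : 0.0f;
    }

    // slope * x + offset, clipped to [0, 1].
    float hardSigmoid(float x, float slope = 0.2f, float offset = 0.5f) {
      return std::max(0.0f, std::min(1.0f, slope * x + offset));
    }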
-References: - [1] Noisy Activation Functions - (https://arxiv.org/abs/1603.00391) - - )DOC"); - AddAttr("slope", "Slope for linear approximation of sigmoid") - .SetDefault(static_cast(0.2)); - AddAttr("offset", "Offset for linear approximation of sigmoid") - .SetDefault(static_cast(0.5)); +)DOC"); } }; diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h index ddd966e26c9abad0d83f8b5c6e3e7d9ad65158a8..ceb4b4e40b67473f42e67e3f02f8e012e1b1eb50 100644 --- a/paddle/operators/activation_op.h +++ b/paddle/operators/activation_op.h @@ -232,7 +232,7 @@ struct HardShrinkGradFunctor : public BaseActivationFunctor { } }; -// softshrink(x) = x - lambda, if x > lambda; x + lambda, if x < lambda; 0 +// softshrink(x) = x - lambda, if x > lambda; x + lambda, if x < -lambda; 0 // otherwise template struct SoftShrinkFunctor : public BaseActivationFunctor { diff --git a/paddle/operators/adadelta_op.cc b/paddle/operators/adadelta_op.cc index 24e419b532d97bc16ab96dad418d6e73c03f30a0..b717e1647e4b89285b841420650dc69e8a1e0c58 100644 --- a/paddle/operators/adadelta_op.cc +++ b/paddle/operators/adadelta_op.cc @@ -64,16 +64,15 @@ class AdadeltaOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Param", "(Tensor) Input parameter"); AddInput("Grad", "(Tensor) Input gradient"); - AddInput("AvgSquaredGrad", - "(Tensor) Input expectation of squared gradient"); + AddInput("AvgSquaredGrad", "(Tensor) Input average of squared gradient"); AddInput("AvgSquaredUpdate", - "(Tensor) Input expectation of squared parameter updates"); + "(Tensor) Input average of squared parameter updates"); AddOutput("ParamOut", "(Tensor) Output parameter"); AddOutput("AvgSquaredGradOut", - "(Tensor) Output expectation of squared gradient"); + "(Tensor) Output average of squared gradient"); AddOutput("AvgSquaredUpdateOut", - "(Tensor) Output expectation of squared parameter updates"); + "(Tensor) Output average of squared parameter updates"); AddAttr("rho", "(float, default 0.95) Exponential decay rate " @@ -84,22 +83,21 @@ class AdadeltaOpMaker : public framework::OpProtoAndCheckerMaker { "numerical stability") .SetDefault(1.0e-6f); AddComment(R"DOC( -Adadelta Updates Operator. +Adadelta Optimizer. -This implements the Adadelta optimizer[1]. Adadelta is a per-dimension -adaptive learning rate method for gradient descent. +Adadelta optimizer is implemented as explained in: +https://arxiv.org/abs/1212.5701 +Adadelta is a per-dimension adaptive learning rate method used +for gradient descent. 
-Adadelta updates: +Adadelta updates are as follows: -avg_squared_grad_out = rho * avg_squared_grad + (1 - rho) * grad * grad -param_update = - sqrt((avg_squared_update + epsilon) / - (avg_squared_grad_out + epsilon)) * grad -avg_squared_update_out = rho * avg_squared_update + (1 - rho) * param_update**2 -param_out = param + param_update - -References: - [1] ADADELTA: An Adaptive Learning Rate Method - https://arxiv.org/abs/1212.5701 +$$avgSquaredGradOut = \rho * avgSquaredGrad + (1 - \rho) * grad * grad \\ +paramUpdate = - \sqrt{((avgSquaredUpdate + \epsilon) / + (avgSquaredGradOut + \epsilon))} * grad \\ +avgSquaredUpdateOut = \rho * avgSquaredUpdate + (1 - \rho) * + {(paramUpdate)}^2 \\ +paramOut = param + paramUpdate$$ )DOC"); } diff --git a/paddle/operators/adagrad_op.cc b/paddle/operators/adagrad_op.cc index bc081f87dcab0dcd8ef329dcb1f66b627c82b4a2..8d1a2b7938d2c6607cbeb3cecb72d1d5b83dd8b9 100644 --- a/paddle/operators/adagrad_op.cc +++ b/paddle/operators/adagrad_op.cc @@ -73,12 +73,16 @@ class AdagradOpMaker : public framework::OpProtoAndCheckerMaker { Adaptive Gradient Algorithm (Adagrad). -moment_out = moment + grad * grad -param_out = param - learning_rate * grad / (sqrt(moment_out) + epsilon) +The update is done as follows: + +$$momentOut = moment + grad * grad \\ +paramOut = param - learningRate * grad / (\sqrt{momentOut} + \epsilon) +$$ The original paper(http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf) -does not have the epsilon attribute. It is added here for numerical stability -by avoiding division by zero. +does not have the epsilon attribute. It is added here in our implementation +as also proposed here: http://cs231n.github.io/neural-networks-3/#ada +for numerical stability to avoid the division by zero error. )DOC"); } diff --git a/paddle/operators/adam_op.cc b/paddle/operators/adam_op.cc index 3572de06bd60f7979e3bfbf39856b04942ce81c0..97a091ae766abfba5412bbd32c34a6f80701fbf7 100644 --- a/paddle/operators/adam_op.cc +++ b/paddle/operators/adam_op.cc @@ -51,8 +51,8 @@ class AdamOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(framework::product(beta1_pow_dims), 1, "Beta1 power accumulator should have 1 dimension"); auto beta2_pow_dims = ctx->GetInputDim("Beta2Pow"); - PADDLE_ENFORCE_EQ(framework::product(beta1_pow_dims), 1, - "Beta1 power accumulator should have 1 dimension"); + PADDLE_ENFORCE_EQ(framework::product(beta2_pow_dims), 1, + "Beta2 power accumulator should have 1 dimension"); auto param_dims = ctx->GetInputDim("Param"); PADDLE_ENFORCE_EQ( @@ -60,10 +60,10 @@ class AdamOp : public framework::OperatorWithKernel { "Param and Grad input of AdamOp should have same dimension"); PADDLE_ENFORCE_EQ( param_dims, ctx->GetInputDim("Moment1"), - "Param and Moment input of AdamOp should have same dimension"); + "Param and Moment1 input of AdamOp should have same dimension"); PADDLE_ENFORCE_EQ( param_dims, ctx->GetInputDim("Moment2"), - "Param and InfNorm input of AdamOp should have same dimension"); + "Param and Moment2 input of AdamOp should have same dimension"); ctx->SetOutputDim("ParamOut", param_dims); ctx->SetOutputDim("Moment1Out", param_dims); @@ -103,23 +103,20 @@ class AdamOpMaker : public framework::OpProtoAndCheckerMaker { .SetDefault(1.0e-8f); AddComment(R"DOC( -Adam Updates Operator. +Adam Optimizer. This implements the Adam optimizer from Section 2 of the Adam -paper[1]. Adam is a first-order gradient-based optimization -method based on adaptive estimates of lower-order moments.
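A standalone per-element sketch of the Adadelta and Adagrad updates documented above (illustrative only; the real kernels operate on whole tensors):

    #include <cmath>

    // One Adadelta step for a single parameter element.
    void adadeltaStep(float grad, float rho, float epsilon, float& param,
                      float& avgSquaredGrad, float& avgSquaredUpdate) {
      avgSquaredGrad = rho * avgSquaredGrad + (1 - rho) * grad * grad;
      float update = -std::sqrt((avgSquaredUpdate + epsilon) /
                                (avgSquaredGrad + epsilon)) * grad;
      avgSquaredUpdate = rho * avgSquaredUpdate + (1 - rho) * update * update;
      param += update;
    }

    // One Adagrad step for a single parameter element.
    void adagradStep(float grad, float learningRate, float epsilon,
                     float& param, float& moment) {
      moment += grad * grad;
      param -= learningRate * grad / (std::sqrt(moment) + epsilon);
    }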
+paper : https://arxiv.org/abs/1412.6980. +Adam is a first-order gradient-based optimization method based on +adaptive estimates of lower-order moments. Adam updates: -moment1_out = beta1 * moment1 + (1 − beta1) * grad -moment2_out = beta2 * moment2 + (1 − beta2) * grad * grad -learning_rate_t = learning_rate_t * - sqrt(1 - beta2_pow) / (1 - beta1_pow) -param_out = param - learning_rate_t * moment1/ (sqrt(moment2) + epsilon) - -References: - [1] Adam: A Method for Stochastic Optimization - (https://arxiv.org/abs/1412.6980) +$$moment_1_{out} = \beta_1 * moment_1 + (1 - \beta_1) * grad \break +moment_2_{out} = \beta_2 * moment_2 + (1 - \beta_2) * grad * grad \break +learningRate = learningRate * + $\sqrt{(1 - \beta_2_{pow})}$ / (1 - \beta_1_{pow}) \break +paramOut = param - learningRate * moment_1/ ($\sqrt{(moment_2)} + \epsilon)$$ )DOC"); } diff --git a/paddle/operators/adamax_op.cc b/paddle/operators/adamax_op.cc index ff2565774115571166712b03c8990e5bf8de12a5..14cf3841b33a8153549e4c99ed2b75286e9c64db 100644 --- a/paddle/operators/adamax_op.cc +++ b/paddle/operators/adamax_op.cc @@ -99,26 +99,22 @@ class AdamaxOpMaker : public framework::OpProtoAndCheckerMaker { "Constant for numerical stability") .SetDefault(1.0e-8f); AddComment(R"DOC( -Adamax Updates Operator. +Adamax Optimizer. -This implements the Adamax optimizer from Section 7 of the Adam -paper[1]. Adamax is a variant of the +We implement the Adamax optimizer from Section 7 of the Adam +paper: https://arxiv.org/abs/1412.6980. Adamax is a variant of the Adam algorithm based on the infinity norm. Adamax updates: -moment_out = beta1 * moment + (1 - beta1) * grad -inf_norm_out = max(beta2 * inf_norm + epsilon, abs(grad)) -learning_rate_t = learning_rate/(1 - beta1_pow) -param_out = param - learning_rate_t * moment_out/inf_norm_out +$$momentOut = \beta_1 * moment + (1 - \beta_1) * grad \break +infNormOut = max(\beta_2 * infNorm + \epsilon, |grad|) \break +learningRate = learningRate /(1 - \beta_1_{pow}) \break +paramOut = param - learningRate * momentPut / infNormOut$$ The original paper does not have an epsilon attribute. -However, it is added here for numerical stability -by preventing divide by 0. - -References: - [1] Adam: A Method for Stochastic Optimization - (https://arxiv.org/abs/1412.6980) +However, it is added here for numerical stability to prevent the +division by 0 error. )DOC"); } diff --git a/paddle/operators/auc_op.cc b/paddle/operators/auc_op.cc index f5784922af1cafcf4e6873c0e906bbd66ce8ae69..ccb969ab23a8e1df713278513a66b10f21690108 100644 --- a/paddle/operators/auc_op.cc +++ b/paddle/operators/auc_op.cc @@ -23,11 +23,11 @@ class AucOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Out"), "Input of Out must be initialized."); + PADDLE_ENFORCE(ctx->HasInput("Out"), "Input of Out should not be null."); PADDLE_ENFORCE(ctx->HasInput("Indices"), - "Input of Indices must be initialized."); + "Input of Indices should not be null."); PADDLE_ENFORCE(ctx->HasInput("Label"), - "Input of Label must be initialized."); + "Input of Label should not be null."); auto inference_height = ctx->GetInputDim("Out")[0]; auto label_height = ctx->GetInputDim("Label")[0]; @@ -52,20 +52,20 @@ class AucOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Out", "A floating point 2D tensor, values are in the range [0, 1]." - "Each row is descend sorted. 
This input should be the" + "Each row is sorted in descending order. This input should be the" "output of topk." "Typically, this tensor indicates the probability of each label"); AddInput("Indices", "An int 2D tensor, indicating the indices of original" - "tensor before sort. Typically, this tensor indicates which label" - "the probability stands for."); + "tensor before sorting. Typically, this tensor indicates which " + "label the probability stands for."); AddInput("Label", "A 2D int tensor indicating the label of the training data." "The height is batch size and width is always 1."); // TODO(typhoonzero): support weight input AddOutput("AUC", "A scalar representing the " - "current area-under-curve."); + "current area-under-the-curve."); AddAttr("curve", "Curve type, can be 'ROC' or 'PR'.") .SetDefault("ROC"); @@ -74,19 +74,18 @@ class AucOpMaker : public framework::OpProtoAndCheckerMaker { " roc curve.") .SetDefault(200); - AddComment( - R"DOC(Computes the AUC according forward output and label. -Best to use for binary classification evaluations. + AddComment(R"DOC( +Area Under The Curve (AUC) Operator. +This implementation computes the AUC according to forward output and label. +It is used very widely in binary classification evaluation. As a note: If input label contains values other than 0 and 1, it will be cast -to bool. - -You can find the definations here: +to bool. You can find the relevant definitions here: https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve -Possible curves are: -- ROC: Receiver operating characteristic -- PR: Precision Recall +There are two types of possible curves: +1. ROC: Receiver operating characteristic +2. PR: Precision Recall )DOC"); } }; diff --git a/paddle/operators/batch_norm_op.cc b/paddle/operators/batch_norm_op.cc index f2c8be4c54eed9cd0aeb004eeb74a42adc0695f5..7d73dfde786208fe217dac97325d432fb80052ad 100644 --- a/paddle/operators/batch_norm_op.cc +++ b/paddle/operators/batch_norm_op.cc @@ -51,6 +51,10 @@ class BatchNormOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasOutput("SavedMean"), ""); PADDLE_ENFORCE(ctx->HasOutput("SavedVariance"), ""); + const float epsilon = ctx->Attrs().Get("epsilon"); + PADDLE_ENFORCE_GE(epsilon, 0.0, "epsilon should be larger than 0"); + PADDLE_ENFORCE_LE(epsilon, 0.001, "epsilon should not be too large"); + // make sure Mean/MeanOut and Variance/VarianceOut share memory in Python PADDLE_ENFORCE_EQ(ctx->Inputs("Mean")[0], ctx->Outputs("MeanOut")[0], "Mean and MeanOut should share the same memory"); @@ -66,7 +70,7 @@ class BatchNormOp : public framework::OperatorWithKernel { : x_dims[x_dims.size() - 1]); PADDLE_ENFORCE(x_dims.size() >= 3 && x_dims.size() <= 5, - "Input x must have 3 to 5 dimensions."); + "Input X must have 3 to 5 dimensions."); PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1UL); PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], C); @@ -93,16 +97,16 @@ class BatchNormOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("X", "The input tensor"); AddInput("Scale", "Scale is a 1-dimensional tensor of size C " - "to be applied to the output"); + "that is applied to the output"); AddInput("Bias", "Bias is a 1-dimensional tensor of size C " - "to be applied to the output"); + "that is applied to the output"); AddInput("Mean", - "The global mean (for training) or the " + "The global mean (for training) or " "estimated mean (for testing)"); AddInput("Variance", "The global variance (for training) " - "or the estimated Variance (for testing)"); 
+ "or estimated Variance (for testing)"); AddOutput("Y", "result after normalization"); AddOutput("MeanOut", "Share memory with Mean. " @@ -119,10 +123,14 @@ class BatchNormOpMaker : public framework::OpProtoAndCheckerMaker { "will apply to output when training") .AsIntermediate(); AddComment(R"DOC( -https://arxiv.org/pdf/1502.03167.pdf +Batch Normalization. -NHWC `[batch, in_height, in_width, in_channels]` -NCHW `[batch, in_channels, in_height, in_width]` +Batch Norm has been implemented as discussed in the paper: +https://arxiv.org/pdf/1502.03167.pdf +Can be used as a normalizer function for conv2d and fully_connected operations. +The required data format for this layer is one of the following: +1. NHWC `[batch, in_height, in_width, in_channels]` +2. NCHW `[batch, in_channels, in_height, in_width]` )DOC"); } @@ -297,7 +305,6 @@ class BatchNormGradOp : public framework::OperatorWithKernel { framework::DataType IndicateDataType( const framework::ExecutionContext &ctx) const override { - VLOG(3) << "IndicateDataType " << this->Type(); const auto *var = ctx.InputVar(framework::GradVarName("Y")); if (var == nullptr) { PADDLE_THROW("can't find Y@GRAD"); diff --git a/paddle/operators/cast_op.cc b/paddle/operators/cast_op.cc index 19187894c3f4803ef241d5e0c159852c0d9687da..70ee7861bab3a982eae60dd85b10c2e41f5827d0 100644 --- a/paddle/operators/cast_op.cc +++ b/paddle/operators/cast_op.cc @@ -23,13 +23,17 @@ class CastOpProtoMaker : public framework::OpProtoAndCheckerMaker { CastOpProtoMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "the input tensor of cast op"); - AddOutput("Out", "the output tensor of cast op"); - AddComment(R"DOC(Cast operator. -cast the input tensor to other data type. -)DOC"); + AddInput("X", "The input tensor of cast op"); + AddOutput("Out", "The output tensor of cast op"); AddAttr("out_data_type", "output data type"); AddAttr("in_data_type", "input data type"); + AddComment(R"DOC( +Cast Operator. + +This Operator casts the input tensor to another data type and +returns tha Output Tensor. + +)DOC"); } }; diff --git a/paddle/operators/clip_op.cc b/paddle/operators/clip_op.cc index f80204c6833d6436f2cf21610beea45b36787eea..3e9066ceb2a4a4dc19fdf5ef02bb7fadaab4bfff 100644 --- a/paddle/operators/clip_op.cc +++ b/paddle/operators/clip_op.cc @@ -49,8 +49,11 @@ class ClipOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr( "max", "(float)Maximum value, above which element is replaced by max"); AddComment(R"DOC( -Clip operator limits the given input within an interval. The interval is +Clip Operator. + +The clip operator limits the value of given input within an interval. The interval is specified with arguments 'min' and 'max'. + )DOC"); } }; diff --git a/paddle/operators/concat_op.cc b/paddle/operators/concat_op.cc index e11e51b4583817ef50cd447dbcf4c7202a152422..5f052689251bc023df635d41c1e64a660a0aa488 100644 --- a/paddle/operators/concat_op.cc +++ b/paddle/operators/concat_op.cc @@ -56,20 +56,24 @@ class ConcatOpMaker : public framework::OpProtoAndCheckerMaker { public: ConcatOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "the input tensors of concat operator.").AsDuplicable(); - AddOutput("Out", "the output tensor of concat operator."); - AddComment(R"DOC( - Join the input tensors along with the axis. 
- Examples: - Input[0] = [[1,2],[3,4]] - Input[1] = [[5,6]] - axis = 0 - Output = [[1,2], - [3,4], - [5,6]] - )DOC"); - AddAttr("axis", "The axis which the inputs will be joined with.") + AddInput("X", "Input tensors of concat operator.").AsDuplicable(); + AddOutput("Out", "Output tensor of concat operator."); + AddAttr("axis", + "The axis along which the input tensors will be concatenated.") .SetDefault(0); + AddComment(R"DOC( +Concat Operator. + +Concatenate the input tensors along dimension axis. +Examples: + Input[0] = [[1,2],[3,4]] + Input[1] = [[5,6]] + axis = 0 + Output = [[1,2], + [3,4], + [5,6]] + +)DOC"); } }; diff --git a/paddle/operators/cond_op.cc b/paddle/operators/cond_op.cc index adcd867f502d166f851926fde602dbb3fed9b48e..b809bdc3a0fea727f2fb6ea0a55672ee9b0bbd04 100644 --- a/paddle/operators/cond_op.cc +++ b/paddle/operators/cond_op.cc @@ -216,11 +216,12 @@ class CondOpProtoAndCheckerMaker : public framework::OpProtoAndCheckerMaker { AddOutput("IndexTensors", "Index Tensors contains indices for true/false"); AddComment(R"DOC( -Sample dependent Cond Operator: -Given Cond[i] as a 1/0 vector to indicate true/false -The equation is: -Out[i] = subnet_t[i], if Cond[i] == true -Out[i] = subnet_t[i], if Cond[i] == false +Sample Dependent Conditional Operator. + +Given Cond[i] as a 1/0 vector to indicate true/false: +Out[i] = subnet_true[i], if Cond[i] == true +Out[i] = subnet_false[i], if Cond[i] == false + )DOC"); } }; diff --git a/paddle/operators/conv2d_op.cc b/paddle/operators/conv2d_op.cc deleted file mode 100644 index 1acb8415d0691df77047806d3c81b51cbb8c59f3..0000000000000000000000000000000000000000 --- a/paddle/operators/conv2d_op.cc +++ /dev/null @@ -1,111 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ - -#include "paddle/operators/conv2d_op.h" - -namespace paddle { -namespace operators { - -void Conv2DOp::InferShape(framework::InferShapeContext* ctx) const { - PADDLE_ENFORCE(ctx->HasInput("Input"), - "Input(Input) of Conv2DOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Filter"), - "Input(Filter) of Conv2DOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Output"), - "Output(Output) of Conv2DOp should not be null."); - - auto in_dims = ctx->GetInputDim("Input"); - auto filter_dims = ctx->GetInputDim("Filter"); - std::vector strides = ctx->Attrs().Get>("strides"); - std::vector paddings = ctx->Attrs().Get>("paddings"); - int groups = ctx->Attrs().Get("groups"); - int input_channels = in_dims[1]; - int output_channels = filter_dims[0]; - - PADDLE_ENFORCE_EQ(in_dims.size(), 4, "Conv2DOp input should be 4-D."); - PADDLE_ENFORCE_EQ(filter_dims.size(), 4, "Conv2DOp filter should be 4-D."); - PADDLE_ENFORCE_EQ(input_channels, filter_dims[1] * groups, - "The number of input channels should be equal to filter " - "channels * groups."); - PADDLE_ENFORCE_EQ( - output_channels % groups, 0, - "The number of output channels should be divided by groups."); - - auto output_height = - OutputSize(in_dims[2], filter_dims[2], paddings[0], strides[0]); - auto output_width = - OutputSize(in_dims[3], filter_dims[3], paddings[1], strides[1]); - ctx->SetOutputDim("Output", - {in_dims[0], filter_dims[0], output_height, output_width}); -} - -Conv2DOpMaker::Conv2DOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput( - "Input", - "The input tensor of convolution operator. " - "The format of input tensor is NCHW. Where N is batch size, C is the " - "number of channels, H and W is the height and width of image."); - AddInput("Filter", - "The filter tensor of convolution operator." - "The format of the filter tensor is MCHW, where M is the number of " - "output image channels, C is the number of input image channels, " - "H and W is height and width of filter. " - "If the groups attribute is greater than 1, C equal the number of " - "input image channels divided by the groups."); - AddOutput("Output", - "The output tensor of convolution operator." - "The format of output tensor is also NCHW."); - AddAttr>("strides", "strides of convolution operator.") - .SetDefault({1, 1}); - AddAttr>("paddings", "paddings of convolution operator.") - .SetDefault({0, 0}); - AddAttr( - "groups", - "group size of convolution operator. " - "Refer to grouped convolution in Alex Krizhevsky's paper: " - "when group=2, the first half of the filters are only connected to the " - "first half of the input channels, and the second half only connected " - "to the second half.") - .SetDefault(1); - AddComment(R"DOC( -The convolution operation calculates the output based on the input, filter -and strides, paddings, groups parameters. The size of each dimension of the -parameters is checked in the infer-shape. 
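The grouped-convolution bookkeeping enforced by the InferShape above (and kept by the consolidated conv_op introduced later in this patch) follows from the MCHW filter layout: each group sees C input channels and produces M / groups output channels. A small standalone check of those relations, mirroring the PADDLE_ENFORCE calls (illustrative helper name):

    #include <cassert>

    // For a grouped convolution with filter shape {M, C, H, W}:
    //  * the input must provide C * groups channels, and
    //  * the M output channels must split evenly across the groups.
    void checkGroupedConvChannels(int inputChannels, int filterChannels,
                                  int outputChannels, int groups) {
      assert(inputChannels == filterChannels * groups);
      assert(outputChannels % groups == 0);
    }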
-)DOC"); -} - -void Conv2DOpGrad::InferShape(framework::InferShapeContext* ctx) const { - auto in_dims = ctx->GetInputDim("Input"); - auto filter_dims = ctx->GetInputDim("Filter"); - if (ctx->HasOutput(framework::GradVarName("Input"))) { - ctx->SetOutputDim(framework::GradVarName("Input"), in_dims); - } - if (ctx->HasOutput(framework::GradVarName("Filter"))) { - ctx->SetOutputDim(framework::GradVarName("Filter"), filter_dims); - } -} - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; -REGISTER_OP(conv2d, ops::Conv2DOp, ops::Conv2DOpMaker, conv2d_grad, - ops::Conv2DOpGrad); - -REGISTER_OP_CPU_KERNEL( - conv2d, ops::GemmConv2DKernel); -REGISTER_OP_CPU_KERNEL( - conv2d_grad, ops::GemmConvGrad2DKernel); diff --git a/paddle/operators/conv2d_transpose_cudnn_op.cc b/paddle/operators/conv2d_transpose_cudnn_op.cc index 8ce94e0f04f14e1eae7e7d01280601cc72dea8c4..fce1357ce5af5f11ccc5941690431393301e6725 100644 --- a/paddle/operators/conv2d_transpose_cudnn_op.cc +++ b/paddle/operators/conv2d_transpose_cudnn_op.cc @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/conv2d_transpose_op.h" +#include "paddle/operators/conv_transpose_op.h" namespace paddle { namespace operators { @@ -38,13 +38,13 @@ class CudnnConv2DTransposeOpMaker : public Conv2DTransposeOpMaker { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(conv2d_transpose_cudnn, ops::Conv2DTransposeOp, +REGISTER_OP(conv2d_transpose_cudnn, ops::ConvTransposeOp, ops::CudnnConv2DTransposeOpMaker, conv2d_transpose_cudnn_grad, - ops::Conv2DTransposeOpGrad); + ops::ConvTransposeOpGrad); REGISTER_OP_CPU_KERNEL( conv2d_transpose_cudnn, - ops::GemmConv2DTransposeKernel); + ops::GemmConvTransposeKernel); REGISTER_OP_CPU_KERNEL( conv2d_transpose_cudnn_grad, - ops::GemmConv2DTransposeGradKernel); + ops::GemmConvTransposeGradKernel); diff --git a/paddle/operators/conv2d_transpose_cudnn_op.cu b/paddle/operators/conv2d_transpose_cudnn_op.cu index 61fcfb3bd8fa57f2c45fbf3a980dbe41041cff18..694526ec01214acf2ec6a3d68d3cf072739ac185 100644 --- a/paddle/operators/conv2d_transpose_cudnn_op.cu +++ b/paddle/operators/conv2d_transpose_cudnn_op.cu @@ -15,7 +15,7 @@ #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" #include "paddle/memory/memory.h" -#include "paddle/operators/conv2d_transpose_op.h" +#include "paddle/operators/conv_transpose_op.h" #include "paddle/platform/assert.h" #include "paddle/platform/cudnn_helper.h" @@ -27,7 +27,6 @@ using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; using ScopedFilterDescriptor = platform::ScopedFilterDescriptor; using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor; using DataLayout = platform::DataLayout; -using CUDADeviceContext = platform::CUDADeviceContext; static constexpr size_t kConvCudnnWorkspaceLimitBytes = 1024 * 1024 * 1024; diff --git a/paddle/operators/conv2d_transpose_op.cc b/paddle/operators/conv2d_transpose_op.cc deleted file mode 100644 index 348527728bdd4ed60676d6e6e44c4e761b803096..0000000000000000000000000000000000000000 --- a/paddle/operators/conv2d_transpose_op.cc +++ /dev/null @@ -1,107 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ - -#include "paddle/operators/conv2d_transpose_op.h" - -namespace paddle { -namespace operators { - -void Conv2DTransposeOp::InferShape(framework::InferShapeContext* ctx) const { - PADDLE_ENFORCE(ctx->HasInput("Input"), - "Input(Input) of Conv2DTransposeOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Filter"), - "Input(Filter) of Conv2DTransposeOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Output"), - "Output(Output) of Conv2DTransposeOp should not be null."); - - auto in_dims = ctx->GetInputDim("Input"); - auto filter_dims = ctx->GetInputDim("Filter"); - std::vector strides = ctx->Attrs().Get>("strides"); - std::vector paddings = ctx->Attrs().Get>("paddings"); - - for (size_t i = 0; i < paddings.size(); ++i) { - PADDLE_ENFORCE_EQ(paddings[i], 0, - "No Padding allowed in conv transpose op."); - } - - PADDLE_ENFORCE_EQ(in_dims.size(), 4, - "Conv2DTransposeOp input should be 4-D tensor."); - PADDLE_ENFORCE_EQ(filter_dims.size(), 4, - "Conv2DTransposeOp filter should be 4-D tensor."); - PADDLE_ENFORCE_EQ(in_dims[1], filter_dims[0], - "input and kernel input dimension should be equal."); - - auto output_height = (in_dims[2] - 1) * strides[0] + filter_dims[2]; - auto output_width = (in_dims[3] - 1) * strides[1] + filter_dims[3]; - ctx->SetOutputDim("Output", - {in_dims[0], filter_dims[1], output_height, output_width}); -} - -Conv2DTransposeOpMaker::Conv2DTransposeOpMaker( - framework::OpProto* proto, framework::OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput( - "Input", - "(Tensor) The input tensor of convolution transpose operator. " - "The format of input tensor is NCHW. Where N is batch size, C is the " - "number of input channels, H and W is the height and width of image."); - AddInput("Filter", - "(Tensor) The filter tensor of convolution transpose operator." - "The format of the filter tensor is CMHW, where C is the number of " - "output image channels, M is the number of input image channels, " - "H and W is height and width of filter. " - "We enforce groups number == 1 and padding == 0 in " - "convolution transpose Scenario."); - AddOutput("Output", - "(Tensor) The output tensor of convolution transpose operator." - "The format of output tensor is also NCHW."); - AddAttr>("strides", - "strides of convolution transpose operator.") - .SetDefault({1, 1}); - AddAttr>("paddings", - "paddings of convolution transpose operator.") - .SetDefault({0, 0}); - AddComment(R"DOC( -The convolution transpose operation calculates the output based on the input, filter -and strides, paddings, groups parameters. The size of each dimension of the -parameters is checked in the infer-shape. 
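The shape rule checked above is output = (input - 1) * stride + filter per spatial dimension, which, with zero padding, inverts the usual convolution rule. A standalone sketch with a quick sanity check on illustrative numbers:

    #include <cassert>

    // Output spatial size of a zero-padding conv transpose in one dimension.
    int convTransposeOutputSize(int input, int filter, int stride) {
      return (input - 1) * stride + filter;
    }

    int main() {
      // A 5-wide feature map, 3-wide kernel, stride 2 gives an 11-wide output.
      assert(convTransposeOutputSize(5, 3, 2) == 11);
      // Inverse check: a stride-2, no-padding conv over an 11-wide input with a
      // 3-wide kernel gives back (11 - 3) / 2 + 1 == 5.
      assert((11 - 3) / 2 + 1 == 5);
      return 0;
    }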
-)DOC"); -} - -void Conv2DTransposeOpGrad::InferShape( - framework::InferShapeContext* ctx) const { - auto in_dims = ctx->GetInputDim("Input"); - auto filter_dims = ctx->GetInputDim("Filter"); - if (ctx->HasOutput(framework::GradVarName("Input"))) { - ctx->SetOutputDim(framework::GradVarName("Input"), in_dims); - } - if (ctx->HasOutput(framework::GradVarName("Filter"))) { - ctx->SetOutputDim(framework::GradVarName("Filter"), filter_dims); - } -} - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; -REGISTER_OP(conv2d_transpose, ops::Conv2DTransposeOp, - ops::Conv2DTransposeOpMaker, conv2d_transpose_grad, - ops::Conv2DTransposeOpGrad); - -REGISTER_OP_CPU_KERNEL( - conv2d_transpose, - ops::GemmConv2DTransposeKernel); -REGISTER_OP_CPU_KERNEL( - conv2d_transpose_grad, - ops::GemmConv2DTransposeGradKernel); diff --git a/paddle/operators/conv2d_transpose_op.h b/paddle/operators/conv2d_transpose_op.h deleted file mode 100644 index cab7788227690621a0e5b744197b86c515bbef72..0000000000000000000000000000000000000000 --- a/paddle/operators/conv2d_transpose_op.h +++ /dev/null @@ -1,254 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/im2col.h" -#include "paddle/operators/math/math_function.h" - -namespace paddle { -namespace operators { - -using Tensor = framework::Tensor; -using DDim = framework::DDim; - -// Define Op classes in .h file so that other conv transpose -// operator implementations can reuse the code. -class Conv2DTransposeOpMaker : public framework::OpProtoAndCheckerMaker { - public: - Conv2DTransposeOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker); -}; - -class Conv2DTransposeOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - protected: - void InferShape(framework::InferShapeContext* ctx) const override; -}; - -class Conv2DTransposeOpGrad : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - protected: - void InferShape(framework::InferShapeContext* ctx) const override; -}; - -template -class GemmConv2DTransposeKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - const Tensor* input = context.Input("Input"); - // The filter will be reshaped, so it should not be constant pointer - Tensor filter = *context.Input("Filter"); - - Tensor* output = context.Output("Output"); - - std::vector strides = context.Attr>("strides"); - - // TODO(Zhuoyuan): Paddings can be added in future. - // groups will alway be disabled in conv2d_transpose. 
- - const int batch_size = input->dims()[0]; - const int m = input->dims()[1]; - const int h = input->dims()[2]; - const int w = input->dims()[3]; - - const int k_h = filter.dims()[2]; - const int k_w = filter.dims()[3]; - - const int c = output->dims()[1]; // output channels - const int o_h = output->dims()[2]; - const int o_w = output->dims()[3]; - - paddle::operators::math::Col2ImFunctor< - paddle::operators::math::ColFormat::kCFO, Place, T> - col2im; - - // use col_shape in the im2col and col2im calculation - DDim col_shape = {c, k_h, k_w, h, w}; - - // use col_matrix_shape in the gemm calculation - DDim col_matrix_shape = {c * k_h * k_w, h * w}; - - Tensor col; - col.mutable_data(col_shape, context.GetPlace()); - // col_matrix shares the same piece of data with col, - // but will be reshaped into a two-dimensional matrix shape - // to call the matrix multiplication interface. - Tensor col_matrix; - col_matrix.ShareDataWith(col); - col_matrix.Resize(col_matrix_shape); - - DDim output_shape = {c, o_h, o_w}; - DDim input_matrix_shape = {m, h * w}; - - DDim filter_matrix_shape = {m, c * k_h * k_w}; - filter.Resize(filter_matrix_shape); - - // convolution transpose: gemm + col2im (similar to conv-backward on input) - - output->mutable_data(context.GetPlace()); - auto t = framework::EigenVector::Flatten(*output); - t.device(context.GetEigenDevice()) = t.constant(static_cast(0)); - - for (int i = 0; i < batch_size; i++) { - // batch with size (M, h * w) - Tensor input_batch = input->Slice(i, i + 1).Resize(input_matrix_shape); - // filter size: (M, c * k_h * k_w) - - // output size: (c, o_h, o_w) - Tensor output_batch = output->Slice(i, i + 1).Resize(output_shape); - - // col_matrix = filter * input_batch - // of shape (c * k_h * k_w, h * w) - math::matmul(context.device_context(), filter, true, - input_batch, false, T(1.0), &col_matrix, T(0.0)); - col2im(context.device_context(), output_batch, col, strides[0], - strides[1], 0, 0, 0, 0); - } - } -}; - -template -class GemmConv2DTransposeGradKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - const Tensor* input = context.Input("Input"); - const Tensor* output_grad = - context.Input(framework::GradVarName("Output")); - - // For filter, we do not use const pointer b/c we will do reshape, - // but we should avoid modifying its value. - Tensor filter = *context.Input("Filter"); - - Tensor* input_grad = - context.Output(framework::GradVarName("Input")); - Tensor* filter_grad = - context.Output(framework::GradVarName("Filter")); - - std::vector strides = context.Attr>("strides"); - // Actually, no paddings and groups allowed in conv transpose. 
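The kernels above express conv transpose as a gemm followed by col2im. Stripped of batching, channels and Eigen, the same data movement in one spatial dimension is a scatter of scaled filter copies, which is also why the output length is (input - 1) * stride + filter. A toy sketch, not the kernel itself:

    #include <vector>

    // Every input element scatters a scaled copy of the filter into the output.
    std::vector<float> convTranspose1D(const std::vector<float>& in,
                                       const std::vector<float>& filter,
                                       int stride) {
      std::vector<float> out((in.size() - 1) * stride + filter.size(), 0.0f);
      for (size_t i = 0; i < in.size(); ++i) {
        for (size_t k = 0; k < filter.size(); ++k) {
          out[i * stride + k] += in[i] * filter[k];
        }
      }
      return out;
    }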
- std::vector paddings = context.Attr>("paddings"); - - const int batch_size = input->dims()[0]; - const int m = input->dims()[1]; - const int h = input->dims()[2]; - const int w = input->dims()[3]; - - const int k_h = filter.dims()[2]; - const int k_w = filter.dims()[3]; - - const int c = output_grad->dims()[1]; // output channels - const int o_h = output_grad->dims()[2]; - const int o_w = output_grad->dims()[3]; - - // Only im2col functor required for bp to get to the right shape - paddle::operators::math::Im2ColFunctor< - paddle::operators::math::ColFormat::kCFO, Place, T> - im2col; - - // use col_shape in the im2col and col2im calculation - DDim col_shape = {c, k_h, k_w, h, w}; - - // use col_matrix_shape in the gemm calculation - DDim col_matrix_shape_f = {c * h * w, k_h * k_w}; - - Tensor col; - col.mutable_data(col_shape, context.GetPlace()); - // col_matrix shares the same piece of data with col, - // but will be reshaped into a two-dimensional matrix shape - // to call the matrix multiplication interface. - - DDim output_shape = {c, o_h, o_w}; - DDim input_matrix_shape = {m, h * w}; - - DDim filter_matrix_shape = {m, c * k_h * k_w}; - filter.Resize(filter_matrix_shape); - - // convolution transpose grad on input: - // im2col + gemm (similar to conv-forward) - // input need to compute gradient - if (input_grad) { - Tensor col_matrix; - col_matrix.ShareDataWith(col); - DDim col_matrix_shape = {c * k_h * k_w, h * w}; - col_matrix.Resize(col_matrix_shape); - - input_grad->mutable_data(context.GetPlace()); - auto t = framework::EigenVector::Flatten(*input_grad); - t.device(context.GetEigenDevice()) = t.constant(static_cast(0)); - - for (int i = 0; i < batch_size; i++) { - // batch with size (c, o_h * o_w) - Tensor output_grad_batch = - output_grad->Slice(i, i + 1).Resize(output_shape); - // filter of size (m, c * k_h * k_w) - - // batch with size (m, h, w) - Tensor input_grad_batch = - input_grad->Slice(i, i + 1).Resize(input_matrix_shape); - - // im2col: dy from (c, o_h, o_w) -> (c * k_h * k_w, h * w) - im2col(context.device_context(), output_grad_batch, col, strides[0], - strides[1], paddings[0], paddings[0], paddings[1], paddings[1]); - - // gemm: dx = filter * dy - // (m, c * k_h * k_w) * (c * k_h * k_w, h * w) -> (m, c, h) - math::matmul(context.device_context(), filter, false, - col_matrix, false, T(1.0), &input_grad_batch, - T(0.0)); - } - } - - // filter gradient required - if (filter_grad) { - Tensor col_matrix_f; - col_matrix_f.ShareDataWith(col); - DDim col_matrix_shape_f = {c * h * w, k_h * k_w}; - col_matrix_f.Resize(col_matrix_shape_f); - - filter_grad->mutable_data(context.GetPlace()); - Tensor filter_grad_ = *filter_grad; - filter_grad_.Resize(filter_matrix_shape); - auto t = framework::EigenVector::Flatten(filter_grad_); - t.device(context.GetEigenDevice()) = t.constant(static_cast(0)); - - for (int i = 0; i < batch_size; ++i) { - // batch with size (c, o_h, o_w) - Tensor output_grad_batch = - output_grad->Slice(i, i + 1).Resize(output_shape); - // input batch - Tensor in_batch = input->Slice(i, i + 1).Resize(input_matrix_shape); - - // im2col: (c * h * w, k_h * k_w) - im2col(context.device_context(), output_grad_batch, col, strides[0], - strides[1], paddings[0], paddings[0], paddings[1], paddings[1]); - - // gemm: d_filter = x * y_grad^T - // (m, c * h * w) * (k_h * k_w, c * h * w) -> (m, c, h) - math::matmul(context.device_context(), in_batch, false, - col_matrix_f, true, T(1.0), &filter_grad_, - T(1.0)); - } - } - } -}; - -} // namespace operators -} // namespace 
paddle diff --git a/paddle/operators/conv_cudnn_op.cc b/paddle/operators/conv_cudnn_op.cc index 4288f300dd5b0464f2b4394cdb0b44f93060ae74..97f31bf22d7072d89bd043045045dcb5bb5518b8 100644 --- a/paddle/operators/conv_cudnn_op.cc +++ b/paddle/operators/conv_cudnn_op.cc @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/conv2d_op.h" +#include "paddle/operators/conv_op.h" namespace paddle { namespace operators { @@ -29,7 +29,7 @@ class CudnnConvOpMaker : public Conv2DOpMaker { "workspace is a section of GPU memory which will be " "allocated/freed each time the operator runs, larger " "workspace size can increase performance but also requires " - "better hardward. This size should be carefully setted.") + "better hardware. This size should be chosen carefully.") .SetDefault(4096); } }; @@ -38,10 +38,11 @@ class CudnnConvOpMaker : public Conv2DOpMaker { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(conv_cudnn, ops::Conv2DOp, ops::CudnnConvOpMaker, conv_cudnn_grad, - ops::Conv2DOpGrad); -REGISTER_OP_CPU_KERNEL( - conv_cudnn, ops::GemmConv2DKernel); +REGISTER_OP(conv_cudnn, ops::ConvOp, ops::CudnnConvOpMaker, conv_cudnn_grad, + ops::ConvOpGrad); + +REGISTER_OP_CPU_KERNEL(conv_cudnn, + ops::GemmConvKernel); REGISTER_OP_CPU_KERNEL( conv_cudnn_grad, - ops::GemmConvGrad2DKernel); + ops::GemmConvGradKernel); diff --git a/paddle/operators/conv_cudnn_op.cu b/paddle/operators/conv_cudnn_op.cu index e2eb157f40c0039f87c41d28f8732cd4901a046d..2aec4a2760260623c4c7054c590afa8e1c6c3fea 100644 --- a/paddle/operators/conv_cudnn_op.cu +++ b/paddle/operators/conv_cudnn_op.cu @@ -15,7 +15,7 @@ #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" #include "paddle/memory/memory.h" -#include "paddle/operators/conv2d_op.h" +#include "paddle/operators/conv_op.h" #include "paddle/platform/assert.h" #include "paddle/platform/cudnn_helper.h" @@ -27,7 +27,6 @@ using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; using ScopedFilterDescriptor = platform::ScopedFilterDescriptor; using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor; using DataLayout = platform::DataLayout; -using CUDADeviceContext = platform::CUDADeviceContext; static constexpr size_t kCONV_CUDNN_WORKSPACE_LIMIT_BYTES = 1024 * 1024 * 1024; diff --git a/paddle/operators/conv_op.cc b/paddle/operators/conv_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..a6f65f10165929316f971d195f3790fd9e7ed376 --- /dev/null +++ b/paddle/operators/conv_op.cc @@ -0,0 +1,209 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#include "paddle/operators/conv_op.h" + +namespace paddle { +namespace operators { + +void ConvOp::InferShape(framework::InferShapeContext* ctx) const { + PADDLE_ENFORCE(ctx->HasInput("Input"), + "Input(Input) of ConvOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Filter"), + "Input(Filter) of ConvOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Output"), + "Output(Output) of ConvOp should not be null."); + + auto in_dims = ctx->GetInputDim("Input"); + auto filter_dims = ctx->GetInputDim("Filter"); + std::vector strides = ctx->Attrs().Get>("strides"); + std::vector paddings = ctx->Attrs().Get>("paddings"); + int groups = ctx->Attrs().Get("groups"); + int input_channels = in_dims[1]; + int output_channels = filter_dims[0]; + + PADDLE_ENFORCE(in_dims.size() == 4 || in_dims.size() == 5, + "Conv intput should be 4-D or 5-D tensor."); + PADDLE_ENFORCE_EQ( + in_dims.size(), filter_dims.size(), + "Conv input dimension and filter dimension should be the same."); + PADDLE_ENFORCE( + in_dims.size() - strides.size() == 2U, + "Conv input dimension and strides dimension should be consistent."); + PADDLE_ENFORCE_EQ( + paddings.size(), strides.size(), + "Conv paddings dimension and Conv strides dimension should be the same."); + PADDLE_ENFORCE_EQ(input_channels, filter_dims[1] * groups, + "The number of input channels should be equal to filter " + "channels * groups."); + PADDLE_ENFORCE_EQ( + output_channels % groups, 0, + "The number of output channels should be divided by groups."); + + std::vector output_shape({in_dims[0], filter_dims[0]}); + for (size_t i = 0; i < paddings.size(); ++i) { + output_shape.push_back(OutputSize(in_dims[i + 2], filter_dims[i + 2], + paddings[i], strides[i])); + } + ctx->SetOutputDim("Output", framework::make_ddim(output_shape)); +} + +Conv2DOpMaker::Conv2DOpMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput( + "Input", + "(Tensor) The input tensor of convolution operator. " + "The format of input tensor is NCHW, where N is batch size, C is the " + "number of channels, H is the height of the feature, " + "and W is the width of the feature."); + AddInput("Filter", + "(Tensor) The filter tensor of convolution operator. " + "The format of the filter tensor is MCHW, where M is the number of " + "output image channels, C is the number of input image channels, " + "H is the height of the filter, and W is the width of the filter. " + "If the groups attribute is greater than 1, C equals the number of " + "input image channels divided by the groups."); + AddOutput("Output", + "(Tensor) The output tensor of convolution operator. " + "The format of output tensor is also NCHW."); + AddAttr>("strides", "strides of convolution operator.") + .SetDefault({1, 1}); + AddAttr>("paddings", "paddings of convolution operator.") + .SetDefault({0, 0}); + AddAttr( + "groups", + "(int default:1), the group size of convolution operator. " + "According to grouped convolution in Alex Krizhevsky's Deep CNN paper: " + "when group=2, the first half of the filters is only connected to the " + "first half of the input channels, while the second half of the filters " + "is only connected to the second half of the input channels.") + .SetDefault(1); + AddComment(R"DOC( +Convolution Operator. + +The convolution operation calculates the output based on the input, filter +and strides, paddings, groups parameters. The size of each dimension of the +parameters is checked in the infer-shape. 
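
The shape inference above reduces to one closed form per spatial dimension. Below is a minimal standalone sketch of that arithmetic; ConvOutputSize is an illustrative stand-in for the OutputSize helper called by InferShape, and the sizes are made-up examples (the exact formula also appears in the Example at the end of this operator comment).

#include <cassert>

// Illustrative stand-in for the OutputSize helper used by InferShape above.
int ConvOutputSize(int input_size, int filter_size, int padding, int stride) {
  return (input_size - filter_size + 2 * padding) / stride + 1;
}

int main() {
  // A 224x224 feature with a 7x7 filter, padding 3, stride 2 -> 112x112.
  assert(ConvOutputSize(224, 7, 3, 2) == 112);
  // With no padding and stride 1, a 5x5 filter shrinks 32 -> 28.
  assert(ConvOutputSize(32, 5, 0, 1) == 28);
  return 0;
}
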
+Input(Input, Filter) and output(Output) are in NCHW format. Where N is batch +size, C is the number of channels, H is the height of the feature, and W is +the width of the feature. Parameters(ksize, strides, paddings) are two elements. +These two elements represent height and width, respectively. +The input(X) size and output(Out) size may be different. + +Example: + Input: + Input shape: (N, C_in, H_in, W_in) + Filter shape: (C_out, C_in, H_f, W_f) + Output: + Output shape: (N, C_out, H_out, W_out) + where + H_out = (H_in - filter_size[0] + 2 * paddings[0]) / strides[0] + 1; + W_out = (W_in - filter_size[1] + 2 * paddings[1]) / strides[1] + 1; +)DOC"); +} + +Conv3DOpMaker::Conv3DOpMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput( + "Input", + "(Tensor) The input tensor of convolution operator. " + "The format of input tensor is NCDHW. Where N is batch size, C is the " + "number of channels, D is the depth of the feature, H is the height of " + "the feature, " + "and W is the width of the feature."); + AddInput("Filter", + "(Tensor) The filter tensor of convolution operator. " + "The format of the filter tensor is MCDHW, where M is the number of " + "output image channels, C is the number of input image channels, " + "D is the depth of the filter, H is the height of the filter, and W " + "is the width of the filter." + "If the groups attribute is greater than 1, C equals the number of " + "input image channels divided by the groups."); + AddOutput("Output", + "(Tensor) The output tensor of convolution operator." + "The format of output tensor is also NCDHW."); + AddAttr>( + "strides", + "(vector, default:{0, 0, 0}), the strides of convolution operator.") + .SetDefault({1, 1, 1}); + AddAttr>( + "paddings", + "(vector, default:{0, 0, 0}), the paddings of convolution operator.") + .SetDefault({0, 0, 0}); + AddAttr( + "groups", + "(int default:1), the group size of convolution operator. " + "According to grouped convolution in Alex Krizhevsky's Deep CNN paper: " + "when group=2, the first half of the filters is only connected to the " + "first half of the input channels, while the second half of the filters " + "is only connected to the second half of the input channels.") + .SetDefault(1); + + AddComment(R"DOC( +Convolution3D Operator. + +The convolution operation calculates the output based on the input, filter +and strides, paddings, groups parameters. The size of each dimension of the +parameters is checked in the infer-shape. +Input(Input, Filter) and output(Output) are in NCDHW format. Where N is batch +size, C is the number of channels,D is the depth of the feature, H is the height of +the feature, and W is the width of the feature. Parameters(ksize, strides, paddings) +are three elements. These three elements represent depth, height and width, respectively. +The input(X) size and output(Out) size may be different. 
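
To make the groups attribute described in both makers above concrete, here is a small sketch of the channel bookkeeping it implies; the channel counts are assumed values, and the in_step/out_step names mirror the kernel code further below.

#include <cstdio>

int main() {
  // Assumed channel counts, chosen only for illustration.
  const int input_channels = 8, output_channels = 4, groups = 2;
  const int in_step = input_channels / groups;    // input channels per group
  const int out_step = output_channels / groups;  // filters per group
  for (int g = 0; g < groups; ++g) {
    // Group g's filters only read input channels [g*in_step, (g+1)*in_step)
    // and only write output channels [g*out_step, (g+1)*out_step).
    std::printf("group %d: in [%d, %d) -> out [%d, %d)\n", g, g * in_step,
                (g + 1) * in_step, g * out_step, (g + 1) * out_step);
  }
  return 0;
}
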
+ +Example: + Input: + Input shape: (N, C_in, D_in, H_in, W_in) + Filter shape: (C_out, C_in, D_f, H_f, W_f) + Output: + Output shape: (N, C_out, D_out, H_out, W_out) + where + D_out = (D_in - filter_size[0] + 2 * paddings[0]) / strides[0] + 1; + H_out = (H_in - filter_size[1] + 2 * paddings[1]) / strides[1] + 1; + W_out = (W_in - filter_size[2] + 2 * paddings[2]) / strides[2] + 1; +)DOC"); +} + +void ConvOpGrad::InferShape(framework::InferShapeContext* ctx) const { + auto in_dims = ctx->GetInputDim("Input"); + auto filter_dims = ctx->GetInputDim("Filter"); + if (ctx->HasOutput(framework::GradVarName("Input"))) { + ctx->SetOutputDim(framework::GradVarName("Input"), in_dims); + } + if (ctx->HasOutput(framework::GradVarName("Filter"))) { + ctx->SetOutputDim(framework::GradVarName("Filter"), filter_dims); + } +} + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(conv2d, ops::ConvOp, ops::Conv2DOpMaker, conv2d_grad, + ops::ConvOpGrad); +namespace ops = paddle::operators; +REGISTER_OP(conv3d, ops::ConvOp, ops::Conv3DOpMaker, conv3d_grad, + ops::ConvOpGrad); + +REGISTER_OP_CPU_KERNEL(conv2d, + ops::GemmConvKernel); +REGISTER_OP_CPU_KERNEL( + conv2d_grad, ops::GemmConvGradKernel); + +REGISTER_OP_CPU_KERNEL(conv3d, + ops::GemmConvKernel); +REGISTER_OP_CPU_KERNEL( + conv3d_grad, ops::GemmConvGradKernel); diff --git a/paddle/operators/conv2d_op.cu b/paddle/operators/conv_op.cu similarity index 64% rename from paddle/operators/conv2d_op.cu rename to paddle/operators/conv_op.cu index c697c9466d34c29af6976f3a4d2d0a24ba778ceb..8e6f9da455b7291049aee57189dae15b8bcc2150 100644 --- a/paddle/operators/conv2d_op.cu +++ b/paddle/operators/conv_op.cu @@ -12,11 +12,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/conv2d_op.h" +#include "paddle/operators/conv_op.h" namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(conv2d, + ops::GemmConvKernel); REGISTER_OP_GPU_KERNEL( - conv2d, ops::GemmConv2DKernel); + conv2d_grad, ops::GemmConvGradKernel); + +REGISTER_OP_GPU_KERNEL(conv3d, + ops::GemmConvKernel); REGISTER_OP_GPU_KERNEL( - conv2d_grad, ops::GemmConvGrad2DKernel); + conv3d_grad, ops::GemmConvGradKernel); diff --git a/paddle/operators/conv2d_op.h b/paddle/operators/conv_op.h similarity index 54% rename from paddle/operators/conv2d_op.h rename to paddle/operators/conv_op.h index 0621389a79eee6b5e75b1eab309b49f8aa4a97ca..7c1729213bf3f5f3987afbf2d51d5b5339ae521d 100644 --- a/paddle/operators/conv2d_op.h +++ b/paddle/operators/conv_op.h @@ -18,6 +18,7 @@ limitations under the License. 
*/ #include "paddle/framework/op_registry.h" #include "paddle/operators/math/im2col.h" #include "paddle/operators/math/math_function.h" +#include "paddle/operators/math/vol2col.h" namespace paddle { namespace operators { @@ -40,14 +41,20 @@ class Conv2DOpMaker : public framework::OpProtoAndCheckerMaker { framework::OpAttrChecker* op_checker); }; -class Conv2DOp : public framework::OperatorWithKernel { +class Conv3DOpMaker : public framework::OpProtoAndCheckerMaker { + public: + Conv3DOpMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker); +}; + +class ConvOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override; }; -class Conv2DOpGrad : public framework::OperatorWithKernel { +class ConvOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -55,7 +62,7 @@ class Conv2DOpGrad : public framework::OperatorWithKernel { }; template -class GemmConv2DKernel : public framework::OpKernel { +class GemmConvKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { const Tensor* input = context.Input("Input"); @@ -70,51 +77,78 @@ class GemmConv2DKernel : public framework::OpKernel { std::vector paddings = context.Attr>("paddings"); int groups = context.Attr("groups"); - int batch_size = input->dims()[0]; - int input_channels = input->dims()[1]; - int filter_height = filter.dims()[filter.dims().size() - 2]; - int filter_width = filter.dims()[filter.dims().size() - 1]; - int output_channels = output->dims()[1]; - int output_height = output->dims()[2]; - int output_width = output->dims()[3]; - - paddle::operators::math::Im2ColFunctor< - paddle::operators::math::ColFormat::kCFO, Place, T> - im2col; + const int batch_size = static_cast(input->dims()[0]); + + // filter_shape_vec: {k_h, k_w} or {k_d, k_h, k_w} + std::vector filter_shape_vec(framework::vectorize(filter.dims())); + filter_shape_vec.erase(filter_shape_vec.begin(), + filter_shape_vec.begin() + 2); + + // output_shape_vec: {o_h, o_w} or {o_d, o_h, o_w} + std::vector output_shape_vec(framework::vectorize(output->dims())); + output_shape_vec.erase(output_shape_vec.begin(), + output_shape_vec.begin() + 2); + // use col_shape in the im2col calculation - framework::DDim col_shape = {input_channels / groups, filter_height, - filter_width, output_height, output_width}; + // col_shape_vec: {i_c/g, k_h, k_w, o_h, o_w} or {i_c/g, k_d, k_h, k_w, o_d, + // o_h, o_w} + std::vector col_shape_vec; + col_shape_vec.push_back(input->dims()[1] / groups); + col_shape_vec.insert(col_shape_vec.end(), filter_shape_vec.begin(), + filter_shape_vec.end()); + col_shape_vec.insert(col_shape_vec.end(), output_shape_vec.begin(), + output_shape_vec.end()); + framework::DDim col_shape(framework::make_ddim(col_shape_vec)); + // use col_matrix_shape in the gemm calculation - framework::DDim col_matrix_shape = { - input_channels / groups * filter_height * filter_width, - output_height * output_width}; + // size: (i_c/g * k_h * k_w, o_h * o_w) or (i_c/g * k_d * k_h * k_w, o_d * + // o_h * o_w) + framework::DDim col_matrix_shape = + framework::flatten_to_2d(col_shape, filter_shape_vec.size() + 1); + Tensor col; col.mutable_data(col_shape, context.GetPlace()); // col_matrix shares the same piece of data with col, // but will be reshaped into a two-dimensional matrix shape // to call the matrix multiplication interface. 
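
A quick standalone check of the shape bookkeeping above, using assumed sizes: in the 2-D case the column buffer has shape {i_c/g, k_h, k_w, o_h, o_w}, and flattening it at axis filter-rank + 1 yields the (i_c/g * k_h * k_w, o_h * o_w) matrix that the gemm below multiplies against a filter slice.

#include <cassert>
#include <vector>

int main() {
  // Assumed sizes: i_c/g = 3, 3x3 filter, 28x28 output.
  const std::vector<int> col_shape = {3, 3, 3, 28, 28};
  const size_t filter_rank = 2;         // {k_h, k_w}
  const size_t axis = filter_rank + 1;  // split point passed to flatten_to_2d
  long rows = 1, cols = 1;
  for (size_t i = 0; i < col_shape.size(); ++i) {
    if (i < axis) {
      rows *= col_shape[i];
    } else {
      cols *= col_shape[i];
    }
  }
  assert(rows == 3 * 3 * 3);  // i_c/g * k_h * k_w
  assert(cols == 28 * 28);    // o_h * o_w
  return 0;
}
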
- Tensor col_matrix = col; + Tensor col_matrix; + col_matrix.ShareDataWith(col); col_matrix.Resize(col_matrix_shape); - framework::DDim input_shape = {input->dims()[1], input->dims()[2], - input->dims()[3]}; + framework::DDim input_shape = framework::slice_ddim( + input->dims(), 1, static_cast(input->dims().size())); + framework::DDim filter_matrix_shape = {filter.dims()[0], filter.numel() / filter.dims()[0]}; filter.Resize(filter_matrix_shape); - framework::DDim output_matrix_shape = {output_channels, - output_height * output_width}; - // convolution operator: im2col + gemm - int in_step = input_channels / groups; - int out_step = output_channels / groups; + framework::DDim output_matrix_shape = { + output->dims()[1], + output->numel() / (output->dims()[0] * output->dims()[1])}; + + // convolution operator: im2col(or vol2col) + gemm + int in_step = static_cast(input->dims()[1]) / groups; + int out_step = static_cast(output->dims()[1]) / groups; + for (int i = 0; i < batch_size; i++) { Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape); Tensor out_batch = output->Slice(i, i + 1).Resize(output_matrix_shape); for (int g = 0; g < groups; g++) { - // im2col Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step); - im2col(context.device_context(), in_slice, col, strides[0], strides[1], - paddings[0], paddings[0], paddings[1], paddings[1]); + + if (filter_shape_vec.size() == 2) { + // im2col + math::Im2ColFunctor im2col; + im2col(context.device_context(), in_slice, col, strides[0], + strides[1], paddings[0], paddings[0], paddings[1], + paddings[1]); + } else if (filter_shape_vec.size() == 3) { + // vol2col + math::Vol2ColFunctor vol2col; + vol2col(context.device_context(), in_slice, col, strides[0], + strides[1], strides[2], paddings[0], paddings[1], + paddings[2]); + } // gemm Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step); @@ -127,7 +161,7 @@ class GemmConv2DKernel : public framework::OpKernel { }; template -class GemmConvGrad2DKernel : public framework::OpKernel { +class GemmConvGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { const Tensor* input = context.Input("Input"); @@ -137,64 +171,79 @@ class GemmConvGrad2DKernel : public framework::OpKernel { context.Output(framework::GradVarName("Input")); Tensor* filter_grad = context.Output(framework::GradVarName("Filter")); - // The filter and filter_grad will be reshaped in the calculations, // so here use an assignment operation, // that avoids modifying the variable in the Scope. 
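
The forward pass above is "im2col (or vol2col) + gemm". The sketch below is a single-channel, stride-1, no-padding im2col written from scratch to show why that works: every k x k window becomes one column, so one reshaped filter row times the column matrix is the whole convolution. It is an illustration only, not the framework's Im2ColFunctor.

#include <cassert>
#include <vector>

std::vector<float> Im2Col(const std::vector<float>& im, int h, int w, int k) {
  const int oh = h - k + 1, ow = w - k + 1;
  std::vector<float> col(k * k * oh * ow);  // (k*k, oh*ow), row-major
  for (int ki = 0; ki < k; ++ki)
    for (int kj = 0; kj < k; ++kj)
      for (int oi = 0; oi < oh; ++oi)
        for (int oj = 0; oj < ow; ++oj)
          col[(ki * k + kj) * oh * ow + oi * ow + oj] =
              im[(oi + ki) * w + (oj + kj)];
  return col;
}

int main() {
  // 3x3 input, 2x2 filter of all ones -> each output is a window sum.
  const std::vector<float> im = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const std::vector<float> filter = {1, 1, 1, 1};
  const std::vector<float> col = Im2Col(im, 3, 3, 2);
  // gemm: (1, k*k) x (k*k, oh*ow) -> (1, oh*ow)
  float out[4] = {0, 0, 0, 0};
  for (int p = 0; p < 4; ++p)
    for (int q = 0; q < 4; ++q) out[p] += filter[q] * col[q * 4 + p];
  assert(out[0] == 1 + 2 + 4 + 5);  // top-left 2x2 window
  assert(out[3] == 5 + 6 + 8 + 9);  // bottom-right 2x2 window
  return 0;
}
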
Tensor filter = *context.Input("Filter"); + if (!input_grad && !filter_grad) return; + std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); int groups = context.Attr("groups"); - int batch_size = input->dims()[0]; - int input_channels = input->dims()[1]; - int filter_height = filter.dims()[filter.dims().size() - 2]; - int filter_width = filter.dims()[filter.dims().size() - 1]; - int output_channels = output_grad->dims()[1]; - int output_height = output_grad->dims()[2]; - int output_width = output_grad->dims()[3]; - - paddle::operators::math::Col2ImFunctor< - paddle::operators::math::ColFormat::kCFO, Place, T> - col2im; - paddle::operators::math::Im2ColFunctor< - paddle::operators::math::ColFormat::kCFO, Place, T> - im2col; - // use col_shape in the im2col and col2im calculation - framework::DDim col_shape = {input_channels / groups, filter_height, - filter_width, output_height, output_width}; + const int batch_size = static_cast(input->dims()[0]); + + // filter_shape_vec: {k_h, k_w} or {k_d, k_h, k_w} + std::vector filter_shape_vec(framework::vectorize(filter.dims())); + filter_shape_vec.erase(filter_shape_vec.begin(), + filter_shape_vec.begin() + 2); + + // output_shape_vec: {o_h, o_w} or {o_d, o_h, o_w} + std::vector output_shape_vec( + framework::vectorize(output_grad->dims())); + output_shape_vec.erase(output_shape_vec.begin(), + output_shape_vec.begin() + 2); + + // use col_shape in the im2col calculation + // col_shape_vec: {i_c/g, k_h, k_w, o_h, o_w} or {i_c/g, k_d, k_h, k_w, o_d, + // o_h, o_w} + std::vector col_shape_vec; + col_shape_vec.push_back(input->dims()[1] / groups); + col_shape_vec.insert(col_shape_vec.end(), filter_shape_vec.begin(), + filter_shape_vec.end()); + col_shape_vec.insert(col_shape_vec.end(), output_shape_vec.begin(), + output_shape_vec.end()); + framework::DDim col_shape(framework::make_ddim(col_shape_vec)); + // use col_matrix_shape in the gemm calculation - framework::DDim col_matrix_shape = { - input_channels / groups * filter_height * filter_width, - output_height * output_width}; - Tensor col; - col.mutable_data(col_shape, context.GetPlace()); - // col_matrix shares the same piece of data with col, - // but will be reshaped into a two-dimensional matrix shape - // to call the matrix multiplication interface. 
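
The backward-input branch below pairs the gemm with col2im (or col2vol), which can be read as the adjoint of im2col: instead of gathering window elements into columns, it scatter-adds column entries back onto the pixels they came from. A from-scratch sketch with assumed sizes (single channel, stride 1, no padding):

#include <cassert>
#include <vector>

void Col2Im(const std::vector<float>& col, int h, int w, int k,
            std::vector<float>* im) {
  const int oh = h - k + 1, ow = w - k + 1;
  im->assign(h * w, 0.0f);
  for (int ki = 0; ki < k; ++ki)
    for (int kj = 0; kj < k; ++kj)
      for (int oi = 0; oi < oh; ++oi)
        for (int oj = 0; oj < ow; ++oj)
          (*im)[(oi + ki) * w + (oj + kj)] +=
              col[(ki * k + kj) * oh * ow + oi * ow + oj];
}

int main() {
  // Feed a column matrix of all ones through col2im: every pixel then ends
  // up holding the number of 2x2 windows that overlap it.
  const int h = 3, w = 3, k = 2;
  std::vector<float> col(k * k * (h - k + 1) * (w - k + 1), 1.0f);
  std::vector<float> im;
  Col2Im(col, h, w, k, &im);
  assert(im[0] == 1);  // a corner is covered by one window
  assert(im[4] == 4);  // the center pixel is covered by all four windows
  return 0;
}
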
- Tensor col_matrix = col; - col_matrix.Resize(col_matrix_shape); + // size: (i_c/g * k_h * k_w, o_h * o_w) + // or + // (i_c/g * k_d * k_h * k_w, o_d * o_h * o_w) + framework::DDim col_matrix_shape = + framework::flatten_to_2d(col_shape, filter_shape_vec.size() + 1); - framework::DDim input_shape = {input->dims()[1], input->dims()[2], - input->dims()[3]}; - framework::DDim output_matrix_shape = { - output_grad->dims()[1], - output_grad->dims()[2] * output_grad->dims()[3]}; + framework::DDim input_shape = framework::slice_ddim( + input->dims(), 1, static_cast(input->dims().size())); framework::DDim filter_matrix_shape = {filter.dims()[0], filter.numel() / filter.dims()[0]}; filter.Resize(filter_matrix_shape); - // convolution backward input operator: gemm + col2im - // convolution backward weight operator: im2col + gemm - int in_step = input_channels / groups; - int out_step = output_channels / groups; + framework::DDim output_matrix_shape = { + output_grad->dims()[1], + output_grad->numel() / + (output_grad->dims()[0] * output_grad->dims()[1])}; + + // convolution backward input operator: gemm + col2im(or col2vol) + // convolution backward weight operator: im2col(or vol2col) + gemm + int in_step = static_cast(input->dims()[1]) / groups; + int out_step = static_cast(output_grad->dims()[1]) / groups; + + Tensor col; + // col_matrix shares the same piece of data with col, + // but will be reshaped into a two-dimensional matrix shape + // to call the matrix multiplication interface. + Tensor col_matrix; + col.mutable_data(col_shape, context.GetPlace()); + col_matrix.ShareDataWith(col); + col_matrix.Resize(col_matrix_shape); + + math::SetConstant set_zero; if (input_grad) { input_grad->mutable_data(context.GetPlace()); - auto t = framework::EigenVector::Flatten(*input_grad); - t.device(context.GetEigenDevice()) = t.constant(static_cast(0)); + set_zero(context.device_context(), input_grad, static_cast(0)); for (int i = 0; i < batch_size; i++) { Tensor out_grad_batch = @@ -208,13 +257,22 @@ class GemmConvGrad2DKernel : public framework::OpKernel { math::matmul(context.device_context(), filter_slice, true, out_grad_slice, false, T(1.0), &col_matrix, T(0.0)); - // col2im Tensor in_grad_slice = in_grad_batch.Slice(g * in_step, (g + 1) * in_step); - col2im(context.device_context(), in_grad_slice, col, strides[0], - strides[1], paddings[0], paddings[0], paddings[1], - paddings[1]); + + if (filter_shape_vec.size() == 2) { + math::Col2ImFunctor col2im; + col2im(context.device_context(), in_grad_slice, col, strides[0], + strides[1], paddings[0], paddings[0], paddings[1], + paddings[1]); + + } else if (filter_shape_vec.size() == 3) { + math::Col2VolFunctor col2vol; + col2vol(context.device_context(), in_grad_slice, col, strides[0], + strides[1], strides[2], paddings[0], paddings[1], + paddings[2]); + } } } } @@ -223,8 +281,7 @@ class GemmConvGrad2DKernel : public framework::OpKernel { filter_grad->mutable_data(context.GetPlace()); Tensor filter_grad_ = *filter_grad; filter_grad_.Resize(filter_matrix_shape); - auto t = framework::EigenVector::Flatten(filter_grad_); - t.device(context.GetEigenDevice()) = t.constant(static_cast(0)); + set_zero(context.device_context(), filter_grad, static_cast(0)); for (int i = 0; i < batch_size; i++) { Tensor out_grad_batch = @@ -235,9 +292,18 @@ class GemmConvGrad2DKernel : public framework::OpKernel { Tensor out_grad_slice = out_grad_batch.Slice(g * out_step, (g + 1) * out_step); Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step); - 
im2col(context.device_context(), in_slice, col, strides[0], - strides[1], paddings[0], paddings[0], paddings[1], - paddings[1]); + + if (filter_shape_vec.size() == 2) { + math::Im2ColFunctor im2col; + im2col(context.device_context(), in_slice, col, strides[0], + strides[1], paddings[0], paddings[0], paddings[1], + paddings[1]); + } else if (filter_shape_vec.size() == 3) { + math::Vol2ColFunctor vol2col; + vol2col(context.device_context(), in_slice, col, strides[0], + strides[1], strides[2], paddings[0], paddings[1], + paddings[2]); + } // gemm Tensor filter_grad_slice = @@ -250,6 +316,5 @@ class GemmConvGrad2DKernel : public framework::OpKernel { } } }; - } // namespace operators } // namespace paddle diff --git a/paddle/operators/conv_shift_op.cc b/paddle/operators/conv_shift_op.cc index 6156a2d6af9a010240449a7c944ec0caffc85189..a4150a5664690e750d2501a1849767c23209186b 100644 --- a/paddle/operators/conv_shift_op.cc +++ b/paddle/operators/conv_shift_op.cc @@ -96,14 +96,13 @@ as used in the Neural Turing Machine: https://arxiv.org/abs/1410.5401 The equation is: - \f[ - Out[i] = \sum_{j=-(N-1)/2}^{(N-1)/2} X_{i+j} * Y_{j} - \f] +$$Out[i] = \sum_{j=-(N-1)/2}^{(N-1)/2} X_{i+j} * Y_{j}$$ -where X's index is computed modulo M, and b's index is computed modulo N. +where X's index is computed modulo M, and Y's index is computed modulo N. + +Both inputs X and Y can carry LoD (Level of Details) information. +However, the output only shares the LoD information with input X. -Both of the input `X` and `Y` can carry LoD (Level of Details) information. -However, the output only shares the LoD information with input `X`. )DOC"); } }; diff --git a/paddle/operators/conv_shift_op.cu b/paddle/operators/conv_shift_op.cu index 145e966fe9caa68f7485bb258fa78fd34bfd4c04..74ed1b0ed358afc4f1a4e6a0c322eb032029d551 100644 --- a/paddle/operators/conv_shift_op.cu +++ b/paddle/operators/conv_shift_op.cu @@ -130,9 +130,7 @@ class ConvShiftKernel : public framework::OpKernel { dim3 grid_dim(num_x_blocks, batch_size); - auto stream = reinterpret_cast( - context.device_context()) - .stream(); + auto stream = context.cuda_device_context().stream(); conv_shift_forward<<>>( x_data, y_data, out_data, x_width, y_width, y_half_width, batch_size); @@ -159,9 +157,7 @@ class ConvShiftGradKernel int y_width = Y->dims()[1]; int y_half_width = (y_width - 1) / 2; - auto stream = reinterpret_cast( - context.device_context()) - .stream(); + auto stream = context.cuda_device_context().stream(); const int x_per_block = 256; int num_x_blocks = div_up(x_width, x_per_block); diff --git a/paddle/operators/conv_transpose_op.cc b/paddle/operators/conv_transpose_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..50081779a5ea3c81884007d4e4b7832dc4ea2bdd --- /dev/null +++ b/paddle/operators/conv_transpose_op.cc @@ -0,0 +1,203 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#include "paddle/operators/conv_transpose_op.h" + +namespace paddle { +namespace operators { + +void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const { + PADDLE_ENFORCE(ctx->HasInput("Input"), + "Input(Input) of ConvTransposeOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Filter"), + "Input(Filter) of ConvTransposeOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Output"), + "Output(Output) of ConvTransposeOp should not be null."); + + auto in_dims = ctx->GetInputDim("Input"); + auto filter_dims = ctx->GetInputDim("Filter"); + std::vector strides = ctx->Attrs().Get>("strides"); + std::vector paddings = ctx->Attrs().Get>("paddings"); + + for (size_t i = 0; i < paddings.size(); ++i) { + PADDLE_ENFORCE_EQ(paddings[i], 0, + "No Padding allowed in conv transpose op."); + } + + PADDLE_ENFORCE(in_dims.size() == 4 || in_dims.size() == 5, + "ConvTransposeOp intput should be 4-D or 5-D tensor."); + PADDLE_ENFORCE_EQ(in_dims.size(), filter_dims.size(), + "ConvTransposeOp input dimension and filter dimension " + "should be the same."); + PADDLE_ENFORCE(in_dims.size() - strides.size() == 2U, + "ConvTransposeOp input dimension and strides dimension should " + "be consistent."); + PADDLE_ENFORCE_EQ(paddings.size(), strides.size(), + "ConvTransposeOp paddings dimension and Conv strides " + "dimension should be the same."); + PADDLE_ENFORCE_EQ(in_dims[1], filter_dims[0], + "In ConvTransposeOp, The input channel should be the same " + "as the number of filters."); + + std::vector output_shape({in_dims[0], filter_dims[1]}); + for (size_t i = 0; i < paddings.size(); ++i) { + output_shape.push_back((in_dims[i + 2] - 1) * strides[i] + + filter_dims[i + 2]); + } + ctx->SetOutputDim("Output", framework::make_ddim(output_shape)); +} + +Conv2DTransposeOpMaker::Conv2DTransposeOpMaker( + framework::OpProto* proto, framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput( + "Input", + "(Tensor) The input tensor of convolution transpose operator. " + "The format of input tensor is NCHW. Where N is batch size, C is the " + "number of input channels, H is the height of the feature, and " + "W is the width of the feature."); + AddInput("Filter", + "(Tensor) The filter tensor of convolution transpose operator. " + "The format of the filter tensor is CMHW, where C is the number of " + "output image channels, M is the number of input image channels, " + "H is the height of the filter, and W is the width of the filter. " + "We enforce groups number == 1 and padding == 0 in " + "the convolution transpose scenario."); + AddOutput("Output", + "(Tensor) The output tensor of convolution transpose operator. " + "The format of output tensor is also NCHW."); + AddAttr>( + "strides", + "(vector defalut:{1, 1}), strides of convolution transpose operator.") + .SetDefault({1, 1}); + AddAttr>( + "paddings", + "(vector defalut:{0, 0}), paddings of convolution transpose operator.") + .SetDefault({0, 0}); + AddComment(R"DOC( +Convolution2D Transpose Operator. + +The convolution transpose operation calculates the output based on the input, filter +and strides, paddings, groups parameters. The size of each dimension of the +parameters is checked in the infer-shape. + +Input(Input, Filter) and output(Output) are in NCHW format. Where N is batch +size, C is the number of channels, H is the height of the feature, and +W is the width of the feature. Parameters(ksize, strides, paddings) are two elements. +These two elements represent height and width, respectively. 
+The input(X) size and output(Out) size may be different. +Example: + Input: + Input shape: (N, C_in, H_in, W_in) + Filter shape: (C_in, C_out, H_f, W_f) + Output: + Output shape: (N, C_out, H_out, W_out) + where + H_out = (H_in - 1) * strides[0] - 2 * paddings[0] + filter_size[0]; + W_out = (W_in - 1) * strides[1] - 2 * paddings[1] + filter_size[1]; +)DOC"); +} + +Conv3DTransposeOpMaker::Conv3DTransposeOpMaker( + framework::OpProto* proto, framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Input", + "(Tensor) The input tensor of convolution transpose operator." + "The format of input tensor is NCDHW. Where N is batch size, C is " + "the number of channels, D is the depth of the feature, H is the " + "height of the feature, and " + "W is the width of the feature."); + AddInput("Filter", + "(Tensor) The filter tensor of convolution transpose operator." + "The format of the filter tensor is CMDHW, where C is the number of " + "output image channels, M is the number of input image channels, D " + "is the depth of the filter, H is the height of the filter, and " + "W is the width of the filter." + "We enforce groups number == 1 and padding == 0 in " + "the convolution3d transpose scenario."); + AddOutput("Output", + "(Tensor) The output tensor of convolution transpose operator." + "The format of output tensor is also NCDHW." + "Where N is batch size, C is " + "the number of channels, D is the depth of the feature, H is the " + "height of the feature, and W is the width of the feature."); + AddAttr>( + "strides", + "(vector defalut:{1, 1, 1}), strides of convolution transpose operator.") + .SetDefault({1, 1, 1}); + AddAttr>( + "paddings", + "(vector defalut:{0, 0, 0}), paddings of convolution transpose operator.") + .SetDefault({0, 0, 0}); + AddComment(R"DOC( +Convolution3D Transpose Operator. + +The convolution transpose operation calculates the output based on the input, filter +and strides, paddings, groups parameters. The size of each dimension of the +parameters is checked in the infer-shape. + +Input(Input, Filter) and output(Output) are in NCDHW format. Where N is batch +size, C is the number of channels, D is the depth of the feature, +H is the height of the feature, and W is the width of the feature. +Parameters(ksize, strides, paddings) are three elements. +These three elements represent depth, height and width, respectively. +The input(X) size and output(Out) size may be different. 
+Example: + Input: + Input shape: (N, C_in, D_in, H_in, W_in) + Filter shape: (C_in, C_out, D_f, H_f, W_f) + Output: + Output shape: (N, C_out, D_out, H_out, W_out) + where + D_out = (D_in - 1) * strides[0] - 2 * paddings[0] + filter_size[0]; + H_out = (H_in - 1) * strides[1] - 2 * paddings[1] + filter_size[1]; + W_out = (W_in - 1) * strides[2] - 2 * paddings[2] + filter_size[2]; +)DOC"); +} + +void ConvTransposeOpGrad::InferShape(framework::InferShapeContext* ctx) const { + auto in_dims = ctx->GetInputDim("Input"); + auto filter_dims = ctx->GetInputDim("Filter"); + if (ctx->HasOutput(framework::GradVarName("Input"))) { + ctx->SetOutputDim(framework::GradVarName("Input"), in_dims); + } + if (ctx->HasOutput(framework::GradVarName("Filter"))) { + ctx->SetOutputDim(framework::GradVarName("Filter"), filter_dims); + } +} + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OP(conv2d_transpose, ops::ConvTransposeOp, ops::Conv2DTransposeOpMaker, + conv2d_transpose_grad, ops::ConvTransposeOpGrad); + +REGISTER_OP_CPU_KERNEL( + conv2d_transpose, + ops::GemmConvTransposeKernel); +REGISTER_OP_CPU_KERNEL( + conv2d_transpose_grad, + ops::GemmConvTransposeGradKernel); + +REGISTER_OP(conv3d_transpose, ops::ConvTransposeOp, ops::Conv3DTransposeOpMaker, + conv3d_transpose_grad, ops::ConvTransposeOpGrad); + +REGISTER_OP_CPU_KERNEL( + conv3d_transpose, + ops::GemmConvTransposeKernel); +REGISTER_OP_CPU_KERNEL( + conv3d_transpose_grad, + ops::GemmConvTransposeGradKernel); diff --git a/paddle/operators/conv2d_transpose_op.cu b/paddle/operators/conv_transpose_op.cu similarity index 63% rename from paddle/operators/conv2d_transpose_op.cu rename to paddle/operators/conv_transpose_op.cu index 931ac9eed294c4fe7c726d8cc2c4d9a39ec12828..401cddb379ced134b800d2a078fe130a2850fbb2 100644 --- a/paddle/operators/conv2d_transpose_op.cu +++ b/paddle/operators/conv_transpose_op.cu @@ -12,13 +12,20 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/conv2d_transpose_op.h" +#include "paddle/operators/conv_transpose_op.h" namespace ops = paddle::operators; REGISTER_OP_GPU_KERNEL( conv2d_transpose, - ops::GemmConv2DTransposeKernel); + ops::GemmConvTransposeKernel); REGISTER_OP_GPU_KERNEL( conv2d_transpose_grad, - ops::GemmConv2DTransposeGradKernel); + ops::GemmConvTransposeGradKernel); + +REGISTER_OP_GPU_KERNEL( + conv3d_transpose, + ops::GemmConvTransposeKernel); +REGISTER_OP_GPU_KERNEL( + conv3d_transpose_grad, + ops::GemmConvTransposeGradKernel); diff --git a/paddle/operators/conv_transpose_op.h b/paddle/operators/conv_transpose_op.h new file mode 100644 index 0000000000000000000000000000000000000000..6c1a6220d784abf89ec789f94d9cff9e5414db04 --- /dev/null +++ b/paddle/operators/conv_transpose_op.h @@ -0,0 +1,293 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" +#include "paddle/operators/math/im2col.h" +#include "paddle/operators/math/math_function.h" +#include "paddle/operators/math/vol2col.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +using DDim = framework::DDim; + +// Define Op classes in .h file so that other conv transpose +// operator implementations can reuse the code. +class Conv2DTransposeOpMaker : public framework::OpProtoAndCheckerMaker { + public: + Conv2DTransposeOpMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker); +}; + +class Conv3DTransposeOpMaker : public framework::OpProtoAndCheckerMaker { + public: + Conv3DTransposeOpMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker); +}; + +class ConvTransposeOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext* ctx) const override; +}; + +class ConvTransposeOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext* ctx) const override; +}; + +template +class GemmConvTransposeKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + const Tensor* input = context.Input("Input"); + // The filter will be reshaped, so it should not be constant pointer + Tensor filter = *context.Input("Filter"); + Tensor* output = context.Output("Output"); + + std::vector strides = context.Attr>("strides"); + // TODO(Zhuoyuan): Paddings can be added in future. + // groups will alway be disabled in conv2dtranspose. + + const int batch_size = static_cast(input->dims()[0]); + + // input_shape_vec: {h, w} or {d, h, w} + std::vector input_shape_vec = framework::vectorize(input->dims()); + input_shape_vec.erase(input_shape_vec.begin(), input_shape_vec.begin() + 2); + + // filter_shape_vec: {k_h, k_w} or {k_d, k_h, k_w} + std::vector filter_shape_vec = framework::vectorize(filter.dims()); + filter_shape_vec.erase(filter_shape_vec.begin(), + filter_shape_vec.begin() + 2); + + // use col_shape in the im2col and col2im (or vol2col and col2vol) + // calculation + // col_shape_vec: {c, k_h, k_w, h, w} or {c, k_d, k_h, k_w, d, h, w} + std::vector col_shape_vec; + col_shape_vec.push_back(output->dims()[1]); + col_shape_vec.insert(col_shape_vec.end(), filter_shape_vec.begin(), + filter_shape_vec.end()); + col_shape_vec.insert(col_shape_vec.end(), input_shape_vec.begin(), + input_shape_vec.end()); + DDim col_shape(framework::make_ddim(col_shape_vec)); + + // use col_matrix_shape in the gemm calculation + // size: (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w) + DDim col_matrix_shape = + framework::flatten_to_2d(col_shape, filter_shape_vec.size() + 1); + + Tensor col; + col.mutable_data(col_shape, context.GetPlace()); + // col_matrix shares the same piece of data with col, + // but will be reshaped into a two-dimensional matrix shape + // to call the matrix multiplication interface. 
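
The loop below implements "gemm + col2im (or col2vol)", which is the same computation as the backward-input pass of a forward convolution. The 1-D, stride-1 sketch that follows (made-up sizes and values, no Paddle types) shows that scatter view and why the output length follows the (in - 1) * stride + filter rule.

#include <cassert>
#include <vector>

// forward conv, stride 1, no padding: y[i] = sum_j x[i + j] * f[j]
std::vector<float> Conv1D(const std::vector<float>& x,
                          const std::vector<float>& f) {
  std::vector<float> y(x.size() - f.size() + 1, 0.0f);
  for (size_t i = 0; i < y.size(); ++i)
    for (size_t j = 0; j < f.size(); ++j) y[i] += x[i + j] * f[j];
  return y;
}

// conv transpose, stride 1: scatter each input through the filter
std::vector<float> ConvTranspose1D(const std::vector<float>& in,
                                   const std::vector<float>& f) {
  std::vector<float> out(in.size() + f.size() - 1, 0.0f);
  for (size_t i = 0; i < in.size(); ++i)
    for (size_t j = 0; j < f.size(); ++j) out[i + j] += in[i] * f[j];
  return out;
}

int main() {
  const std::vector<float> f = {1.f, 2.f};
  const std::vector<float> y = Conv1D({1.f, 0.f, 0.f}, f);  // {1, 0}
  assert(y[0] == 1 && y[1] == 0);
  // Output length follows the InferShape rule: (in - 1) * 1 + 2 = 3.
  const std::vector<float> up = ConvTranspose1D({1.f, 0.f}, f);
  assert(up.size() == 3 && up[0] == 1 && up[1] == 2 && up[2] == 0);
  return 0;
}
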
+ Tensor col_matrix; + col_matrix.ShareDataWith(col); + col_matrix.Resize(col_matrix_shape); + + // output size: (c, o_h, o_w) or (c, o_d, o_h, o_w) + DDim output_shape = + framework::slice_ddim(output->dims(), 1, output->dims().size()); + + // input matrix size: (m, h * w) or (m, d * h * w) + DDim input_matrix_shape = {input->dims()[1], col_matrix_shape[1]}; + + // filter size: (m, c * k_h * k_w) or (m, c * k_d * k_h * k_w) + DDim filter_matrix_shape = {input->dims()[1], col_matrix_shape[0]}; + filter.Resize(filter_matrix_shape); + + output->mutable_data(context.GetPlace()); + math::SetConstant set_zero; + set_zero(context.device_context(), output, static_cast(0)); + + // convolution transpose: gemm + col2im or col2vol (similar to conv-backward + // on input) + for (int i = 0; i < batch_size; i++) { + // batch with size (m, h * w) or (m, d * h * w) + Tensor input_batch = input->Slice(i, i + 1).Resize(input_matrix_shape); + + // output size: (c, o_h, o_w) or (c, o_d, o_h, o_w) + Tensor output_batch = output->Slice(i, i + 1).Resize(output_shape); + + // col_matrix = filter * input_batch + // of shape (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w) + math::matmul(context.device_context(), filter, true, + input_batch, false, static_cast(1.0), + &col_matrix, static_cast(0.0)); + + if (filter_shape_vec.size() == 2) { + // col2im: col_matrix -> dy + // from (c * k_h * k_w, h * w) to (c, o_h, o_w) + math::Col2ImFunctor col2im; + + col2im(context.device_context(), output_batch, col, strides[0], + strides[1], 0, 0, 0, 0); + } else if (filter_shape_vec.size() == 3) { + // col2vol: col_matrix -> dy + // from (c * k_d * k_h * k_w, d * h * w) to (c, o_d, o_h, o_w) + math::Col2VolFunctor col2vol; + col2vol(context.device_context(), output_batch, col, strides[0], + strides[1], strides[2], 0, 0, 0); + } + } + } +}; + +template +class GemmConvTransposeGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + const Tensor* input = context.Input("Input"); + const Tensor* output_grad = + context.Input(framework::GradVarName("Output")); + // For filter, we do not use const pointer b/c we will do reshape, + // but we should avoid modifying its value. + Tensor filter = *context.Input("Filter"); + Tensor* input_grad = + context.Output(framework::GradVarName("Input")); + Tensor* filter_grad = + context.Output(framework::GradVarName("Filter")); + + if ((!input_grad) && (!filter_grad)) return; + + std::vector strides = context.Attr>("strides"); + // Actually, no paddings and groups allowed in conv transpose. 
+ std::vector paddings = context.Attr>("paddings"); + + const int batch_size = static_cast(input->dims()[0]); + + // input_shape_vec: {h, w} or {d, h, w} + std::vector input_shape_vec = framework::vectorize(input->dims()); + input_shape_vec.erase(input_shape_vec.begin(), input_shape_vec.begin() + 2); + + // filter_shape_vec: {k_h, k_w} or {k_d, k_h, k_w} + std::vector filter_shape_vec = framework::vectorize(filter.dims()); + filter_shape_vec.erase(filter_shape_vec.begin(), + filter_shape_vec.begin() + 2); + + // use col_shape in the im2col and col2im (or vol2col and col2vol) + // calculation + // col_shape_vec: {c, k_h, k_w, h, w} or {c, k_d, k_h, k_w, d, h, w} + std::vector col_shape_vec; + col_shape_vec.push_back(output_grad->dims()[1]); + col_shape_vec.insert(col_shape_vec.end(), filter_shape_vec.begin(), + filter_shape_vec.end()); + col_shape_vec.insert(col_shape_vec.end(), input_shape_vec.begin(), + input_shape_vec.end()); + DDim col_shape(framework::make_ddim(col_shape_vec)); + + // use col_matrix_shape in the gemm calculation + // size: (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w) + DDim col_matrix_shape = + framework::flatten_to_2d(col_shape, filter_shape_vec.size() + 1); + + // output size: (c, o_h, o_w) or (c, o_d, o_h, o_w) + DDim output_shape = framework::slice_ddim(output_grad->dims(), 1, + output_grad->dims().size()); + + // input matrix size: (m, h * w) or (m, d * h * w) + DDim input_matrix_shape = {input->dims()[1], col_matrix_shape[1]}; + + // filter size: (m, c * k_h * k_w) or (m, c * k_d * k_h * k_w) + DDim filter_matrix_shape = {input->dims()[1], col_matrix_shape[0]}; + filter.Resize(filter_matrix_shape); + + // convolution transpose grad on input: + // im2col + gemm (similar to conv-forward) + // input need to compute gradient + if (input_grad || filter_grad) { + Tensor col; + col.mutable_data(col_shape, context.GetPlace()); + // col_matrix shares the same piece of data with col, + // but will be reshaped into a two-dimensional matrix shape + // to call the matrix multiplication interface. 
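
One detail worth calling out in the gemms below: the input-gradient call passes 0.0 as the final scale so the output is overwritten, while the filter-gradient call passes 1.0 so per-sample contributions accumulate across the mini-batch. A scalar-sized sketch of that C = alpha * A * B + beta * C convention, with made-up numbers:

#include <cassert>

void Gemm1x1(float a, float b, float alpha, float beta, float* c) {
  *c = alpha * a * b + beta * *c;
}

int main() {
  float d_filter = 0.0f;
  // Two "batch samples" accumulate because beta == 1.
  Gemm1x1(/*a=*/2.0f, /*b=*/3.0f, /*alpha=*/1.0f, /*beta=*/1.0f, &d_filter);
  Gemm1x1(/*a=*/4.0f, /*b=*/5.0f, /*alpha=*/1.0f, /*beta=*/1.0f, &d_filter);
  assert(d_filter == 2 * 3 + 4 * 5);

  float d_input = 123.0f;
  // beta == 0 discards whatever was in the output before.
  Gemm1x1(2.0f, 3.0f, 1.0f, 0.0f, &d_input);
  assert(d_input == 6.0f);
  return 0;
}
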
+ Tensor col_matrix; + col_matrix.ShareDataWith(col); + col_matrix.Resize(col_matrix_shape); + + Tensor filter_grad_; + math::SetConstant set_zero; + + if (input_grad) { + input_grad->mutable_data(context.GetPlace()); + set_zero(context.device_context(), input_grad, static_cast(0)); + } + if (filter_grad) { // filter size (m, c, k_h, k_w) + filter_grad->mutable_data(context.GetPlace()); + set_zero(context.device_context(), filter_grad, static_cast(0)); + filter_grad_ = *filter_grad; + filter_grad_.Resize(filter_matrix_shape); + } + + for (int i = 0; i < batch_size; i++) { + // batch with size (c, o_h * o_w) + Tensor output_grad_batch = + output_grad->Slice(i, i + 1).Resize(output_shape); + + if (filter_shape_vec.size() == 2) { + // im2col: dy -> col matrix + // from (c, o_h, o_w) to (c * k_h * k_w, h * w) + math::Im2ColFunctor im2col; + im2col(context.device_context(), output_grad_batch, col, strides[0], + strides[1], paddings[0], paddings[0], paddings[1], + paddings[1]); + } else if (filter_shape_vec.size() == 3) { + // vol2col: dy -> col_matrix + // from (c, o_d, o_h, o_w) to (c * k_d * k_h * k_w, d * h * w) + math::Vol2ColFunctor vol2col; + vol2col(context.device_context(), output_grad_batch, col, strides[0], + strides[1], strides[2], paddings[0], paddings[1], + paddings[2]); + } + + if (input_grad) { + // batch with size (m, h, w) + Tensor input_grad_batch = + input_grad->Slice(i, i + 1).Resize(input_matrix_shape); + // gemm: dx = filter * dy + // (m, c * k_h * k_w) * (c * k_h * k_w, h * w) -> (m, h * w) + // or + // (m, c * k_d * k_h * k_w) * (c * k_d * k_h * k_w, d * h * w) -> (m, + // d, h, w) + math::matmul(context.device_context(), filter, false, + col_matrix, false, static_cast(1.0), + &input_grad_batch, static_cast(0.0)); + } + if (filter_grad) { + // input batch + Tensor in_batch = input->Slice(i, i + 1).Resize(input_matrix_shape); + // gemm: d_filter = x * dy^T + // (m, c * h * w) * (k_h * k_w, c * h * w) -> (m, k_h * k_w) + // or + // (m, d * h * w) * (d * h * w, c * k_d * k_h * k_w) -> (m, c * k_d * + // k_h * k_w) + math::matmul(context.device_context(), in_batch, false, + col_matrix, true, static_cast(1.0), + &filter_grad_, static_cast(1.0)); + } + } + } + } +}; +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc index 55f69fb03ad69c94dc4ebb8edd651d84e06a5f46..312264ccd48d1405a247a2c864d9f5897c897bea 100644 --- a/paddle/operators/cos_sim_op.cc +++ b/paddle/operators/cos_sim_op.cc @@ -79,15 +79,16 @@ class CosSimOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( Cosine Similarity Operator. -The equation is: Out = X^T * Y / (sqrt(X^T * X) * sqrt(Y^T * Y)). +$Out = X^T * Y / (\sqrt{X^T * X} * \sqrt{Y^T * Y})$ -The input `X` and `Y` must have the same shape, except that the 1st dimension -of input `Y` could be just 1 (different from input `X`), which will be -broadcasted to match the shape of input `X` before computing their cosine +The input X and Y must have the same shape, except that the 1st dimension +of input Y could be just 1 (different from input X), which will be +broadcasted to match the shape of input X before computing their cosine similarity. -Both the input `X` and `Y` can carry the LoD (Level of Details) information, -or not. But the output only shares the LoD with input `X`. +Both the input X and Y can carry the LoD (Level of Details) information, +or not. But the output only shares the LoD information with input X. 
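
A minimal reference implementation of the cosine-similarity formula above, for one pair of row vectors (the broadcasting of Y's first dimension and the LoD handling are deliberately left out):

#include <cassert>
#include <cmath>
#include <vector>

float CosSim(const std::vector<float>& x, const std::vector<float>& y) {
  float xy = 0, xx = 0, yy = 0;
  for (size_t i = 0; i < x.size(); ++i) {
    xy += x[i] * y[i];
    xx += x[i] * x[i];
    yy += y[i] * y[i];
  }
  return xy / (std::sqrt(xx) * std::sqrt(yy));
}

int main() {
  // Parallel vectors -> 1, orthogonal vectors -> 0.
  assert(std::fabs(CosSim({1.f, 2.f, 3.f}, {2.f, 4.f, 6.f}) - 1.0f) < 1e-6f);
  assert(std::fabs(CosSim({1.f, 0.f}, {0.f, 1.f})) < 1e-6f);
  return 0;
}
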
+ )DOC"); } }; diff --git a/paddle/operators/crf_decoding_op.cc b/paddle/operators/crf_decoding_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..d1ce74c4b911545476f0b362df0e0f7a0d14cfb4 --- /dev/null +++ b/paddle/operators/crf_decoding_op.cc @@ -0,0 +1,136 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/crf_decoding_op.h" + +namespace paddle { +namespace operators { +class CRFDecodingOpMaker : public framework::OpProtoAndCheckerMaker { + public: + CRFDecodingOpMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Emission", + "(LoDTensor, default: LoDTensor). A LoDTensor with shape " + "[N x D] where N is the size of the mini-batch and D is the total " + "tag number. This input is the unscaled emission weight matrix of " + "the linear_chain_crf operator."); + AddInput( + "Transition", + "(Tensor, default: Tensor). A Tensor with shape [(D + 2) x D]. " + "This input is the transition weights learned by the linear_chain_crf " + "operator, denoted as w. The 1st row of w are transition weights for " + "the start mask. The 2nd row of w are transition weights for the end " + "mask. Transition weights between other tags begin from the 3rd row of " + "w. See more details in comments of the linear_chain_crf operator."); + AddInput( + "Label", + "(LoDTensor, LoDTensor). The ground truth with shape " + "[N x 1]. This input is optional. See more details in the operator's " + "comments.") + .AsDispensable(); + AddOutput("ViterbiPath", + "(LoDTensor, LoDTensor). The decoding results. What to " + "return changes depending on whether the Input(Label) (the groud " + "truth) is given. See more details in the operator's comment."); + AddComment(R"DOC( +The crf_decoding operator reads the emission feature weights and the transition +freature weights learned by the linear_chain_crf operator. It implements the +Viterbi algorithm which is a dynamic programming algorithm for finding the most +likely sequence of hidden states, called the Viterbi path, that results in a +sequence of observed tags. + +The output of this operator changes according to whether Input(Label) is given: + +1. Input(Label) is given: + +This happens in training. This operator is used to co-work with the chunk_eval +operator. + +When Input(Label) is given, the crf_decoding operator returns a row vector +with shape [N x 1] whose values are fixed to be 0, indicating an incorrect +prediction, or 1 indicating a tag is correctly predicted. Such an ouput is the +input to chunk_eval operator. + +2. Input(Label) is not given: + +This is the standard decoding process. + +The crf_decoding operator returns a row vecotr with shape [N x 1] whose values +range from 0 to maximum tag number - 1. Each element indicates an index of a +predicted tag. 
+)DOC"); + } +}; + +class CRFDecodingOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Emission"), + "Input(Emission) should be not null."); + PADDLE_ENFORCE(ctx->HasInput("Transition"), + "Input(Transition) should be not null."); + + PADDLE_ENFORCE(ctx->HasOutput("ViterbiPath"), + "Output(ViterbiPath) should be not null."); + + auto emission_dims = ctx->GetInputDim("Emission"); + PADDLE_ENFORCE_EQ(emission_dims.size(), 2UL, + "The Input(Emission) should be a 2-D tensor."); + PADDLE_ENFORCE(emission_dims[0], "An empty mini-batch is not allowed."); + + auto transition_dims = ctx->GetInputDim("Transition"); + PADDLE_ENFORCE_EQ(transition_dims.size(), 2UL, + "The Input(Transition) should be a 2-D tensor."); + PADDLE_ENFORCE_EQ( + transition_dims[0] - 2, transition_dims[1], + "An invalid dimension for the Input(Transition), which should " + "be a 2-D tensor with shape [(D + 2) x D]."); + PADDLE_ENFORCE_EQ( + emission_dims[1], transition_dims[1], + "The 2nd dimension of the Input(Emission) and the Input(Transition) " + "should be equal to the tag number."); + + if (ctx->HasInput("Label")) { + auto label_dims = ctx->GetInputDim("Label"); + PADDLE_ENFORCE(label_dims.size() == 2UL && label_dims[1] == 1UL, + "The Input(Label) should be a 2-D tensor with the 2nd " + "dimensions fixed to 1."); + PADDLE_ENFORCE_EQ( + emission_dims[0], label_dims[0], + "The height of Input(Emission) and the height of Input(Label) " + "should be the same."); + } + + ctx->ShareLoD("Emission", /*->*/ "ViterbiPath"); + ctx->SetOutputDim("ViterbiPath", {emission_dims[0], 1}); + } + + protected: + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return framework::ToDataType(ctx.Input("Emission")->type()); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(crf_decoding, ops::CRFDecodingOp, + ops::CRFDecodingOpMaker); +REGISTER_OP_CPU_KERNEL( + crf_decoding, ops::CRFDecodingOpKernel, + ops::CRFDecodingOpKernel); diff --git a/paddle/operators/crf_decoding_op.h b/paddle/operators/crf_decoding_op.h new file mode 100644 index 0000000000000000000000000000000000000000..526e0c5dcb2649b35ee28f5153c8472ca7a0af7b --- /dev/null +++ b/paddle/operators/crf_decoding_op.h @@ -0,0 +1,127 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" +#include "paddle/operators/math/math_function.h" + +namespace paddle { +namespace operators { + +using framework::LoDTensor; +using framework::LoD; +using framework::Tensor; + +template +class CRFDecodingOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), + "The crf_decoding operator can only run on CPU."); + + auto* emission_weights = ctx.Input("Emission"); + auto* transition_weights = ctx.Input("Transition"); + auto* label = ctx.Input("Label"); + auto* decoded_path = ctx.Output("ViterbiPath"); + + PADDLE_ENFORCE_EQ(emission_weights->NumLevels(), 1UL, + "The Input(Emission) should be a sequence."); + auto lod = emission_weights->lod(); + PADDLE_ENFORCE(lod.size(), "Input(Emission) must be a sequence."); + const size_t level = 0; + const size_t seq_num = lod[level].size() - 1; + + int* path = decoded_path->mutable_data(platform::CPUPlace()); + math::SetConstant()(ctx.device_context(), + decoded_path, 0); + for (size_t i = 0; i < seq_num; ++i) { + int start_pos = static_cast(lod[level][i]); + int end_pos = static_cast(lod[level][i + 1]); + Tensor decoded_path_one_seq = decoded_path->Slice(start_pos, end_pos); + Decode(emission_weights->Slice(start_pos, end_pos), *transition_weights, + &decoded_path_one_seq); + } + + if (label) { + PADDLE_ENFORCE_EQ(label->NumLevels(), 1UL, + "The Input(Label) should be a sequence."); + const int* label_value = label->data(); + size_t batch_size = emission_weights->dims()[0]; + for (size_t i = 0; i < batch_size; ++i) { + path[i] = label_value[i] == path[i] ? 1 : 0; + } + } + } + + private: + void Decode(const Tensor& emission_weights, const Tensor& transition_weights, + Tensor* decoded_path) const { + auto emission_dims = emission_weights.dims(); + const size_t seq_len = emission_dims[0]; + const size_t tag_num = emission_dims[1]; + + const size_t state_trans_base_idx = 2; + + const T* x = emission_weights.data(); + const T* w = transition_weights.data(); + int* path = decoded_path->data(); + + // alpha is a memo table. An element alpha(k, v) records the score of the + // best sequence of tags from position 1 to position k with v being the end + // tag. 
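
The recurrence implemented below can be checked in isolation with the compact standalone version that follows; it uses the same transition layout (row 0: start weights, row 1: end weights, rows 2 onward: tag-to-tag weights) and made-up emission scores.

#include <cassert>
#include <vector>

std::vector<int> Viterbi(const std::vector<std::vector<float>>& x,   // [len][tags]
                         const std::vector<std::vector<float>>& w) { // [tags+2][tags]
  const size_t len = x.size(), tags = x[0].size();
  std::vector<std::vector<float>> alpha(len, std::vector<float>(tags));
  std::vector<std::vector<int>> track(len, std::vector<int>(tags, 0));
  for (size_t i = 0; i < tags; ++i) alpha[0][i] = w[0][i] + x[0][i];
  for (size_t k = 1; k < len; ++k)
    for (size_t i = 0; i < tags; ++i) {
      float best = -1e30f;
      int arg = 0;
      for (size_t j = 0; j < tags; ++j) {
        const float s = alpha[k - 1][j] + w[2 + j][i];
        if (s > best) { best = s; arg = static_cast<int>(j); }
      }
      alpha[k][i] = best + x[k][i];
      track[k][i] = arg;
    }
  // add the end weights (row 1) and backtrack
  float best = -1e30f;
  int last = 0;
  for (size_t i = 0; i < tags; ++i) {
    const float s = alpha[len - 1][i] + w[1][i];
    if (s > best) { best = s; last = static_cast<int>(i); }
  }
  std::vector<int> path(len);
  path[len - 1] = last;
  for (size_t k = len - 1; k >= 1; --k) path[k - 1] = last = track[k][last];
  return path;
}

int main() {
  // Two tags, three steps; emissions prefer tag 0, then 1, then 1, and the
  // transitions are neutral, so the decoded path is {0, 1, 1}.
  const std::vector<std::vector<float>> x = {{5, 0}, {0, 5}, {0, 5}};
  const std::vector<std::vector<float>> w = {{0, 0}, {0, 0}, {0, 0}, {0, 0}};
  assert(Viterbi(x, w) == std::vector<int>({0, 1, 1}));
  return 0;
}
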
+ Tensor alpha; + T* alpha_value = alpha.mutable_data(emission_dims, platform::CPUPlace()); + Tensor track; + int* track_value = + track.mutable_data(emission_dims, platform::CPUPlace()); + + for (size_t i = 0; i < tag_num; ++i) alpha_value[i] = w[i] + x[i]; + + for (size_t k = 1; k < seq_len; ++k) { + for (size_t i = 0; i < tag_num; ++i) { + T max_score = -std::numeric_limits::max(); + int max_j = 0; + for (size_t j = 0; j < tag_num; ++j) { + T score = alpha_value[(k - 1) * tag_num + j] + + w[(j + state_trans_base_idx) * tag_num + i]; + if (score > max_score) { + max_score = score; + max_j = j; + } + } + + alpha_value[k * tag_num + i] = max_score + x[k * tag_num + i]; + track_value[k * tag_num + i] = max_j; + } + } + + T max_score = -std::numeric_limits::max(); + int max_i = 0; + for (size_t i = 0; i < tag_num; ++i) { + T score = alpha_value[(seq_len - 1) * tag_num + i] + w[tag_num + i]; + if (score > max_score) { + max_score = score; + max_i = i; + } + } + path[seq_len - 1] = max_i; + for (int k = seq_len - 1; k >= 1; --k) { + path[k - 1] = max_i = track_value[k * tag_num + max_i]; + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/crop_op.cc b/paddle/operators/crop_op.cc index ed78e9e3a3a49b7ff0990b8d13cfe2dae594b722..6752eb8c1c72150b0b1cf5595211ca1d01ef2bf4 100644 --- a/paddle/operators/crop_op.cc +++ b/paddle/operators/crop_op.cc @@ -56,34 +56,35 @@ class CropOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of pad op. " - "The input should be a k-D tensor(k > 0 and k < 7)"); + "The input should be a k-D tensor(k > 0 and k < 7)."); AddInput("Y", - "The input used as reference for cropping" - " with the same dimension as X. ") + "The input used as reference for cropping, " + "which is of the same dimensions as X.") .AsDispensable(); AddOutput("Out", - "The output of crop op " - "with the same dimension as X."); + "The output of crop op, " + "which is of the same dimensions as X."); AddAttr>("offsets", - "A list describing offsets to be cropped." - "The size of offsets list should be as same as " - "dimension size of input X."); + "A list describing offsets to be cropped. " + "The size of offsets list should be the same as " + "the dimension size of input X."); AddAttr>("shape", - "A list describing the shape of output." - "The size of shape list should be as same as " - "dimension size of input X.") + "A list describing the shape of output. " + "The size of shape list should be the same as " + "the dimension size of input X.") .SetDefault(std::vector()); AddComment(R"DOC( Crop Operator. + Crop input into output, as specified by offsets and shape. There are two ways to set shape: -1. referenc input: crop input X as shape as reference input. +1. reference input: crop input X into the same shape as reference input. The dimension of reference input should - be as same as input X. -2. shape list: crop input X by shape described by a list. - The size of shape list should be as same as - dimension size of input X. + be the same as the dimension of input X. +2. shape list: crop input X into the shape described by a list. + The size of shape list should be the same as + the dimension size of input X. The input should be a k-D tensor(k > 0 and k < 7). 
As an example: @@ -91,20 +92,20 @@ Given: X = [[0, 1, 2, 0, 0] [0, 3, 4, 0, 0] - [0, 0, 0, 0, 0]] + [0, 0, 0, 0, 0]], and - offsets = [0, 1] + offsets = [0, 1], and - shape = [2, 2] + shape = [2, 2], -then we get +we get: Out = [[1, 2], - [3, 4]] + [3, 4]]. )DOC"); } diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc index 39df19da677a7dee7d0989d491f8d5511f73a9c7..9d41879b27a24f83090f5abf1325eca5f9488d00 100644 --- a/paddle/operators/cross_entropy_op.cc +++ b/paddle/operators/cross_entropy_op.cc @@ -49,7 +49,7 @@ class CrossEntropyOp : public framework::OperatorWithKernel { } protected: - // Explicitly set that data type of the output of the cross_entropy operator + // Explicitly set that the data type of computation kernel of cross_entropy // is determined by its input "X". framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { @@ -96,7 +96,8 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel { } protected: - // CrossEntropy's data type just determined by "X" + // Explicitly set that the data type of computation kernel of cross_entropy + // is determined by its input "X". framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { return framework::ToDataType(ctx.Input("X")->type()); @@ -113,21 +114,17 @@ class CrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker { "where N is the batch size and D is the number of classes. " "This input is a probability computed by the previous operator, " "which is almost always the result of a softmax operator."); - AddInput( - "Label", - "(Tensor, default Tensor), the ground truth which is " - "a 2-D tensor. " - "When soft_label is set to false, `Label` is a Tensor with shape " - "[N x 1]. " - "When soft_label is set to true, `Label` is a Tensor " - "with shape [N x K]."); + AddInput("Label", + "(Tensor), the ground truth which is a 2-D tensor. When " + "soft_label is set to false, Label is a Tensor with shape " + "[N x 1]. When soft_label is set to true, Label is a " + "Tensor with shape [N x K]."); AddOutput("Y", - "(Tensor, default Tensor), a 2-D tensor " - "with shape [N x 1]. The cross entropy loss."); - AddAttr( - "soft_label", - "(bool, default false), a flag to indicate whether to interpretate " - "the given labels as soft labels.") + "(Tensor, default Tensor), a 2-D tensor with shape " + "[N x 1]. The cross entropy loss."); + AddAttr("soft_label", + "(bool, default false), a flag indicating whether to " + "interpretate the given labels as soft labels.") .SetDefault(false); AddComment(R"DOC( CrossEntropy Operator. @@ -137,13 +134,13 @@ computation. 1) One-hot cross-entropy: soft_label = false, Label[i, 0] indicates the class index for sample i: - Y[i] = -log(X[i, Label[i]]) + $Y[i] = -\log(X[i, Label[i]])$ 2) Soft-label cross-entropy: soft_label = true, Label[i, j] indicates the soft label of class j for sample i: - Y[i] = \sum_j{-Label[i, j] * log(X[i, j])} + $Y[i] = \sum_j{-Label[i, j] * log(X[i, j])}$ Please make sure that in this case the summuation of each row of Label equals one. @@ -153,8 +150,9 @@ computation. non-zero element (equals 1), soft-label cross-entropy degenerates to a one-hot cross-entropy with one-hot label representation. -Both the input `X` and `Label` can carry the LoD (Level of Details) information, -or not. But the output only shares the LoD with input `X`. +Both the input X and Label can carry the LoD (Level of Details) information, +or not. 
But the output only shares the LoD information with input X. + )DOC"); } }; diff --git a/paddle/operators/cross_entropy_op.cu b/paddle/operators/cross_entropy_op.cu index a523cb6fcec16d309f6bb3baf8549bf14756fd7d..530b319a44eac915f0d49eb55bfe5929908eab26 100644 --- a/paddle/operators/cross_entropy_op.cu +++ b/paddle/operators/cross_entropy_op.cu @@ -82,24 +82,19 @@ class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel { int block = 512; int grid = (batch_size * class_num + block - 1) / block; + auto stream = ctx.cuda_device_context().stream(); if (ctx.Attr("soft_label")) { auto* label_data = label->data(); - SoftCrossEntropyGradientKernel<<< - grid, block, 0, reinterpret_cast( - ctx.device_context()) - .stream()>>>(dx_data, dy_data, x_data, label_data, - batch_size, class_num); + SoftCrossEntropyGradientKernel<<>>( + dx_data, dy_data, x_data, label_data, batch_size, class_num); } else { math::SetConstant functor; functor(ctx.device_context(), dx, 0); auto* label_data = label->data(); grid = (batch_size + block - 1) / block; - CrossEntropyGradientKernel<<< - grid, block, 0, reinterpret_cast( - ctx.device_context()) - .stream()>>>(dx_data, dy_data, x_data, label_data, - batch_size, class_num); + CrossEntropyGradientKernel<<>>( + dx_data, dy_data, x_data, label_data, batch_size, class_num); } } }; diff --git a/paddle/operators/decayed_adagrad_op.cc b/paddle/operators/decayed_adagrad_op.cc index 17b394aa07cb0c7ca6e085b61590ff052221b22c..640b4e77448d1b64bcf7375f26c07ff1d2bdeaa3 100644 --- a/paddle/operators/decayed_adagrad_op.cc +++ b/paddle/operators/decayed_adagrad_op.cc @@ -75,11 +75,18 @@ class DecayedAdagradOpMaker : public framework::OpProtoAndCheckerMaker { "Constant for numerical stability") .SetDefault(1.0e-6f); AddComment(R"DOC( +Decayed Adagrad Optimizer. -Decayed Adagrad +The update is done as follows: -moment_out = decay * moment + (1 - decay) * grad * grad -param_out = param - learning_rate * grad / (sqrt(moment_out) + epsilon) +$$ +moment\_out = decay * moment + (1 - decay) * grad * grad \\ +param\_out = param - \frac{learning\_rate * grad}{\sqrt{moment\_out} + epsilon} +$$ + +The original paper(http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf) +does not have an epsilon attribute. It is added here for numerical +stability to avoid the division by zero error. )DOC"); } diff --git a/paddle/operators/dropout_op.cc b/paddle/operators/dropout_op.cc index ff1ccea3b94dcd55c372b707c2afeda874ed212e..818146aca766cb13b93fd024c11c1209655d9e11 100644 --- a/paddle/operators/dropout_op.cc +++ b/paddle/operators/dropout_op.cc @@ -43,22 +43,24 @@ class DropoutOpMaker : public framework::OpProtoAndCheckerMaker { DropoutOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddAttr("dropout_prob", "Probability of setting units to zero.") - .SetDefault(.5f); - AddAttr("is_training", "Whether in training phase.").SetDefault(true); - AddAttr("seed", "Dropout random seed.").SetDefault(0); AddInput("X", "The input of dropout op."); AddOutput("Out", "The output of dropout op."); AddOutput("Mask", "The random sampled dropout mask.").AsIntermediate(); + AddAttr("dropout_prob", "Probability of setting units to zero.") + .SetDefault(.5f); + AddAttr("is_training", "True if in training phase.").SetDefault(true); + AddAttr("seed", "Dropout random seed.").SetDefault(0); + AddComment(R"DOC( Dropout Operator. -'Dropout' refers to randomly dropping out units in a nerual network. 
It is a +Dropout refers to randomly dropping out units in a nerual network. It is a regularization technique for reducing overfitting by preventing neuron co-adaption during training. The dropout operator randomly set (according to the given dropout probability) the outputs of some units to zero, while others -being set to their inputs. +are set equal to their corresponding inputs. + )DOC"); } }; diff --git a/paddle/operators/dynamic_recurrent_op.cc b/paddle/operators/dynamic_recurrent_op.cc index a0b06ac1dc305bc899f9abaafcc980a6150ecda9..d48cc4e8df587708ab93e7d788145adc01c1d3e5 100644 --- a/paddle/operators/dynamic_recurrent_op.cc +++ b/paddle/operators/dynamic_recurrent_op.cc @@ -386,12 +386,13 @@ class DynamicRecurrentOpProtoAndCheckerMaker RNNAlgorithm::kArgNames[RNNAlgorithm::ComputeMode::kForward]; // inputs and outputs stored in proto AddInput(name.inlinks, - "the inputs that need to be segmented for each step.") + "The inputs that need to be segmented for each step.") .AsDuplicable(); - AddInput(name.initial_states, "variables to initialize states.") + AddInput(name.initial_states, "Variables to initialize the states.") .AsDuplicable(); - AddOutput(name.outlinks, "the outputs that need to concated for all steps.") + AddOutput(name.outlinks, + "The outputs that need to be concatenated for all steps.") .AsDuplicable(); AddOutput(name.step_scopes, "step scopes"); @@ -399,7 +400,12 @@ class DynamicRecurrentOpProtoAndCheckerMaker AddAttr>(name.ex_states, "names of ex_states"); AddAttr>(name.states, "names of states"); - AddComment("This is a RNN operator for varience-length sequences."); + AddComment(R"DOC( +Dynamic Recurrent Operator. + +This is a RNN operator for varience-length sequences. + +)DOC"); } }; diff --git a/paddle/operators/elementwise_add_op.cc b/paddle/operators/elementwise_add_op.cc index d9bc80c869c023caebf0b45ed24f2def3f0b1dd8..ebe1de90c7d245756de759d8675a30f955843798 100644 --- a/paddle/operators/elementwise_add_op.cc +++ b/paddle/operators/elementwise_add_op.cc @@ -22,7 +22,7 @@ class ElementwiseAddOpMaker : public ElementwiseOpMaker { ElementwiseAddOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : ElementwiseOpMaker(proto, op_checker) { - SetComment("add", "Out = X + Y"); + SetComment("Add", "$Out = X + Y$"); AddComment(comment_); } }; diff --git a/paddle/operators/elementwise_div_op.cc b/paddle/operators/elementwise_div_op.cc index 3f56344d0007b5f14fd9b5b9b44a9b29d3c42f2a..de75816a249002549940b04d928c88c17d075917 100644 --- a/paddle/operators/elementwise_div_op.cc +++ b/paddle/operators/elementwise_div_op.cc @@ -22,7 +22,7 @@ class ElementwiseDivOpMaker : public ElementwiseOpMaker { ElementwiseDivOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : ElementwiseOpMaker(proto, op_checker) { - SetComment("Div", "Out = X / Y"); + SetComment("Div", "$Out = X / Y$"); AddComment(comment_); } }; diff --git a/paddle/operators/elementwise_mul_op.cc b/paddle/operators/elementwise_mul_op.cc index da7765aa6a7a81c9e0b4f462022cad54c16aec47..ffa10486f123963274aa478eb4c607e32138bcec 100644 --- a/paddle/operators/elementwise_mul_op.cc +++ b/paddle/operators/elementwise_mul_op.cc @@ -23,7 +23,7 @@ class ElementwiseMulOpMaker : public ElementwiseOpMaker { ElementwiseMulOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : ElementwiseOpMaker(proto, op_checker) { - SetComment("Mul", "Out = X ⊙ Y"); + SetComment("Mul", "$Out = X \\odot\\ Y$"); AddComment(comment_); } }; diff --git a/paddle/operators/elementwise_op.h 
b/paddle/operators/elementwise_op.h index fce4b24a22f40c9cc57738273a758d0d48ff5e91..56e5eb69bc382a2c15d88b759fa6987f02c6cabb 100644 --- a/paddle/operators/elementwise_op.h +++ b/paddle/operators/elementwise_op.h @@ -46,37 +46,42 @@ class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker { ElementwiseOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", R"DOC( -The first input of elementwise op, it's a tensor of any dimensions. -)DOC"); - AddInput("Y", R"DOC( -The sencond input of elementwise op, it's a tensor and it's dimensions -must be small or equal to X's dimensions. -)DOC"); + AddInput("X", "(Tensor) The first input tensor of elementwise op"); + AddInput("Y", "(Tensor) The second input tensor of elementwise op"); + AddOutput("Out", "The output of elementwise op"); AddAttr("axis", - R"DOC( -When the shape(Y) does not equal the shape(X),Y will be broadcasted -to match the shape of X and axis should be dimension index Y in X - )DOC") + "(int, default -1) The starting dimension index " + "for broadcasting Y onto X") .SetDefault(-1) .EqualGreaterThan(-1); - - AddOutput("Out", "The output of elementwise op"); comment_ = R"DOC( -Limited elementwise {name} operator.The equation is: Out = {equation}. -1. The shape of Y should be same with X or -2. Y's shape is a subset of X. - Y will be broadcasted to match the shape of X and axis should be dimension index Y in X. - - example: - shape(X) = (2, 3, 4, 5), shape(Y) = (,) - shape(X) = (2, 3, 4, 5), shape(Y) = (5,) - shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5) - shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 - shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 +Limited Elementwise {name} Operator. + +The equation is: + +{equation} + +X is a tensor of any dimension and the dimensions of tensor Y must be smaller than +or equal to the dimensions of X. + +There are two cases for this operator: +1. The shape of Y is same with X; +2. The shape of Y is a subset of X. + +For case 2: +Y will be broadcasted to match the shape of X and axis should be +the starting dimension index for broadcasting Y onto X. + +example: + shape(X) = (2, 3, 4, 5), shape(Y) = (,) + shape(X) = (2, 3, 4, 5), shape(Y) = (5,) + shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5) + shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 Both the input X and Y can carry the LoD (Level of Details) information, -or not. But the output only shares the LoD with input X. +or not. But the output only shares the LoD information with input X. 
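To make the `axis` semantics above concrete, here is a minimal standalone sketch, not part of this patch and with purely illustrative names, of how an element of X lines up with the Y element it is combined with when Y is broadcast onto X starting at `axis`:

```cpp
// Hypothetical helper, for illustration only: maps a linear index into X to
// the linear index of the Y element it is combined with, assuming
// shape(Y) == shape(X)[axis : axis + y_dims.size()] as required above.
#include <cassert>
#include <cstdio>
#include <vector>

size_t BroadcastIndex(const std::vector<size_t>& x_dims,
                      const std::vector<size_t>& y_dims, int axis,
                      size_t x_idx) {
  // Decompose x_idx into the multi-dimensional coordinate of X.
  std::vector<size_t> coord(x_dims.size());
  for (int d = static_cast<int>(x_dims.size()) - 1; d >= 0; --d) {
    coord[d] = x_idx % x_dims[d];
    x_idx /= x_dims[d];
  }
  // Keep only the coordinates covered by Y and re-linearize them.
  size_t y_idx = 0;
  for (size_t d = 0; d < y_dims.size(); ++d) {
    assert(x_dims[axis + d] == y_dims[d]);
    y_idx = y_idx * y_dims[d] + coord[axis + d];
  }
  return y_idx;
}

int main() {
  // shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), axis = 1:
  // the X element at coordinate (1, 2, 3, 4) pairs with Y at (2, 3).
  std::vector<size_t> x_dims = {2, 3, 4, 5}, y_dims = {3, 4};
  size_t x_idx = ((1 * 3 + 2) * 4 + 3) * 5 + 4;
  printf("%zu\n", BroadcastIndex(x_dims, y_dims, /*axis=*/1, x_idx));  // 11
  return 0;
}
```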
+ )DOC"; AddComment(comment_); } diff --git a/paddle/operators/elementwise_sub_op.cc b/paddle/operators/elementwise_sub_op.cc index 3e4f98fdb35b148931a67d511fe41958eb523f99..39702dad0ee61de71ff0d54765e6f73de93cee9c 100644 --- a/paddle/operators/elementwise_sub_op.cc +++ b/paddle/operators/elementwise_sub_op.cc @@ -22,7 +22,7 @@ class ElementwiseSubOpMaker : public ElementwiseOpMaker { ElementwiseSubOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : ElementwiseOpMaker(proto, op_checker) { - SetComment("Sub", "Out = X - Y"); + SetComment("Sub", "$Out = X - Y$"); AddComment(comment_); } }; diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc index 0e5b263eae904d97b61d41691b848e4fa2c17971..0dd84cbeaafbafd45132b0a0b744554ce7475411 100644 --- a/paddle/operators/feed_op.cc +++ b/paddle/operators/feed_op.cc @@ -59,8 +59,13 @@ class FeedOpInfoMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of feed op"); AddOutput("Out", "The output of feed op"); - AddComment("feed op, it should not be configured by users directly"); - AddAttr("col", "column of feed"); + AddAttr("col", "(int) The column of feed"); + AddComment(R"DOC( +Feed Operator. + +It should not be configured by users directly. + +)DOC"); } }; diff --git a/paddle/operators/fetch_op.cc b/paddle/operators/fetch_op.cc index f1086e3dc774a5e57f1abb5d4f91f859fc0e64aa..8108ae69dec4bafd1c04d5ab05eef6f467d4c6e8 100644 --- a/paddle/operators/fetch_op.cc +++ b/paddle/operators/fetch_op.cc @@ -66,8 +66,13 @@ class FetchOpInfoMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of fetch op"); AddOutput("Out", "The output of fetch op"); - AddComment("fetch op, it should not be configured by users directly"); - AddAttr("col", "column of fetch"); + AddAttr("col", "(int) The column of fetch"); + AddComment(R"DOC( +Fetch Operator. + +It should not be configured by users directly. 
+ +)DOC"); } }; } // namespace operators diff --git a/paddle/operators/fill_constant_batch_size_like_op.cc b/paddle/operators/fill_constant_batch_size_like_op.cc index 0244adb42392c707d755e95c7abdebd826c219b4..232d88e26bd2fb17f52c2dc3626d47c2615b1d42 100644 --- a/paddle/operators/fill_constant_batch_size_like_op.cc +++ b/paddle/operators/fill_constant_batch_size_like_op.cc @@ -34,15 +34,18 @@ class FillConstantBatchSizeLikeOp : public framework::OperatorWithKernel { std::vector shape_int64(shape.size(), 0); std::transform(shape.begin(), shape.end(), shape_int64.begin(), [](int a) { return static_cast(a); }); - auto dims = framework::make_ddim(shape_int64); + auto output_dim = framework::make_ddim(shape_int64); - int dim_idx = ctx->Attrs().Get("dim_idx"); - PADDLE_ENFORCE_GE(dim_idx, 0); - PADDLE_ENFORCE_GT(static_cast(shape.size()), dim_idx); - PADDLE_ENFORCE_GT(ctx->GetInputDim("Input").size(), dim_idx); + int input_dim_idx = ctx->Attrs().Get("input_dim_idx"); + PADDLE_ENFORCE_GE(input_dim_idx, 0); + PADDLE_ENFORCE_GT(ctx->GetInputDim("Input").size(), input_dim_idx); - dims[dim_idx] = ctx->GetInputDim("Input")[dim_idx]; - ctx->SetOutputDim("Out", dims); + int output_dim_idx = ctx->Attrs().Get("output_dim_idx"); + PADDLE_ENFORCE_GE(output_dim_idx, 0); + PADDLE_ENFORCE_GT(static_cast(shape.size()), output_dim_idx); + + output_dim[output_dim_idx] = ctx->GetInputDim("Input")[input_dim_idx]; + ctx->SetOutputDim("Out", output_dim); } protected: @@ -69,21 +72,30 @@ class FillConstantBatchSizeLikeOpMaker "(Tensor) Tensor of specified shape will be filled " "with the specified value"); AddAttr>("shape", "(vector) The shape of the output"); - AddAttr("dim_idx", - "(int, default 0) the index of batch size dimension") + AddAttr("input_dim_idx", + "(int, default 0) the index of input's batch size dimension") + .SetDefault(0); + AddAttr("output_dim_idx", + "(int, default 0) the index of output's batch size dimension") .SetDefault(0); AddAttr("value", "(float, default 0) The value to be filled") .SetDefault(0.0f); - AddComment(R"DOC(Fill up a variable with specified constant value.)DOC"); + AddComment(R"DOC( +FillConstantBatchSizeLike Operator. + +Fill up a variable with specified constant value. 
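The renamed `input_dim_idx`/`output_dim_idx` attributes above decide which input dimension is copied into which slot of the output shape. A minimal standalone sketch of that inference, with illustrative names only (not the operator's actual code):

```cpp
// Hypothetical sketch of the InferShape logic: copy the input's batch-size
// dimension (input_dim_idx) into the requested slot (output_dim_idx) of the
// shape attribute, and use the result as the output shape.
#include <cstdio>
#include <vector>

std::vector<long> InferOutputDims(const std::vector<long>& input_dims,
                                  std::vector<long> shape_attr,
                                  int input_dim_idx, int output_dim_idx) {
  shape_attr[output_dim_idx] = input_dims[input_dim_idx];
  return shape_attr;
}

int main() {
  // Input is a (32, 784) mini-batch and the "shape" attribute is [1, 10]:
  // the output becomes (32, 10), inheriting the batch size from the input.
  auto dims = InferOutputDims({32, 784}, {1, 10}, /*input_dim_idx=*/0,
                              /*output_dim_idx=*/0);
  printf("[%ld, %ld]\n", dims[0], dims[1]);  // [32, 10]
  return 0;
}
```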
+ +)DOC"); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_WITHOUT_GRADIENT(fill_constant_batch_size_like, - ops::FillConstantBatchSizeLikeOp, - ops::FillConstantBatchSizeLikeOpMaker); +REGISTER_OPERATOR(fill_constant_batch_size_like, + ops::FillConstantBatchSizeLikeOp, + paddle::framework::EmptyGradOpMaker, + ops::FillConstantBatchSizeLikeOpMaker); REGISTER_OP_CPU_KERNEL( fill_constant_batch_size_like, ops::FillConstantBatchSizeLikeOpKernel, diff --git a/paddle/operators/fill_constant_op.cc b/paddle/operators/fill_constant_op.cc index 7a861b6cfc0fab312f4e5a7cce2fc28f923173d2..f60425051cc93b77ef9e383f90a62eb9dfed44de 100644 --- a/paddle/operators/fill_constant_op.cc +++ b/paddle/operators/fill_constant_op.cc @@ -35,7 +35,9 @@ class FillConstantOp : public framework::OperatorWithKernel { protected: framework::DataType IndicateDataType( const framework::ExecutionContext &ctx) const override { - return static_cast(ctx.Attr("data_type")); + int data_type = ctx.Attr("data_type"); + VLOG(10) << " FillConstant data_type = " << data_type; + return static_cast(data_type); } }; @@ -54,7 +56,12 @@ class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker { AddOutput("Out", "(Tensor) Tensor of specified shape will be filled " "with the specified value"); - AddComment(R"DOC(Fill up a variable with specified constant value.)DOC"); + AddComment(R"DOC( +FillConstantBatchSizeLike Operator. + +Fill up a variable with specified constant value. + +)DOC"); } }; } // namespace operators @@ -66,4 +73,5 @@ REGISTER_OP_WITHOUT_GRADIENT(fill_constant, ops::FillConstantOp, REGISTER_OP_CPU_KERNEL( fill_constant, ops::FillConstantOpKernel, ops::FillConstantOpKernel, - ops::FillConstantOpKernel); + ops::FillConstantOpKernel, + ops::FillConstantOpKernel); diff --git a/paddle/operators/fill_constant_op.cu b/paddle/operators/fill_constant_op.cu index a57b11c6cba77ad7d258c47a8ebf887f359f9522..bca402a8b988b570a083e9ce253342304f4b8946 100644 --- a/paddle/operators/fill_constant_op.cu +++ b/paddle/operators/fill_constant_op.cu @@ -20,4 +20,5 @@ namespace ops = paddle::operators; REGISTER_OP_GPU_KERNEL( fill_constant, ops::FillConstantOpKernel, ops::FillConstantOpKernel, - ops::FillConstantOpKernel); + ops::FillConstantOpKernel, + ops::FillConstantOpKernel); diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/operators/fill_zeros_like_op.cc index ed529ac40aaf179b35a9ab32e11ed7dbbe9289ba..8ab39d4fb012b8fa3883f33e4d15be7918500354 100644 --- a/paddle/operators/fill_zeros_like_op.cc +++ b/paddle/operators/fill_zeros_like_op.cc @@ -37,11 +37,13 @@ class FillZerosLikeOpMaker : public framework::OpProtoAndCheckerMaker { framework::OpAttrChecker *op_checker) : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of fill-zeros-like op."); - AddOutput("Y", "The varibale will be filled up with zeros."); + AddOutput("Y", "The variable will be filled up with zeros."); AddComment(R"DOC( -Fill up a vriable with zeros. +FillZerosLike Operator. + +Fill up a variable with zeros. +The output will have the same size as the input. -The output will have the same size with input. 
)DOC"); } }; diff --git a/paddle/operators/gather_op.cc b/paddle/operators/gather_op.cc index f6c7f472da24a1a60c0d2538ae643bdc8e55b10f..aee672500ee5d1bf6cc7ef872f2cb6c408de6d9e 100644 --- a/paddle/operators/gather_op.cc +++ b/paddle/operators/gather_op.cc @@ -67,11 +67,28 @@ class GatherOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The source input of gather op"); AddInput("Index", "The index input of gather op"); - AddOutput("Out", "The output of add op"); + AddOutput("Out", "The output of gather op"); AddComment(R"DOC( -Gather Operator by selecting from the first axis, +Gather Operator. + +$Out = X[Index]$ + +Out is obtained by gathering entries of the outer-most dimension +of X indexed by Index and concatenate them together. + +Example: + +X = [[1, 2], + [3, 4], + [5, 6]] + +Index = [[1, 2]] + +Then: + +Out = [[3, 4], + [5, 6]] -Out = X[Index] )DOC"); } }; diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc index be7f542a7a274d88d2dac953995d6a83a6ce022d..802c98ae764d02af3143d1d39b714d486791da82 100644 --- a/paddle/operators/gaussian_random_op.cc +++ b/paddle/operators/gaussian_random_op.cc @@ -68,21 +68,35 @@ class GaussianRandomOpMaker : public framework::OpProtoAndCheckerMaker { GaussianRandomOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : framework::OpProtoAndCheckerMaker(proto, op_checker) { - AddOutput("Out", "output matrix of random op"); - AddComment(R"DOC( -GaussianRandom operator. -Use to initialize tensor with gaussian random generator. -)DOC"); + AddOutput("Out", "Output matrix of gaussian random op"); - AddAttr>("shape", "The dimension of random tensor."); - AddAttr("mean", "mean of random tensor.").SetDefault(.0f); - AddAttr("std", "std of random tensor.").SetDefault(1.0f); + AddAttr>("shape", + "(vector) " + "The dimension of random tensor."); + AddAttr("mean", + "(float, default 0.0) " + "mean of random tensor.") + .SetDefault(.0f); + AddAttr("std", + "(float, default 1.0) " + "std of random tensor.") + .SetDefault(1.0f); AddAttr("seed", + "(int, default 0) " "Random seed of generator." - "0 means use system wide seed") + "0 means use system wide seed.") .SetDefault(0); - AddAttr("data_type", "output data type") + AddAttr("data_type", + "(int, default 5(FP32)) " + "Output data type.") .SetDefault(framework::DataType::FP32); + + AddComment(R"DOC( +GaussianRandom Operator. + +Used to initialize tensors with gaussian random generator. + +)DOC"); } }; diff --git a/paddle/operators/gru_op.cc b/paddle/operators/gru_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..5aa03f8916a67222fb0ca5781533766063e52683 --- /dev/null +++ b/paddle/operators/gru_op.cc @@ -0,0 +1,220 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#include "paddle/operators/gru_op.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; + +class GRUOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Input"), + "Input(%s) of GRUOp should not be null.", "Input"); + PADDLE_ENFORCE(ctx->HasInput("Weight"), + "Input(%s) of GRUOp should not be null.", "Weight"); + PADDLE_ENFORCE(ctx->HasOutput("BatchGate"), + "Output(%s) of GRUOp should not be null.", "BatchGate"); + PADDLE_ENFORCE(ctx->HasOutput("BatchResetHiddenPrev"), + "Output(%s) of GRUOp should not be null.", + "BatchResetHiddenPrev"); + PADDLE_ENFORCE(ctx->HasOutput("BatchHidden"), + "Output(%s) of GRUOp should not be null.", "BatchHidden"); + PADDLE_ENFORCE(ctx->HasOutput("Hidden"), + "Output(%s) of GRUOp should not be null.", "Hidden"); + auto input_dims = ctx->GetInputDim("Input"); + auto weight_dims = ctx->GetInputDim("Weight"); + int input_size = input_dims[1]; + int frame_size = weight_dims[0]; + PADDLE_ENFORCE_EQ(input_size, frame_size * 3, + "The input_size must be 3 times of frame_size in GRUOp."); + PADDLE_ENFORCE_EQ( + weight_dims[1], frame_size * 3, + "The shape of Weight matrix must be [frame_size, frame_size * 3]."); + if (ctx->HasInput("H0")) { + auto h0_dims = ctx->GetInputDim("H0"); + PADDLE_ENFORCE_EQ(h0_dims[1], frame_size, + "The width of H0 must be equal to frame_size."); + } + if (ctx->HasInput("Bias")) { + auto bias_dims = ctx->GetInputDim("Bias"); + int bias_height = bias_dims[0]; + int bias_width = bias_dims[1]; + PADDLE_ENFORCE_EQ(bias_height, 1, + "The shape of Bias must be [1, frame_size * 3]."); + PADDLE_ENFORCE_EQ(bias_width, frame_size * 3, + "The shape of Bias must be [1, frame_size * 3]."); + } + ctx->SetOutputDim("BatchGate", input_dims); + ctx->SetOutputDim("BatchResetHiddenPrev", {input_dims[0], frame_size}); + ctx->SetOutputDim("BatchHidden", {input_dims[0], frame_size}); + ctx->SetOutputDim("Hidden", {input_dims[0], frame_size}); + ctx->ShareLoD("Input", "Hidden"); + } +}; + +class GRUOpMaker : public framework::OpProtoAndCheckerMaker { + public: + GRUOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Input", + "(LoDTensor) The first input is a LodTensor, which supports " + "variable-time length input sequence. The underlying tensor in " + "this LoDTenosr is a matrix with shape (T X 3D), where, T is the " + "total time steps in this mini-batch, D is the hidden size."); + AddInput("H0", + "(Tensor, optional) The initial hidden state is an optional " + "input. This is a tensor with shape (N x D), where N is the " + "batch size, D is the hidden size.") + .AsDispensable(); + AddInput( + "Weight", + "(Tensor) The learnable hidden-hidden weight matrix with shape " + "(D x 3D), where D is the hidden size. The elements continuous in " + "memory can be divided into two parts. 
The first part are weights of " + "the update gate and reset gate with shape (D x 2D), and the second " + "part are weights of output candidate with shape (D x D)."); + AddInput("Bias", + "(Tensor, optional) Bias vector with shape (1 x 3D) concating " + "bias of the update gate, reset gate and output candidate.") + .AsDispensable(); + AddOutput("BatchGate", + "(LoDTensor) To compute with batches, sequence data will be " + "reorganized into several successive batches each containing " + "data from the same time step. The LoDTensor BatchGate contains " + "the update gate, reset gate and output candidate values " + "organized in batches. The LoD size is 2. The first LoD contains " + "the batch offsets and the second LoD contains the indexes in " + "the raw sequence data.") + .AsIntermediate(); + AddOutput( + "BatchResetHiddenPrev", + "(LoDTensor) The reseted hidden state LoDTensor organized in batches. " + "This LoDTensor is a matrix with shape (T X D) and has the same LoD " + "with `BatchGate`.") + .AsIntermediate(); + AddOutput( + "BatchHidden", + "(LoDTensor) The hidden state LoDTensor organized in batches. " + "This LoDTensor is a matrix with shape (T X D) and has the same LoD " + "with `BatchGate`.") + .AsIntermediate(); + AddOutput( + "Hidden", + "(LoDTensor) the hidden state LoDTensor organized in sequences. " + "This LoDTensor is a matrix with shape (T X D) and has the same LoD " + "with `BatchGate`."); + AddAttr("activation", + "(string, default tanh) " + "The activation type used for output candidate {h}_t.") + .SetDefault("tanh"); + AddAttr( + "gate_activation", + "(string, default sigmoid) " + "The activation type used in update gate and reset gate.") + .SetDefault("sigmoid"); + AddAttr("is_reverse", + "(bool, defalut: False) " + "whether to compute reversed GRU.") + .SetDefault(false); + AddComment(R"DOC( +GRU Operator implements part calculations of the complete GRU as following: + +\f[ +update \ gate: u_t = actGate(xu_t + W_u * h_{t-1} + b_u) \\ +reset \ gate: r_t = actGate(xr_t + W_r * h_{t-1} + b_r) \\ +output \ candidate: {h}_t = actNode(xc_t + W_c * dot(r_t, h_{t-1}) + b_c) \\ +output: h_t = dot((1 - u_t), h_{t-1}) + dot(u_t, {h}_t) +\f] + +@note To implement the complete GRU, fully-connected operator must be used +before to feed xu, xr and xc as the Input of GRU operator. 
+)DOC"); + } +}; + +class GRUGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Input"), + "Input(%s) of GRUGradOp should not be null.", "Input"); + PADDLE_ENFORCE(ctx->HasInput("Weight"), + "Input(%s) of GRUGradOp should not be null.", "Weight"); + PADDLE_ENFORCE(ctx->HasInput("BatchGate"), + "Input(%s) of GRUGradOp should not be null.", "BatchGate"); + PADDLE_ENFORCE(ctx->HasInput("BatchResetHiddenPrev"), + "Input(%s) of GRUGradOp should not be null.", + "BatchResetHiddenPrev"); + PADDLE_ENFORCE(ctx->HasInput("BatchHidden"), + "Input(%s) of GRUOp should not be null.", "BatchHidden"); + PADDLE_ENFORCE(ctx->HasInput("Hidden"), + "Input(%s) of GRUGradOp should not be null.", "Hidden"); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Hidden")), + "Input(%s@GRAD) of GRUGradOp should not be null.", "Hidden"); + auto input_dims = ctx->GetInputDim("Input"); + auto weight_dims = ctx->GetInputDim("Weight"); + int input_size = input_dims[1]; + int frame_size = weight_dims[0]; + int weight_height = weight_dims[0]; + int weight_width = weight_dims[1]; + PADDLE_ENFORCE_EQ(input_size, frame_size * 3, + "The input_size must be 3 times of frame_size in GRUOp."); + PADDLE_ENFORCE_EQ( + weight_height, frame_size, + "The shape of Weight matrix must be [frame_size, frame_size * 3]."); + PADDLE_ENFORCE_EQ( + weight_width, frame_size * 3, + "The shape of Weight matrix must be [frame_size, frame_size * 3]."); + if (ctx->HasInput("H0")) { + auto h0_dims = ctx->GetInputDim("H0"); + PADDLE_ENFORCE_EQ(h0_dims[1], frame_size, + "The width of H0 must be equal to frame_size."); + auto h0_grad_name = framework::GradVarName("H0"); + if (ctx->HasOutput(h0_grad_name)) + ctx->SetOutputDim(h0_grad_name, h0_dims); + } + if (ctx->HasInput("Bias")) { + auto bias_dims = ctx->GetInputDim("Bias"); + int bias_height = bias_dims[0]; + int bias_width = bias_dims[1]; + PADDLE_ENFORCE_EQ(bias_height, 1, + "The shape of Bias must be [1, frame_size * 3]."); + PADDLE_ENFORCE_EQ(bias_width, frame_size * 3, + "The shape of Bias must be [1, frame_size * 3]."); + auto bias_grad_name = framework::GradVarName("Bias"); + if (ctx->HasOutput(bias_grad_name)) + ctx->SetOutputDim(bias_grad_name, bias_dims); + } + auto input_grad_name = framework::GradVarName("Input"); + if (ctx->HasOutput(input_grad_name)) + ctx->SetOutputDim(input_grad_name, input_dims); + auto weight_grad_name = framework::GradVarName("Weight"); + if (ctx->HasOutput(weight_grad_name)) + ctx->SetOutputDim(weight_grad_name, weight_dims); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(gru, ops::GRUOp, ops::GRUOpMaker, gru_grad, ops::GRUGradOp); +REGISTER_OP_CPU_KERNEL(gru, ops::GRUKernel, + ops::GRUKernel); +REGISTER_OP_CPU_KERNEL(gru_grad, + ops::GRUGradKernel, + ops::GRUGradKernel); diff --git a/paddle/operators/gru_op.cu b/paddle/operators/gru_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..35538c74b4bf678f8068999bfadb2589a1671be0 --- /dev/null +++ b/paddle/operators/gru_op.cu @@ -0,0 +1,23 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/gru_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(gru, ops::GRUKernel, + ops::GRUKernel); +REGISTER_OP_GPU_KERNEL(gru_grad, + ops::GRUGradKernel, + ops::GRUGradKernel); diff --git a/paddle/operators/gru_op.h b/paddle/operators/gru_op.h new file mode 100644 index 0000000000000000000000000000000000000000..ba90ec9816c40a6a49065ac6efcee6b93dffce90 --- /dev/null +++ b/paddle/operators/gru_op.h @@ -0,0 +1,231 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include "paddle/operators/math/gru_compute.h" +#include "paddle/operators/math/math_function.h" +#include "paddle/operators/math/sequence2batch.h" + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; + +template +using EigenMatrix = framework::EigenMatrix; + +template +class GRUKernel : public framework::OpKernel { + public: + void BatchCompute(const framework::ExecutionContext& context) const { + auto* input = context.Input("Input"); + auto* h0 = context.Input("H0"); + const T* h0_data = h0 ? 
h0->data() : nullptr; + auto* weight = context.Input("Weight"); + const T* weight_data = weight->data(); + auto* bias = context.Input("Bias"); + auto* batch_gate = context.Output("BatchGate"); + batch_gate->mutable_data(context.GetPlace()); + auto* batch_reset_hidden_prev = + context.Output("BatchResetHiddenPrev"); + batch_reset_hidden_prev->mutable_data(context.GetPlace()); + auto* batch_hidden = context.Output("BatchHidden"); + batch_hidden->mutable_data(context.GetPlace()); + auto* hidden = context.Output("Hidden"); + hidden->mutable_data(context.GetPlace()); + + context.ShareLoD("Input", "Hidden"); + + auto hidden_dims = hidden->dims(); + + bool is_reverse = context.Attr("is_reverse"); + math::LoDTensor2BatchFunctor to_batch; + to_batch(context.device_context(), *input, *batch_gate, true, is_reverse); + + int frame_size = hidden_dims[1]; + int batch_size = hidden_dims[0]; + auto g = EigenMatrix::From(*batch_gate); + auto place = context.GetEigenDevice(); + if (bias) { + auto b = EigenMatrix::From(*bias); + g.device(place) = g + + b.reshape(Eigen::array({{1, frame_size * 3}})) + .broadcast(Eigen::array({{batch_size, 1}})); + } + + math::hl_gru_value gru_value; + gru_value.gateWeight = const_cast(weight_data); + gru_value.stateWeight = + const_cast(weight_data + 2 * frame_size * frame_size); + gru_value.prevOutValue = const_cast(h0_data); + auto batch_starts = batch_gate->lod()[0]; + size_t num_batch = batch_starts.size() - 1; + for (size_t n = 0; n < num_batch; n++) { + int bstart = static_cast(batch_starts[n]); + int bend = static_cast(batch_starts[n + 1]); + int cur_batch_size = bend - bstart; + + Tensor gate_t = batch_gate->Slice(bstart, bend); + Tensor reset_hidden_prev_t = batch_reset_hidden_prev->Slice(bstart, bend); + Tensor hidden_t = batch_hidden->Slice(bstart, bend); + gru_value.outputValue = hidden_t.data(); + gru_value.gateValue = gate_t.data(); + gru_value.resetOutputValue = reset_hidden_prev_t.data(); + math::GRUUnitFunctor::compute( + context.device_context(), gru_value, frame_size, cur_batch_size, + math::ActiveType(context.Attr("activation")), + math::ActiveType(context.Attr("gate_activation"))); + gru_value.prevOutValue = gru_value.outputValue; + } + + math::Batch2LoDTensorFunctor to_seq; + batch_hidden->set_lod(batch_gate->lod()); + to_seq(context.device_context(), *batch_hidden, *hidden); + } + + void Compute(const framework::ExecutionContext& context) const override { + BatchCompute(context); + } +}; + +template +class GRUGradKernel : public framework::OpKernel { + public: + void BatchCompute(const framework::ExecutionContext& context) const { + auto* h0 = context.Input("H0"); + const T* h0_data = h0 ? 
h0->data() : nullptr; + auto* weight = context.Input("Weight"); + const T* weight_data = weight->data(); + auto* batch_gate = context.Input("BatchGate"); + auto* batch_reset_hidden_prev = + context.Input("BatchResetHiddenPrev"); + auto* batch_hidden = context.Input("BatchHidden"); + auto* hidden = context.Input("Hidden"); + auto* hidden_grad = + context.Input(framework::GradVarName("Hidden")); + auto* input_grad = + context.Output(framework::GradVarName("Input")); + auto* h0_grad = context.Output(framework::GradVarName("H0")); + auto* weight_grad = + context.Output(framework::GradVarName("Weight")); + auto* bias_grad = context.Output(framework::GradVarName("Bias")); + + auto gate_dims = batch_gate->dims(); + auto hidden_dims = hidden->dims(); + int frame_size = hidden_dims[1]; + + math::LoDTensor2BatchFunctor to_batch; + LoDTensor batch_hidden_grad, batch_gate_grad, batch_reset_hidden_prev_grad; + batch_hidden_grad.mutable_data(hidden_dims, context.GetPlace()); + batch_gate_grad.mutable_data(gate_dims, context.GetPlace()); + batch_reset_hidden_prev_grad.mutable_data(hidden_dims, + context.GetPlace()); + math::SetConstant zero; + zero(context.device_context(), &batch_hidden_grad, static_cast(0.0)); + zero(context.device_context(), &batch_gate_grad, static_cast(0.0)); + zero(context.device_context(), &batch_reset_hidden_prev_grad, + static_cast(0.0)); + + bool is_reverse = context.Attr("is_reverse"); + batch_hidden_grad.set_lod(batch_hidden->lod()); + to_batch(context.device_context(), *hidden_grad, batch_hidden_grad, false, + is_reverse); + + math::hl_gru_value gru_value; + gru_value.gateWeight = const_cast(weight_data); + gru_value.stateWeight = + const_cast(weight_data + 2 * frame_size * frame_size); + + math::hl_gru_grad gru_grad; + if (weight_grad) { + gru_grad.gateWeightGrad = + weight_grad->mutable_data(context.GetPlace()); + zero(context.device_context(), weight_grad, static_cast(0.0)); + gru_grad.stateWeightGrad = + weight_grad->data() + 2 * frame_size * frame_size; + } else { + gru_grad.gateWeightGrad = nullptr; + gru_grad.stateWeightGrad = nullptr; + } + + auto batch_starts = batch_hidden_grad.lod()[0]; + size_t num_batch = batch_starts.size() - 1; + for (int n = static_cast(num_batch) - 1; n >= 0; n--) { + int bstart = static_cast(batch_starts[n]); + int bend = static_cast(batch_starts[n + 1]); + int cur_batch_size = bend - bstart; + + Tensor gate_t = batch_gate->Slice(bstart, bend); + gru_value.gateValue = gate_t.data(); + Tensor reset_hidden_prev_t = batch_reset_hidden_prev->Slice(bstart, bend); + gru_value.resetOutputValue = reset_hidden_prev_t.data(); + + Tensor hidden_grad_t = batch_hidden_grad.Slice(bstart, bend); + gru_grad.outputGrad = hidden_grad_t.data(); + Tensor gate_grad_t = batch_gate_grad.Slice(bstart, bend); + gru_grad.gateGrad = gate_grad_t.data(); + Tensor reset_hidden_prev_grad_t = + batch_reset_hidden_prev_grad.Slice(bstart, bend); + gru_grad.resetOutputGrad = reset_hidden_prev_grad_t.data(); + if (n == 0) { + gru_value.prevOutValue = const_cast(h0_data); + if (h0_grad) { + T* h0_grad_data = h0_grad->mutable_data(context.GetPlace()); + zero(context.device_context(), h0_grad, static_cast(0.0)); + gru_grad.prevOutGrad = h0_grad_data; + } else { + gru_grad.prevOutGrad = nullptr; + } + } else { + int bstart_pre = static_cast(batch_starts[n - 1]); + Tensor hidden_prev_t = batch_hidden->Slice(bstart_pre, bstart); + gru_value.prevOutValue = hidden_prev_t.data(); + Tensor hidden_prev_grad_t = batch_hidden_grad.Slice(bstart_pre, bstart); + gru_grad.prevOutGrad = 
hidden_prev_grad_t.data(); + } + + math::GRUUnitGradFunctor::compute( + context.device_context(), gru_value, gru_grad, frame_size, + cur_batch_size, + math::ActiveType(context.Attr("activation")), + math::ActiveType(context.Attr("gate_activation"))); + } + if (input_grad) { + input_grad->mutable_data(context.GetPlace()); + math::Batch2LoDTensorFunctor to_seq; + batch_gate_grad.set_lod(batch_gate->lod()); + to_seq(context.device_context(), batch_gate_grad, *input_grad); + } + if (bias_grad) { + bias_grad->mutable_data(context.GetPlace()); + auto d_b = EigenMatrix::From(*bias_grad); + auto d_g = EigenMatrix::From(batch_gate_grad); + auto place = context.GetEigenDevice(); + d_b.device(place) = d_g.sum(Eigen::array({{0}})); + } + } + + void Compute(const framework::ExecutionContext& context) const override { + BatchCompute(context); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/gru_unit_op.cc b/paddle/operators/gru_unit_op.cc index 8d9723289d9af9ef218a5e056b4b585383e00dac..89c027ff1eea93012dc5ab22b081786efc328e96 100644 --- a/paddle/operators/gru_unit_op.cc +++ b/paddle/operators/gru_unit_op.cc @@ -80,19 +80,21 @@ class GRUUnitOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("HiddenPrev", "(Tensor) Matrix with shape [batch_size, frame_size] for the " "states of previous time step."); - AddInput("Weight", - "(Tensor) Weight matrix with shape [frame_size, frame_size * 3]. " - "The elements continuous in memory can be divided into two parts. " - "The first part are weights of the update gate and reset gate " - "with shape [frame_size, frame_size * 2], and the second part are " - "weights of output candidate with shape [frame_size, frame_size]"); - AddInput("Bias", - "(Tensor) Bias vector with shape [1, frame_size * 3] concating " - "bias of the update gate, reset gate and output candidate.") + AddInput( + "Weight", + "(Tensor) Weight matrix with shape [frame_size, frame_size * 3]. " + "The elements continuous in memory can be divided into two parts. " + "The first part are weights of the update gate and reset gate " + "with shape [frame_size, frame_size * 2], and the second part are " + "weights of output candidate with shape [frame_size, frame_size]."); + AddInput( + "Bias", + "(Tensor) Bias vector with shape [1, frame_size * 3] concatenating " + "bias of the update gate, reset gate and output candidate.") .AsDispensable(); AddOutput("Gate", "(Tensor) Matrix with shape [batch_size, frame_size * 3] for the " - "output of update gate, reset gate and output candidate") + "output of update gate, reset gate and output candidate.") .AsIntermediate(); AddOutput("ResetHiddenPrev", "(Tensor) Matrix with shape [batch_size, frame_size] for the " @@ -112,16 +114,19 @@ class GRUUnitOpMaker : public framework::OpProtoAndCheckerMaker { .SetDefault(sigmoid) .InEnum({identity, sigmoid, tanh, relu}); AddComment(R"DOC( -GRUUnitOp implements part calculations of the GRU unit as following: +GRUUnit Operator. 
-\f[ -update \ gate: u_t = actGate(xu_t + W_u * hidden_prev + bias_u) \\ -reset \ gate: r_t = actGate(xr_t + W_r * hidden_prev + bias_r) \\ -output \ candidate: {h}_t = actNode(xc_t + W_c * dot(r_t, hidden_prev) + bias_c) \\ -output: h_t = dot((1-u_t), {h}_t) + dot(u_t, hidden_prev) -\f] +This operator implements partial calculations of the GRU unit as follows: + +$$ +update \ gate: u_t = actGate(xu_t + W_u * hidden_{prev} + bias_u) \\ +reset \ gate: r_t = actGate(xr_t + W_r * hidden_{prev} + bias_r) \\ +output \ candidate: {h}_t = actNode({xc}_t + W_c * dot(r_t, hidden_{prev}) + bias_c) \\ +output: h_t = dot((1-u_t), {h}_t) + dot(u_t, hidden_{prev}) +$$ The rest of GRU unit can be completed by using FCOp's output as the input of GRUUnitOp. + )DOC"); } }; diff --git a/paddle/operators/huber_loss_op.cc b/paddle/operators/huber_loss_op.cc index 2d9449f5ca50dab8d2a7928c4311ec2d66b47904..3435e74b0afb470fcbd1c0f4e06ad363352cac00 100644 --- a/paddle/operators/huber_loss_op.cc +++ b/paddle/operators/huber_loss_op.cc @@ -59,10 +59,12 @@ class HuberLossOpMaker : public framework::OpProtoAndCheckerMaker { "The shape is same as Input(X) and will be reused in backward.") .AsIntermediate(); AddOutput("Out", - "The output tensor with shape [batch_size, 1] which represents " - "the huber loss."); + "The output tensor with shape [batch_size, 1] " + "which represents the huber loss."); AddAttr("delta", "Hyper parameter in huber loss."); AddComment(R"DOC( +HuberLoss Operator. + Huber loss is a loss function used in robust regression. We define X as the input value and Y as the target value. Huber loss can evaluate the fitness of X to Y. Different from MSE loss, Huber loss is more robust for outliers. The diff --git a/paddle/operators/increment_op.cc b/paddle/operators/increment_op.cc index 139392c691e00b2a94f46801f1cfc2018ce139f5..deb02bf2bf82a22a2b59b1ee16c222a162863144 100644 --- a/paddle/operators/increment_op.cc +++ b/paddle/operators/increment_op.cc @@ -31,7 +31,6 @@ class IncrementOp : public framework::OperatorWithKernel { } }; -template class IncrementOpMaker : public framework::OpProtoAndCheckerMaker { public: IncrementOpMaker(framework::OpProto *proto, @@ -39,14 +38,18 @@ class IncrementOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) The input tensor of increment operator"); AddOutput("Out", "(Tensor) The output tensor of increment operator."); - AddComment(R"DOC(Increment operator + AddAttr("step", + "(float, default 1.0) " + "The step size by which the " + "input tensor will be incremented.") + .SetDefault(1.0); + AddComment(R"DOC( +Increment Operator. 
+ +The equation is: +$$Out = X + step$$ -The equation is: Out = X + step )DOC"); - AddAttr("step", - "The step size by which the " - "input tensor will be incremented.") - .SetDefault(1.0); } }; @@ -69,7 +72,10 @@ class IncrementGradOpMaker : public framework::SingleGradOpDescMaker { namespace ops = paddle::operators; -REGISTER_OPERATOR(increment, ops::IncrementOp, ops::IncrementOpMaker, +REGISTER_OPERATOR(increment, ops::IncrementOp, ops::IncrementOpMaker, ops::IncrementGradOpMaker); -REGISTER_OP_CPU_KERNEL(increment, - ops::IncrementKernel); +REGISTER_OP_CPU_KERNEL( + increment, ops::IncrementKernel, + ops::IncrementKernel, + ops::IncrementKernel, + ops::IncrementKernel); diff --git a/paddle/operators/increment_op.cu b/paddle/operators/increment_op.cu index 659c380d147a36650452bea23b30cbcf1ff516ee..f97a6c468522f033687bd83ae5b1a1bc7d86fa80 100644 --- a/paddle/operators/increment_op.cu +++ b/paddle/operators/increment_op.cu @@ -16,4 +16,7 @@ REGISTER_OP_GPU_KERNEL( increment, - paddle::operators::IncrementKernel); + paddle::operators::IncrementKernel, + paddle::operators::IncrementKernel, + paddle::operators::IncrementKernel, + paddle::operators::IncrementKernel); diff --git a/paddle/operators/increment_op.h b/paddle/operators/increment_op.h index 342e254fc453555c70923efbca02fdfd014af015..3d53256dd1a277d7face8b43860d6672d7a68cfb 100644 --- a/paddle/operators/increment_op.h +++ b/paddle/operators/increment_op.h @@ -19,7 +19,7 @@ namespace paddle { namespace operators { -template +template class IncrementKernel : public framework::OpKernel { public: virtual void Compute(const framework::ExecutionContext& context) const { @@ -27,7 +27,7 @@ class IncrementKernel : public framework::OpKernel { auto* in = context.Input("X"); tensor->mutable_data(in->place()); - auto step = static_cast(context.Attr("step")); + auto step = static_cast(context.Attr("step")); auto eigen_out = framework::EigenVector::Flatten(*tensor); auto eigen_in = framework::EigenVector::Flatten(*in); diff --git a/paddle/operators/l1_norm_op.cc b/paddle/operators/l1_norm_op.cc index 1d111696cf43d232413a8dec7ffb057cb1913c7f..02ebf022968e95d0b20598d3c935fb51177c8841 100644 --- a/paddle/operators/l1_norm_op.cc +++ b/paddle/operators/l1_norm_op.cc @@ -57,7 +57,7 @@ L1 Norm Operator. Computes the L1 norm of a tensor. -Out = sum (abs(X)) +$$Out = \sum{|X|}$$ )DOC"); } diff --git a/paddle/operators/linear_chain_crf_op.cc b/paddle/operators/linear_chain_crf_op.cc index 605dbba5af1bb8b0d718833be6af45fdaeac70ac..bcb48e13bd948b4e91ce8cbd7231a9619fac8d18 100644 --- a/paddle/operators/linear_chain_crf_op.cc +++ b/paddle/operators/linear_chain_crf_op.cc @@ -22,52 +22,55 @@ class LinearChainCRFOpMaker : public framework::OpProtoAndCheckerMaker { LinearChainCRFOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput( - "Emission", - "(LoDTensor, default: LoDTensor). " - "The unscaled emission weight matrix for the linear chain CRF. " - "This input is a LoDTensor with shape [N x D] where N is the size of " - "the mini-batch and D is the total tag number."); - AddInput( - "Transition", - "(Tensor, default: Tensor). A Tensor with shape [(D + 2) x D]. " - "The learnable parameter for the linear_chain_crf operator. " - "See more details in the operator's comments."); - AddInput( - "Label", - "(LoDTensor, default: LoDTensor). 
The ground truth which is a 2-D " - "LoDTensor with shape [N x 1], where N is the total element number in " - "a mini-batch."); + AddInput("Emission", + "(LoDTensor, default LoDTensor) " + "A 2-D LoDTensor with shape [N x D], where N is the size of the " + "mini-batch and D is the total tag number. The unscaled emission " + "weight matrix for the linear chain CRF. "); + AddInput("Transition", + "(Tensor, default Tensor) A 2-D Tensor with shape " + "[(D + 2) x D]. The learnable parameter for the linear_chain_crf " + "operator. See more details in the operator's comments."); + AddInput("Label", + "(LoDTensor, default LoDTensor) A LoDTensor with shape " + "[N x 1], where N is the total element number in a mini-batch. " + "The ground truth."); AddOutput( "Alpha", - "Tensor, default: Tensor. The forward vectors for the entire " - "batch. A two dimensional tensor with shape [N x D], " - "denoted as \f$\alpha\f$. \f$\alpha$\f is a memo table used to " - "calculate the normalization factor in CRF. \f$\alpha[k, v]$\f stores " - "the unnormalized probabilites of all possible unfinished sequences of " - "tags that end at position \f$k$\f with tag \f$v$\f. For each \f$k$\f, " + "(Tensor, default Tensor) A 2-D Tensor with shape [N x D]. " + "The forward vectors for the entire batch. Denote it as \f$\alpha\f$. " + "\f$\alpha$\f is a memo table used to calculate the normalization " + "factor in CRF. \f$\alpha[k, v]$\f stores the unnormalized " + "probabilites of all possible unfinished sequences of tags that end at " + "position \f$k$\f with tag \f$v$\f. For each \f$k$\f, " "\f$\alpha[k, v]$\f is a vector of length \f$D$\f with a component for " "each tag value \f$v$\f. This vector is called a forward vecotr and " "will also be used in backward computations.") .AsIntermediate(); - AddOutput("EmissionExps", - "The exponentials of Input(Emission). This is an intermediate " - "computational result in forward computation, and will be reused " - "in backward computation.") + AddOutput( + "EmissionExps", + "(Tensor, default Tensor) A 2-D Tensor with shape [N x D]. " + "The exponentials of Input(Emission). This is an intermediate " + "computational result in forward computation, and will be reused in " + "backward computation.") .AsIntermediate(); - AddOutput("TransitionExps", - "The exponentials of Input(Transition). This is an intermediate " - "computational result in forward computation, and will be reused " - "in backward computation.") + AddOutput( + "TransitionExps", + "(Tensor, default Tensor) A 2-D Tensor with shape " + "[(D + 2) x D]. The exponentials of Input(Transition). This is an " + "intermediate computational result in forward computation, and " + "will be reused in backward computation.") .AsIntermediate(); AddOutput( "LogLikelihood", - "(Tensor, default: Tensor). The logarithm of the conditional " + "(Tensor, default Tensor) The logarithm of the conditional " "likelihood of each training sample in a mini-batch. This is a 2-D " "tensor with shape [S x 1], where S is the sequence number in a " "mini-batch. Note: S is equal to the sequence number in a mini-batch. " "The output is no longer a LoDTensor."); AddComment(R"DOC( +LinearChainCRF Operator. + Conditional Random Field defines an undirected probabilistic graph with nodes denoting random variables and edges denoting dependencies between these variables. CRF learns the conditional probability \f$P(Y|X)\f$, where @@ -81,29 +84,28 @@ and output must be linear sequences. 
Thus, the graph of such a CRF is a simple chain or a line, which results in the linear chain CRF. This operator implements the Forward-Backward algorithm for the linear chain -CRF. Please see http://www.cs.columbia.edu/~mcollins/fb.pdf and -http://cseweb.ucsd.edu/~elkan/250Bwinter2012/loglinearCRFs.pdf for reference. +CRF. Please refer to http://www.cs.columbia.edu/~mcollins/fb.pdf and +http://cseweb.ucsd.edu/~elkan/250Bwinter2012/loglinearCRFs.pdf for details. Equation: - -- Denote Input(Emission) to this operator as \f$x\f$ here. -- The first D values of Input(Transition) to this operator are for starting +1. Denote Input(Emission) to this operator as \f$x\f$ here. +2. The first D values of Input(Transition) to this operator are for starting weights, denoted as \f$a\f$ here. -- The next D values of Input(Transition) of this operator are for ending +3. The next D values of Input(Transition) of this operator are for ending weights, denoted as \f$b\f$ here. -- The remaning values of Input(Transition) are for transition weights, +4. The remaning values of Input(Transition) are for transition weights, denoted as \f$w\f$ here. -- Denote Input(Label) as \f$s\f$ here. +5. Denote Input(Label) as \f$s\f$ here. The probability of a sequence \f$s\f$ of length \f$L\f$ is defined as: -\f$P(s) = (1/Z) exp(a_{s_1} + b_{s_L} +\f$P(s) = (1/Z) \exp(a_{s_1} + b_{s_L} + \sum_{l=1}^L x_{s_l} + \sum_{l=2}^L w_{s_{l-1},s_l})\f$ where \f$Z\f$ is a normalization value so that the sum of \f$P(s)\f$ over all possible sequences is \f$1\f$, and \f$x\f$ is the emission feature weight to the linear chain CRF. -Finaly, the linear chain CRF operator outputs the logarithm of the conditional +Finally, the linear chain CRF operator outputs the logarithm of the conditional likelihood of each training sample in a mini-batch. NOTE: @@ -179,8 +181,8 @@ class LinearChainCRFOp : public framework::OperatorWithKernel { } protected: - // Explicitly set that the data type of output of the linear_chain_crf - // operator is determined by its input "Emission". + // Explicitly set that the data type of computation kernel of linear_chain_crf + // is determined by its input "Emission". framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { return framework::ToDataType(ctx.Input("Emission")->type()); diff --git a/paddle/operators/linear_chain_crf_op.h b/paddle/operators/linear_chain_crf_op.h index 56fb0c9102bee6e2fefd1180ef20237891573f70..ddf73981751798c72cef08f2dd5c87580b45aec3 100644 --- a/paddle/operators/linear_chain_crf_op.h +++ b/paddle/operators/linear_chain_crf_op.h @@ -134,7 +134,7 @@ class LinearChainCRFOpKernel : public framework::OpKernel { Tensor emission_row_max; emission_row_max.mutable_data( - framework::make_ddim({static_cast(batch_size), 1}), + framework::make_ddim({static_cast(batch_size), 1}), platform::CPUPlace()); auto place = ctx.GetEigenDevice(); @@ -273,7 +273,7 @@ class LinearChainCRFOpKernel : public framework::OpKernel { const int* lbl = label.data(); PADDLE_ENFORCE_LT( - *std::max_element(lbl, lbl + seq_length), tag_num, + static_cast(*std::max_element(lbl, lbl + seq_length)), tag_num, "An invalid tag label that execesses the largest tag number."); // Calculate the nominator part, which depends on the label sequence. 
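A minimal standalone C++ sketch of the unnormalized log-score defined by the sequence-probability equation above (illustrative only, not part of the patch; all names are hypothetical, and the real operator additionally computes the normalizer Z with the forward algorithm via the Alpha table):

#include <vector>

// Computes a_{s_1} + b_{s_L} + \sum_{l=1}^{L} x_{s_l} + \sum_{l=2}^{L} w_{s_{l-1}, s_l}
// for one tag sequence s. x is the L x D emission matrix; a and b are the first
// and second rows of Transition (the start and end weights); w is the remaining
// D x D transition block, following the numbered description above.
double SequenceScore(const std::vector<std::vector<double>>& x,
                     const std::vector<double>& a,
                     const std::vector<double>& b,
                     const std::vector<std::vector<double>>& w,
                     const std::vector<int>& s) {
  const int L = static_cast<int>(s.size());
  double score = a[s[0]] + b[s[L - 1]];
  for (int l = 0; l < L; ++l) score += x[l][s[l]];         // emission terms
  for (int l = 1; l < L; ++l) score += w[s[l - 1]][s[l]];  // transition terms
  return score;
}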
diff --git a/paddle/operators/load_op.cc b/paddle/operators/load_op.cc index 2d4eff0c35af520dd27b9eb197937026a8fbdff9..b71a33a6b1ce80b545e6d7a4020dafc941dc55d2 100644 --- a/paddle/operators/load_op.cc +++ b/paddle/operators/load_op.cc @@ -115,14 +115,18 @@ class LoadOpProtoMaker : public framework::OpProtoAndCheckerMaker { LoadOpProtoMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddOutput("Out", "The tensor need to be loaded"); - AddComment(R"DOC(Load Operator -Load operator will load a tensor variable from disk file. -)DOC"); + AddOutput("Out", "(Tensor) The tensor need to be loaded"); AddAttr("file_path", + "(string) " "Variable will be loaded from \"file_path\".") .AddCustomChecker( [](const std::string &path) { return !path.empty(); }); + AddComment(R"DOC( +Load Operator. + +Load operator will load a tensor variable from disk file. + +)DOC"); } }; } // namespace operators diff --git a/paddle/operators/lod_rank_table_op.cc b/paddle/operators/lod_rank_table_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..be198951c241dc5e5587c8a2b8d94f67173d2b2a --- /dev/null +++ b/paddle/operators/lod_rank_table_op.cc @@ -0,0 +1,80 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ +#include "paddle/framework/lod_rank_table.h" +#include "paddle/framework/op_registry.h" +namespace paddle { +namespace operators { + +class LoDRankTableOp : public framework::OperatorBase { + public: + LoDRankTableOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + void Run(const framework::Scope &scope, + const platform::DeviceContext &dev_ctx) const override { + auto x = scope.FindVar(Input("X"))->Get(); + auto *out = + scope.FindVar(Output("Out"))->GetMutable(); + out->Reset(x.lod(), static_cast(Attr("level"))); + } +}; + +class LoDRankTableOpProtoMaker : public framework::OpProtoAndCheckerMaker { + public: + LoDRankTableOpProtoMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", + "(LoDTensor) input lod tensor, must contain lod information."); + AddOutput("Out", "(LoDRankTable) The rank table of specific level."); + AddAttr("level", "(int) the specific lod level to rank.") + .SetDefault(0) + .EqualGreaterThan(0); + AddComment(R"DOC(Create LoDRanTable by LoDTensor + +LoD Rank Table stores the `level` of `lod` which is ordered by sequence +length in descending order. It is useful when implement dynamic RNN and is +shared by dynamic RNN memory, dynamic RNN slice input and dynamic RNN slice +output operators. 
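(Illustrative example, not part of the patch: if the input LoDTensor's level-0 LoD is [0, 3, 5, 9], the three sequences have lengths 3, 2 and 4, so the rank table built at level 0 conceptually orders them as (index 2, length 4), (index 0, length 3), (index 1, length 2); the exact storage layout is an implementation detail of LoDRankTable.)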
+)DOC"); + } +}; + +class LoDRankTableInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *context) const override { + PADDLE_ENFORCE(context->HasInput("X"), "LoDRankTable must has input X"); + } +}; + +class LoDRankTableInferVarType : public framework::VarTypeInference { + public: + void operator()(const framework::OpDescBind &op_desc, + framework::BlockDescBind *block) const override { + for (auto &o : op_desc.Output("Out")) { + block->Var(o)->SetType(framework::VarDesc::LOD_RANK_TABLE); + } + } +}; + +} // namespace operators +} // namespace paddle + +REGISTER_OPERATOR(lod_rank_table, paddle::operators::LoDRankTableOp, + paddle::operators::LoDRankTableOpProtoMaker, + paddle::operators::LoDRankTableInferShape, + paddle::operators::LoDRankTableInferVarType, + paddle::framework::EmptyGradOpMaker); diff --git a/paddle/operators/lookup_table_op.cc b/paddle/operators/lookup_table_op.cc index 0b361e20f2037b9b75bc8670488dff1c50fb689c..2163c8ce4e5d75d5934c08f59a47bad9553f0c8b 100644 --- a/paddle/operators/lookup_table_op.cc +++ b/paddle/operators/lookup_table_op.cc @@ -53,21 +53,27 @@ class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker { framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("W", - "An input represents embedding tensors," - " which is a learnable parameter."); + "An input represents embedding tensors, " + "which is a learnable parameter."); AddInput("Ids", - "An input with type int32 or int64" - "contains the ids to be looked up in W." - "Ids must be a column vector with rank = 2." - "The 2nd dimension size must be 1"); - AddOutput("Out", "The lookup results, which have the same type with W."); - AddAttr("is_sparse", "Sparse update").SetDefault(false); + "An input with type int32 or int64 " + "contains the ids to be looked up in W. " + "Ids must be a column vector with rank = 2. " + "The 2nd dimension size must be 1."); + AddOutput("Out", "The lookup results, which have the same type as W."); + AddAttr("is_sparse", + "(boolean, default false) " + "Sparse update") + .SetDefault(false); AddComment(R"DOC( +Lookup Table Operator. + This operator is used to perform lookups on the parameter W, then concatenated into a dense tensor. -The input `Ids` can carry the LoD (Level of Details) information, -or not. And the output only shares the LoD with input `Ids`. +The input Ids can carry the LoD (Level of Details) information, +or not. And the output only shares the LoD information with input Ids. 
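(Illustrative example, not part of the patch: if W is a [V x D] embedding matrix and Ids is the rank-2 column vector [[3], [0], [3]], then Out is a [3 x D] tensor whose rows are W[3], W[0] and W[3]; since this Ids carries no LoD, Out carries none either.)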
+ )DOC"); } }; diff --git a/paddle/operators/lookup_table_op.cu b/paddle/operators/lookup_table_op.cu index c7ba1720662fe80c945f2b4aa19745e408d40948..10d66e5ff40987baf7a0c47c1c07dc03f8aa6a3f 100644 --- a/paddle/operators/lookup_table_op.cu +++ b/paddle/operators/lookup_table_op.cu @@ -74,10 +74,9 @@ class LookupTableCUDAKernel : public framework::OpKernel { dim3 threads(128, 8); dim3 grids(8, 1); - LookupTable<<< - grids, threads, 0, reinterpret_cast( - context.device_context()) - .stream()>>>(output, table, ids, N, K, D); + LookupTable<<>>( + output, table, ids, N, K, D); } }; @@ -95,9 +94,7 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { auto* ids_data = ids->data(); auto ids_dim = ids->dims(); - auto stream = reinterpret_cast( - context.device_context()) - .stream(); + auto stream = context.cuda_device_context().stream(); // copy GPU memory to CPU pinned memory framework::Vector new_rows; new_rows.resize(ids_dim[0]); @@ -136,11 +133,10 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { dim3 threads(128, 8); dim3 grids(8, 1); - LookupTableGrad<<( - context.device_context()) - .stream()>>>(d_table, d_output, ids, N, K, D); + LookupTableGrad< + T, 128, 8, + 8><<>>( + d_table, d_output, ids, N, K, D); } } }; diff --git a/paddle/operators/lrn_op.cc b/paddle/operators/lrn_op.cc index 89ea6bfdbd9b78dd0a81fd5ba465d09549162eb5..00392b7967d020a7951a16a7850a2f08735baeb8 100644 --- a/paddle/operators/lrn_op.cc +++ b/paddle/operators/lrn_op.cc @@ -45,72 +45,70 @@ class LRNOpMaker : public framework::OpProtoAndCheckerMaker { public: LRNOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", R"DOC( - (Tensor) The input of LRN operator. It must be a 4D tenor with NCHW format. - )DOC"); - + AddInput("X", + "(Tensor) The input of LRN operator. " + "It must be a 4D tenor with NCHW format."); AddOutput("Out", "(Tensor) The output of LRN operator, which is also the 4D " "tensor with NCHW format."); - AddOutput("MidOut", R"Doc( -(Tensor)Middle result of lrn op.It's computed in forward process -and also used in backward process. - )Doc"); - - AddAttr("n", R"DOC( -(int, default 5)n is “adjacent” kernel maps at the same spatial position. - )DOC") + AddOutput("MidOut", + "(Tensor) Middle result of LRN operator. It's computed in " + "forward process and also used in backward process."); + + AddAttr("n", + "(int default 5) " + "n is the \"adjacent\" kernel that maps " + "at the same spatial position.") .SetDefault(5) .GreaterThan(0); - AddAttr("k", R"DOC( -(float, default 2.0)k is the bias. - )DOC") + AddAttr("k", + "(float, default 2.0) " + "k is the bias.") .SetDefault(2.0) .GreaterThan(0.0); - AddAttr("alpha", R"DOC( -(float, default 0.0001)alpha is the scale number. - )DOC") + AddAttr("alpha", + "(float, default 0.0001) " + "alpha is the scale number.") .SetDefault(0.0001) .GreaterThan(0.0); - AddAttr("beta", R"DOC( -(float, default 0.75)beta is the power number. - )DOC") + AddAttr("beta", + "(float, default 0.75) " + "beta is the power number.") .SetDefault(0.75) .GreaterThan(0.0); AddComment(R"DOC( - Local Response Normalization. - - This Function comes from the paper - "ImageNet Classification with Deep Convolutional Neural Networks". +Local Response Normalization Operator. - The original formula is: +This operator comes from the paper +"ImageNet Classification with Deep Convolutional Neural Networks". 
- Input(i, x, y) - Output(i, x, y) = ---------------------------------------------- - -- upper - (k + alpha * > (Input(j, x, y))^2) ^ (beta) - -- j = lower +The original formula is: - upper is `min(C, c + n/2)` - lower if `max(0, c - n/2)` +$$ +Output(i, x, y) = Input(i, x, y) / \left( +k + \alpha \sum\limits^{\min(C, c + n/2)}_{j = \max(0, c - n/2)} +(Input(j, x, y))^2 +\right)^{\beta} +$$ - Function implementation: +Function implementation: - inputs and outpus is NCHW format, while input.shape.ndims() is equal 4. - And the meaning of each dimension(0-3) is respectively batch size, - feature maps, rows and columns. +Inputs and outpus are in NCHW format, while input.shape.ndims() equals 4. +And dimensions 0 ~ 3 represent batch size, feature maps, rows, +and columns, respectively. - Input and Output in the above formula is for each map(i) of one image, and - Input(i, x, y), Output(i, x, y) represents an element in an image. +Input and Output in the formula above is for each map(i) of one image, and +Input(i, x, y), Output(i, x, y) represents an element in an image. - C is the number of feature maps of one image, and n is a hyper-parameters - is configured when Function is initialized. The sum in the denominator - is the sum of the same position in the neighboring maps. - )DOC"); +C is the number of feature maps of one image. n is a hyper-parameter +configured when operator is initialized. The sum in the denominator +is the sum of the same positions in the neighboring maps. + +)DOC"); } }; diff --git a/paddle/operators/lstm_op.cc b/paddle/operators/lstm_op.cc index 94342d940704d850a2a45c281a3d88de5a132753..fdf52cf424d1b2727982e6e76f0f824915d84968 100644 --- a/paddle/operators/lstm_op.cc +++ b/paddle/operators/lstm_op.cc @@ -103,7 +103,7 @@ class LSTMOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("H0", "(Tensor, optional) the initial hidden state is an optional " "input. This is a tensor with shape (N x D), where N is the " - "batch size, D is the hidden size.") + "batch size and D is the hidden size.") .AsDispensable(); AddInput("C0", "(Tensor, optional) the initial cell state is an optional " @@ -134,85 +134,82 @@ class LSTMOpMaker : public framework::OpProtoAndCheckerMaker { AddOutput("BatchGate", "(LoDTensor) This LoDTensor contains input gate, forget gate " "and output gate after the nonlinear computation. This " - "LoDTensor has the same shape with the reorganized input, which " + "LoDTensor has the same shape as the reorganized input, which " "is also be called batch input. The LoD size is 2. 
The first " "LoD is the batch offsets and the second LoD contains the " "indexes, which denote the position of reorganized sequence " "in the raw input.") .AsIntermediate(); AddOutput("BatchCellPreAct", - "(LoDTensor) This LoDTensor is got in the forward and used " + "(LoDTensor) This LoDTensor is obtained in the forward and used " "in the backward.") .AsIntermediate(); AddAttr("usePeepholes", - "(bool, defalut: True) " + "(bool, default True) " "whether to enable diagonal/peephole connections.") .SetDefault(true); AddAttr("isReverse", - "(bool, defalut: False) " + "(bool, default False) " "whether to compute reversed LSTM.") .SetDefault(false); AddAttr( "gateActivation", - "(string, default: sigmoid)" + "(string, default sigmoid)" "The activation for input gate, forget gate and output " "gate, `sigmoid` by default.") .SetDefault("sigmoid"); AddAttr("cellActivation", - "(string, default: tanh)" + "(string, default tanh)" "The activation for cell output, `tanh` by defalut.") .SetDefault("tanh"); AddAttr("candidateActivation", - "(string, default: tanh)" + "(string, default tanh)" "The activation for candidate hidden state, " "`tanh` by default.") .SetDefault("tanh"); - AddComment(R"DOC(Long-Short Term Memory (LSTM) Operator + AddComment(R"DOC( +Long-Short Term Memory (LSTM) Operator. -The defalut implementation is diagonal/peephole connection [1], the formula is -as follows +The defalut implementation is diagonal/peephole connection +(https://arxiv.org/pdf/1402.1128.pdf), the formula is as follows: - i_t = \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + W_{ic}c_{t-1} + b_i) +$$ +i_t = \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + W_{ic}c_{t-1} + b_i) \\ - f_t = \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + W_{fc}c_{t-1} + b_f) +f_t = \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + W_{fc}c_{t-1} + b_f) \\ - \tilde{c_t} = act_g(W_{cx}x_t + W_{ch}h_{t-1} + b_c) +\tilde{c_t} = act_g(W_{cx}x_t + W_{ch}h_{t-1} + b_c) \\ - o_t = \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + W_{oc}c_t + b_o) +o_t = \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + W_{oc}c_t + b_o) \\ - c_t = f_t ⊙ c_{t-1} + i_t ⊙ \tilde{c_t} +c_t = f_t \odot c_{t-1} + i_t \odot \tilde{c_t} \\ - h_t = o_t ⊙ act_h(c_t) +h_t = o_t \odot act_h(c_t) +$$ where the W terms denote weight matrices (e.g. \f$W_{xi}\f$ is the matrix of weights from the input gate to the input), \f$W_{ic}, W_{fc}, W_{oc}\f$ -are diagonal weight matrices for peephole connections. In our implenmention, -We use vectors to reprenset these diagonal weight matrices. The b terms +are diagonal weight matrices for peephole connections. In our implementation, +we use vectors to reprenset these diagonal weight matrices. The b terms denote bias vectors (\f$b_i\f$ is the input gate bias vector), \f$\sigma\f$ -is the non-line actications, such as logistic sigmoid function, and -\f$i, f, o\f$ and \f$c\f$ are respectively the input gate, forget gate, -output gate and cell activation vectors, all of which are the same size as +is the non-line activations, such as logistic sigmoid function, and +\f$i, f, o\f$ and \f$c\f$ are the input gate, forget gate, output gate, +and cell activation vectors, respectively, all of which have the same size as the cell output activation vector \f$h\f$. -The ⊙ is the element-wise product of the vectors, \f$act_g\f$ and \f$act_h\f$ -are the cell input and cell output activation functions, `tanh` is usually +The \f$\odot\f$ is the element-wise product of the vectors. \f$act_g\f$ and \f$act_h\f$ +are the cell input and cell output activation functions and `tanh` is usually used for them. 
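(Illustrative sketch, not part of the patch: because the peephole matrices \f$W_{ic}, W_{fc}, W_{oc}\f$ are diagonal and stored as vectors, a term such as \f$W_{ic}c_{t-1}\f$ reduces to an element-wise product; the variable names below are hypothetical.)

// Peephole contribution to the pre-activation of the input gate, with the
// diagonal W_{ic} kept as a length-D vector w_ic (D = hidden size):
for (int d = 0; d < D; ++d) {
  i_gate_preact[d] += w_ic[d] * c_prev[d];  // no matrix-vector product needed
}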
\f$\tilde{c_t}\f$ is also called candidate hidden state, which is computed based on the current input and the previous hidden state. -Set `usePeepholes` False to disable peephole connection [2]. The formula +Set usePeepholes False to disable peephole connection +(http://www.bioinf.jku.at/publications/older/2604.pdf). The formula is omitted here. -@note These \f$W_{xi}x_{t}, W_{xf}x_{t}, W_{xc}x_{t}, W_{xo}x_{t}\f$ -operations on the input x_{t} were NOT included in this operator. +Note that these \f$W_{xi}x_{t}, W_{xf}x_{t}, W_{xc}x_{t}, W_{xo}x_{t}\f$ +operations on the input \f$x_{t}\f$ are NOT included in this operator. Users can choose to use fully-connect operator before LSTM operator. -[1] Hasim Sak, Andrew Senior, and Francoise Beaufays. Long short-term memory -recurrent neural network architectures for large scale acoustic modeling. -INTERSPEECH, 2014. - -[2] S. Hochreiter and J. Schmidhuber. Long Short-Term Memory. -Neural Computation, 9(8):1735-1780, 1997. - )DOC"); } }; diff --git a/paddle/operators/lstm_unit_op.cc b/paddle/operators/lstm_unit_op.cc index 5d63017208a55ec4bcc2e8d66f1ca2e1b84d4593..f4519ec16f3f694cf49941f8d23c4106f6f1ddc3 100644 --- a/paddle/operators/lstm_unit_op.cc +++ b/paddle/operators/lstm_unit_op.cc @@ -57,17 +57,22 @@ class LstmUnitOpMaker : public framework::OpProtoAndCheckerMaker { "The cell state tensor of last time-step in the Lstm Unit operator."); AddOutput("C", "The cell tensor of Lstm Unit operator."); AddOutput("H", "The hidden state tensor of Lstm Unit operator."); - - AddComment(R"DOC(Lstm-Unit Operator + AddAttr("forget_bias", + "(float, default 0.0) " + "The forget bias of Lstm Unit.") + .SetDefault(0.0); + AddComment(R"DOC( +Lstm Unit Operator Equation: - i, f, o, j = split(X) - C = C_prev * sigm(f + forget_bias) + sigm(i) * tanh(j) - H = C * sigm(o) + +$$ +i, f, o, j = split(X) \\ +C = C_{prev} * sigm(f + forget\_bias) + sigm(i) * tanh(j) \\ +H = C * sigm(o) +$$ )DOC"); - AddAttr("forget_bias", "The forget bias of Lstm Unit.") - .SetDefault(0.0); } }; diff --git a/paddle/operators/margin_rank_loss_op.cc b/paddle/operators/margin_rank_loss_op.cc index 638a99addc2119e8f44648cc54b97bd8a892d2bc..d7e8a0ea7632650203106b01531d724cf0b8e085 100644 --- a/paddle/operators/margin_rank_loss_op.cc +++ b/paddle/operators/margin_rank_loss_op.cc @@ -55,8 +55,6 @@ class MarginRankLossOpMaker : public framework::OpProtoAndCheckerMaker { "(2-D tensor with shape [batch_size x 1]) " "The label indicating X1 ranked higher than X2 or not, " "can only be +1 or -1."); - AddAttr("margin", "(scalar, default 0) Margin for MarginRankLossOp.") - .SetDefault(static_cast(0)); AddOutput("Activated", "(2-D tensor with shape [batch_size x 1]) Intermediate tensor " "to indicate whether each element of Output(Out) is activated.") @@ -64,23 +62,26 @@ class MarginRankLossOpMaker : public framework::OpProtoAndCheckerMaker { AddOutput("Out", "(2-D tensor with shape [batch_size x 1]) " "The output loss of MarginRankLoss operator."); + AddAttr("margin", "(scalar, default 0) Margin for MarginRankLossOp.") + .SetDefault(static_cast(0)); AddComment(R"DOC( +MarginRankLoss Operator. -MarginRankLoss operator measures the loss given a pair of training sample +This operator measures the loss given a pair of training sample {`X1`, `X2`} and the `Label` with attribute `margin`, where `Label = +1` -indicating X1 is ranked higher than `X2`, otherwise `Label = -1`. The loss -turns out +indicating X1 is ranked higher than `X2` and `Label = -1` otherwise. 
The loss +is calculated as: -loss(X1, X2, Label) = max(0, -Label * (X1 - X2) + margin). +$loss(X1, X2, Label) = \max(0, -Label * (X1 - X2) + margin)$ -The attribute `margin` involved here helps make the predictions more robust. +The attribute `margin` here helps make the predictions more robust. Denote the item ranked higher as the positive sample, otherwise the negative sample. If the score of the two samples satisfies -positive sample - negative sample < margin, +$positive sample - negative sample < margin$ -the pair of samples will contribute to the final loss, which will backpropogate -and train the ranking model to enlarge the difference of the two score. +the pair of samples will contribute to the final loss, which will backpropagate +and train the ranking model to enlarge the difference between the two scores. For batch input with size `batch_size`, `X1`, `X2` and `Label` all have the same shape [batch_size x 1]. diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index 40cc177d0f19c2359626ef972e787a0b1c5580f8..90bc9f4f922e7aa09523bad8ffb3ef477dd89857 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -8,20 +8,24 @@ if(WITH_GPU) nv_library(softmax SRCS softmax.cc softmax.cu DEPS operator) nv_library(cross_entropy SRCS cross_entropy.cc cross_entropy.cu DEPS operator) nv_library(pooling SRCS pooling.cc pooling.cu DEPS device_context) + nv_library(sequence_pooling SRCS sequence_pooling.cc sequence_pooling.cu DEPS device_context math_function) nv_library(vol2col SRCS vol2col.cc vol2col.cu DEPS device_context) nv_library(context_project SRCS context_project.cc context_project.cu DEPS device_context) nv_library(sequence2batch SRCS sequence2batch.cc sequence2batch.cu DEPS device_context) nv_library(lstm_compute SRCS lstm_compute.cc lstm_compute.cu DEPS device_context activation_functions) + nv_library(gru_compute SRCS gru_compute.cc gru_compute.cu DEPS device_context activation_functions) else() cc_library(math_function SRCS math_function.cc im2col.cc DEPS cblas device_context operator) cc_library(selected_rows_functor SRCS selected_rows_functor.cc DEPS selected_rows math_function) cc_library(softmax SRCS softmax.cc DEPS operator) cc_library(cross_entropy SRCS cross_entropy.cc DEPS operator) cc_library(pooling SRCS pooling.cc DEPS device_context) + cc_library(sequence_pooling SRCS sequence_pooling.cc DEPS device_context math_function) cc_library(vol2col SRCS vol2col.cc DEPS device_context) cc_library(context_project SRCS context_project.cc DEPS device_context) cc_library(sequence2batch SRCS sequence2batch.cc DEPS device_context) cc_library(lstm_compute SRCS lstm_compute.cc DEPS device_context activation_functions) + cc_library(gru_compute SRCS gru_compute.cc DEPS device_context activation_functions math_function) endif() cc_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) diff --git a/paddle/operators/math/detail/CMakeLists.txt b/paddle/operators/math/detail/CMakeLists.txt index 49cf228de2204cb4888cf645a0cb68ed04cc3371..0df1c060f9042067b655d987560a278f9fc46a5b 100644 --- a/paddle/operators/math/detail/CMakeLists.txt +++ b/paddle/operators/math/detail/CMakeLists.txt @@ -1,5 +1 @@ -if(WITH_AVX) - cc_library(activation_functions SRCS hl_cpu_functions.cc hl_avx_functions.cc) -else() - cc_library(activation_functions SRCS hl_cpu_functions.cc) -endif() +cc_library(activation_functions SRCS avx_functions.cc) diff --git a/paddle/operators/math/detail/activation_functions.h 
b/paddle/operators/math/detail/activation_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..a20c35d1d9dc4a3a6fae92023fd1aae787a716ec --- /dev/null +++ b/paddle/operators/math/detail/activation_functions.h @@ -0,0 +1,170 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include +#include "paddle/platform/hostdevice.h" + +#ifdef __AVX__ +#include +#endif + +namespace paddle { +namespace operators { +namespace math { +namespace detail { + +#define SIGMOID_THRESHOLD_MIN -40.0 +#define SIGMOID_THRESHOLD_MAX 13.0 +#define EXP_MAX_INPUT 40.0 + +namespace forward { + +template +DEVICE T Identity(const T a) { + return a; +} + +template +DEVICE T Relu(const T a) { + return a > static_cast(0.0) ? a : static_cast(0.0); +} + +template +DEVICE T Sigmoid(const T a) { + const T min = SIGMOID_THRESHOLD_MIN; + const T max = SIGMOID_THRESHOLD_MAX; + T tmp = (a < min) ? min : ((a > max) ? max : a); + return static_cast(1.0) / (static_cast(1.0) + exp(-tmp)); +} + +template +DEVICE T Tanh(const T a) { + T tmp = -2.0 * a; + tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp; + return (2.0 / (1.0 + exp(tmp))) - 1.0; +} + +} // namespace forward + +namespace backward { + +template +DEVICE T Identity(const T a, const T b) { + return a; +} + +template +DEVICE T Relu(const T a, const T b) { + return a * (b > 0.0 ? 
1.0 : 0.0); +} + +template +DEVICE T Sigmoid(const T a, const T b) { + return a * b * (1.0 - b); +} + +template +DEVICE T Tanh(const T a, const T b) { + return a * (1.0 - b * b); +} + +} // namespace backward + +template +struct Active { + typedef T (*Act)(T); + typedef T (*ActGrad)(T, T); +}; + +static DEVICE Active::Act kActFloat[] = { + &forward::Sigmoid, &forward::Relu, &forward::Tanh, + &forward::Identity}; + +static DEVICE Active::ActGrad kActGradFloat[] = { + &backward::Sigmoid, &backward::Relu, &backward::Tanh, + &backward::Identity}; + +static DEVICE Active::Act kActDouble[] = { + &forward::Sigmoid, &forward::Relu, &forward::Tanh, + &forward::Identity}; + +static DEVICE Active::ActGrad kActGradDouble[] = { + &backward::Sigmoid, &backward::Relu, + &backward::Tanh, &backward::Identity}; + +namespace forward { +inline DEVICE float activation(float a, int index) { + return kActFloat[index](a); +} + +inline DEVICE double activation(double a, int index) { + return kActDouble[index](a); +} + +} // namespace forward + +namespace backward { +inline DEVICE float activation(float a, float b, int index) { + return kActGradFloat[index](a, b); +} + +inline DEVICE double activation(double a, double b, int index) { + return kActGradDouble[index](a, b); +} +} // namespace backward + +#ifdef __AVX__ +namespace forward { +namespace avx { +__m256 Relu(const __m256 a); +__m256 Sigmoid(const __m256 a); +__m256 Tanh(const __m256 a); +__m256 Identity(const __m256 a); +} // namespace avx +} // namespace forward + +namespace backward { +namespace avx { +__m256 Relu(const __m256 a, const __m256 b); +__m256 Sigmoid(const __m256 a, const __m256 b); +__m256 Tanh(const __m256 a, const __m256 b); +__m256 Identity(const __m256 a, const __m256 b); +} // namespace avx +} // namespace backward + +static Active<__m256>::Act kActAvx[] = { + &forward::avx::Sigmoid, &forward::avx::Relu, &forward::avx::Tanh, + &forward::avx::Identity}; + +static Active<__m256>::ActGrad kActGradAvx[] = { + &backward::avx::Sigmoid, &backward::avx::Relu, &backward::avx::Tanh, + &backward::avx::Identity}; + +namespace forward { +inline __m256 activation(__m256 a, int index) { return kActAvx[index](a); } +} // namespace forward + +namespace backward { +inline __m256 activation(__m256 a, __m256 b, int index) { + return kActGradAvx[index](a, b); +} +} // namespace backward + +#endif + +} // namespace detail +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/detail/hl_avx_functions.cc b/paddle/operators/math/detail/avx_functions.cc similarity index 68% rename from paddle/operators/math/detail/hl_avx_functions.cc rename to paddle/operators/math/detail/avx_functions.cc index 415bac5d93ee00244d072b0998c6941b14d4f8d8..921364788cd23e265fa0ca027bf1af3f81604489 100644 --- a/paddle/operators/math/detail/hl_avx_functions.cc +++ b/paddle/operators/math/detail/avx_functions.cc @@ -12,59 +12,79 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#ifdef __AVX__ + #include -#include "hl_functions.h" +#include "paddle/operators/math/detail/activation_functions.h" // TODO(qingqing) refine this dependence #include "paddle/cuda/src/avx_mathfun.h" -namespace hppl { +namespace paddle { +namespace operators { +namespace math { +namespace detail { -__m256 exp(__m256 a) { return exp256_ps(a); } +__m256 Exp(__m256 a) { return exp256_ps(a); } -__m256 relu(const __m256 a) { +namespace forward { +namespace avx { +__m256 Relu(const __m256 a) { __m256 tmp = _mm256_set1_ps(0.0f); return _mm256_max_ps(a, tmp); } -__m256 sigmoid(const __m256 a) { +__m256 Sigmoid(const __m256 a) { __m256 max = _mm256_set1_ps(SIGMOID_THRESHOLD_MAX); __m256 min = _mm256_set1_ps(SIGMOID_THRESHOLD_MIN); __m256 tmp = _mm256_max_ps(a, min); tmp = _mm256_min_ps(tmp, max); tmp = _mm256_sub_ps(_mm256_set1_ps(0.0f), tmp); - tmp = exp(tmp); + tmp = Exp(tmp); tmp = _mm256_add_ps(_mm256_set1_ps(1.0f), tmp); tmp = _mm256_div_ps(_mm256_set1_ps(1.0f), tmp); return tmp; } -__m256 tanh(const __m256 a) { +__m256 Tanh(const __m256 a) { __m256 max = _mm256_set1_ps(EXP_MAX_INPUT); __m256 tmp = _mm256_mul_ps(_mm256_set1_ps(-2.0f), a); tmp = _mm256_min_ps(tmp, max); - tmp = exp(tmp); + tmp = Exp(tmp); return _mm256_sub_ps(_mm256_div_ps(_mm256_set1_ps(2.0f), _mm256_add_ps(_mm256_set1_ps(1.0f), tmp)), _mm256_set1_ps(1.0f)); } -__m256 linear(const __m256 a) { return a; } +__m256 Identity(const __m256 a) { return a; } + +} // namespace avx +} // namespace forward -__m256 relu(const __m256 a, const __m256 b) { +namespace backward { +namespace avx { +__m256 Relu(const __m256 a, const __m256 b) { return _mm256_mul_ps( a, _mm256_and_ps(_mm256_cmp_ps(b, _mm256_set1_ps(0.0f), _CMP_GT_OS), _mm256_set1_ps(1.0f))); } -__m256 sigmoid(const __m256 a, const __m256 b) { +__m256 Sigmoid(const __m256 a, const __m256 b) { return _mm256_mul_ps(_mm256_mul_ps(a, b), _mm256_sub_ps(_mm256_set1_ps(1.0f), b)); } -__m256 tanh(const __m256 a, const __m256 b) { +__m256 Tanh(const __m256 a, const __m256 b) { return _mm256_mul_ps( a, _mm256_sub_ps(_mm256_set1_ps(1.0f), _mm256_mul_ps(b, b))); } -__m256 linear(const __m256 a, const __m256 b) { return a; } -} // namespace hppl +__m256 Identity(const __m256 a, const __m256 b) { return a; } +} // namespace avx +} // namespace backward + +} // namespace detail +} // namespace math +} // namespace operators +} // namespace paddle + +#endif diff --git a/paddle/operators/math/detail/gru_cpu_kernel.h b/paddle/operators/math/detail/gru_cpu_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..51af140cf4d5e6581765bea00033fa53d383230d --- /dev/null +++ b/paddle/operators/math/detail/gru_cpu_kernel.h @@ -0,0 +1,424 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include +#include "paddle/operators/math/detail/activation_functions.h" +#include "paddle/operators/math/gru_compute.h" + +namespace paddle { +namespace operators { +namespace math { +namespace detail { + +#ifndef __NVCC__ + +template +void hl_naive_gru_forward_reset_output(OpResetOutput opResetOutput, + T *gateValue, T *resetOutputValue, + T *prevOutputValue, int frameSize, + activation_mode_t active_gate) { + T rValueUpdateGate; + T rValueResetGate; + T rValueResetOutput; + T rPrevOut = 0; + T *updateGate = gateValue; + T *resetGate = gateValue + frameSize; + + for (int i = 0; i < frameSize; i++) { + rValueUpdateGate = updateGate[i]; + rValueResetGate = resetGate[i]; + if (prevOutputValue) { + rPrevOut = prevOutputValue[i]; + } + + opResetOutput(rValueUpdateGate, rValueResetGate, rPrevOut, + rValueResetOutput, active_gate); + + updateGate[i] = rValueUpdateGate; + resetGate[i] = rValueResetGate; + resetOutputValue[i] = rValueResetOutput; + } +} + +template +void hl_naive_gru_forward_final_output(OpFinalOutput opFinalOutput, + T *gateValue, T *prevOutputValue, + T *outputValue, int frameSize, + activation_mode_t active_node) { + T rValueUpdateGate; + T rValueFrameState; + T rPrevOut = 0; + T rOutput; + T *updateGate = gateValue; + T *frameState = gateValue + frameSize * 2; + + for (int i = 0; i < frameSize; i++) { + rValueUpdateGate = updateGate[i]; + rValueFrameState = frameState[i]; + if (prevOutputValue) { + rPrevOut = prevOutputValue[i]; + } + + opFinalOutput(rValueUpdateGate, rValueFrameState, rPrevOut, rOutput, + active_node); + + frameState[i] = rValueFrameState; + outputValue[i] = rOutput; + } +} + +template +void hl_avx_gru_forward_reset_output(OpResetOutput opResetOutput, T *gateValue, + T *resetOutputValue, T *prevOutputValue, + int frameSize, + activation_mode_t active_gate) { +#ifdef __AVX__ + __m256 rValueUpdateGate; + __m256 rValueResetGate; + __m256 rValueResetOutput; + __m256 rPrevOut = _mm256_set1_ps(0.0f); + __m256 *updateGate = (__m256 *)gateValue; + __m256 *resetGate = (__m256 *)(gateValue + frameSize); + + for (int i = 0; i < frameSize / 8; i++) { + rValueUpdateGate = updateGate[i]; + rValueResetGate = resetGate[i]; + if (prevOutputValue) { + rPrevOut = ((__m256 *)prevOutputValue)[i]; + } + + opResetOutput(rValueUpdateGate, rValueResetGate, rPrevOut, + rValueResetOutput, active_gate); + + updateGate[i] = rValueUpdateGate; + resetGate[i] = rValueResetGate; + ((__m256 *)resetOutputValue)[i] = rValueResetOutput; + } +#endif +} + +template +void hl_avx_gru_forward_final_output(OpFinalOutput opFinalOutput, T *gateValue, + T *prevOutputValue, T *outputValue, + int frameSize, + activation_mode_t active_node) { +#ifdef __AVX__ + __m256 rValueUpdateGate; + __m256 rValueFrameState; + __m256 rPrevOut = _mm256_set1_ps(0.0f); + __m256 rOutput; + __m256 *updateGate = (__m256 *)gateValue; + __m256 *frameState = (__m256 *)(gateValue + frameSize * 2); + + for (int i = 0; i < frameSize / 8; i++) { + rValueUpdateGate = updateGate[i]; + rValueFrameState = frameState[i]; + if (prevOutputValue) { + rPrevOut = ((__m256 *)prevOutputValue)[i]; + } + + opFinalOutput(rValueUpdateGate, rValueFrameState, rPrevOut, rOutput, + active_node); + + frameState[i] = rValueFrameState; + ((__m256 *)outputValue)[i] = rOutput; + } +#endif +} + +template +inline void forward_reset_output(OpResetOutput opResetOutput, + hl_gru_value value, int frameSize, + int batchSize, activation_mode_t active_gate) { + for (int b = 0; b < batchSize; b++) { + if (OpResetOutput::avx && !(frameSize & (8 - 
1)) && (sizeof(T) == 4)) { + hl_avx_gru_forward_reset_output( + opResetOutput, value.gateValue, value.resetOutputValue, + value.prevOutValue, frameSize, active_gate); + } else { + hl_naive_gru_forward_reset_output( + opResetOutput, value.gateValue, value.resetOutputValue, + value.prevOutValue, frameSize, active_gate); + } + + value.gateValue += frameSize * 3; + value.resetOutputValue += frameSize; + if (value.prevOutValue) { + value.prevOutValue += frameSize; + } + } +} + +template +inline void forward_final_output(OpFinalOutput opFinalOutput, + hl_gru_value value, int frameSize, + int batchSize, activation_mode_t active_node) { + for (int b = 0; b < batchSize; b++) { + if (OpFinalOutput::avx && !(frameSize & (8 - 1)) && (sizeof(T) == 4)) { + hl_avx_gru_forward_final_output(opFinalOutput, value.gateValue, + value.prevOutValue, value.outputValue, + frameSize, active_node); + } else { + hl_naive_gru_forward_final_output(opFinalOutput, value.gateValue, + value.prevOutValue, value.outputValue, + frameSize, active_node); + } + + value.gateValue += frameSize * 3; + value.outputValue += frameSize; + if (value.prevOutValue) { + value.prevOutValue += frameSize; + } + } +} + +template +void hl_naive_gru_backward_state_grad(OpStateGrad opStateGrad, T *gateValue, + T *gateGrad, T *prevOutValue, + T *prevOutGrad, T *outputGrad, + int frameSize, + activation_mode_t active_node) { + T rUpdateGateValue; + T rUpdateGateGrad; + T rFrameStateValue; + T rFrameStateGrad; + T rOutGrad; + T rPrevOutValue = 0; + T rPrevOutGrad = 0; + T *updateGateValue = gateValue; + T *updateGateGrad = gateGrad; + T *frameStateValue = gateValue + frameSize * 2; + T *frameStateGrad = gateGrad + frameSize * 2; + + for (int i = 0; i < frameSize; i++) { + rUpdateGateValue = updateGateValue[i]; + rFrameStateValue = frameStateValue[i]; + rOutGrad = outputGrad[i]; + if (prevOutValue) { + rPrevOutValue = prevOutValue[i]; + } + if (prevOutGrad) { + rPrevOutGrad = prevOutGrad[i]; + } + + opStateGrad(rUpdateGateValue, rUpdateGateGrad, rFrameStateValue, + rFrameStateGrad, rPrevOutValue, rPrevOutGrad, rOutGrad, + active_node); + + updateGateGrad[i] = rUpdateGateGrad; + frameStateGrad[i] = rFrameStateGrad; + if (prevOutGrad) { + prevOutGrad[i] = rPrevOutGrad; + } + } +} + +template +void hl_naive_gru_backward_reset_grad(OpResetGrad opResetGrad, T *gateValue, + T *gateGrad, T *prevOutValue, + T *prevOutGrad, T *resetOutputGrad, + int frameSize, + activation_mode_t active_gate) { + T rUpdateGateValue; + T rUpdateGateGrad; + T rResetGateValue; + T rResetGateGrad; + T rResetOutputGrad = 0; + T rPrevOutValue = 0; + T rPrevOutGrad = 0; + T *updateGateValue = gateValue; + T *updateGateGrad = gateGrad; + T *resetGateValue = gateValue + frameSize; + T *resetGateGrad = gateGrad + frameSize; + + for (int i = 0; i < frameSize; i++) { + rUpdateGateValue = updateGateValue[i]; + rUpdateGateGrad = updateGateGrad[i]; + rResetGateValue = resetGateValue[i]; + + if (prevOutValue && prevOutGrad) { + rResetOutputGrad = resetOutputGrad[i]; + } + if (prevOutValue) { + rPrevOutValue = prevOutValue[i]; + } + if (prevOutGrad) { + rPrevOutGrad = prevOutGrad[i]; + } + + opResetGrad(rUpdateGateValue, rUpdateGateGrad, rResetGateValue, + rResetGateGrad, rPrevOutValue, rPrevOutGrad, rResetOutputGrad, + active_gate); + + updateGateGrad[i] = rUpdateGateGrad; + resetGateGrad[i] = rResetGateGrad; + if (prevOutGrad) { + prevOutGrad[i] = rPrevOutGrad; + } + } +} + +template +void hl_avx_gru_backward_state_grad(OpStateGrad opStateGrad, T *gateValue, + T *gateGrad, T *prevOutValue, 
+ T *prevOutGrad, T *outputGrad, + int frameSize, + activation_mode_t active_node) { +#ifdef __AVX__ + __m256 rUpdateGateValue; + __m256 rUpdateGateGrad; + __m256 rFrameStateValue; + __m256 rFrameStateGrad; + __m256 rOutGrad; + __m256 rPrevOutValue = _mm256_set1_ps(0.0f); + __m256 rPrevOutGrad = _mm256_set1_ps(0.0f); + __m256 *updateGateValue = (__m256 *)gateValue; + __m256 *updateGateGrad = (__m256 *)gateGrad; + __m256 *frameStateValue = (__m256 *)(gateValue + frameSize * 2); + __m256 *frameStateGrad = (__m256 *)(gateGrad + frameSize * 2); + + for (int i = 0; i < frameSize / 8; i++) { + rUpdateGateValue = updateGateValue[i]; + rFrameStateValue = frameStateValue[i]; + rOutGrad = ((__m256 *)outputGrad)[i]; + if (prevOutValue) { + rPrevOutValue = ((__m256 *)prevOutValue)[i]; + } + if (prevOutGrad) { + rPrevOutGrad = ((__m256 *)prevOutGrad)[i]; + } + + opStateGrad(rUpdateGateValue, rUpdateGateGrad, rFrameStateValue, + rFrameStateGrad, rPrevOutValue, rPrevOutGrad, rOutGrad, + active_node); + + updateGateGrad[i] = rUpdateGateGrad; + frameStateGrad[i] = rFrameStateGrad; + if (prevOutGrad) { + ((__m256 *)prevOutGrad)[i] = rPrevOutGrad; + } + } +#endif +} + +template +void hl_avx_gru_backward_reset_grad(OpResetGrad opResetGrad, T *gateValue, + T *gateGrad, T *prevOutValue, + T *prevOutGrad, T *resetOutputGrad, + int frameSize, + activation_mode_t active_gate) { +#ifdef __AVX__ + __m256 rUpdateGateValue; + __m256 rUpdateGateGrad; + __m256 rResetGateValue; + __m256 rResetGateGrad; + __m256 rResetOutputGrad = _mm256_set1_ps(0.0f); + __m256 rPrevOutValue = _mm256_set1_ps(0.0f); + __m256 rPrevOutGrad = _mm256_set1_ps(0.0f); + __m256 *updateGateValue = (__m256 *)gateValue; + __m256 *updateGateGrad = (__m256 *)gateGrad; + __m256 *resetGateValue = (__m256 *)(gateValue + frameSize); + __m256 *resetGateGrad = (__m256 *)(gateGrad + frameSize); + + for (int i = 0; i < frameSize / 8; i++) { + rUpdateGateValue = updateGateValue[i]; + rUpdateGateGrad = updateGateGrad[i]; + rResetGateValue = resetGateValue[i]; + + if (prevOutValue && prevOutGrad) { + rResetOutputGrad = ((__m256 *)resetOutputGrad)[i]; + } + if (prevOutValue) { + rPrevOutValue = ((__m256 *)prevOutValue)[i]; + } + if (prevOutGrad) { + rPrevOutGrad = ((__m256 *)prevOutGrad)[i]; + } + + opResetGrad(rUpdateGateValue, rUpdateGateGrad, rResetGateValue, + rResetGateGrad, rPrevOutValue, rPrevOutGrad, rResetOutputGrad, + active_gate); + + updateGateGrad[i] = rUpdateGateGrad; + resetGateGrad[i] = rResetGateGrad; + if (prevOutGrad) { + ((__m256 *)prevOutGrad)[i] = rPrevOutGrad; + } + } +#endif +} + +template +inline void backward_state_grad(OpStateGrad opStateGrad, hl_gru_value value, + hl_gru_grad grad, int frameSize, + int batchSize, activation_mode_t active_node) { + for (int b = 0; b < batchSize; b++) { + if (OpStateGrad::avx && !(frameSize & (8 - 1)) && (sizeof(T) == 4)) { + hl_avx_gru_backward_state_grad( + opStateGrad, value.gateValue, grad.gateGrad, value.prevOutValue, + grad.prevOutGrad, grad.outputGrad, frameSize, active_node); + } else { + hl_naive_gru_backward_state_grad( + opStateGrad, value.gateValue, grad.gateGrad, value.prevOutValue, + grad.prevOutGrad, grad.outputGrad, frameSize, active_node); + } + + value.gateValue += frameSize * 3; + if (value.prevOutValue) { + value.prevOutValue += frameSize; + } + + grad.gateGrad += frameSize * 3; + grad.outputGrad += frameSize; + if (grad.prevOutGrad) { + grad.prevOutGrad += frameSize; + } + } +} + +template +inline void backward_reset_grad(OpResetGrad opResetGrad, hl_gru_value value, + hl_gru_grad 
grad, int frameSize, + int batchSize, activation_mode_t active_gate) { + for (int b = 0; b < batchSize; b++) { + if (OpResetGrad::avx && !(frameSize & (8 - 1)) && (sizeof(T) == 4)) { + hl_avx_gru_backward_reset_grad( + opResetGrad, value.gateValue, grad.gateGrad, value.prevOutValue, + grad.prevOutGrad, grad.resetOutputGrad, frameSize, active_gate); + } else { + hl_naive_gru_backward_reset_grad( + opResetGrad, value.gateValue, grad.gateGrad, value.prevOutValue, + grad.prevOutGrad, grad.resetOutputGrad, frameSize, active_gate); + } + + value.gateValue += frameSize * 3; + if (value.prevOutValue) { + value.prevOutValue += frameSize; + } + + grad.gateGrad += frameSize * 3; + grad.resetOutputGrad += frameSize; + if (grad.prevOutGrad) { + grad.prevOutGrad += frameSize; + } + } +} + +#endif + +} // namespace detail +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/detail/gru_gpu_kernel.h b/paddle/operators/math/detail/gru_gpu_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..6441c648b048422c110872a85aa8cb719f11a8d7 --- /dev/null +++ b/paddle/operators/math/detail/gru_gpu_kernel.h @@ -0,0 +1,203 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include +#include "paddle/operators/math/detail/activation_functions.h" +#include "paddle/operators/math/gru_compute.h" +#include "paddle/platform/cuda_helper.h" +#include "paddle/platform/device_context.h" + +#include + +namespace paddle { +namespace operators { +namespace math { +namespace detail { + +/* + * threads(framePerBlock, batchPerBlock) + * grid(frameBlocks, batchBlocks) + */ +template +__global__ void KeGruForwardResetOutput(OpResetOutput opResetOutput, + T *gateValue, T *resetOutputValue, + T *prevOutputValue, int frameSize, + int batchSize, + activation_mode_t active_gate) { + const int frameIdx = blockIdx.x * blockDim.x + threadIdx.x; + if (frameIdx >= frameSize) return; + + int batchIdx = 0; + if (isBatch) { + batchIdx = blockIdx.y * blockDim.y + threadIdx.y; + if (batchIdx >= batchSize) return; + gateValue += batchIdx * 3 * frameSize; + resetOutputValue += batchIdx * frameSize; + } + + T rPrevOut = 0; + T rValueResetOutput; + T rValueUpdateGate = gateValue[frameIdx + frameSize * 0]; + T rValueResetGate = gateValue[frameIdx + frameSize * 1]; + + if (prevOutputValue) { + if (isBatch) prevOutputValue += batchIdx * frameSize; + rPrevOut = prevOutputValue[frameIdx]; + } + + opResetOutput(rValueUpdateGate, rValueResetGate, rPrevOut, rValueResetOutput, + active_gate); + + gateValue[frameIdx + frameSize * 0] = rValueUpdateGate; + gateValue[frameIdx + frameSize * 1] = rValueResetGate; + resetOutputValue[frameIdx] = rValueResetOutput; +} + +/* + * threads(framePerBlock, batchPerBlock) + * grid(frameBlocks, batchBlocks) + */ +template +__global__ void KeGruForwardFinalOutput(OpFinalOutput opFinalOutput, + T *gateValue, T *prevOutputValue, + T *outputValue, int frameSize, + int batchSize, + activation_mode_t active_node) { + const int frameIdx = blockIdx.x * blockDim.x + threadIdx.x; + if (frameIdx >= frameSize) return; + int batchIdx = 0; + if (isBatch) { + batchIdx = blockIdx.y * blockDim.y + threadIdx.y; + if (batchIdx >= batchSize) return; + gateValue += batchIdx * 3 * frameSize; + outputValue += batchIdx * frameSize; + } + + T rOutput; + T rPrevOut = 0; + T rValueUpdateGate = gateValue[frameIdx + frameSize * 0]; + T rValueFrameState = gateValue[frameIdx + frameSize * 2]; + + if (prevOutputValue) { + if (isBatch) prevOutputValue += batchIdx * frameSize; + rPrevOut = prevOutputValue[frameIdx]; + } + + opFinalOutput(rValueUpdateGate, rValueFrameState, rPrevOut, rOutput, + active_node); + + gateValue[frameIdx + frameSize * 2] = rValueFrameState; + outputValue[frameIdx] = rOutput; +} + +/* + * threads(framePerBlock, batchPerBlock) + * grid(frameBlocks, batchBlocks) + */ +template +__global__ void KeGruBackwardStateGrad(OpStateGrad opStateGrad, T *gateValue, + T *gateGrad, T *prevOutValue, + T *prevOutGrad, T *outputGrad, + int frameSize, int batchSize, + activation_mode_t active_node) { + const int frameIdx = blockIdx.x * blockDim.x + threadIdx.x; + if (frameIdx >= frameSize) return; + int batchIdx = 0; + if (isBatch) { + batchIdx = blockIdx.y * blockDim.y + threadIdx.y; + if (batchIdx >= batchSize) return; + gateValue += batchIdx * 3 * frameSize; + gateGrad += batchIdx * 3 * frameSize; + outputGrad += batchIdx * frameSize; + } + + T rUpdateGateGrad; + T rFrameStateGrad; + T rPrevOutValue = 0; + T rPrevOutGrad = 0; + T rUpdateGateValue = gateValue[frameIdx + frameSize * 0]; + T rFrameStateValue = gateValue[frameIdx + frameSize * 2]; + T rOutGrad = outputGrad[frameIdx]; + + if (prevOutValue && prevOutGrad) { + if (isBatch) prevOutValue += batchIdx * frameSize; + 
rPrevOutValue = prevOutValue[frameIdx]; + + if (isBatch) prevOutGrad += batchIdx * frameSize; + rPrevOutGrad = prevOutGrad[frameIdx]; + } + + opStateGrad(rUpdateGateValue, rUpdateGateGrad, rFrameStateValue, + rFrameStateGrad, rPrevOutValue, rPrevOutGrad, rOutGrad, + active_node); + + gateGrad[frameIdx + frameSize * 0] = rUpdateGateGrad; + gateGrad[frameIdx + frameSize * 2] = rFrameStateGrad; + if (prevOutGrad) { + prevOutGrad[frameIdx] = rPrevOutGrad; + } +} + +/* + * threads(framePerBlock, batchPerBlock) + * grid(frameBlocks, batchBlocks) + */ +template +__global__ void KeGruBackwardResetGrad(OpResetGrad opResetGrad, T *gateValue, + T *gateGrad, T *prevOutValue, + T *prevOutGrad, T *resetOutputGrad, + int frameSize, int batchSize, + activation_mode_t active_gate) { + const int frameIdx = blockIdx.x * blockDim.x + threadIdx.x; + if (frameIdx >= frameSize) return; + int batchIdx = 0; + if (isBatch) { + batchIdx = blockIdx.y * blockDim.y + threadIdx.y; + if (batchIdx >= batchSize) return; + gateValue += batchIdx * 3 * frameSize; + gateGrad += batchIdx * 3 * frameSize; + resetOutputGrad += batchIdx * frameSize; + } + + T rResetGateGrad; + T rPrevOutValue = 0; + T rPrevOutGrad = 0; + T rResetOutputGrad = 0; + T rUpdateGateValue = gateValue[frameIdx + frameSize * 0]; + T rUpdateGateGrad = gateGrad[frameIdx + frameSize * 0]; + T rResetGateValue = gateValue[frameIdx + frameSize * 1]; + + if (prevOutValue && prevOutGrad) { + if (isBatch) prevOutValue += batchIdx * frameSize; + if (isBatch) prevOutGrad += batchIdx * frameSize; + rPrevOutValue = prevOutValue[frameIdx]; + rPrevOutGrad = prevOutGrad[frameIdx]; + rResetOutputGrad = resetOutputGrad[frameIdx]; + } + + opResetGrad(rUpdateGateValue, rUpdateGateGrad, rResetGateValue, + rResetGateGrad, rPrevOutValue, rPrevOutGrad, rResetOutputGrad, + active_gate); + + gateGrad[frameIdx + frameSize * 0] = rUpdateGateGrad; + gateGrad[frameIdx + frameSize * 1] = rResetGateGrad; + if (prevOutGrad) { + prevOutGrad[frameIdx] = rPrevOutGrad; + } +} +} // namespace detail +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/detail/gru_kernel.h b/paddle/operators/math/detail/gru_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..8a681d8d8bced72e1296f863489f6ccbc7913167 --- /dev/null +++ b/paddle/operators/math/detail/gru_kernel.h @@ -0,0 +1,155 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/math/detail/activation_functions.h" +#include "paddle/platform/hostdevice.h" + +#include + +// TODO(guosheng): refine code style in gru_kernel +namespace paddle { +namespace operators { +namespace math { +namespace detail { + +namespace forward { + +template +class gru_resetOutput { + public: + HOSTDEVICE void operator()(T &valueUpdateGate, T &valueResetGate, T &prevOut, + T &valueResetOutput, activation_mode_t actGate) { + valueUpdateGate = activation(valueUpdateGate, actGate); + valueResetGate = activation(valueResetGate, actGate); + valueResetOutput = prevOut * valueResetGate; + } +#ifndef __NVCC__ +#ifndef __AVX__ + static const bool avx = false; +#else + static const bool avx = true; + HOSTDEVICE void operator()(__m256 &valueUpdateGate, __m256 &valueResetGate, + __m256 &prevOut, __m256 &valueResetOutput, + activation_mode_t actGate) { + valueUpdateGate = activation(valueUpdateGate, actGate); + valueResetGate = activation(valueResetGate, actGate); + valueResetOutput = _mm256_mul_ps(prevOut, valueResetGate); + } +#endif +#endif +}; + +template +class gru_finalOutput { + public: + HOSTDEVICE void operator()(T &valueUpdateGate, T &valueFrameState, T &prevOut, + T &valueOutput, activation_mode_t actInput) { + valueFrameState = activation(valueFrameState, actInput); + valueOutput = prevOut - (valueUpdateGate * prevOut) + + (valueUpdateGate * valueFrameState); + } +#ifndef __NVCC__ +#ifndef __AVX__ + static const bool avx = false; +#else + static const bool avx = true; + HOSTDEVICE void operator()(__m256 &valueUpdateGate, __m256 &valueFrameState, + __m256 &prevOut, __m256 &valueOutput, + activation_mode_t actInput) { + valueFrameState = activation(valueFrameState, actInput); + valueOutput = _mm256_add_ps( + _mm256_sub_ps(prevOut, _mm256_mul_ps(valueUpdateGate, prevOut)), + _mm256_mul_ps(valueUpdateGate, valueFrameState)); + } +#endif +#endif +}; +} // namespace forward + +namespace backward { + +template +class gru_stateGrad { + public: + HOSTDEVICE void operator()(T &valueUpdateGate, T &gradUpdateGate, + T &valueFrameState, T &gradFrameState, + T &valuePrevOut, T &gradPrevOut, T &gradOutput, + activation_mode_t actInput) { + gradUpdateGate = (gradOutput * valueFrameState); + gradUpdateGate -= (gradOutput * valuePrevOut); + gradPrevOut -= (gradOutput * valueUpdateGate); + gradPrevOut += gradOutput; + gradFrameState = + activation(gradOutput * valueUpdateGate, valueFrameState, actInput); + } +#ifndef __NVCC__ +#ifndef __AVX__ + static const bool avx = false; +#else + static const bool avx = true; + HOSTDEVICE void operator()(__m256 &valueUpdateGate, __m256 &gradUpdateGate, + __m256 &valueFrameState, __m256 &gradFrameState, + __m256 &valuePrevOut, __m256 &gradPrevOut, + __m256 &gradOutput, activation_mode_t actInput) { + gradUpdateGate = _mm256_mul_ps(gradOutput, valueFrameState); + gradUpdateGate = + _mm256_sub_ps(gradUpdateGate, _mm256_mul_ps(gradOutput, valuePrevOut)); + gradPrevOut = _mm256_add_ps( + _mm256_sub_ps(gradPrevOut, _mm256_mul_ps(gradOutput, valueUpdateGate)), + gradOutput); + gradFrameState = activation(_mm256_mul_ps(gradOutput, valueUpdateGate), + valueFrameState, actInput); + } +#endif +#endif +}; + +template +class gru_resetGrad { + public: + HOSTDEVICE void operator()(T &valueUpdateGate, T &gradUpdateGate, + T &valueResetGate, T &gradResetGate, + T &valuePrevOut, T &gradPrevOut, + T &gradResetOutput, activation_mode_t actGate) { + gradResetGate = (gradResetOutput * valuePrevOut); + gradPrevOut += (gradResetOutput * valueResetGate); + 
gradUpdateGate = activation(gradUpdateGate, valueUpdateGate, actGate); + gradResetGate = activation(gradResetGate, valueResetGate, actGate); + } +#ifndef __NVCC__ +#ifndef __AVX__ + static const bool avx = false; +#else + static const bool avx = true; + HOSTDEVICE void operator()(__m256 &valueUpdateGate, __m256 &gradUpdateGate, + __m256 &valueResetGate, __m256 &gradResetGate, + __m256 &valuePrevOut, __m256 &gradPrevOut, + __m256 &gradResetOutput, + activation_mode_t actGate) { + gradResetGate = _mm256_mul_ps(gradResetOutput, valuePrevOut); + gradPrevOut = _mm256_add_ps(gradPrevOut, + _mm256_mul_ps(gradResetOutput, valueResetGate)); + gradUpdateGate = activation(gradUpdateGate, valueUpdateGate, actGate); + gradResetGate = activation(gradResetGate, valueResetGate, actGate); + } +#endif +#endif +}; + +} // namespace backward + +} // namespace detail +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/detail/hl_activation_functions.h b/paddle/operators/math/detail/hl_activation_functions.h deleted file mode 100644 index 9d7d9914f0090bff17049038dfa2288d84f3dbda..0000000000000000000000000000000000000000 --- a/paddle/operators/math/detail/hl_activation_functions.h +++ /dev/null @@ -1,188 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifndef HL_ACTIVATION_FUNCTIONS_H_ -#define HL_ACTIVATION_FUNCTIONS_H_ - -#include "hl_functions.h" -#include "paddle/operators/math/lstm_compute.h" - -/** - * Active functions: sigmoid, relu, tanh and linear. - */ -#define FLOAT_ACTIVE_FUNCTION \ - { \ - hppl::typef::sigmoid, hppl::typef::relu, hppl::typef::tanh, \ - hppl::typef::linear \ - } - -#define DOUBLE_ACTIVE_FUNCTION \ - { \ - hppl::typed::sigmoid, hppl::typed::relu, hppl::typed::tanh, \ - hppl::typed::linear \ - } - -#define AVX_ACTIVE_FUNCTION \ - { hppl::sigmoid, hppl::relu, hppl::tanh, hppl::linear } - -namespace hppl { - -using activation_mode_t = paddle::operators::math::activation_mode_t; - -/** - * Hppl supports sigmoid, relu, tanh, linear active functions - * for neural networks' forward and backward activation. 
- */ -template -class Active { - public: - typedef T (*forward)(T); - typedef T (*backward)(T, T); -}; - -template -struct ForwardActType; - -template <> -struct ForwardActType { - using type = Active::forward; -}; - -template <> -struct ForwardActType { - using type = Active::forward; -}; - -template -struct BackwardActType; - -template <> -struct BackwardActType { - using type = Active::backward; -}; - -template <> -struct BackwardActType { - using type = Active::backward; -}; - -#ifdef __NVCC__ -namespace gpu { -static __device__ Active::forward forward[] = FLOAT_ACTIVE_FUNCTION; -static __device__ Active::backward backward[] = FLOAT_ACTIVE_FUNCTION; - -static __device__ Active::forward forward_d[] = DOUBLE_ACTIVE_FUNCTION; -static __device__ Active::backward backward_d[] = - DOUBLE_ACTIVE_FUNCTION; - -template -struct ForwardAct { - __device__ typename ForwardActType::type operator()( - activation_mode_t type); -}; - -template <> -struct ForwardAct { - __device__ ForwardActType::type operator()(activation_mode_t type) { - return forward[type]; - } -}; - -template <> -struct ForwardAct { - __device__ ForwardActType::type operator()(activation_mode_t type) { - return forward_d[type]; - } -}; - -template -struct BackwardAct { - __device__ typename BackwardActType::type operator()( - activation_mode_t type); -}; - -template <> -struct BackwardAct { - __device__ BackwardActType::type operator()(activation_mode_t type) { - return backward[type]; - } -}; - -template <> -struct BackwardAct { - __device__ BackwardActType::type operator()(activation_mode_t type) { - return backward_d[type]; - } -}; - -} // namespace gpu -#else -namespace cpu { -static Active::forward forward[] = FLOAT_ACTIVE_FUNCTION; -static Active::backward backward[] = FLOAT_ACTIVE_FUNCTION; - -static Active::forward forward_d[] = DOUBLE_ACTIVE_FUNCTION; -static Active::backward backward_d[] = DOUBLE_ACTIVE_FUNCTION; - -template -struct ForwardAct { - typename ForwardActType::type operator()(activation_mode_t type); -}; - -template <> -struct ForwardAct { - ForwardActType::type operator()(activation_mode_t type) { - return forward[type]; - } -}; - -template <> -struct ForwardAct { - ForwardActType::type operator()(activation_mode_t type) { - return forward_d[type]; - } -}; - -template -struct BackwardAct { - typename BackwardActType::type operator()(activation_mode_t type); -}; - -template <> -struct BackwardAct { - BackwardActType::type operator()(activation_mode_t type) { - return backward[type]; - } -}; - -template <> -struct BackwardAct { - BackwardActType::type operator()(activation_mode_t type) { - return backward_d[type]; - } -}; - -} // namespace cpu - -#ifdef __AVX__ -namespace avx { -static Active<__m256>::forward forward[] = AVX_ACTIVE_FUNCTION; -static Active<__m256>::backward backward[] = AVX_ACTIVE_FUNCTION; -} // namespace avx -#endif -#endif - -} // namespace hppl - -#endif // HL_ACTIVATION_FUNCTIONS_H_ diff --git a/paddle/operators/math/detail/hl_avx_functions.h b/paddle/operators/math/detail/hl_avx_functions.h deleted file mode 100644 index 35f4eabb4c07c6cc9d2edded02e5b6290b1232f8..0000000000000000000000000000000000000000 --- a/paddle/operators/math/detail/hl_avx_functions.h +++ /dev/null @@ -1,32 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifndef HL_AVX_FUNCTIONS_H_ -#define HL_AVX_FUNCTIONS_H_ - -#include - -namespace hppl { -__m256 relu(const __m256 a); -__m256 sigmoid(const __m256 a); -__m256 tanh(const __m256 a); -__m256 linear(const __m256 a); - -__m256 relu(const __m256 a, const __m256 b); -__m256 sigmoid(const __m256 a, const __m256 b); -__m256 tanh(const __m256 a, const __m256 b); -__m256 linear(const __m256 a, const __m256 b); -} // namespace hppl - -#endif // HL_AVX_FUNCTIONS_H_ diff --git a/paddle/operators/math/detail/hl_cpu_functions.cc b/paddle/operators/math/detail/hl_cpu_functions.cc deleted file mode 100644 index 21ec78f9629af0e4673a56517d76ac6734f57db8..0000000000000000000000000000000000000000 --- a/paddle/operators/math/detail/hl_cpu_functions.cc +++ /dev/null @@ -1,89 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include -#include "hl_functions.h" - -namespace hppl { -namespace typef { - -float relu(const float a) { - return a > static_cast(0.0) ? a : static_cast(0.0); -} - -float sigmoid(const float a) { - const float min = SIGMOID_THRESHOLD_MIN; - const float max = SIGMOID_THRESHOLD_MAX; - float tmp = (a < min) ? min : ((a > max) ? max : a); - return static_cast(1.0) / (static_cast(1.0) + exp(-tmp)); -} - -float tanh(const float a) { - float tmp = -2.0 * a; - tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp; - return (2.0 / (1.0 + exp(tmp))) - 1.0; -} - -float linear(const float a) { return a; } - -float relu(const float a, const float b) { return a * (b > 0.0 ? 1.0 : 0.0); } - -float sigmoid(const float a, const float b) { - return a * b * (static_cast(1) - b); -} - -float tanh(const float a, const float b) { - return a * (static_cast(1) - b * b); -} - -float linear(const float a, const float b) { return a; } - -} // namespace typef - -namespace typed { -double relu(const double a) { - return a > static_cast(0.0) ? a : static_cast(0.0); -} - -double sigmoid(const double a) { - const double min = SIGMOID_THRESHOLD_MIN; - const double max = SIGMOID_THRESHOLD_MAX; - double tmp = (a < min) ? min : ((a > max) ? max : a); - return static_cast(1.0) / (static_cast(1.0) + exp(-tmp)); -} - -double tanh(const double a) { - double tmp = -2.0 * a; - tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp; - return (2.0 / (1.0 + exp(tmp))) - 1.0; -} - -double linear(const double a) { return a; } - -double relu(const double a, const double b) { - return a * (b > 0.0 ? 
1.0 : 0.0); -} - -double sigmoid(const double a, const double b) { - return a * b * (static_cast(1) - b); -} - -double tanh(const double a, const double b) { - return a * (static_cast(1) - b * b); -} - -double linear(const double a, const double b) { return a; } - -} // namespace typed -} // namespace hppl diff --git a/paddle/operators/math/detail/hl_functions.h b/paddle/operators/math/detail/hl_functions.h deleted file mode 100644 index 3e2f0c9ee6d3ae2ed598c4d5f09b85b7d61fdd51..0000000000000000000000000000000000000000 --- a/paddle/operators/math/detail/hl_functions.h +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifndef HL_FUNCTIONS_H_ -#define HL_FUNCTIONS_H_ - -/** - * sigmoid threshold maximum - */ -#define SIGMOID_THRESHOLD_MIN -40.0 - -/** - * sigmoid threshold minimum - */ -#define SIGMOID_THRESHOLD_MAX 13.0 - -/** - * The maximum input value for exp, used to avoid overflow problem. - * currently only used for tanh function. - */ -#define EXP_MAX_INPUT 40.0 - -#ifndef __NVCC__ -namespace hppl { -namespace typef { -float relu(const float a); -float sigmoid(const float a); -float tanh(const float a); -float linear(const float a); - -float relu(const float a, const float b); -float sigmoid(const float a, const float b); -float tanh(const float a, const float b); -float linear(const float a, const float b); - -} // namespace typef - -namespace typed { -double relu(const double a); -double sigmoid(const double a); -double tanh(const double a); -double linear(const double a); - -double relu(const double a, const double b); -double sigmoid(const double a, const double b); -double tanh(const double a, const double b); -double linear(const double a, const double b); -} // namespace typed - -} // namespace hppl - -#ifdef __AVX__ -#include "hl_avx_functions.h" -#endif - -#else -#include "hl_gpu_functions.h" -#endif - -#endif // HL_FUNCTIONS_H_ diff --git a/paddle/operators/math/detail/hl_gpu_functions.h b/paddle/operators/math/detail/hl_gpu_functions.h deleted file mode 100644 index 72f2204e7b2cfdba1367b51e3731dde11fb292d6..0000000000000000000000000000000000000000 --- a/paddle/operators/math/detail/hl_gpu_functions.h +++ /dev/null @@ -1,93 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#ifndef HL_GPU_FUNCTIONS_CUH_ -#define HL_GPU_FUNCTIONS_CUH_ - -#include "hl_base.h" - -namespace hppl { -namespace typef { - -__device__ static float relu(const float a) { return a > 0.0f ? a : 0.0f; } - -__device__ static float sigmoid(const float a) { - const float min = SIGMOID_THRESHOLD_MIN; - const float max = SIGMOID_THRESHOLD_MAX; - float tmp = (a < min) ? min : ((a > max) ? max : a); - return __fdividef(1.0f, 1.0f + __expf(-tmp)); -} - -__device__ static float tanh(const float a) { - float tmp = -2.0 * a; - tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp; - return __fdividef(2.0f, (1.0f + __expf(-2.0f * tmp))) - 1.0f; -} - -__device__ static float linear(const float a) { return a; } - -__device__ static float relu(const float a, const float b) { - return a * (b > 0.0f ? 1.0f : 0.0f); -} - -__device__ static float sigmoid(const float a, const float b) { - return a * b * (1.0f - b); -} - -__device__ static float tanh(const float a, const float b) { - return a * (1.0f - b * b); -} - -__device__ static float linear(const float a, const float b) { return a; } - -} // namespace typef - -namespace typed { - -__device__ static double relu(const double a) { return a > 0.0 ? a : 0.0; } - -__device__ static double sigmoid(const double a) { - const double min = SIGMOID_THRESHOLD_MIN; - const double max = SIGMOID_THRESHOLD_MAX; - double tmp = (a < min) ? min : ((a > max) ? max : a); - return 1.0 / (1.0 + exp(-tmp)); -} - -__device__ static double tanh(const double a) { - double tmp = -2.0 * a; - tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp; - return (2.0 / (1.0 + exp(-2.0 * a))) - 1.0; -} - -__device__ static double linear(const double a) { return a; } - -__device__ static double relu(const double a, const double b) { - return a * (b > 0.0 ? 1.0 : 0.0); -} - -__device__ static double sigmoid(const double a, const double b) { - return a * b * (1 - b); -} - -__device__ static double tanh(const double a, const double b) { - return a * (1.0 - b * b); -} - -__device__ static double linear(const double a, const double b) { return a; } - -} // namespace typef - -} // namespace hppl - -#endif // HL_GPU_FUNCTIONS_CUH_ diff --git a/paddle/operators/math/detail/lstm_cpu_kernel.h b/paddle/operators/math/detail/lstm_cpu_kernel.h index d0ed55ea168bc3e701c421c51d662c646e475351..f5b0dd85c9d63805459431f933176581ee3658dc 100644 --- a/paddle/operators/math/detail/lstm_cpu_kernel.h +++ b/paddle/operators/math/detail/lstm_cpu_kernel.h @@ -14,7 +14,7 @@ limitations under the License. 
*/ #pragma once #include -#include "paddle/operators/math/detail/hl_activation_functions.h" +#include "paddle/operators/math/detail/activation_functions.h" #include "paddle/operators/math/lstm_compute.h" namespace paddle { @@ -26,7 +26,10 @@ namespace detail { template void naive_lstm_forward_one_sequence(Op op, LstmMetaValue value, - int frameSize) { + int frameSize, + activation_mode_t active_node, + activation_mode_t active_gate, + activation_mode_t active_state) { T rValueIn; T rValueIg; T rValueFg; @@ -58,7 +61,7 @@ void naive_lstm_forward_one_sequence(Op op, LstmMetaValue value, } op(rValueIn, rValueIg, rValueFg, rValueOg, rPrevState, rState, rStateAtv, - rOut, rCheckI, rCheckF, rCheckO); + rOut, rCheckI, rCheckF, rCheckO, active_node, active_gate, active_state); valueIn[i] = rValueIn; valueIg[i] = rValueIg; @@ -72,7 +75,10 @@ void naive_lstm_forward_one_sequence(Op op, LstmMetaValue value, template void naive_lstm_backward_one_sequence(Op op, LstmMetaValue value, - LstmMetaGrad grad, int frameSize) { + LstmMetaGrad grad, int frameSize, + activation_mode_t active_node, + activation_mode_t active_gate, + activation_mode_t active_state) { T rValueIn; T rValueIg; T rValueFg; @@ -122,7 +128,7 @@ void naive_lstm_backward_one_sequence(Op op, LstmMetaValue value, op(rValueIn, rValueIg, rValueFg, rValueOg, rGradIn, rGradIg, rGradFg, rGradOg, rPrevState, rPrevStateGrad, rState, rStateGrad, rStateAtv, rOutputGrad, rCheckI, rCheckF, rCheckO, rCheckIGrad, rCheckFGrad, - rCheckOGrad); + rCheckOGrad, active_node, active_gate, active_state); gradIn[i] = rGradIn; gradIg[i] = rGradIg; @@ -176,8 +182,7 @@ void avx_lstm_forward_one_sequence(Op op, LstmMetaValue value, int frameSize, } op(rValueIn, rValueIg, rValueFg, rValueOg, rPrevState, rState, rStateAtv, - rOut, rCheckI, rCheckF, rCheckO, hppl::avx::forward[active_node], - hppl::avx::forward[active_gate], hppl::avx::forward[active_state]); + rOut, rCheckI, rCheckF, rCheckO, active_node, active_gate, active_state); valueIn[i] = rValueIn; valueIg[i] = rValueIg; @@ -246,8 +251,7 @@ void avx_lstm_backward_one_sequence(Op op, LstmMetaValue value, op(rValueIn, rValueIg, rValueFg, rValueOg, rGradIn, rGradIg, rGradFg, rGradOg, rPrevState, rPrevStateGrad, rState, rStateGrad, rStateAtv, rOutputGrad, rCheckI, rCheckF, rCheckO, rCheckIGrad, rCheckFGrad, - rCheckOGrad, hppl::avx::backward[active_node], - hppl::avx::backward[active_gate], hppl::avx::backward[active_state]); + rCheckOGrad, active_node, active_gate, active_state); gradIn[i] = rGradIn; gradIg[i] = rGradIg; @@ -274,7 +278,8 @@ void cpu_lstm_forward(Op op, LstmMetaValue value, int frameSize, avx_lstm_forward_one_sequence(op, value, frameSize, active_node, active_gate, active_state); } else { - naive_lstm_forward_one_sequence(op, value, frameSize); + naive_lstm_forward_one_sequence(op, value, frameSize, active_node, + active_gate, active_state); } } @@ -287,7 +292,8 @@ void cpu_lstm_backward(Op op, LstmMetaValue value, LstmMetaGrad grad, avx_lstm_backward_one_sequence(op, value, grad, frameSize, active_node, active_gate, active_state); } else { - naive_lstm_backward_one_sequence(op, value, grad, frameSize); + naive_lstm_backward_one_sequence(op, value, grad, frameSize, active_node, + active_gate, active_state); } } diff --git a/paddle/operators/math/detail/lstm_gpu_kernel.h b/paddle/operators/math/detail/lstm_gpu_kernel.h index c06f164f84a92d31f89901e2656bdb8e69c533b7..41a54a359daa14a047c49728962ea15eefd12274 100644 --- a/paddle/operators/math/detail/lstm_gpu_kernel.h +++ 
b/paddle/operators/math/detail/lstm_gpu_kernel.h @@ -13,13 +13,12 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include -#include "paddle/operators/math/detail/hl_activation_functions.h" +#include "paddle/operators/math/detail/activation_functions.h" #include "paddle/operators/math/lstm_compute.h" #include "paddle/platform/cuda_helper.h" #include "paddle/platform/device_context.h" -#include +#include namespace paddle { namespace operators { @@ -32,7 +31,9 @@ namespace detail { */ template __global__ void KeLstmForward(Op op, LstmMetaValue value, int frameSize, - int batchSize) { + int batchSize, activation_mode_t active_node, + activation_mode_t active_gate, + activation_mode_t active_state) { const int frameIdx = blockIdx.x * blockDim.x + threadIdx.x; if (frameIdx >= frameSize) return; @@ -69,7 +70,7 @@ __global__ void KeLstmForward(Op op, LstmMetaValue value, int frameSize, } op(rValueIn, rValueIg, rValueFg, rValueOg, rPrevState, rState, rStateAtv, - rOut, rCheckI, rCheckF, rCheckO); + rOut, rCheckI, rCheckF, rCheckO, active_node, active_gate, active_state); value.gateValue[frameIdx] = rValueIn; value.gateValue[frameIdx + frameSize] = rValueIg; @@ -88,7 +89,9 @@ __global__ void KeLstmForward(Op op, LstmMetaValue value, int frameSize, template __global__ void KeLstmBackward(Op op, LstmMetaValue value, LstmMetaGrad grad, int frameSize, - int batchSize) { + int batchSize, activation_mode_t active_node, + activation_mode_t active_gate, + activation_mode_t active_state) { const int frameIdx = blockIdx.x * blockDim.x + threadIdx.x; if (frameIdx >= frameSize) return; @@ -141,7 +144,8 @@ __global__ void KeLstmBackward(Op op, LstmMetaValue value, op(rValueIn, rValueIg, rValueFg, rValueOg, rGradIn, rGradIg, rGradFg, rGradOg, rPrevState, rPrevStateGrad, rState, rStateGrad, rStateAtv, rOutputGrad, - rCheckI, rCheckF, rCheckO, rCheckIGrad, rCheckFGrad, rCheckOGrad); + rCheckI, rCheckF, rCheckO, rCheckIGrad, rCheckFGrad, rCheckOGrad, + active_node, active_gate, active_state); grad.gateGrad[frameIdx] = rGradIn; grad.gateGrad[frameIdx + frameSize] = rGradIg; @@ -197,11 +201,13 @@ void gpu_lstm_forward(const platform::DeviceContext& context, Op op, if (batchSize == 1) { KeLstmForward<<>>( - op, value, frameSize, batchSize); + op, value, frameSize, batchSize, active_node, active_gate, + active_state); } else { KeLstmForward<<>>( - op, value, frameSize, batchSize); + op, value, frameSize, batchSize, active_node, active_gate, + active_state); } } @@ -220,9 +226,9 @@ void gpu_lstm_backward(const platform::DeviceContext& context, Op op, threads = dim3(framePerBlock, 1); grid = dim3(frameBlocks, 1); } else { - /* framePerBlock = 32 batchPerBlock = 32 */ - threads = dim3(32, 32); - grid = dim3((frameSize + 32 - 1) / 32, (batchSize + 32 - 1) / 32); + /* framePerBlock = 32 batchPerBlock = 16 */ + threads = dim3(32, 16); + grid = dim3((frameSize + 32 - 1) / 32, (batchSize + 16 - 1) / 16); } auto stream = @@ -230,12 +236,19 @@ void gpu_lstm_backward(const platform::DeviceContext& context, Op op, if (batchSize == 1) { KeLstmBackward<<>>( - op, value, grad, frameSize, batchSize); + op, value, grad, frameSize, batchSize, active_node, active_gate, + active_state); } else { KeLstmBackward<<>>( - op, value, grad, frameSize, batchSize); + op, value, grad, frameSize, batchSize, active_node, active_gate, + active_state); } + + cudaStreamSynchronize(stream); + // TODO(qingqing): Add cuda error check for each kernel. 
+ cudaError_t err = cudaGetLastError(); + PADDLE_ENFORCE(err, cudaGetErrorString(err)); } } // namespace detail diff --git a/paddle/operators/math/detail/lstm_kernel.h b/paddle/operators/math/detail/lstm_kernel.h index 461039a4d51a2b9b8a55d3101bdf4c511907597e..9daaf91981a8e0252374f528f0e063111bd32675 100644 --- a/paddle/operators/math/detail/lstm_kernel.h +++ b/paddle/operators/math/detail/lstm_kernel.h @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/detail/hl_activation_functions.h" +#include "paddle/operators/math/detail/activation_functions.h" #include "paddle/platform/hostdevice.h" #include @@ -24,45 +24,22 @@ namespace detail { namespace forward { -template -DEVICE inline T sigmoid(const T a) { - const T min = SIGMOID_THRESHOLD_MIN; - const T max = SIGMOID_THRESHOLD_MAX; - T tmp = (a < min) ? min : ((a > max) ? max : a); - return static_cast(1.0) / (static_cast(1.0) + exp(-tmp)); -} - -template -DEVICE inline T tanh(const T a) { - T tmp = -2.0 * a; - tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp; - return (2.0 / (1.0 + exp(tmp))) - 1.0; -} - template class lstm { public: HOSTDEVICE void operator()(T &valueIn, T &valueIg, T &valueFg, T &valueOg, T &prevState, T &state, T &stateAtv, T &output, - T &checkI, T &checkF, T &checkO) { -#if 0 - // TODO(qingqing) support to activation speficed by users - valueIn = actInput(valueIn); - valueIg = actGate(valueIg + prevState * checkI); - valueFg = actGate(valueFg + prevState * checkF); - state = valueIn * valueIg + prevState * valueFg; - valueOg = actGate(valueOg + state * checkO); - stateAtv = actState(state); - output = valueOg * stateAtv; -#else - valueIn = tanh(valueIn); - valueIg = sigmoid(valueIg + prevState * checkI); - valueFg = sigmoid(valueFg + prevState * checkF); + T &checkI, T &checkF, T &checkO, + activation_mode_t active_node, + activation_mode_t active_gate, + activation_mode_t active_state) { + valueIn = activation(valueIn, active_node); + valueIg = activation(valueIg + prevState * checkI, active_gate); + valueFg = activation(valueFg + prevState * checkF, active_gate); state = valueIn * valueIg + prevState * valueFg; - valueOg = sigmoid(valueOg + state * checkO); - stateAtv = tanh(state); + valueOg = activation(valueOg + state * checkO, active_gate); + stateAtv = activation(state, active_state); output = valueOg * stateAtv; -#endif } #ifndef __NVCC__ #ifndef __AVX__ // If not compiled with AVX instructs. 
Disable AVX by default @@ -75,16 +52,19 @@ class lstm { __m256 &valueOg, __m256 &prevState, __m256 &state, __m256 &stateAtv, __m256 &output, __m256 &checkI, __m256 &checkF, __m256 &checkO, - hppl::Active<__m256>::forward actInput, - hppl::Active<__m256>::forward actGate, - hppl::Active<__m256>::forward actState) { - valueIn = actInput(valueIn); - valueIg = actGate(_mm256_add_ps(valueIg, _mm256_mul_ps(prevState, checkI))); - valueFg = actGate(_mm256_add_ps(valueFg, _mm256_mul_ps(prevState, checkF))); + activation_mode_t active_node, + activation_mode_t active_gate, + activation_mode_t active_state) { + valueIn = activation(valueIn, active_node); + valueIg = activation( + _mm256_add_ps(valueIg, _mm256_mul_ps(prevState, checkI)), active_gate); + valueFg = activation( + _mm256_add_ps(valueFg, _mm256_mul_ps(prevState, checkF)), active_gate); state = _mm256_add_ps(_mm256_mul_ps(valueIn, valueIg), _mm256_mul_ps(prevState, valueFg)); - valueOg = actGate(_mm256_add_ps(valueOg, _mm256_mul_ps(state, checkO))); - stateAtv = actState(state); + valueOg = activation(_mm256_add_ps(valueOg, _mm256_mul_ps(state, checkO)), + active_gate); + stateAtv = activation(state, active_state); output = _mm256_mul_ps(valueOg, stateAtv); } #endif @@ -95,16 +75,6 @@ class lstm { namespace backward { -template -DEVICE inline T sigmoid(const T a, const T b) { - return a * b * (1.0 - b); -} - -template -DEVICE inline T tanh(const T a, const T b) { - return a * (1.0 - b * b); -} - template class lstm { public: @@ -113,29 +83,20 @@ class lstm { T &prevState, T &prevStateGrad, T &state, T &stateGrad, T &stateAtv, T &outputGrad, T &checkI, T &checkF, T &checkO, T &checkIGrad, - T &checkFGrad, T &checkOGrad) { -#if 0 - // TODO(qingqing) support to activation speficed by users - gradOg = actGate(outputGrad * stateAtv, valueOg); - stateGrad += actState(outputGrad * valueOg, stateAtv) + gradOg * checkO; - gradIn = actInput(stateGrad * valueIg, valueIn); - gradIg = actGate(stateGrad * valueIn, valueIg); - gradFg = actGate(stateGrad * prevState, valueFg); + T &checkFGrad, T &checkOGrad, + activation_mode_t active_node, + activation_mode_t active_gate, + activation_mode_t active_state) { + gradOg = activation(outputGrad * stateAtv, valueOg, active_gate); + stateGrad += activation(outputGrad * valueOg, stateAtv, active_state) + + gradOg * checkO; + gradIn = activation(stateGrad * valueIg, valueIn, active_node); + gradIg = activation(stateGrad * valueIn, valueIg, active_gate); + gradFg = activation(stateGrad * prevState, valueFg, active_gate); prevStateGrad = gradIg * checkI + gradFg * checkF + stateGrad * valueFg; checkIGrad = gradIg * prevState; checkFGrad = gradFg * prevState; checkOGrad = gradOg * state; -#else - gradOg = sigmoid(outputGrad * stateAtv, valueOg); - stateGrad += tanh(outputGrad * valueOg, stateAtv) + gradOg * checkO; - gradIn = tanh(stateGrad * valueIg, valueIn); - gradIg = sigmoid(stateGrad * valueIn, valueIg); - gradFg = sigmoid(stateGrad * prevState, valueFg); - prevStateGrad = gradIg * checkI + gradFg * checkF + stateGrad * valueFg; - checkIGrad = gradIg * prevState; - checkFGrad = gradFg * prevState; - checkOGrad = gradOg * state; -#endif } #ifndef __NVCC__ #ifndef __AVX__ // If not compiled with AVX instructs. 
Disable AVX by default @@ -143,24 +104,26 @@ class lstm { #else // Only float support AVX optimization static const bool avx = std::is_same::value; - HOSTDEVICE void operator()(__m256 &valueIn, __m256 &valueIg, __m256 &valueFg, - __m256 &valueOg, __m256 &gradIn, __m256 &gradIg, - __m256 &gradFg, __m256 &gradOg, __m256 &prevState, - __m256 &prevStateGrad, __m256 &state, - __m256 &stateGrad, __m256 &stateAtv, - __m256 &outputGrad, __m256 &checkI, __m256 &checkF, - __m256 &checkO, __m256 &checkIGrad, - __m256 &checkFGrad, __m256 &checkOGrad, - hppl::Active<__m256>::backward actInput, - hppl::Active<__m256>::backward actGate, - hppl::Active<__m256>::backward actState) { - gradOg = actGate(_mm256_mul_ps(outputGrad, stateAtv), valueOg); + HOSTDEVICE void operator()( + __m256 &valueIn, __m256 &valueIg, __m256 &valueFg, __m256 &valueOg, + __m256 &gradIn, __m256 &gradIg, __m256 &gradFg, __m256 &gradOg, + __m256 &prevState, __m256 &prevStateGrad, __m256 &state, + __m256 &stateGrad, __m256 &stateAtv, __m256 &outputGrad, __m256 &checkI, + __m256 &checkF, __m256 &checkO, __m256 &checkIGrad, __m256 &checkFGrad, + __m256 &checkOGrad, activation_mode_t active_node, + activation_mode_t active_gate, activation_mode_t active_state) { + gradOg = + activation(_mm256_mul_ps(outputGrad, stateAtv), valueOg, active_gate); stateGrad = _mm256_add_ps( - actState(_mm256_mul_ps(outputGrad, valueOg), stateAtv), stateGrad); + activation(_mm256_mul_ps(outputGrad, valueOg), stateAtv, active_state), + stateGrad); stateGrad = _mm256_add_ps(_mm256_mul_ps(gradOg, checkO), stateGrad); - gradIn = actInput(_mm256_mul_ps(stateGrad, valueIg), valueIn); - gradIg = actGate(_mm256_mul_ps(stateGrad, valueIn), valueIg); - gradFg = actGate(_mm256_mul_ps(stateGrad, prevState), valueFg); + gradIn = + activation(_mm256_mul_ps(stateGrad, valueIg), valueIn, active_node); + gradIg = + activation(_mm256_mul_ps(stateGrad, valueIn), valueIg, active_gate); + gradFg = + activation(_mm256_mul_ps(stateGrad, prevState), valueFg, active_gate); prevStateGrad = _mm256_add_ps(_mm256_mul_ps(gradIg, checkI), _mm256_mul_ps(gradFg, checkF)); prevStateGrad = diff --git a/paddle/operators/math/gru_compute.cc b/paddle/operators/math/gru_compute.cc new file mode 100644 index 0000000000000000000000000000000000000000..125af449d3f700e24be5e4b7615c3b0e03fd4e5b --- /dev/null +++ b/paddle/operators/math/gru_compute.cc @@ -0,0 +1,102 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/math/gru_compute.h" +#include "paddle/operators/math/detail/gru_cpu_kernel.h" +#include "paddle/operators/math/detail/gru_kernel.h" +#include "paddle/operators/math/math_function.h" + +namespace paddle { +namespace operators { +namespace math { + +template +struct GRUUnitFunctor { + static void compute(const platform::DeviceContext &context, + hl_gru_value value, int frameSize, int batchSize, + activation_mode_t active_node, + activation_mode_t active_gate) { +#ifndef __NVCC__ + if (value.prevOutValue) { + math::gemm( + context, false, false, batchSize, frameSize * 2, frameSize, 1, + value.prevOutValue, frameSize, value.gateWeight, frameSize * 2, 1, + value.gateValue, frameSize * 3); + } + + detail::forward_reset_output(detail::forward::gru_resetOutput(), value, + frameSize, batchSize, active_gate); + + if (value.prevOutValue) { + math::gemm( + context, false, false, batchSize, frameSize, frameSize, 1, + value.resetOutputValue, frameSize, value.stateWeight, frameSize, 1, + value.gateValue + frameSize * 2, frameSize * 3); + } + + detail::forward_final_output(detail::forward::gru_finalOutput(), value, + frameSize, batchSize, active_node); +#endif + } +}; + +template +struct GRUUnitGradFunctor { + static void compute(const platform::DeviceContext &context, + hl_gru_value value, hl_gru_grad grad, int frameSize, + int batchSize, activation_mode_t active_node, + activation_mode_t active_gate) { +#ifndef __NVCC__ + detail::backward_state_grad(detail::backward::gru_stateGrad(), value, + grad, frameSize, batchSize, active_node); + + if (value.prevOutValue && grad.prevOutGrad) { + math::gemm( + context, false, true, batchSize, frameSize, frameSize, 1, + grad.gateGrad + frameSize * 2, frameSize * 3, value.stateWeight, + frameSize, 0, grad.resetOutputGrad, frameSize); + + if (grad.stateWeightGrad) { + math::gemm( + context, true, false, frameSize, frameSize, batchSize, 1, + value.resetOutputValue, frameSize, grad.gateGrad + frameSize * 2, + frameSize * 3, 1, grad.stateWeightGrad, frameSize); + } + } + + detail::backward_reset_grad(detail::backward::gru_resetGrad(), value, + grad, frameSize, batchSize, active_gate); + + if (grad.prevOutGrad && value.prevOutValue) { + math::gemm( + context, false, true, batchSize, frameSize, frameSize * 2, 1, + grad.gateGrad, frameSize * 3, value.gateWeight, frameSize * 2, 1, + grad.prevOutGrad, frameSize); + + if (grad.gateWeightGrad) { + math::gemm( + context, true, false, frameSize, frameSize * 2, batchSize, 1, + value.prevOutValue, frameSize, grad.gateGrad, frameSize * 3, 1, + grad.gateWeightGrad, frameSize * 2); + } + } +#endif + } +}; + +template struct GRUUnitFunctor; +template struct GRUUnitFunctor; +template struct GRUUnitGradFunctor; +template struct GRUUnitGradFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/gru_compute.cu b/paddle/operators/math/gru_compute.cu new file mode 100644 index 0000000000000000000000000000000000000000..7b9e54ac029f6aa00553338435684097d6d02b25 --- /dev/null +++ b/paddle/operators/math/gru_compute.cu @@ -0,0 +1,178 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/detail/gru_gpu_kernel.h" +#include "paddle/operators/math/detail/gru_kernel.h" +#include "paddle/operators/math/gru_compute.h" +#include "paddle/operators/math/math_function.h" + +namespace paddle { +namespace operators { +namespace math { + +template +struct GRUUnitFunctor { + static void compute(const platform::DeviceContext &context, + hl_gru_value value, int frameSize, int batchSize, + activation_mode_t active_node, + activation_mode_t active_gate) { + auto stream = + reinterpret_cast(context).stream(); + dim3 threads; + dim3 grid; + if (batchSize == 1) { + int framePerBlock = frameSize <= 1024 ? frameSize : 1024; + int frameBlocks = (frameSize + 1024 - 1) / 1024; + threads = dim3(framePerBlock, 1); + grid = dim3(frameBlocks, 1); + } else { + threads = dim3(32, 32); + grid = dim3((frameSize + 32 - 1) / 32, (batchSize + 32 - 1) / 32); + } + + if (value.prevOutValue) { + math::gemm( + context, false, false, batchSize, frameSize * 2, frameSize, 1, + value.prevOutValue, frameSize, value.gateWeight, frameSize * 2, 1, + value.gateValue, frameSize * 3); + } + + if (batchSize == 1) { + detail::KeGruForwardResetOutput, + /* isBatch= */ false, + T><<>>( + detail::forward::gru_resetOutput(), value.gateValue, + value.resetOutputValue, value.prevOutValue, frameSize, batchSize, + active_gate); + } else { + detail::KeGruForwardResetOutput, + /* isBatch= */ true, + T><<>>( + detail::forward::gru_resetOutput(), value.gateValue, + value.resetOutputValue, value.prevOutValue, frameSize, batchSize, + active_gate); + } + + if (value.prevOutValue) { + math::gemm( + context, false, false, batchSize, frameSize, frameSize, 1, + value.resetOutputValue, frameSize, value.stateWeight, frameSize, 1, + value.gateValue + frameSize * 2, frameSize * 3); + } + + if (batchSize == 1) { + detail::KeGruForwardFinalOutput, + /* isBatch= */ false, + T><<>>( + detail::forward::gru_finalOutput(), value.gateValue, + value.prevOutValue, value.outputValue, frameSize, batchSize, + active_node); + } else { + detail::KeGruForwardFinalOutput, + /* isBatch= */ true, + T><<>>( + detail::forward::gru_finalOutput(), value.gateValue, + value.prevOutValue, value.outputValue, frameSize, batchSize, + active_node); + } + } +}; + +template +struct GRUUnitGradFunctor { + static void compute(const platform::DeviceContext &context, + hl_gru_value value, hl_gru_grad grad, int frameSize, + int batchSize, activation_mode_t active_node, + activation_mode_t active_gate) { + auto stream = + reinterpret_cast(context).stream(); + dim3 threads; + dim3 grid; + if (batchSize == 1) { + int framePerBlock = frameSize <= 1024 ? 
frameSize : 1024; + int frameBlocks = (frameSize + 1024 - 1) / 1024; + threads = dim3(framePerBlock, 1); + grid = dim3(frameBlocks, 1); + } else { + threads = dim3(32, 32); + grid = dim3((frameSize + 32 - 1) / 32, (batchSize + 32 - 1) / 32); + } + + if (batchSize == 1) { + detail::KeGruBackwardStateGrad< + detail::backward::gru_stateGrad, + /* isBatch= */ false><<>>( + detail::backward::gru_stateGrad(), value.gateValue, grad.gateGrad, + value.prevOutValue, grad.prevOutGrad, grad.outputGrad, frameSize, + batchSize, active_node); + } else { + detail::KeGruBackwardStateGrad< + detail::backward::gru_stateGrad, + /* isBatch= */ true><<>>( + detail::backward::gru_stateGrad(), value.gateValue, grad.gateGrad, + value.prevOutValue, grad.prevOutGrad, grad.outputGrad, frameSize, + batchSize, active_node); + } + + if (value.prevOutValue && grad.prevOutGrad) { + math::gemm( + context, false, true, batchSize, frameSize, frameSize, 1, + grad.gateGrad + frameSize * 2, frameSize * 3, value.stateWeight, + frameSize, 0, grad.resetOutputGrad, frameSize); + + if (grad.stateWeightGrad) { + math::gemm( + context, true, false, frameSize, frameSize, batchSize, 1, + value.resetOutputValue, frameSize, grad.gateGrad + frameSize * 2, + frameSize * 3, 1, grad.stateWeightGrad, frameSize); + } + } + + if (batchSize == 1) { + detail::KeGruBackwardResetGrad< + detail::backward::gru_resetGrad, + /* isBatch= */ false><<>>( + detail::backward::gru_resetGrad(), value.gateValue, grad.gateGrad, + value.prevOutValue, grad.prevOutGrad, grad.resetOutputGrad, frameSize, + batchSize, active_gate); + } else { + detail::KeGruBackwardResetGrad< + detail::backward::gru_resetGrad, + /* isBatch= */ true><<>>( + detail::backward::gru_resetGrad(), value.gateValue, grad.gateGrad, + value.prevOutValue, grad.prevOutGrad, grad.resetOutputGrad, frameSize, + batchSize, active_gate); + } + + if (grad.prevOutGrad && value.prevOutValue) { + math::gemm( + context, false, true, batchSize, frameSize, frameSize * 2, 1, + grad.gateGrad, frameSize * 3, value.gateWeight, frameSize * 2, 1, + grad.prevOutGrad, frameSize); + + if (grad.gateWeightGrad) { + math::gemm( + context, true, false, frameSize, frameSize * 2, batchSize, 1, + value.prevOutValue, frameSize, grad.gateGrad, frameSize * 3, 1, + grad.gateWeightGrad, frameSize * 2); + } + } + } +}; + +template struct GRUUnitFunctor; +template struct GRUUnitFunctor; +template struct GRUUnitGradFunctor; +template struct GRUUnitGradFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/gru_compute.h b/paddle/operators/math/gru_compute.h new file mode 100644 index 0000000000000000000000000000000000000000..1475fb38104f353857dfd968e46af98a6d52c52a --- /dev/null +++ b/paddle/operators/math/gru_compute.h @@ -0,0 +1,61 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
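The GPU functors in gru_compute.cu above pick the same launch shape for forward and backward: a 1-D block when batchSize is 1, otherwise a 32 x 32 tile over (frame, batch). A condensed sketch of that logic, assuming x indexes frames and y indexes batch rows:

```cpp
#include <cuda_runtime.h>

// Sketch of the launch-shape choice used above (not a drop-in replacement).
inline dim3 gru_threads(int frameSize, int batchSize) {
  if (batchSize == 1) return dim3(frameSize <= 1024 ? frameSize : 1024, 1);
  return dim3(32, 32);  // 1024 threads per block, tiling frame x batch
}

inline dim3 gru_grid(int frameSize, int batchSize) {
  if (batchSize == 1) return dim3((frameSize + 1024 - 1) / 1024, 1);
  return dim3((frameSize + 32 - 1) / 32, (batchSize + 32 - 1) / 32);
}
```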
*/ + +#pragma once + +#include "paddle/operators/math/lstm_compute.h" +#include "paddle/platform/device_context.h" +#include "paddle/platform/enforce.h" + +namespace paddle { +namespace operators { +namespace math { + +// TODO(guosheng): refine code style in gru_compute +template +struct hl_gru_value { + T *gateWeight; + T *stateWeight; + T *gateValue; + T *resetOutputValue; + T *outputValue; + T *prevOutValue; +}; + +template +struct hl_gru_grad { + T *gateWeightGrad; + T *stateWeightGrad; + T *gateGrad; + T *resetOutputGrad; + T *outputGrad; + T *prevOutGrad; +}; + +template +struct GRUUnitFunctor { + static void compute(const platform::DeviceContext &context, + hl_gru_value value, int frameSize, int batchSize, + activation_mode_t active_node, + activation_mode_t active_gate); +}; + +template +struct GRUUnitGradFunctor { + static void compute(const platform::DeviceContext &context, + hl_gru_value value, hl_gru_grad grad, int frameSize, + int batchSize, activation_mode_t active_node, + activation_mode_t active_gate); +}; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/sequence_pooling.cc b/paddle/operators/math/sequence_pooling.cc new file mode 100644 index 0000000000000000000000000000000000000000..5913c99fdb01100d0de44ab317124550fa626528 --- /dev/null +++ b/paddle/operators/math/sequence_pooling.cc @@ -0,0 +1,103 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
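A hypothetical caller of the interface declared in gru_compute.h above could wire the buffers up as follows. The helper, its parameter names, and the layout comments are ours rather than part of the patch, and the activation enumerators are passed in rather than hard-coded.

```cpp
#include "paddle/operators/math/gru_compute.h"

// Hypothetical helper (ours): run one GRU step on row-major CPU buffers.
template <typename T>
void run_gru_unit(const paddle::platform::DeviceContext& ctx, T* gate,
                  T* reset_out, T* out, T* prev_out, T* gate_weight,
                  T* state_weight, int frameSize, int batchSize,
                  paddle::operators::math::activation_mode_t act_node,
                  paddle::operators::math::activation_mode_t act_gate) {
  namespace m = paddle::operators::math;
  m::hl_gru_value<T> value;
  value.gateValue = gate;              // batchSize x (3 * frameSize), pre-filled with x_t * W_x
  value.resetOutputValue = reset_out;  // batchSize x frameSize
  value.outputValue = out;             // batchSize x frameSize
  value.prevOutValue = prev_out;       // may be nullptr on the first step
  value.gateWeight = gate_weight;      // frameSize x (2 * frameSize)
  value.stateWeight = state_weight;    // frameSize x frameSize
  m::GRUUnitFunctor<paddle::platform::CPUPlace, T>::compute(
      ctx, value, frameSize, batchSize, act_node, act_gate);
}
```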
*/ + +#include "paddle/operators/math/sequence_pooling.h" +#include "paddle/operators/math/math_function.h" + +namespace paddle { +namespace operators { +namespace math { + +template +class MaxSeqPoolFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::LoDTensor& input, framework::Tensor* output, + framework::Tensor* index) { + auto in_dims = input.dims(); + auto out_dims = output->dims(); + auto idx_dims = index->dims(); + PADDLE_ENFORCE_GT(in_dims.size(), 1); + PADDLE_ENFORCE_GT(out_dims.size(), 1); + for (int64_t i = 1; i < in_dims.size(); ++i) { + PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i]); + } + PADDLE_ENFORCE_EQ(idx_dims, out_dims); + + auto starts = input.lod()[0]; + const T* in_data = input.data(); + T* out_data = output->data(); + int* max_index = index->data(); + + int64_t num_seq = out_dims[0]; + int64_t dim = output->numel() / num_seq; + for (int64_t i = 0; i < num_seq; ++i) { + for (int64_t k = 0; k < dim; ++k) { + out_data[i * dim + k] = in_data[starts[i] * dim + k]; + max_index[i * dim + k] = starts[i]; + } + for (size_t j = starts[i] + 1; j < starts[i + 1]; ++j) { + for (int64_t k = 0; k < dim; ++k) { + if (in_data[j * dim + k] > out_data[i * dim + k]) { + out_data[i * dim + k] = in_data[j * dim + k]; + max_index[i * dim + k] = j; + } + } + } + } + } +}; + +template +class MaxSeqPoolGradFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& out_grad, + const framework::Tensor& index, + framework::LoDTensor* in_grad) { + auto og_dims = out_grad.dims(); + auto ig_dims = in_grad->dims(); + auto idx_dims = index.dims(); + PADDLE_ENFORCE_GT(og_dims.size(), 1); + PADDLE_ENFORCE_GT(ig_dims.size(), 1); + for (int64_t i = 1; i < og_dims.size(); ++i) { + PADDLE_ENFORCE_EQ(og_dims[i], ig_dims[i]); + } + PADDLE_ENFORCE_EQ(idx_dims, og_dims); + + const T* og_data = out_grad.data(); + const int* max_index = index.data(); + T* ig_data = in_grad->data(); + + SetConstant set_zero; + set_zero(context, in_grad, static_cast(0.0)); + int64_t num_seq = og_dims[0]; + int64_t dim = out_grad.numel() / num_seq; + for (int64_t i = 0; i < num_seq; ++i) { + for (int64_t j = 0; j < dim; ++j) { + int step_id = max_index[i * dim + j]; + ig_data[step_id * dim + j] = og_data[i * dim + j]; + } + } + } +}; + +template class MaxSeqPoolFunctor; +template class MaxSeqPoolFunctor; +template class MaxSeqPoolGradFunctor; +template class MaxSeqPoolGradFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/sequence_pooling.cu b/paddle/operators/math/sequence_pooling.cu new file mode 100644 index 0000000000000000000000000000000000000000..5ed951402fecba66a8960f4d024bf3785dac51c7 --- /dev/null +++ b/paddle/operators/math/sequence_pooling.cu @@ -0,0 +1,136 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
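To make the CPU MaxSeqPoolFunctor above concrete, here is a tiny self-contained example with two sequences (lod starts {0, 2, 5}) and feature dimension 2; the data is made up, but the loop mirrors the functor's max-with-argmax scan.

```cpp
#include <cstdio>

int main() {
  // Two sequences, lod starts = {0, 2, 5}, feature dim = 2 (example data ours).
  const float in[5][2] = {{1, 9}, {4, 2}, {3, 5}, {7, 1}, {6, 8}};
  const int starts[3] = {0, 2, 5};
  float out[2][2];
  int index[2][2];
  for (int i = 0; i < 2; ++i) {    // for each sequence
    for (int k = 0; k < 2; ++k) {  // for each column
      out[i][k] = in[starts[i]][k];
      index[i][k] = starts[i];
      for (int j = starts[i] + 1; j < starts[i + 1]; ++j) {
        if (in[j][k] > out[i][k]) {
          out[i][k] = in[j][k];
          index[i][k] = j;  // row id of the winning timestep
        }
      }
    }
  }
  // Prints: out = {4, 9} / {7, 8}, index = {1, 0} / {3, 4}
  for (int i = 0; i < 2; ++i)
    std::printf("out: %g %g  index: %d %d\n", out[i][0], out[i][1],
                index[i][0], index[i][1]);
  return 0;
}
```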
*/ + +#include "paddle/operators/math/math_function.h" +#include "paddle/operators/math/sequence_pooling.h" + +namespace paddle { +namespace operators { +namespace math { + +#define FLT_MAX __FLT_MAX__ + +template +__global__ void KeMaxSequencePool(const T* input, const size_t* starts, + T* output, int* index, int64_t num_seq, + int64_t dim) { + int dim_idx = threadIdx.x; + int seq_id = blockIdx.x; + if (seq_id >= num_seq) return; + size_t start = starts[seq_id]; + size_t end = starts[seq_id + 1]; + + for (int64_t i = dim_idx; i < dim; i += blockDim.x) { + T max_val = static_cast(-FLT_MAX); + int max_id = -1; + for (size_t step_id = start; step_id < end; step_id++) { + if (max_val < input[step_id * dim + i]) { + max_val = input[step_id * dim + i]; + max_id = step_id; + } + } + output[seq_id * dim + i] = max_val; + index[seq_id * dim + i] = max_id; + } +} + +template +class MaxSeqPoolFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::LoDTensor& input, framework::Tensor* output, + framework::Tensor* index) { + auto in_dims = input.dims(); + auto out_dims = output->dims(); + auto idx_dims = index->dims(); + PADDLE_ENFORCE_GT(in_dims.size(), static_cast(1)); + PADDLE_ENFORCE_GT(out_dims.size(), 1); + for (int64_t i = 1; i < in_dims.size(); ++i) { + PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i]); + } + PADDLE_ENFORCE_EQ(idx_dims, out_dims); + + auto starts = input.lod()[0]; + const T* in_data = input.data(); + T* out_data = output->data(); + int* max_index = index->data(); + + int64_t num_seq = out_dims[0]; + int64_t dim = output->numel() / num_seq; + + dim3 threads(256, 1); + dim3 grid(num_seq, 1); + auto stream = + reinterpret_cast(context).stream(); + KeMaxSequencePool<<>>( + in_data, starts.data(), out_data, max_index, num_seq, dim); + } +}; + +template +__global__ void KeMaxSequencePoolGrad(const T* out_grad, const int* max_index, + T* in_grad, int64_t num_seq, + int64_t dim) { + int idx = threadIdx.x + blockIdx.x * blockDim.x; + int col_idx = idx % dim; + if (idx < num_seq * dim) { + int step_id = max_index[idx]; + in_grad[step_id * dim + col_idx] = out_grad[idx]; + } +} + +template +class MaxSeqPoolGradFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& out_grad, + const framework::Tensor& index, + framework::LoDTensor* in_grad) { + auto og_dims = out_grad.dims(); + auto idx_dims = index.dims(); + auto ig_dims = in_grad->dims(); + PADDLE_ENFORCE_GT(og_dims.size(), static_cast(1)); + PADDLE_ENFORCE_GT(ig_dims.size(), static_cast(1)); + for (int64_t i = 1; i < og_dims.size(); ++i) { + PADDLE_ENFORCE_EQ(og_dims[i], ig_dims[i]); + } + PADDLE_ENFORCE_EQ(idx_dims, og_dims); + + const T* og_data = out_grad.data(); + const int* max_index = index.data(); + T* ig_data = in_grad->data(); + + SetConstant set_zero; + set_zero(context, in_grad, static_cast(0.0)); + int64_t num_seq = og_dims[0]; + int64_t dim = out_grad.numel() / num_seq; + + unsigned int blocks = (num_seq * dim + 128 - 1) / 128; + dim3 threads(128, 1); + dim3 grid(blocks, 1); + auto stream = + reinterpret_cast(context).stream(); + KeMaxSequencePoolGrad<<>>( + og_data, max_index, ig_data, num_seq, dim); + } +}; + +template class MaxSeqPoolFunctor; +template class MaxSeqPoolFunctor; +template class MaxSeqPoolGradFunctor; +template class MaxSeqPoolGradFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/sequence_pooling.h b/paddle/operators/math/sequence_pooling.h new file mode 
100644 index 0000000000000000000000000000000000000000..35dfe26de1a87a064410401244914d4e2a94176e --- /dev/null +++ b/paddle/operators/math/sequence_pooling.h @@ -0,0 +1,45 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/lod_tensor.h" +#include "paddle/framework/tensor.h" +#include "paddle/platform/device_context.h" + +namespace paddle { +namespace operators { +namespace math { + +#define FLT_MAX __FLT_MAX__ + +template +class MaxSeqPoolFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::LoDTensor& input, framework::Tensor* output, + framework::Tensor* index); +}; + +template +class MaxSeqPoolGradFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& out_grad, + const framework::Tensor& index, + framework::LoDTensor* in_grad); +}; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/matmul_op.cc b/paddle/operators/matmul_op.cc index 5ecbee3b413617e3a5523d9a32e72bc08bd316c5..5a1a6154203d40186f1e41491194b19612931b1f 100644 --- a/paddle/operators/matmul_op.cc +++ b/paddle/operators/matmul_op.cc @@ -144,7 +144,10 @@ class MatMulOpMaker : public framework::OpProtoAndCheckerMaker { )DOC") .SetDefault(false); AddComment(R"DOC( -The MatMul operator is used to perform (batched) matrix multiplication +MatMul Operator. + + +This operator is used to perform (batched) matrix multiplication over the last two dimensions of the input tensors `X` and `Y`. If a transpose flag is specified, the last two dimensions of the @@ -166,7 +169,8 @@ The differences are: - We add `transpose_X` and `transpose_Y` flags. Both the input `X` and `Y` can carry the LoD (Level of Details) information, -or not. But the output only shares the LoD with input `X`. +or not. But the output only shares the LoD information with input `X`. + )DOC"); } }; diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc index 7caa1c9d0cf4dba33a206c85bcbed1fb1cb4e010..78b4bbca84d4670aba73222f1d679604d7516b02 100644 --- a/paddle/operators/mean_op.cc +++ b/paddle/operators/mean_op.cc @@ -36,7 +36,11 @@ class MeanOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of mean op"); AddOutput("Out", "The output of mean op"); - AddComment(R"DOC( Mean Operator + AddComment(R"DOC( +Mean Operator. + +Out is a scalar which is the mean of all elements in X. 
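The MatMul comment above boils down to the following shape rule (our notation, with b ranging over the optional batch dimension): after applying the transpose flags, op_X(X[b]) is M x K, op_Y(Y[b]) is K x N, and Out[b] is M x N.

```latex
Out[b] = \mathrm{op}_X(X[b]) \cdot \mathrm{op}_Y(Y[b]), \qquad
\mathrm{op}(A) =
\begin{cases}
A^{\top}, & \text{if the corresponding transpose flag is set} \\
A, & \text{otherwise}
\end{cases}
```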
+ )DOC"); } }; diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index f7943e99acc5975d077f2319b6f678cfc693c1f3..4684c20208501a3239fd57b35428946bb52af4a0 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -52,14 +52,16 @@ class MinusOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("Y", "The right tensor of minus operator."); AddOutput("Out", "The output tensor of minus operator."); - AddComment(R"DOC(Minus Operator + AddComment(R"DOC( +Minus Operator. Equation: - Out = X - Y + $Out = X - Y$ Both the input `X` and `Y` can carry the LoD (Level of Details) information, -or not. But the output only shares the LoD with input `X`. +or not. But the output only shares the LoD information with input `X`. + )DOC"); } }; diff --git a/paddle/operators/modified_huber_loss_op.cc b/paddle/operators/modified_huber_loss_op.cc index 7b9e9528952d552a69ffe6a628672901c5c1a7fd..28528848af1f467bf38be53f9d05fee6ca3f93cc 100644 --- a/paddle/operators/modified_huber_loss_op.cc +++ b/paddle/operators/modified_huber_loss_op.cc @@ -43,27 +43,35 @@ class ModifiedHuberLossOpMaker : public framework::OpProtoAndCheckerMaker { framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", - "The input tensor of modified huber loss op." + "The input tensor of modified huber loss op. " "X is 2-D tensor with shape [batch_size, 1]."); AddInput("Y", - "The target labels of modified huber loss op." - "The shape of Y is same as X. Values of Y must be 0 or 1."); + "The target labels of modified huber loss op. " + "The shape of Y is the same as X. Values of Y must be 0 or 1."); AddOutput("IntermediateVal", "Variable to save intermediate result which will be reused in " "backward processing.") .AsIntermediate(); AddOutput("Out", "Classification loss for X."); AddComment(R"DOC( -Modified huber loss is used in binary classification problem. The shape of -input X and target Y are both [N, 1] and so is the shape of output loss. -Since target Y is not differentiable, cacluating gradient for Y is illegal. -The formulation of modified huber loss is: - -L(y, f(x)) = max(0, 1 - yf(x))^2 for yf(x) >= -1, - -4yf(x) otherwise. - -Make sure the values of target label Y are in {0, 1} here. The operator will +Modified Huber Loss Operator. + +This operator is used in binary classification problem. The shape of +input X and target Y are both [N, 1] and so is the shape of the output loss. +Since target Y is not differentiable, calculating gradient for Y is illegal. +The formula of modified huber loss is: + +$$ +L(y, f(x)) = +\begin{cases} +(\max(0, 1 - yf(x)))^2, \text{if} \ yf(x) >= -1 \\ + -4yf(x), \quad \text{otherwise} +\end{cases} +$$ + +Make sure the values of target label Y are in {0, 1} here. This operator will scale values of Y to {-1, +1} when computing losses and gradients. 
+ )DOC"); } }; diff --git a/paddle/operators/momentum_op.cc b/paddle/operators/momentum_op.cc index 2d4d6f13720f0e6888edbddcb3243116506227ba..e8ce16f4cfcf83fd13e4d3a5318a4ae0c8c8449c 100644 --- a/paddle/operators/momentum_op.cc +++ b/paddle/operators/momentum_op.cc @@ -75,17 +75,23 @@ class MomentumOpMaker : public framework::OpProtoAndCheckerMaker { AddOutput("VelocityOut", "(Tensor) Output updated velocity"); AddAttr("mu", "(float) Momentum coefficient"); - AddAttr("useNesterov", "(bool) Use Nesterov Momentum") + AddAttr("useNesterov", + "(bool, default false) " + "Use Nesterov Momentum") .SetDefault(false); AddComment(R"DOC( - -Momentum Algorithm with a flag for Nestrov Moemntum (momentum). - -velocity = mu * velocity + gradient -if (use_nesterov): - param = param - gradient * learning_rate + mu * velocity * learning_rate -else: - param = param - learning_rate * velocity +Momentum Optimizer. + +This optimizer has a flag for Nestrov Momentum. +The update equations are as follows: + +$$ +velocity = mu * velocity + gradient \\ +if (use\_nesterov): \\ + param = param - gradient * learning\_rate + mu * velocity * learning\_rate \\ +else: \\ + param = param - learning\_rate * velocity. \\ +$$ )DOC"); } diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc index 90acf034d905e6ab3ba7bf8c3d29e1ef1161ed0c..3c39ae10dc50084cff284c307167c33c9208a3ce 100644 --- a/paddle/operators/mul_op.cc +++ b/paddle/operators/mul_op.cc @@ -78,6 +78,7 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker { AddOutput("Out", "The output of mul op"); AddAttr( "x_num_col_dims", + "(int, default 1) " R"DOC(mul_op can take tensors with more than two dimensions as input `X`, in that case, tensors will be reshaped to a matrix. The matrix's first dimension(column length) will be the product of tensor's last @@ -88,20 +89,24 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker { .EqualGreaterThan(1); AddAttr( "y_num_col_dims", + "(int, default 1) " R"DOC(mul_op can take tensors with more than two dimensions as input `Y`, in that case, tensors will be reshaped to a matrix. Just like input `X`. )DOC") .SetDefault(1) .EqualGreaterThan(1); AddComment(R"DOC( -Mul operator is used to perform matrix multiplication for input X and Y. +Mul Operator. + +This operator is used to perform matrix multiplication for input X and Y. The equation is: - Out = X * Y + $$Out = X * Y$$ Both the input `X` and `Y` can carry the LoD (Level of Details) information, -or not. But the output only shares the LoD with input `X`. +or not. But the output only shares the LoD information with input `X`. + )DOC"); } }; diff --git a/paddle/operators/multiplex_op.cc b/paddle/operators/multiplex_op.cc index 4d86769026e4b3e3040bdcb3bc6dc2edea58b4b0..234fddcfd55ccc66f6378689dbc426499474b11f 100644 --- a/paddle/operators/multiplex_op.cc +++ b/paddle/operators/multiplex_op.cc @@ -66,7 +66,8 @@ class MultiplexOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("X", "The candidate tensors of multiplex operator.") .AsDuplicable(); AddOutput("Out", "The output tensor of multiplex operator."); - AddComment(R"DOC(Multiplex operator + AddComment(R"DOC( +Multiplex Operator. Multiplex multiple tensors according to the index provided by the index tensor. @@ -77,10 +78,11 @@ the (Ids[i])-th tensor. For i-th row of the output tensor: -y[i] = x_{k}[i] +$$y[i] = x_{k}[i]$$ -where y is the output tensor. `x_{k}` is the k-th input tensor +where `y` is the output tensor, `x_{k}` is the k-th input tensor, and `k = Ids[i]`. 
+ )DOC"); } }; diff --git a/paddle/operators/multiplex_op.cu b/paddle/operators/multiplex_op.cu index 143a14fef5783f8ed085d4c4ce2afb3b190d0600..7adc7df164e6c7f39ed9b572dfb09380ef06d749 100644 --- a/paddle/operators/multiplex_op.cu +++ b/paddle/operators/multiplex_op.cu @@ -35,9 +35,7 @@ class MultiplexGPUKernel : public framework::OpKernel { Tensor index_t_cpu; index_t_cpu.CopyFrom(*ids, platform::CPUPlace(), ctx.device_context()); auto* index = index_t_cpu.data(); - auto stream = reinterpret_cast( - ctx.device_context()) - .stream(); + auto stream = ctx.cuda_device_context().stream(); Place place = boost::get(ctx.GetPlace()); for (auto i = 0; i < rows; i++) { int32_t k = index[i]; @@ -73,9 +71,7 @@ class MultiplexGradGPUKernel : public framework::OpKernel { index_t_cpu.CopyFrom(*ids, platform::CPUPlace(), ctx.device_context()); auto* index = index_t_cpu.data(); - auto stream = reinterpret_cast( - ctx.device_context()) - .stream(); + auto stream = ctx.device_context().stream(); Place place = boost::get(ctx.GetPlace()); for (auto i = 0; i < rows; i++) { size_t k = static_cast(index[i]); diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md index 5a216907950100070ba57176c382eb659effb293..b5cb176e003b4584321142ac9f1c3380b7010936 100644 --- a/paddle/operators/name_convention.md +++ b/paddle/operators/name_convention.md @@ -4,10 +4,10 @@ To make the operator document itself more clear, we recommend operator names obe ### OpProtoMaker names -When defining an operator in Paddle, a corresponding [OpProtoMaker](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L170) (TODO: OpProtoMaker Doc)need to be defined. All the Input/Output and Attributes will write into the [OpProto](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L61) , and will be used in client language to create operator. +When defining an operator in Paddle, a corresponding [OpProtoMaker](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L170) (TODO: OpProtoMaker Doc)need to be defined. All the Input/Output and Attributes will write into the [OpProto](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L61) , and will be used in client language to create operator. - Input/Output. - - Input/Output names follow the **CamelCase**. e.g. `X`, `Y`, `Matrix`, `LastAxisInMatrix`. Input/Output much more like Variables, we prefer to meaningful English words. + - Input/Output names follow the **CamelCase**. e.g. `X`, `Y`, `Matrix`, `LastAxisInMatrix`. Input/Output much more like Variables, we prefer to meaningful English words. - If an operator's Input/Output are tensors in math, not match to any meaningful words, input name should starts from `X`. e.g. `X`, `Y`, and output name should starts from `Out`. e.g. `Out`. This rule intends making operators which have few inputs/outputs unified. - Attribute. @@ -15,7 +15,7 @@ When defining an operator in Paddle, a corresponding [OpProtoMaker](https://gith - Comments. - Input/Output/Attr comment follow the format of **(type,default value) usage**, corresponding to which type it can be and how it will be used in the operator. e.g. Attribute in Accumulator`"gamma" `,`(float, default 1.0) Accumulation multiplier`. - - Operator comment format of` R"DOC(your comment here)DOC"`. You should explain the input/output of the operator first. If there is math calculation in this operator, you should write the equation in the comment. e.g. `Out = X + Y`. 
+ - Operator comment format of` R"DOC(your comment here)DOC"`. You should explain the input/output of the operator first. If there is math calculation in this operator, you should write the equation in the comment. e.g. `Out = X + Y`. - Order. - Follow the order of Input/Output, then Attribute, then Comments. See the example in best practice. @@ -24,7 +24,7 @@ When defining an operator in Paddle, a corresponding [OpProtoMaker](https://gith Here we give some examples to show how these rules will be used. -- The operator has one input, one output. e.g.`relu`, inputs: `X`, outputs: `Out`. +- The operator has one input, one output. e.g.`relu`, inputs: `X`, outputs: `Out`. - The operator has two input, one output. e.g. `rowwise_add`, inputs : `X`, `Y`, outputs : `Out`. @@ -38,23 +38,27 @@ public: AccumulateOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "(Tensor) The input tensor that has to be accumulated to the output tensor. - If the output size is not the same as input size, + AddInput("X", "(Tensor) The input tensor that has to be accumulated to the output tensor. + If the output size is not the same as input size, the output tensor is first reshaped and initialized to zero, and only then, accumulation is done."); AddOutput("Out", "(Tensor) Accumulated output tensor"); AddAttr("gamma", "(float, default 1.0) Accumulation multiplier").SetDefault(1.0f); AddComment(R"DOC( -Accumulate operator accumulates the input tensor to the output tensor. If the +Accumulate Operator. + +This operator accumulates the input tensor to the output tensor. If the output tensor already has the right size, we add to it; otherwise, we first initialize the output tensor to all zeros, and then do accumulation. Any further calls to the operator, given that no one else fiddles with the output in the interim, will do simple accumulations. -Accumulation is done as shown: + +Accumulation is done as follows: Out = 1*X + gamma*Out where X is the input tensor, Out is the output tensor and gamma is the multiplier argument. + )DOC"); } }; diff --git a/paddle/operators/nccl_op.cc b/paddle/operators/nccl_op.cc index d39cb2fcf9cc205edf86f8ab1d5e04b5672e00f6..66fcc09bc877867e66a37adc73230d8dabf4cbed 100644 --- a/paddle/operators/nccl_op.cc +++ b/paddle/operators/nccl_op.cc @@ -48,12 +48,17 @@ class NCCLInitOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddOutput("Communicator", "Create Communicator for communicating between gpus"); - AddAttr>("gpus", "gpu id lists"); - AddAttr("data_type", "output data type") + AddAttr>("gpus", "(vector) GPU id lists"); + AddAttr("data_type", + "(int, default 5 (FP32)) " + "Output data type") .SetDefault(framework::DataType::FP32); AddComment(R"DOC( - create communicator. - )DOC"); +NCCLInit Operator. + +Create communicator. + +)DOC"); } }; @@ -143,11 +148,15 @@ class NCCLAllReduceOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("Communicator", "Communicator for communicating between gpus"); AddOutput("Out", "The output of AllReduce op"); AddAttr("reduction", + "(string, default 'ncclSum') " "{'ncclMin', 'ncclMax', 'ncclProd', 'ncclSum'}.") .SetDefault("ncclSum"); AddComment(R"DOC( - AllReduce the input tensors. - )DOC"); +NCCLAllReduce Operator. + +AllReduce the input tensors. 
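As a quick illustration of the ncclSum reduction described above: if gpus = {0, 1, 2} hold inputs x0, x1 and x2 respectively, the all-reduce leaves every GPU with Out = x0 + x1 + x2.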
+ +)DOC"); } }; @@ -161,14 +170,20 @@ class NCCLReduceOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("Communicator", "Communicator for communicating between gpus"); AddOutput("Out", "The output of Reduce op"); AddAttr("reduction", + "(string, default 'ncclSum') " "{'ncclMin', 'ncclMax', 'ncclProd', 'ncclSum'}.") .SetDefault("ncclSum"); AddAttr("root", - "root gpu of the parameter. if not " - "set(platform::kInvalidGPUId). hashed by name.") + "(int, default kInvalidGPUId) " + "Root gpu of the parameter. If not, " + "set(platform::kInvalidGPUId). Hashed by name.") .SetDefault(platform::kInvalidGPUId); AddComment(R"DOC( - Reduce the tensors)DOC"); +NCCLReduce Operator. + +Reduce the tensors. + +)DOC"); } }; @@ -182,12 +197,16 @@ class NCCLBcastOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("Communicator", "Communicator for communicating between gpus"); AddOutput("Out", "The output of Bcast"); AddAttr("root", - "root gpu of the parameter. if not " - "set(platform::kInvalidGPUId). hashed by name.") + "(int, default kInvalidGPUId) " + "Root gpu of the parameter. If not, " + "set(platform::kInvalidGPUId). Hashed by name.") .SetDefault(platform::kInvalidGPUId); AddComment(R"DOC( - Bcast the tensors. - )DOC"); +NCCLBcast Operator. + +Bcast the tensors. + +)DOC"); } }; diff --git a/paddle/operators/nccl_op.cu b/paddle/operators/nccl_op.cu index 86dee8ee8e1c1a1041d6bc9fa515d669a9c4e466..4f0a2a79edb9f24c7758fc91483d374425b36853 100644 --- a/paddle/operators/nccl_op.cu +++ b/paddle/operators/nccl_op.cu @@ -64,9 +64,7 @@ class NCCLAllReduceKernel : public framework::OpKernel { auto* comm = ctx.Input("Communicator"); - auto stream = reinterpret_cast( - ctx.device_context()) - .stream(); + auto stream = ctx.cuda_device_context().stream(); // device id int gpu_id = boost::get(ctx.GetPlace()).GetDeviceId(); diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 73a0b8baff530840ddd0d4c65cd4c060ab18e401..adb75df6ef10c59fc6f3db4d36e1ffb1ae0b4b1e 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -54,41 +54,44 @@ class PadOpMaker : public framework::OpProtoAndCheckerMaker { "The input of pad op. " "The input should be a k-D tensor(k > 0 and k < 7)"); AddOutput("Out", - "The output of pad op." + "The output of pad op. " "A tensor with the same shape as X."); + AddAttr>( + "paddings", + "(vector) " + "A list to describe the padding rules for each dimension. " + "For 2-D image tensor, paddings=[0, 1, 2, 3] means " + "padding 0 row to top, 1 row to bottom, 2 columns to left " + "and 3 columns to right. Size of paddings should be equal to " + "2 * dimension size of the input tensor."); + AddAttr("pad_value", + "(float, default 0.0) " + "The value to fill the padded areas.") + .SetDefault(0.0f); AddComment(R"DOC( -Pad input into output, as specified by paddings and pad_value. The input should be a k-D tensor(k > 0 and k < 7). As an example: +Pad Operator. + +Pad input into output, as specified by paddings and pad_value. +The input should be a k-D tensor(k > 0 and k < 7). As an example: Given: X = [[1, 2], - [3, 4]] - -and + [3, 4]], -paddings = [0, 1, 1, 2] +paddings = [0, 1, 1, 2], and -pad_value = 0 +pad_value = 0, -then we get +we have: Out = [[0, 1, 2, 0, 0] [0, 3, 4, 0, 0] [0, 0, 0, 0, 0]] + )DOC"); - AddAttr>( - "paddings", - "A list to describes padding rules for each dimension." 
- " For 2-D image tensor, paddings=[0, 1, 2, 3] means" - " padding 0 row to top, 1 row to bottom, 2 columns to left" - " and 3 columns to right.Size of paddings should be equal to" - " 2 * dimension size of input tensor."); - AddAttr("pad_value", - "(float) default to 0; " - "The value to fill padded areas.") - .SetDefault(0.0f); } }; diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc index 4d75c11bc8130343e95f75e687529303179caa93..f58aab733866973f477ca79e5e53ba58adbf3dc7 100644 --- a/paddle/operators/pool_op.cc +++ b/paddle/operators/pool_op.cc @@ -73,125 +73,138 @@ Pool2dOpMaker::Pool2dOpMaker(framework::OpProto *proto, AddInput( "X", "(Tensor) The input tensor of pooling operator. " - "The format of input tensor is NCHW. Where N is batch size, C is the " - "number of channels, H and W is the height and width of feature."); + "The format of input tensor is NCHW, where N is batch size, C is the " + "number of channels, H is the height of the feature, " + "and W is the width of the feature."); AddOutput("Out", - "(Tensor) The output tensor of pooling operator." - "The format of output tensor is also NCHW." - "Where N is batch size, C is " - "the number of channels, H and W is the height and " - "width of feature."); + "(Tensor) The output tensor of pooling operator. " + "The format of output tensor is also NCHW, " + "where N is batch size, C is the number of channels, " + "H is the height of the feature, " + "and W is the width of the feature."); AddAttr("poolingType", "(string), pooling type, can be \"max\" for max-pooling " "and \"avg\" for average-pooling.") .InEnum({"max", "avg"}); AddAttr>("ksize", - "(vector ), the pooling window size(height, width) " - "of pooling operator." + "(vector) The pooling window " + "size(height, width) of the pooling operator. " "If globalPooling = true, ksize and paddings will " "be ignored."); // TODO(Chengduo): Add checker. // (Currently, // TypedAttrChecker don't support vector type.) AddAttr("globalPooling", - "(bool default: false), whether to use the global pooling." + "(bool, default false) Whether to use the global pooling. " "If globalPooling = true, ksize and paddings will be ignored.") .SetDefault(false); - AddAttr>( - "strides", - "(vector, default:{1, 1}), strides(height, width) of pooling operator.") + AddAttr>("strides", + "(vector, default {1, 1}), strides(height, " + "width) of pooling operator.") .SetDefault({1, 1}); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) AddAttr>( "paddings", - "(vector defalut:{0,0}), paddings(height, width) of pooling operator." + "(vector, defalut {0,0}), paddings(height, width) of pooling " + "operator." "If globalPooling = true, paddings and ksize will be ignored.") .SetDefault({0, 0}); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) AddComment(R"DOC( +Pool2d Operator. + The pooling2d operation calculates the output based on the input, poolingType and ksize, strides, paddings parameters. -Input(X) and output(Out) are in NCHW format. Where N is batch size, C is the -number of channels, H and W is the height and width of feature. +Input(X) and output(Out) are in NCHW format, where N is batch size, C is the +number of channels, H is the height of the feature, and W is the width of the feature. Parameters(ksize, strides, paddings) are two elements. These two elements represent height and width, respectively. The input(X) size and output(Out) size may be different. 
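A minimal sketch of the output-size rule used here (PooledExtent is an illustrative helper, not Paddle API; integer division is assumed, matching the formula in the example below):

#include <cassert>

// One spatial dimension of pool2d:
// H_out = (H_in - ksize + 2 * paddings) / strides + 1.
int PooledExtent(int in_size, int ksize, int padding, int stride) {
  return (in_size - ksize + 2 * padding) / stride + 1;
}

int main() {
  // A 32x32 feature map pooled with a 2x2 window, no padding and stride 2
  // gives a 16x16 output.
  assert(PooledExtent(32, 2, 0, 2) == 16);
  return 0;
}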
Example: Input: - X shape: (N, C, H_in, W_in) + X shape: $(N, C, H_{in}, W_{in})$ Output: - Out shape: (N, C, H_out, W_out) - where - H_out = (H_in - ksize[0] + 2 * paddings[0]) / strides[0] + 1; - W_out = (W_in - ksize[1] + 2 * paddings[1]) / strides[1] + 1; + Out shape: $(N, C, H_{out}, W_{out})$ + where + $$ + H_{out} = (H_{in} - ksize[0] + 2 * paddings[0]) / strides[0] + 1 \\ + W_{out} = (W_{in} - ksize[1] + 2 * paddings[1]) / strides[1] + 1 + $$ + )DOC"); } Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput( - "X", - "(Tensor) The input tensor of pooling operator. " - "The format of input tensor is NCDHW. Where N is batch size, C is " - "the number of channels, D, H and W is the depth, height and width of " - "feature."); + AddInput("X", + "(Tensor) The input tensor of pooling operator. " + "The format of input tensor is NCDHW, where N is batch size, C is " + "the number of channels, and D, H and W is the depth, height and " + "width of " + "the feature, respectively."); AddOutput("Out", "(Tensor) The output tensor of pooling operator." - "The format of output tensor is also NCDHW." - "Where N is batch size, C is " - "the number of channels, D, H and W is the depth, height and " - "width of feature."); + "The format of output tensor is also NCDHW, " + "where N is batch size, C is " + "the number of channels, and D, H and W is the depth, height and " + "width of the feature, respectively."); AddAttr("poolingType", - "(string), pooling type, can be \"max\" for max-pooling " + "(string) Pooling type, can be \"max\" for max-pooling " "and \"avg\" for average-pooling.") .InEnum({"max", "avg"}); - AddAttr>("ksize", - "(vector ), the pooling window size(depth, height, " - "width) of pooling " - "operator." - "If globalPooling = true, ksize and paddings wille " - "be ignored."); // TODO(Chengduo): Add checker. - // (Currently, + AddAttr>( + "ksize", + "(vector) The pooling window size(depth, height, " + "width) of pooling operator. " + "If globalPooling = true, ksize and paddings will " + "be ignored."); // TODO(Chengduo): Add checker. + // (Currently, // TypedAttrChecker don't support vector type.) AddAttr("globalPooling", - "(bool default: false), whether to use the global pooling." + "(bool, default false) Whether to use the global pooling. " "If globalPooling = true, ksize and paddings wille be ignored.") .SetDefault(false); - AddAttr>("strides", - "(vector, default:{1,1,1}), strides(depth, height, " - "width) of pooling operator.") + AddAttr>( + "strides", + "(vector, default {1,1,1}) Strides(depth, height, " + "width) of the pooling operator.") .SetDefault({1, 1, 1}); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) AddAttr>( "paddings", - "(vector defalut:{0,0,0}), paddings(depth, height, " - "width) of pooling operator." - "If globalPooling = true, ksize and paddings wille be ignored.") + "(vector, defalut {0,0,0}), paddings(depth, height, " + "width) of pooling operator. " + "If globalPooling = true, ksize and paddings will be ignored.") .SetDefault({0, 0, 0}); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) AddComment(R"DOC( +Pool3d Operator. + The pooling3d operation calculates the output based on -the input, poolingType and ksize, strides, paddings parameters. -Input(X) and output(Out) are in NCDHW format. 
Where N is batch -size, C is the number of channels, D, H and W is the depth, height and -width of feature. Parameters(ksize, strides, paddings) are three elements. -These three elements represent depth, height and width, respectively. -The input(X) size and output(Out) size may be different. +the input, poolingType, ksize, strides, and paddings parameters. +Input(X) and output(Out) are in NCDHW format, where N is batch +size, C is the number of channels, and D, H and W are the depth, height and +width of the feature, respectively. Parameters(ksize, strides, paddings) +are three elements. These three elements represent depth, height and +width, respectively. The input(X) size and output(Out) size may be different. Example: Input: - X shape: (N, C, D_in, H_in, W_in) + X shape: $(N, C, D_{in}, H_{in}, W_{in})$ Output: - Out shape: (N, C, D_out, H_out, W_out) + Out shape: $(N, C, D_{out}, H_{out}, W_{out})$ where - D_out = (D_in - ksize[0] + 2 * paddings[0]) / strides[0] + 1; - H_out = (H_in - ksize[1] + 2 * paddings[1]) / strides[1] + 1; - W_out = (W_in - ksize[2] + 2 * paddings[2]) / strides[2] + 1; + $$ + D_{out} = (D_{in} - ksize[0] + 2 * paddings[0]) / strides[0] + 1 \\ + H_{out} = (H_{in} - ksize[1] + 2 * paddings[1]) / strides[1] + 1 \\ + W_{out} = (W_{in} - ksize[2] + 2 * paddings[2]) / strides[2] + 1 + $$ + )DOC"); } } // namespace operators diff --git a/paddle/operators/pool_with_index_op.cc b/paddle/operators/pool_with_index_op.cc index 95e896e7cc33b1aebe78d1af8746a25318048041..a31b3fcb7083ba5bfbcb6666c4641833ae1e69eb 100644 --- a/paddle/operators/pool_with_index_op.cc +++ b/paddle/operators/pool_with_index_op.cc @@ -89,64 +89,73 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( "X", - "(Tensor), the input tensor of pooling operator. " - "The format of input tensor is NCHW. Where N is batch size, C is the " - "number of channels, H and W is the height and width of image."); + "(Tensor) The input tensor of pooling operator. " + "The format of input tensor is NCHW, where N is batch size, C is the " + "number of channels, H is the height of the image, " + "and W is the width of the image."); AddOutput("Out", - "(Tensor), the output tensor of pooling operator." - "The format of output tensor is also NCHW." - "Where N is batch size, C is " - "the number of channels, H and W is the height and " - "width of image."); + "(Tensor) The output tensor of pooling operator. " + "The format of output tensor is also NCHW, " + "where N is batch size, C is " + "the number of channels, H is the height of the image " + "and W is the width of the image."); AddOutput("Mask", - "(Tensor), the Mask tensor of pooling operator." - "The format of output tensor is also NCHW." - "Where N is batch size, C is the number of channels, H and W " - "is the height and width of image." - "The value in it is the index in current feature map"); + "(Tensor) The Mask tensor of pooling operator." + "The format of output tensor is also NCHW, " + "where N is batch size, C is the number of channels, " + "H is the height of the image, " + "and W is the width of the image. " + "It represents the index in the current feature map."); AddAttr>("ksize", - "(vector ), the pooling window size(height, " - "width) of pooling operator." + "(vector) The pooling window size(height, " + "width) of pooling operator. " "If globalPooling = true, ksize and paddings " "will be ignored."); // TODO(Chengduo): Add // checker. 
(Currently, // TypedAttrChecker don't support vector type.) AddAttr( "globalPooling", - "(bool default: false), whether to use the global pooling." + "(bool, default false) Whether to use the global pooling. " "If globalPooling = true, ksize and paddings will be ignored.") .SetDefault(false); - AddAttr>( - "strides", - "(vector, default:{1, 1}), strides(height, width) of pooling operator.") + AddAttr>("strides", + "(vector, default {1, 1}), strides(height, " + "width) of pooling operator.") .SetDefault({1, 1}); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) AddAttr>( "paddings", - "(vector defalut:{0, 0}), paddings(height, width) of pooling operator." + "(vector, defalut {0, 0}), paddings(height, width) of pooling " + "operator. " "If globalPooling = true, paddings and will be ignored.") .SetDefault({0, 0}); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) AddComment(R"DOC( +MaxPool2d Operator. + The maxPooling2d with index operation calculates the output and the mask -based on the input and ksize, strides, paddings parameters. Input(X) and -output(Out, Mask) are in NCHW format. Where N is batch size, C is the -number of channels, H and W is the height and width of feature. +based on the input, ksize, strides, and paddings parameters. Input(X) and +output(Out, Mask) are in NCHW format, where N is batch size, C is the +number of channels, H is the height of the feature, +and W is the width of the feature. Parameters(ksize, strides, paddings) are two elements. These two elements represent height and width, respectively. The input(X) size and output(Out, Mask) size may be different. Example: Input: - X shape: (N, C, H_in, W_in) + X shape: $(N, C, H_{in}, W_{in})$ Output: - Out shape: (N, C, H_out, W_out) - Mask shape: (N, C, H_out, W_out) + Out shape: $(N, C, H_{out}, W_{out})$ + Mask shape: $(N, C, H_{out}, W_{out})$ where - H_out = (H_in - ksize[0] + 2 * paddings[0]) / strides[0] + 1; - W_out = (W_in - ksize[1] + 2 * paddings[1]) / strides[1] + 1; + $$ + H_{out} = (H_{in} - ksize[0] + 2 * paddings[0]) / strides[0] + 1 \\ + W_{out} = (W_{in} - ksize[1] + 2 * paddings[1]) / strides[1] + 1 + $$ + )DOC"); } }; @@ -156,70 +165,76 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { MaxPool3dWithIndexOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput( - "X", - "(Tensor), the input tensor of pooling operator. " - "The format of input tensor is NCDHW. Where N is batch size, C is " - "the number of channels, D, H and W is the depth, height and width of " - "image."); + AddInput("X", + "(Tensor) The input tensor of pooling operator. " + "The format of input tensor is NCDHW, where N is batch size, C is " + "the number of channels, and D, H and W are the depth, height and " + "width of " + "the image, respectively"); AddOutput("Out", - "(Tensor), the output tensor of pooling operator." - "The format of output tensor is also NCDHW." - "Where N is batch size, C is " - "the number of channels, D, H and W is the depth, height and " - "width of image."); + "(Tensor) The output tensor of pooling operator. " + "The format of output tensor is also NCDHW, " + "where N is the batch size, C is the number of channels, " + "and D, H and W are the depth, height and " + "width of the image, respectively."); AddOutput("Mask", - "(Tensor), the Mask tensor of pooling operator." - "The format of output tensor is also NCDHW." 
- "Where N is batch size, C is the number of channels, D, H and W " - "is the depth, height and width of image." - "The value in it is the index in current feature map"); + "(Tensor) The Mask tensor of pooling operator. " + "The format of output tensor is also NCDHW, " + "where N is the batch size, C is the number of channels, and " + "D, H and W are the depth, height and width " + "of the image, respectively. " + "It represents the index in the current feature map."); AddAttr>("ksize", - "(vector), the pooling window size(depth, " - "height, width) of pooling " - "operator." + "(vector) The pooling window size(depth, " + "height, width) of pooling operator. " "If globalPooling = true, ksize and paddings " "will be ignored."); // TODO(Chengduo): Add // checker. (Currently, // TypedAttrChecker don't support vector type.) AddAttr( "globalPooling", - "(bool default: false), whether to use the global pooling." + "(bool, default false) Whether to use the global pooling. " "If globalPooling = true, ksize and paddings will be ignored.") .SetDefault(false); AddAttr>("strides", - "(vector, default:{1,1,1}), strides(depth, " + "(vector, default {1,1,1}), strides(depth, " "height, width) of pooling operator.") .SetDefault({1, 1, 1}); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) AddAttr>( "paddings", - "(vector defalut:{0,0,0}), paddings(depth, " - "height, width) of pooling operator." + "(vector, defalut {0,0,0}), paddings(depth, " + "height, width) of pooling operator. " "If globalPooling = true, paddings and ksize will be ignored.") .SetDefault({0, 0, 0}); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) AddComment(R"DOC( +MaxPool3d Operator. + The maxpooling3d with index operation calculates the output and the mask based on the input and ksize, strides, paddings parameters. -Input(X) and output(Out, Mask) are in NCDHW format. Where N is batch -size, C is the number of channels, D, H and W is the depth, height and -width of feature. Parameters(ksize, strides, paddings) are three elements. +Input(X) and output(Out, Mask) are in NCDHW format, where N is batch +size, C is the number of channels, and D, H and W are the depth, height and +width of the feature, respectively. +Parameters(ksize, strides, paddings) are three elements. These three elements represent depth, height and width, respectively. The input(X) size and output(Out, Mask) size may be different. Example: Input: - X shape: (N, C, D_in, H_in, W_in) + X shape: $(N, C, D_{in}, H_{in}, W_{in})$ Output: - Out shape: (N, C, D_out, H_out, W_out) - Mask shape: (N, C, D_out, H_out, W_out) + Out shape: $(N, C, D_{out}, H_{out}, W_{out})$ + Mask shape: $(N, C, D_{out}, H_{out}, W_{out})$ where - D_out = (D_in - ksize[0] + 2 * paddings[0]) / strides[0] + 1; - H_out = (H_in - ksize[1] + 2 * paddings[1]) / strides[1] + 1; - W_out = (W_in - ksize[2] + 2 * paddings[2]) / strides[2] + 1; + $$ + D_{out} = (D_{in} - ksize[0] + 2 * paddings[0]) / strides[0] + 1 \\ + H_{out} = (H_{in} - ksize[1] + 2 * paddings[1]) / strides[1] + 1 \\ + W_{out} = (W_{in} - ksize[2] + 2 * paddings[2]) / strides[2] + 1 + $$ + )DOC"); } }; diff --git a/paddle/operators/positive_negative_pair_op.cc b/paddle/operators/positive_negative_pair_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..afbb63cc6053e2968750c163f9ac194d0f5b93e0 --- /dev/null +++ b/paddle/operators/positive_negative_pair_op.cc @@ -0,0 +1,177 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. 
All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/positive_negative_pair_op.h" + +namespace paddle { +namespace operators { + +class PositiveNegativePairOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE( + ctx->HasInput("Score"), + "Input(Score) of PositiveNegativePairOp should not be null."); + PADDLE_ENFORCE( + ctx->HasInput("Label"), + "Input(Label) of PositiveNegativePairOp should not be null."); + PADDLE_ENFORCE( + ctx->HasInput("QueryID"), + "Input(QueryID) of PositiveNegativePairOp should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("PositivePair"), + "Output(PositivePair) of PositiveNegativePairOp should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("NegativePair"), + "Output(NegativePair) of PositiveNegativePairOp should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("NeutralPair"), + "Output(NeutralPair) of PositiveNegativePairOp should not be null."); + auto scalar_dim = framework::make_ddim({1}); + if (ctx->HasInput("AccumulatePositivePair") || + ctx->HasInput("AccumulateNegativePair") || + ctx->HasInput("AccumulateNeutralPair")) { + PADDLE_ENFORCE(ctx->HasInput("AccumulatePositivePair") && + ctx->HasInput("AccumulateNegativePair") && + ctx->HasInput("AccumulateNeutralPair"), + "All optional inputs(AccumulatePositivePair, " + "AccumulateNegativePair, AccumulateNeutralPair) of " + "PositiveNegativePairOp are required if one of them is " + "specified."); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("AccumulatePositivePair"), scalar_dim, + "Shape of AccumulatePositivePair should be {1}."); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("AccumulateNegativePair"), scalar_dim, + "Shape of AccumulateNegativePair should be {1}."); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("AccumulateNeutralPair"), scalar_dim, + "Shape of AccumulateNeutralPair should be {1}."); + } + + auto score_dim = ctx->GetInputDim("Score"); + auto label_dim = ctx->GetInputDim("Label"); + auto query_dim = ctx->GetInputDim("QueryID"); + PADDLE_ENFORCE_EQ(score_dim.size(), 2, "Score should be a 2-D tensor."); + PADDLE_ENFORCE_EQ(label_dim.size(), 2, "Label should be a 2-D tensor."); + PADDLE_ENFORCE_EQ( + label_dim[0], score_dim[0], + "Tensor Score and Label should have the same height (batch size)."); + PADDLE_ENFORCE_EQ(label_dim[1], 1, + "The width of Label should be 1, i.e. 
each item should " + "have a scalar label."); + PADDLE_ENFORCE(query_dim == label_dim, + "QueryID should have the same shape as Label."); + if (ctx->HasInput("Weight")) { + PADDLE_ENFORCE(ctx->GetInputDim("Weight") == label_dim, + "Weight should have the same shape as Label."); + } + int column = ctx->Attrs().Get("column"); + auto depth = score_dim[1]; + PADDLE_ENFORCE(column < depth && column >= -depth, + "Attribute column should be in the range of [-%l, %l)", + depth, depth); + + ctx->SetOutputDim("PositivePair", scalar_dim); + ctx->SetOutputDim("NegativePair", scalar_dim); + ctx->SetOutputDim("NeutralPair", scalar_dim); + } + + protected: + framework::DataType IndicateDataType( + const framework::ExecutionContext &ctx) const override { + return framework::ToDataType(ctx.Input("Score")->type()); + } +}; + +class PositiveNegativePairOpMaker : public framework::OpProtoAndCheckerMaker { + public: + PositiveNegativePairOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Score", + "(Tensor, float) Model Score on an item (with " + "respect to QueryID). It's a 2-D tensor with shape [batch_size, " + "depth], where the column specified by the attribute \"column\" " + "is used as item score."); + AddInput("Label", + "(Tensor, float) Label of an item (with repsect to " + "QueryId). It's a 2-D tensor with shape [batch_size, 1]."); + AddInput("QueryID", + "(Tensor, int64) Query ID that indicates the context. Its shape " + "should be the same as Label."); + AddInput( + "AccumulatePositivePair", + "(float) Optional. The accumulated number of positive pairs over a " + "stream of data. If provided, the output PositivePair will be " + "initialized with this number rather than 0. it won't be modified " + "in place.") + .AsDispensable(); + AddInput( + "AccumulateNegativePair", + "(float) Optional. The accumulated number of negative pairs over a " + "stream of data. If provided, the output NegativePair will be " + "initialized with this number rather than 0. it won't be modified " + "in place.") + .AsDispensable(); + AddInput("AccumulateNeutralPair", + "(float) Optional. The accumulated number of neutral pairs over a " + "stream of data. If provided, the output NeutralPair will be " + "initialized with this number rather than 0. it won't be modified " + "in place.") + .AsDispensable(); + AddInput("Weight", + "(float) Optional. Weight of current item. If specified, its " + "shape should be the same as Label, and the meaning of the output " + "changes from numbers of pairs to the total sum of pairs' " + "weights. Weight of a pair of items is the average of their " + "weights.") + .AsDispensable(); + AddOutput("PositivePair", + "(float) Number of positive pairs, i.e. the pairs of " + "items that are ranked correctly."); + AddOutput("NegativePair", + "(float) Number of negative pairs, i.e. the pairs of " + "items that are ranked incorrectly."); + AddOutput("NeutralPair", + "(float) Number of neutral pairs, i.e. the pairs of items " + "that have the same score.") + .AsDispensable(); + AddAttr( + "column", + "(int, default -1) The column position of Score used to rank items in " + "descending order. It must be in the range of [-rank(Score), " + "rank(Score)). " + "If `dim < 0`, the dim to reduce is `rank + dim`. " + "Noting that reducing on the first dim will make the LoD info lost.") + .SetDefault(0); + AddComment(R"DOC( + PositiveNegativePairOp can be used to evaluate Learning To Rank(LTR) + model performance. 
+ Within some context, e.g. the "query", a LTR model generates scores + for a list of items, which gives a partial order of the items. + PositiveNegativePairOp takes a list of reference rank order + (Input("Label")) and the model generated scores (Input(Score)) as + inputs and counts the pairs that ranked correctly and incorrectly. +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(positive_negative_pair, + ops::PositiveNegativePairOp, + ops::PositiveNegativePairOpMaker); +REGISTER_OP_CPU_KERNEL( + positive_negative_pair, + ops::PositiveNegativePairKernel, + ops::PositiveNegativePairKernel); diff --git a/paddle/operators/positive_negative_pair_op.h b/paddle/operators/positive_negative_pair_op.h new file mode 100644 index 0000000000000000000000000000000000000000..2efd3777e04c17b27c07bccde524de5785af35fe --- /dev/null +++ b/paddle/operators/positive_negative_pair_op.h @@ -0,0 +1,114 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include +#include +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" +#include "paddle/utils/Logging.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; + +template +class PositiveNegativePairKernel : public framework::OpKernel { + public: + struct PredictionResult { + PredictionResult(T score, T label, T weight) + : score(score), label(label), weight(weight) {} + T score; + T label; + T weight; + }; + + void Compute(const framework::ExecutionContext& context) const override { + auto score_t = context.Input("Score"); + auto label_t = context.Input("Label"); + auto query_t = context.Input("QueryID"); + auto acc_positive_t = context.Input("AccumulatePositivePair"); + auto acc_negative_t = context.Input("AccumulateNegativePair"); + auto acc_neutral_t = context.Input("AccumulateNeutralPair"); + auto positive_t = context.Output("PositivePair"); + auto negative_t = context.Output("NegativePair"); + auto neutral_t = context.Output("NeutralPair"); + auto weight_t = context.Input("Weight"); + + auto score = score_t->data(); + auto label = label_t->data(); + auto query = query_t->data(); + const T* weight = nullptr; + if (weight_t != nullptr) { + weight = weight_t->data(); + } + T* positive = positive_t->mutable_data(context.GetPlace()); + T* negative = negative_t->mutable_data(context.GetPlace()); + T* neutral = neutral_t->mutable_data(context.GetPlace()); + + auto score_dim = score_t->dims(); + auto batch_size = score_dim[0]; + auto width = score_dim[1]; + auto column = context.Attr("column"); + if (column < 0) { + column += width; + } + + // construct document instances for each query: Query => List[, ...] 
+ std::unordered_map> predictions; + for (auto i = 0; i < batch_size; ++i) { + if (predictions.find(query[i]) == predictions.end()) { + predictions.emplace( + std::make_pair(query[i], std::vector())); + } + predictions[query[i]].emplace_back(score[i * width + column], label[i], + weight_t != nullptr ? weight[i] : 1.0); + } + + // for each query, accumulate pair counts + T pos = 0, neg = 0, neu = 0; + if (acc_positive_t != nullptr && acc_negative_t != nullptr && + acc_neutral_t != nullptr) { + pos = acc_positive_t->data()[0]; + neg = acc_negative_t->data()[0]; + neu = acc_neutral_t->data()[0]; + } + auto evaluate_one_list = [&pos, &neg, + &neu](std::vector vec) { + for (auto ite1 = vec.begin(); ite1 != vec.end(); ++ite1) { + for (auto ite2 = ite1 + 1; ite2 != vec.end(); ++ite2) { + if (ite1->label == ite2->label) { // labels are equal, ignore. + continue; + } + T w = (ite1->weight + ite2->weight) * 0.5; + if (ite1->score == ite2->score) { + neu += w; + } + (ite1->score - ite2->score) * (ite1->label - ite2->label) > 0.0 + ? pos += w + : neg += w; + } + } + }; + for (auto prediction : predictions) { + evaluate_one_list(prediction.second); + } + *positive = pos; + *negative = neg; + *neutral = neu; + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/precision_recall_op.cc b/paddle/operators/precision_recall_op.cc index 39da1e0bf89ce308de62d38a6cce6dbd4c7c7f83..641f7135ded159b1c7330e87c4b1d983e959b466 100644 --- a/paddle/operators/precision_recall_op.cc +++ b/paddle/operators/precision_recall_op.cc @@ -92,76 +92,78 @@ class PrecisionRecallOpMaker : public framework::OpProtoAndCheckerMaker { framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("MaxProbs", - "(Tensor, default Tensor), a 2-D tensor with shape N x 1, " + "(Tensor, default Tensor) A 2-D tensor with shape N x 1, " "where N is the batch size. Each row contains the max probability " "of an instance which computed by the previous top_k (k=1) " "operator."); AddInput("Indices", - "(Tensor, default Tensor), a 2-D tensor with shape N x 1, " + "(Tensor, default Tensor) A 2-D tensor with shape N x 1, " "where N is the batch size. Each row contains the corresponding " "index which computed by the previous top_k (k=1) operator."); AddInput("Labels", - "(Tensor, default Tensor), a 2-D tensor with shape N x 1, " + "(Tensor, default Tensor) A 2-D tensor with shape N x 1, " "where N is the batch size. Each element is a label and the " "value should be in [0, class_number - 1]."); AddInput("Weights", - "(Tensor, default Tensor), a 2-D tensor with shape N x 1, " + "(Tensor, default Tensor) A 2-D tensor with shape N x 1, " "where N is the batch size. This input is optional. If provided, " "weight of instance would be considered when computing metrics.") .AsDispensable(); AddInput("StatesInfo", - "(Tensor, default Tensor), a 2-D tensor with shape D x 4, " + "(Tensor, default Tensor) A 2-D tensor with shape D x 4, " "where D is the number of classes. This input is optional. If " "provided, current state will be accumulated to this state and " - "the accumulation state will be as the output state.") + "the accumulation state will be the output state.") .AsDispensable(); AddOutput("BatchMetrics", - "(Tensor, default Tensor), a 1-D tensor with shape {6}." - "This output tensor contains metrics for current batch data." + "(Tensor, default Tensor) A 1-D tensor with shape {6}. " + "This output tensor contains metrics for current batch data. 
" "The layout is [macro average precision, macro average recall, " "macro f1 score, micro average precision, micro average recall, " - "micro f1 score]"); + "micro f1 score]."); AddOutput("AccumMetrics", - "(Tensor, default Tensor), a 1-D tensor with shape {6}." - "This output tensor contains metrics for accumulated data." + "(Tensor, default Tensor) A 1-D tensor with shape {6}. " + "This output tensor contains metrics for accumulated data. " "The layout is [macro average precision, macro average recall, " "macro f1 score, micro average precision, micro average recall, " - "micro f1 score]"); + "micro f1 score]."); AddOutput("AccumStatesInfo", - "(Tensor, default Tensor), a 2-D tensor with shape D x 4, " + "(Tensor, default Tensor) A 2-D tensor with shape D x 4, " "where D is equal to class number. This output tensor contains " "accumulated state variables used to compute metrics. The layout " "for each class is [true positives, false positives, " "true negatives, false negatives]."); - AddAttr("class_number", "Number of classes to be evaluated."); + AddAttr("class_number", "(int) Number of classes to be evaluated."); AddComment(R"DOC( -When given 'Input(Indices)' and 'Input(Labels)', this operator can be used +Precision Recall Operator. + +When given Input(Indices) and Input(Labels), this operator can be used to compute various metrics including: - - macro average precision - - macro average recall - - macro f1 score - - micro average precision - - micro average recall - - micro f1 score +1. macro average precision +2. macro average recall +3. macro f1 score +4. micro average precision +5. micro average recall +6. micro f1 score To compute the above metrics, we need to do statistics for true positives, -false positives and false negatives. Here count of true negatives is not +false positives and false negatives. Here the count of true negatives is not necessary, but counting it may provide potential usage and the cost is -trivial, so the operator also provides count of true negatives. +trivial, so the operator also provides the count of true negatives. We define state as a 2-D tensor with shape [class_number, 4]. Each row of a state contains statistic variables for corresponding class. Layout of each row is: TP(true positives), FP(false positives), TN(true negatives), -FN(false negatives). If 'Input(Weights)' provided, TP, FP, TN, FN will be -calculated by given weight instead of instance count. +FN(false negatives). If Input(Weights) is provided, TP, FP, TN, FN will be +calculated by given weight instead of the instance count. This operator also supports metrics computing for cross-batch situation. To -achieve this, 'Input(StatesInfo)' should be provided. State of current batch -data will be accumulated to 'Input(StatesInfo)' and 'Output(AccumStatesInfo)' +achieve this, Input(StatesInfo) should be provided. State of current batch +data will be accumulated to Input(StatesInfo) and Output(AccumStatesInfo) is the accumulation state. -'Output(BatchMetrics)' is metrics of current batch data while -'Output(AccumStatesInfo)' is metrics of accumulation data. +Output(BatchMetrics) is metrics of current batch data while +Output(AccumStatesInfo) is metrics of accumulation data. 
)DOC"); } diff --git a/paddle/operators/prelu_op.cc b/paddle/operators/prelu_op.cc index eef2e34eaacf59b9adacb343e9a0091ebabeaea3..055c471b4561e5fd3c7a65c6f81d66cdce1a5578 100644 --- a/paddle/operators/prelu_op.cc +++ b/paddle/operators/prelu_op.cc @@ -41,17 +41,24 @@ class PReluOpMaker : public framework::OpProtoAndCheckerMaker { PReluOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input tensor of prelu operator."); - AddInput("Alpha", "The alpha weight of PRelu operator."); - AddOutput("Out", "The output tensor of PRelu operator."); - AddComment(R"DOC(PRelu operator + AddInput("Alpha", "The alpha weight of prelu operator."); + AddOutput("Out", "The output tensor of prelu operator."); + AddComment(R"DOC( +PRelu Operator. The equation is: - f(x) = alpha * x , for x < 0 - f(x) = x , for x >= 0 +$$ +f(x) = +\begin{cases} +\alpha * x, \quad \text{if} \ x < 0 \\ +x, \qquad \text{if} \ x >= 0 +\end{cases} +$$ The input `X` can carry the LoD (Level of Details) information, -or not. And the output shares the LoD with input `X`. +or not. And the output shares the LoD information with input `X`. + )DOC"); } }; diff --git a/paddle/operators/proximal_adagrad_op.cc b/paddle/operators/proximal_adagrad_op.cc index 39fbf800031cd559a49654667e5a6f634384523d..36e460103ab46bf6f1408840a0699793e2be134d 100644 --- a/paddle/operators/proximal_adagrad_op.cc +++ b/paddle/operators/proximal_adagrad_op.cc @@ -83,22 +83,26 @@ class ProximalAdagradOpMaker : public framework::OpProtoAndCheckerMaker { "L1 regularization strength.") .SetDefault(0.0f); AddAttr("l2", - "(float, default 0.0)" + "(float, default 0.0) " "L2 regularization strength.") .SetDefault(0.0f); AddComment(R"DOC( +Proximal Adagrad Optimizer. -Optimizer that implements the proximal adagrad algorithm. +Optimizer that implements the proximal adagrad algorithm: -moment = moment + grad * grad -prox_param = param - learning_rate * grad * (1 / sqrt(moment)) -param = sign(prox_param) / (1 + learning_rate * l2) * - max { |prox_param| - learning_rate * l1 , 0 } +$$ +moment = moment + grad * grad \\ +prox\_param = param - learning\_rate * grad * (1 / \sqrt{moment}) \\ +param = sign(prox\_param) / (1 + learning\_rate * l2) * + \max(|prox\_param| - learning\_rate * l1 , 0) +$$ The paper that proposed Proximal GD: (http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting.pdf) Here, we use the adagrad learning rate as specified here: (http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf) + )DOC"); } }; diff --git a/paddle/operators/proximal_gd_op.cc b/paddle/operators/proximal_gd_op.cc index e4b014b9f5866ec0791cba9b3998b1734066eeeb..5693d0ec9ebf4c470dfa5141b6eeee431f24f2ea 100644 --- a/paddle/operators/proximal_gd_op.cc +++ b/paddle/operators/proximal_gd_op.cc @@ -67,19 +67,23 @@ class ProximalGDOpMaker : public framework::OpProtoAndCheckerMaker { "L1 regularization strength.") .SetDefault(0.0f); AddAttr("l2", - "(float, default 0.0)" + "(float, default 0.0) " "L2 regularization strength.") .SetDefault(0.0f); AddComment(R"DOC( +ProximalGD Operator. -Optimizer that implements the proximal gradient descent algorithm. 
+Optimizer that implements the proximal gradient descent algorithm: -prox_param = param - learning_rate * grad -param = sign(prox_param) / (1 + learning_rate * l2) * - max { |prox_param| - learning_rate * l1 , 0 } +$$ +prox\_param = param - learning\_rate * grad \\ +param = sign(prox\_param) / (1 + learning\_rate * l2) * + \max(|prox\_param| - learning\_rate * l1, 0) +$$ The paper that proposed Proximal Gradient Descent: (http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting.pdf) + )DOC"); } }; diff --git a/paddle/operators/rank_loss_op.cc b/paddle/operators/rank_loss_op.cc index 17ef2b1d01bd37abf2ece97ed0a307c2f1bf7e6f..061e82412ea5f4f17fd26a7094e68b97138cc09c 100644 --- a/paddle/operators/rank_loss_op.cc +++ b/paddle/operators/rank_loss_op.cc @@ -26,9 +26,9 @@ class RankLossOp : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext *ctx) const override { // input check - PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null"); - PADDLE_ENFORCE(ctx->HasInput("Left"), "Input(Left) shouldn't be null"); - PADDLE_ENFORCE(ctx->HasInput("Right"), "Input(Right) shouldn't be null"); + PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null."); + PADDLE_ENFORCE(ctx->HasInput("Left"), "Input(Left) shouldn't be null."); + PADDLE_ENFORCE(ctx->HasInput("Right"), "Input(Right) shouldn't be null."); auto label_dims = ctx->GetInputDim("Label"); auto left_dims = ctx->GetInputDim("Left"); @@ -50,32 +50,32 @@ class RankLossOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("Label", "The label indicating A ranked higher than B or not, row vector."); AddInput("Left", "The output of RankNet for doc A, vector."); - AddInput("Right", "The output of RankNet for doc B, vetor"); + AddInput("Right", "The output of RankNet for doc B, vector."); AddOutput("Out", "The output loss of RankLoss operator, vector."); - AddComment(R"DOC(RankLoss operator + AddComment(R"DOC( +RankLoss Operator. -Rank loss operator for RankNet[1]. RankNet is a pairwise ranking model with +RankLoss operator for RankNet +(http://icml.cc/2015/wp-content/uploads/2015/06/icml_ranking.pdf). +RankNet is a pairwise ranking model with one training sample consisting of a pair of doc A and B, and the label P indicating that A is ranked higher than B or not: P = {0, 1} or {0, 0.5, 1}, where 0.5 means no information about the rank of the input pair. -The RankLoss operator contains three inputs: Left (o_i), Right (o_j) and Label -(P_{i,j}), which represent the output of RankNet for two docs and the label -respectively, and yields the rank loss C_{i,j} by following the expression +The RankLoss operator takes three inputs: Left (o_i), Right (o_j) and Label +(P_{i,j}), which represent the output of RankNet for the two docs and the label, +respectively, and yields the rank loss C_{i,j} using the following equation: -\f[ +$$ C_{i,j} = -\tilde{P_{ij}} * o_{i,j} + log(1 + e^{o_{i,j}}) \\ o_{i,j} = o_i - o_j \\ \tilde{P_{i,j}} = \left \{0, 0.5, 1 \right \} \ or \ \left \{0, 1 \right \} -\f] +$$ The operator can take inputs of one sample or in batch. -[1]. Chris Burges, Tal Shaked, Erin Renshaw, et al. Learning to - Rank using Gradient Descent.
- http://icml.cc/2015/wp-content/uploads/2015/06/icml_ranking.pdf )DOC"); } }; diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index 9eb2d79b4f65d23222e68ad2a439f7554469278b..b0e87b7059eab3772c179fe31cdb09477b589ed1 100644 --- a/paddle/operators/recurrent_op.cc +++ b/paddle/operators/recurrent_op.cc @@ -509,14 +509,14 @@ class RecurrentOpProtoMaker : public framework::OpProtoAndCheckerMaker { AddInput(kInitialStates, "rnn initial states").AsDuplicable(); AddInput(kParameters, "Parameters are used by step block as its input. However, the " - "inputs is not a sequence tensor. Every time step, each operator " - "in step block just use the parameter directly") + "input is not a sequence tensor. Every time step, each operator " + "in step block just use the parameter directly.") .AsDuplicable(); AddOutput(kOutputs, - "The output sequence of RNN. The sequence length must be same") + "The output sequence of RNN. The sequence length must be same.") .AsDuplicable(); AddOutput(kStepScopes, - "StepScopes contains all local variables in each time step."); + "StepScopes contain all local variables in each time step."); AddAttr>(kExStates, string::Sprintf( R"DOC(The ex-state variable names. @@ -556,10 +556,12 @@ if reverse is True o o o o )DOC").SetDefault(false); AddAttr(kIsTrain, "").SetDefault(true); - AddComment(R"DOC(Static Length Recurrent Operator + AddComment(R"DOC( +Static Length Recurrent Operator. + +The static length recurrent operator can only operate on fixed size sequence +data, i.e. in each mini-batch, the sequence length of all inputs are the same. -The static length recurrent operator can only operate on fix sized sequence -data, i.e. in each mini-batch, the sequence length of all inputs are same. )DOC"); } }; diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc index 0599daa7688a5658ebea8902c4e15e63570539fb..2589a54cfc7fc5bc11ae983797d480a134e0eb25 100644 --- a/paddle/operators/reduce_op.cc +++ b/paddle/operators/reduce_op.cc @@ -80,24 +80,27 @@ class ReduceOpMaker : public framework::OpProtoAndCheckerMaker { public: ReduceOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput( - "X", - "(Tensor) The input tensor. Tensors with rank at most 6 are supported"); + AddInput("X", + "(Tensor) The input tensor. Tensors with rank at most 6 are " + "supported."); AddOutput("Out", "(Tensor) The result tensor."); AddAttr( "dim", - "(int, default 1) The dimension to reduce. " + "(int, default 0) The dimension to reduce. " "Must be in the range [-rank(input), rank(input)). " "If `dim < 0`, the dim to reduce is `rank + dim`. " - "Noting that reducing on the first dim will make the LoD info lost.") + "Note that reducing on the first dim will make the LoD info lost.") .SetDefault(0); AddAttr("keep_dim", "(bool, default false) " "If true, retain the reduced dimension with length 1.") .SetDefault(false); comment_ = R"DOC( -{ReduceOP} operator computes the {reduce} of input tensor along the given dimension. -The result tensor has 1 fewer dimension than the input unless `keep_dim` is true. +{ReduceOp} Operator. + +This operator computes the {reduce} of input tensor along the given dimension. +The result tensor has 1 fewer dimension than the input unless keep_dim is true. 
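For example, reducing a tensor of shape [3, 4] along dim = 1 gives a result of shape [3], or [3, 1] when keep_dim is true; dim = -1 addresses the same dimension, since the dim to reduce becomes rank + dim = 1.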
+ )DOC"; AddComment(comment_); } diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index 9213cc7a85822e4c78ef72aec2bf86d2edac023a..ba774ec2160c0460867de42f7ad9d5cd65ad8d6a 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -71,8 +71,11 @@ class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input tensor of reshape operator."); AddOutput("Out", "The output tensor of reshape operator."); - AddAttr>("shape", "Target shape of reshape operator."); - AddComment(R"DOC(Reshape operator + AddAttr>("shape", + "(vector) " + "Target shape of reshape operator."); + AddComment(R"DOC( +Reshape Operator. Reshape Input(X) into the shape specified by Attr(shape). @@ -81,7 +84,7 @@ Given a 2-D tensor X with 2 rows and 2 columns [[1, 2], [3, 4]] -with target shape = [1, 4], the reshape operator will transform +and target shape = [1, 4], the reshape operator will transform the tensor X into a 1-D tensor: [1, 2, 3, 4] diff --git a/paddle/operators/rmsprop_op.cc b/paddle/operators/rmsprop_op.cc index fd5567a365c4c843de3b8aec7fa77164f16644a4..a9c45f639c6728ff2fd6de6fcdadfe5032a705d7 100644 --- a/paddle/operators/rmsprop_op.cc +++ b/paddle/operators/rmsprop_op.cc @@ -68,22 +68,22 @@ class RmspropOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Param", "(Tensor, default Tensor) " - "Input parameter value that has to be updated"); + "Input parameter value that has to be updated."); AddInput("MeanSquare", "(Tensor, default Tensor)" - " The mean square value that gets updated"); + " The mean square value that gets updated."); AddInput("LearningRate", "(Tensor, default Tensor) " - "The learning rate should be a tensor of size 1"); + "The learning rate should be a tensor of size 1."); AddInput("Grad", "(Tensor, default Tensor) " - "Input gradient of the parameter"); + "Input gradient of the parameter."); AddInput("Moment", - "(Tensor, default Tensor) The moment that gets updated"); + "(Tensor, default Tensor) The moment that gets updated."); - AddOutput("ParamOut", "(Tensor) Output updated parameter value"); - AddOutput("MomentOut", "(Tensor) Output updated moment"); - AddOutput("MeanSquareOut", "(Tensor) Output Mean squared updated value"); + AddOutput("ParamOut", "(Tensor) Output updated parameter value."); + AddOutput("MomentOut", "(Tensor) Output updated moment."); + AddOutput("MeanSquareOut", "(Tensor) Output Mean squared updated value."); AddAttr("epsilon", "(float, default 1e-10) Constant " @@ -93,18 +93,19 @@ class RmspropOpMaker : public framework::OpProtoAndCheckerMaker { "(float, default 0.9) " "Discounting factor for coming gradient.") .SetDefault(0.9f); - AddAttr("momentum", "(float, default 0.0) Constant value") + AddAttr("momentum", "(float, default 0.0) Constant value.") .SetDefault(0.0f); AddComment(R"DOC( +Rmsprop Optimizer. 
-RMSprop - -MeanSquareOut = decay * MeanSquare + (1 - decay) * Grad * Grad +$$ +MeanSquareOut = decay * MeanSquare + (1 - decay) * Grad * Grad \\ MomentOut = momentum * Moment + - LearningRate * Grad / sqrt(MeanSquareOut + epsilon) + \frac{LearningRate * Grad}{\sqrt{MeanSquareOut + epsilon}} \\ ParamOut = Param - MomentOut +$$ -The original slides that proposed RMSprop: Slide 29 of +The original slides that proposed Rmsprop: Slide 29 of http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf) )DOC"); diff --git a/paddle/operators/save_op.cc b/paddle/operators/save_op.cc index 490256dfa1cf9b891713dac264e9260906ce1025..56909fb65f44ad00314103e21bee9535fbd59317 100644 --- a/paddle/operators/save_op.cc +++ b/paddle/operators/save_op.cc @@ -163,14 +163,19 @@ class SaveOpProtoMaker : public framework::OpProtoAndCheckerMaker { SaveOpProtoMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The tensor need to be saved"); - AddComment(R"DOC(Save operator -Save operator will serialize and write a tensor variable to disk file. + AddInput("X", "(Tensor) Input tensor to be saved."); + AddComment(R"DOC( +Save Operator. + +This operator will serialize and write a tensor variable to a file on disk. )DOC"); - AddAttr("overwrite", "Overwrite the output file if exist") + AddAttr("overwrite", + "(bool, default true) " + "Overwrite the output file if it exists.") .SetDefault(true); AddAttr("file_path", - "Variable will be saved to \"file_path\".") + "(string) " + "The \"file_path\" where the variable will be saved.") .AddCustomChecker( [](const std::string &path) { return !path.empty(); }); } diff --git a/paddle/operators/scale_op.cc b/paddle/operators/scale_op.cc index 5fcacf70d80527b4580a8f744ab3b79fb301d1d9..5745580504fb9bda551f21665bff5c65ae82aeb9 100644 --- a/paddle/operators/scale_op.cc +++ b/paddle/operators/scale_op.cc @@ -40,13 +40,16 @@ class ScaleOpMaker : public framework::OpProtoAndCheckerMaker { public: ScaleOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The input tensor of scale operator."); - AddOutput("Out", "The output tensor of scale operator."); - AddComment(R"DOC(Scale operator + AddInput("X", "(Tensor) Input tensor of scale operator."); + AddOutput("Out", "(Tensor) Output tensor of scale operator."); + AddComment(R"DOC( +Scale Operator. -The equation is: Out = scale*X +$$Out = scale*X$$ )DOC"); - AddAttr("scale", "The scaling factor of the scale operator.") + AddAttr("scale", + "(float, default 1.0) " + "The scaling factor of the scale operator.") .SetDefault(1.0); } }; diff --git a/paddle/operators/seq_expand_op.cc b/paddle/operators/seq_expand_op.cc index 08fda9b44564249634f0d1a570e8b2458f88fd41..b862056ad400290a60e8a75a23dceeb1d4422ea4 100644 --- a/paddle/operators/seq_expand_op.cc +++ b/paddle/operators/seq_expand_op.cc @@ -53,8 +53,10 @@ class SeqExpandOpMaker : public framework::OpProtoAndCheckerMaker { "(LodTensor)The output of seq_expand op." "The lod of output will be as same as input(Y)'s lod."); AddComment(R"DOC( -Expand input(X) according to LOD of input(Y). +Seq Expand Operator. +This operator expands input(X) according to LOD of input(Y).
+Following are cases to better explain how this works: Case 1: Given 2-level a LoDTensor input(X) diff --git a/paddle/operators/seq_expand_op.h b/paddle/operators/seq_expand_op.h index 8703105385183c1a0ee1a1b3831228f942c04dda..4ef0d02cf85c43e95335660be65a67df66b4f55c 100644 --- a/paddle/operators/seq_expand_op.h +++ b/paddle/operators/seq_expand_op.h @@ -32,7 +32,8 @@ class SeqExpandKernel : public framework::OpKernel { const T* x_data = x->data(); auto x_dims = x->dims(); auto* y = context.Input("Y"); - PADDLE_ENFORCE_EQ(x_dims[0], y->lod().back().size() - 1, + PADDLE_ENFORCE_EQ(static_cast(x_dims[0]), + y->lod().back().size() - 1, "The size of last lod level in Input(Y)" "must be equal to dims[0] of Input(X)."); out->set_lod(y->lod()); diff --git a/paddle/operators/sequence_concat_op.cc b/paddle/operators/sequence_concat_op.cc index 46f73e3c279835bbb4bfdd7dede03a5535186b24..64097ef2525d734f79f22ddd7957b3216b06ee7b 100644 --- a/paddle/operators/sequence_concat_op.cc +++ b/paddle/operators/sequence_concat_op.cc @@ -47,19 +47,19 @@ class SequenceConcatOpMaker : public framework::OpProtoAndCheckerMaker { framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", - "(A vector of LoDTensor), the input is a vector of LoDTensor, " + "(vector) Input is a vector of LoDTensor, " "each of which is a variable-length sequence or nested sequence.") .AsDuplicable(); AddOutput("Out", - "(A LoDTensor), the variable-length output of " + "(LoDTensor), Variable-length output of " "sequence_concat Op."); AddAttr("axis", - "(int, default 0)" - "The axis which the inputs will be joined with. " + "(int, default 0) " + "The axis along which the inputs will be joined. " "If axis is 0, the inputs will be joined with LoD index.") .SetDefault(0); AddAttr("level", - "(int, default 0)" + "(int, default 0) " "The level at which the inputs will be joined. " "If the level is 0, the inputs will be joined at the nested " "sequence level. " @@ -68,34 +68,38 @@ class SequenceConcatOpMaker : public framework::OpProtoAndCheckerMaker { "The level should be less than the level number of inputs.") .SetDefault(0); AddComment(R"DOC( - The sequence_concat operator concatenates multiple LoDTensors. - It only supports sequence (LoD Tensor with level number is 1) - or a nested sequence (LoD tensor with level number is 2) as its input. - - Case1: - If the axis is other than 0(here, axis is 1 and level is 1), - each input should have the same LoD information and the LoD - information of the output keeps the same as the input. - - LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4) - LoD(x1) = {{0,2,4}, {0,1,2,3,4}}; Dims(x1) = (4,4,4) - LoD(Out) = {{0,2,4}, {0,1,2,3,4}}; Dims(Out) = (4,7,4) - - - Case2: - If the axis is 0(here, leve is 0), the inputs are concatenated along - time steps, the LoD information of the output need to re-compute. - - LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4) - LoD(x1) = {{0,3,5}, {0,1,2,3,5}}; Dims(x1) = (5,3,4) - LoD(Out) = {{0,5,9}, {0,1,2,3,4,5,6,7,9}}; Dims(Out) = (9,3,4) - - - Case3: - If the axis is 0(here, level is 1). - - LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4) - LoD(x1) = {{0,3,5}, {0,1,3,4,5}}; Dims(x1) = (5,3,4) - LoD(Out) = {{0,5,9}, {0,2,5,7,9}}; Dims(Out) = (9,3,4) - - NOTE: The levels of all the inputs should be the same. +Sequence Concat Operator. + +The sequence_concat operator concatenates multiple LoDTensors. 
+It supports a sequence (LoD Tensor with level number is 1) +or a nested sequence (LoD tensor with level number is 2) as its input. +The following examples explain how the operator works: +- Case1: + If the axis is other than 0(here, axis is 1 and level is 1), + each input should have the same LoD information and the LoD + information of the output keeps the same as the input. + + LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4) + LoD(x1) = {{0,2,4}, {0,1,2,3,4}}; Dims(x1) = (4,4,4) + LoD(Out) = {{0,2,4}, {0,1,2,3,4}}; Dims(Out) = (4,7,4) + +- Case2: + If the axis is 0(here, leve is 0), the inputs are concatenated along + time steps, the LoD information of the output need to re-compute. + + LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4) + LoD(x1) = {{0,3,5}, {0,1,2,3,5}}; Dims(x1) = (5,3,4) + LoD(Out) = {{0,5,9}, {0,1,2,3,4,5,6,7,9}}; Dims(Out) = (9,3,4) + +- Case3: + If the axis is 0(here, level is 1). + + LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4) + LoD(x1) = {{0,3,5}, {0,1,3,4,5}}; Dims(x1) = (5,3,4) + LoD(Out) = {{0,5,9}, {0,2,5,7,9}}; Dims(Out) = (9,3,4) + +NOTE: The levels of all the inputs should be the same. + )DOC"); } }; diff --git a/paddle/operators/sequence_conv_op.cc b/paddle/operators/sequence_conv_op.cc index a3f2ed14439572e9723c3057d212bb773b2a4e44..41cadce4c603a9c14db79e2f6b30f8664cf72a38 100644 --- a/paddle/operators/sequence_conv_op.cc +++ b/paddle/operators/sequence_conv_op.cc @@ -105,10 +105,10 @@ class SequenceConvOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( "X", - "(LoDTensor) the input(X) is a LodTensor, which support " + "(LoDTensor) the input(X) is a LodTensor, which supports " "variable-time length input sequence. The underlying tensor in " - "this LoDTensor is a matrix with shape (T, N), where, T is the " - "total time steps in this mini-batch, N is the input_hidden_size."); + "this LoDTensor is a matrix with shape (T, N), where T is the " + "total time steps in this mini-batch and N is the input_hidden_size."); AddInput("PaddingData", "(Tensor, optional) the input(PaddingData) is an optional " "parameter, and it is learnable. " @@ -157,14 +157,16 @@ class SequenceConvOpMaker : public framework::OpProtoAndCheckerMaker { .GreaterThan(0); AddComment(R"DOC( - SequenceConvOp performs convolution operation on features of - contextLength time-steps of each instance. - The convolution operation calculates the output based on the input, filter - and strides, paddings parameters. The size of each dimension of the - parameters is checked in the infer-shape. In order to ensure the equal - length of sequence before and after convolution, it is necessary to fill - the top and bottom of each sequence according to context_length, - context_stride and context_start. +Sequence Conv Operator. + +SequenceConvOp performs convolution operation on features of contextLength +time-steps of each instance. The convolution operation calculates the output +based on the input, filter, strides and paddings parameters. +The size of each dimension of the parameters is checked during infer-shape. +In order to ensure the equal length of sequence before and after convolution, +it is necessary to fill the top and bottom of each sequence based on +context_length, context_stride and context_start. 
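For intuition, the context-window gathering described above can be reproduced as a small standalone C++ program. This is a toy sketch only, not the Paddle kernel: it assumes context_stride is 1 and replaces the learnable PaddingData with plain zero filling, which is what makes the output sequence the same length as the input.

    #include <cstdio>
    #include <vector>

    int main() {
      const int T = 4, N = 2;                       // time steps, feature size
      const int context_start = -1, context_length = 3;  // assumed attribute values
      std::vector<std::vector<float>> x(T, std::vector<float>(N));
      for (int t = 0; t < T; ++t)
        for (int j = 0; j < N; ++j) x[t][j] = static_cast<float>(t * N + j);

      // Each output row concatenates context_length input rows; rows that fall
      // outside [0, T) are padded with zeros ("fill the top and bottom").
      for (int t = 0; t < T; ++t) {
        for (int c = 0; c < context_length; ++c) {
          int src = t + context_start + c;
          for (int j = 0; j < N; ++j) {
            float v = (src >= 0 && src < T) ? x[src][j] : 0.f;
            std::printf("%4.0f ", v);
          }
        }
        std::printf("\n");
      }
      return 0;
    }

Each printed row has width context_length * N and would then be multiplied by the filter; the first and last rows show the zero padding at the sequence boundaries.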
+ )DOC"); } }; diff --git a/paddle/operators/sequence_pool_op.cc b/paddle/operators/sequence_pool_op.cc index dfe8de49858bffee77249ff745f483fdb08302cc..710f280017fa5e188b187a3e91b27e2bedc65d10 100644 --- a/paddle/operators/sequence_pool_op.cc +++ b/paddle/operators/sequence_pool_op.cc @@ -27,6 +27,11 @@ class SequencePoolOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of SequencePoolOp should not be null."); ctx->SetOutputDim("Out", ctx->GetInputDim("X")); + if (ctx->Attrs().Get("pooltype") == "MAX") { + PADDLE_ENFORCE(ctx->HasOutput("MaxIndex"), + "Output(MaxIndex) of SequencePoolOp should not be null."); + ctx->SetOutputDim("MaxIndex", ctx->GetInputDim("X")); + } } }; @@ -35,43 +40,50 @@ class SequencePoolOpMaker : public framework::OpProtoAndCheckerMaker { SequencePoolOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "(LoDTensor), the variable-length input of SequencePoolOp"); + AddInput("X", "(LoDTensor) The variable-length input of SequencePoolOp"); AddOutput("Out", - "(Tensor), output of SequencePoolOp, which does not contain LoD " + "(Tensor) The output of SequencePoolOp does not contain LoD " "infomation."); + AddOutput("MaxIndex", + "(Tensor) This tensor is used for the sequence max-pooling " + "to record the max indexes.") + .AsIntermediate(); AddAttr( "pooltype", "(int, default AVERAGE) the pooling pooltype of SequencePoolOp.") .SetDefault("AVERAGE") .InEnum({"AVERAGE", "SUM", "SQRT", "LAST", "FIRST", "MAX"}); AddComment(R"DOC( - SequencePoolOp pools features of all time-steps of each instance. - - It supports six pooling pooltype: - - AVERAGE: Out[i] = average_{for each instance in i-th sequence}{X[i]} - - SUM: Out[i] = sum_{for each instance in i-th sequence}{X[i]} - - SQRT: Out[i] = sum_{for each instance in i-th sequence}{X[i]} - / sqrt(i-th sequence length) - - LAST: Out[i] = last instance in i-th sequence X[i] - - FIRST: Out[i] = first instance in i-th sequence X[i] - - MAX: Out[i] = max_{for each instance in i-th sequence}{X[i]} - - For a mini-batch of 3 variable-length sentences, containing 2, 3, and 2 time-steps: - - Assume X is a [7,M,N] LoDTensor, and X->lod()[0] = [0, 2, 5, 7], 7=2+3+2. - Besides, for the sake of simplicity, we assume M=1 and N=1, - and the value of X = [[1, 3], [2, 4, 6], [5, 1]]. - - Thus, Out is a [3,1,1] Tensor without LoD infomation. - And for different pooltype, the value of Out is as follows: - - - AVERAGE: [2, 4, 3], where 2=(1+3)/2, 4=(2+4+6)/3, 3=(5+1)/2 - - SUM: [4, 12, 6], where 4=1+3, 12=2+4+6, 6=5+1 - - SQRT: [2.82, 6.93, 4.24], where 2.82=(1+3)/sqrt(2), +Sequence Pool Operator. + +The SequencePoolOp pools features of all time-steps of each instance. +It supports six pooling types: +1. AVERAGE: Out[i] = $$avg(X_i)$$ +2. SUM: Out[i] = $$\sum_jX_{ij}$$ +3. SQRT: Out[i] = $$\frac{\sum_jX_{ij}}{\sqrt{len(X_i)}}$$ +4. LAST: Out[i] = last instance in i-th sequence X[i] +5. FIRST: Out[i] = first instance in i-th sequence X[i] +6. MAX: Out[i] = $$max(X_i)$$ + +The following example explains how this works: +For a mini-batch of 3 variable-length sentences, +containing 2, 3, and 2 time-steps: + +Assume X is a [7,M,N] LoDTensor, and X->lod()[0] = [0, 2, 5, 7], 7=2+3+2. +Besides, for the sake of simplicity, we assume M=1 and N=1, +and the value of X = [[1, 3], [2, 4, 6], [5, 1]]. + +Thus, Out is a [3,1,1] Tensor without LoD infomation. 
+And for different pooltype, the value of Out is as follows: + +- AVERAGE: [2, 4, 3], where 2=(1+3)/2, 4=(2+4+6)/3, 3=(5+1)/2 +- SUM: [4, 12, 6], where 4=1+3, 12=2+4+6, 6=5+1 +- SQRT: [2.82, 6.93, 4.24], where 2.82=(1+3)/sqrt(2), 6.93=(2+4+6)/sqrt(3), 4.24=(5+1)/sqrt(2) - - MAX: [3, 6, 5], where 3=max(1,3), 6=max(2,4,6), 5=max(5,1) - - LAST: [3, 6, 1], where 3=last(1,3), 6=last(2,4,6), 1=last(5,1) - - FIRST: [1, 2, 5], where 1=first(1,3), 2=first(2,4,6), 5=first(5,1) +- MAX: [3, 6, 5], where 3=max(1,3), 6=max(2,4,6), 5=max(5,1) +- LAST: [3, 6, 1], where 3=last(1,3), 6=last(2,4,6), 1=last(5,1) +- FIRST: [1, 2, 5], where 1=first(1,3), 2=first(2,4,6), 5=first(5,1) + )DOC"); } }; @@ -93,6 +105,12 @@ class SequencePoolGradOp : public framework::OperatorWithKernel { } ctx->SetOutputDim(framework::GradVarName("X"), x_dims); } + + protected: + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return framework::ToDataType(ctx.Input("X")->type()); + } }; } // namespace operators diff --git a/paddle/operators/sequence_pool_op.h b/paddle/operators/sequence_pool_op.h index e0e0493fe0ef7e1963ce5c2e3f37c164a605809b..2b8a25c2414c20efaffedfc8603697b3a104634f 100644 --- a/paddle/operators/sequence_pool_op.h +++ b/paddle/operators/sequence_pool_op.h @@ -16,6 +16,7 @@ limitations under the License. */ #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" #include "paddle/operators/math/math_function.h" +#include "paddle/operators/math/sequence_pooling.h" namespace paddle { namespace operators { @@ -34,7 +35,7 @@ class SequencePoolKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* in = context.Input("X"); - auto* out = context.Output("Out"); + auto* out = context.Output("Out"); std::string pooltype = context.Attr("pooltype"); auto dims = in->dims(); @@ -53,6 +54,16 @@ class SequencePoolKernel : public framework::OpKernel { auto lod_level_0 = lod[0]; out->mutable_data(context.GetPlace()); + + if (pooltype == "MAX") { + math::MaxSeqPoolFunctor max_pool; + auto* index = context.Output("MaxIndex"); + index->Resize({dims}); + index->mutable_data(context.GetPlace()); + max_pool(context.device_context(), *in, out, index); + return; + } + auto place = context.GetEigenDevice(); for (int i = 0; i < static_cast(lod_level_0.size()) - 1; ++i) { Tensor in_t = in->Slice(static_cast(lod_level_0[i]), @@ -69,8 +80,6 @@ class SequencePoolKernel : public framework::OpKernel { } else if (pooltype == "SQRT") { out_e.device(place) = in_e.sum(Eigen::array({{0}})) / std::sqrt(static_cast(h)); - } else if (pooltype == "MAX") { - out_e.device(place) = in_e.maximum(Eigen::array({{0}})); } else if (pooltype == "LAST") { out_e.device(place) = in_e.chip(h - 1, 0); } else if (pooltype == "FIRST") { @@ -87,8 +96,8 @@ class SequencePoolGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* in = context.Input("X"); + auto* out_g = context.Input(framework::GradVarName("Out")); auto* in_g = context.Output(framework::GradVarName("X")); - auto* out_g = context.Input(framework::GradVarName("Out")); std::string pooltype = context.Attr("pooltype"); auto dims = in->dims(); @@ -96,6 +105,14 @@ class SequencePoolGradKernel : public framework::OpKernel { int64_t w = in->numel() / dims[0]; in_g->mutable_data(context.GetPlace()); + + if (pooltype == "MAX") { + math::MaxSeqPoolGradFunctor max_pool_grad; + auto* index = 
context.Input("MaxIndex"); + max_pool_grad(context.device_context(), *out_g, *index, in_g); + return; + } + if (pooltype == "LAST" || pooltype == "FIRST") { // set X@Grad be zero at first when pooltype is LAST/FIRST math::SetConstant functor; @@ -118,20 +135,6 @@ class SequencePoolGradKernel : public framework::OpKernel { } else if (pooltype == "SQRT") { in_g_e.device(place) = (out_g_e / std::sqrt(static_cast(h))).broadcast(bcast); - } else if (pooltype == "MAX") { - auto in_t = - in->Slice(static_cast(lod[i]), static_cast(lod[i + 1])); - Eigen::Map> - in_t_map(in_t.data(), h, w); - int row_id; - Eigen::array extents{{1, 1}}; - for (int col_id = 0; col_id < w; col_id++) { - in_t_map.col(col_id).maxCoeff(&row_id); - Eigen::array in_offsets{{row_id, col_id}}; - Eigen::array out_offsets{{0, col_id}}; - in_g_e.slice(in_offsets, extents).device(place) = - out_g_e.slice(out_offsets, extents); - } } else if (pooltype == "LAST") { in_g_e.chip(h - 1, 0).device(place) = out_g_e; } else if (pooltype == "FIRST") { diff --git a/paddle/operators/sequence_softmax_op.cc b/paddle/operators/sequence_softmax_op.cc index c891ab1fdcbb167453462c45b00b4632e663dd0e..32c15025660ebf0baf317e269a33c047e6844219 100644 --- a/paddle/operators/sequence_softmax_op.cc +++ b/paddle/operators/sequence_softmax_op.cc @@ -43,20 +43,24 @@ class SequenceSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { "(LoDTensor) 1-D or 2-D output LoDTensor with the 2-nd dimension " "of length 1."); AddComment(R"DOC( -SequenceSoftmaxOp computes softmax activation among all time-steps for each +Sequence Softmax Operator. + +SequenceSoftmaxOp computes the softmax activation among all time-steps for each sequence. The dimension of each time-step should be 1. Thus, the shape of -input Tensor can be either [N, 1] or [N], where N is the sum of all sequences' -lengths. +input Tensor can be either [N, 1] or [N], where N is the sum of the length +of all sequences. -Equation: +The algorithm works as follows: for i-th sequence in a mini-batch: - Out(X[lod[i]:lod[i+1]], :) = - exp(X[lod[i]:lod[i+1], :]) / sum(exp(X[lod[i]:lod[i+1], :])) + $$Out(X[lod[i]:lod[i+1]], :) = + \frac{\exp(X[lod[i]:lod[i+1], :])} + {\sum(\exp(X[lod[i]:lod[i+1], :]))}$$ For example, for a mini-batch of 3 sequences with variable-length, each containing 2, 3, 2 time-steps, the lod of which is [0, 2, 5, 7], then softmax will be computed among X[0:2, :], X[2:5, :], X[5:7, :] and N turns out to be 7. + )DOC"); } }; diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc index 939176c73dc21dc662b1aaf23d8077c6856a5650..72f4e4d5cbcd692423fa2a3e9ec8e7033b552c3c 100644 --- a/paddle/operators/sgd_op.cc +++ b/paddle/operators/sgd_op.cc @@ -45,15 +45,17 @@ class SGDOpMaker : public framework::OpProtoAndCheckerMaker { public: SGDOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("Param", "Input parameter"); - AddInput("LearningRate", "Learning rate of SGD"); - AddInput("Grad", "Input gradient"); - AddOutput("ParamOut", "output parameter"); + AddInput("Param", "(Tensor) Input parameter"); + AddInput("LearningRate", "(Tensor) Learning rate of SGD"); + AddInput("Grad", "(Tensor) Input gradient"); + AddOutput("ParamOut", "(Tensor) Output parameter"); AddComment(R"DOC( -Simplest sgd algorithm. +SGD operator -param_out = param - learning_rate * grad; +This operator implements one step of the stochastic gradient descent algorithm. 
+ +$$param_out = param - learning_rate * grad$$ )DOC"); } diff --git a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc index e781c8db208464cb94d94d1914e50f5aba3db2c6..d9e40546523c60b0a7eec2e0593446258996ba58 100644 --- a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc +++ b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc @@ -107,26 +107,28 @@ class SigmoidCrossEntropyWithLogitsOpMaker AddComment(R"DOC( SigmoidCrossEntropyWithLogits Operator. -This measures the elementwise probability error in discrete classification tasks +This measures the element-wise probability error in classification tasks in which each class is independent. This can be thought of as predicting labels -for a data-point that are not mutually exclusive. For example, a news article -can be about politics, technology or sports at the same time or none of these. +for a data-point, where labels are not mutually exclusive. +For example, a news article can be about politics, technology or sports +at the same time or none of these. The logistic loss is given as follows: - loss = -Labels * log(sigmoid(X)) - (1 - Labels) * log(1 - sigmoid(X)) + $$loss = -Labels * \log(\sigma(X)) - (1 - Labels) * \log(1 - \sigma(X))$$ -We know that sigmoid(X) = (1 / (1 + exp(-X))). By substituting this we get +We know that $$\sigma(X) = (1 / (1 + \exp(-X)))$$. By substituting this we get: - loss = X - X * Labels + log(1 + exp(-X)) + $$loss = X - X * Labels + \log(1 + \exp(-X))$$ -For stability and to prevent overflow of exp(-X) when X < 0, -we can reformulate the loss as follows: +For stability and to prevent overflow of $$\exp(-X)$$ when X < 0, +we reformulate the loss as follows: - loss = max(X, 0) - X * Labels + log(1 + exp(-abs(X))) + $$loss = \max(X, 0) - X * Labels + \log(1 + \exp(-|X|))$$ Both the input `X` and `Labels` can carry the LoD (Level of Details) information. However the output only shares the LoD with input `X`. + )DOC"); } }; diff --git a/paddle/operators/sign_op.cc b/paddle/operators/sign_op.cc index 1b2f879d6d305e4e77be41683d8249904337a6f8..08bf2e4e7cc101a3bcc907d3b40ee82347b39f80 100644 --- a/paddle/operators/sign_op.cc +++ b/paddle/operators/sign_op.cc @@ -38,9 +38,10 @@ class SignOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) Input tensor of sign operator."); AddOutput("Out", "(Tensor) Output tensor of sign operator."); - AddComment(R"DOC(Sign operator + AddComment(R"DOC( +Sign operator -The equation is: Out = X.sign() +$$Out = X.sign()$$ )DOC"); } }; diff --git a/paddle/operators/smooth_l1_loss_op.cc b/paddle/operators/smooth_l1_loss_op.cc index 758481943d463f22eb6c6e0be9a99ad99161da5b..ebf7b43700a7498aa18b5f648b0b8c2c4e7b442b 100644 --- a/paddle/operators/smooth_l1_loss_op.cc +++ b/paddle/operators/smooth_l1_loss_op.cc @@ -77,14 +77,17 @@ class SmoothL1LossOpMaker : public framework::OpProtoAndCheckerMaker { "A float scalar with default value 3.0.") .SetDefault(3.0); AddComment(R"DOC( -Compute smooth l1 loss for input and target. The operator take the 1st -dimension of input as batch size. For each instance, it will compute -smooth l1 loss element by element first and sum all losses to one value. -So the output shape is [batch_size, 1]. +Smooth L1 Loss Operator. + +This operator computes the smooth l1 loss for input and target. +The operator takes the first dimension of input as the batch size. 
+For each instance, it computes the smooth l1 loss element by element first +and then sums all the losses. So the resulting output shape +is [batch_size, 1]. The equation is: -loss = 0.5 * (sigma * (x-y))^2 if abs(x - y) < 1 / sigma^2 - abs(x - y) - 0.5 / sigma^2 otherwise +loss = $$0.5 * (\sigma * (x-y))^2$$ if $$|x - y| < 1 /({\sigma}^2)$$ + $$\frac{|x - y| - 0.5}{{\sigma}^2}$$ otherwise )DOC"); } diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc index 00fd0b32a9b3c0dd9fedf7b7621b1f15e5c4ce93..93f89e33a73c5f4c6c0e5a8793a0abe7c692b656 100644 --- a/paddle/operators/softmax_op.cc +++ b/paddle/operators/softmax_op.cc @@ -44,20 +44,23 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { "2-D with shape [batch_size, input_feature_dimensions]."); AddOutput("Y", "The normalized values with the same shape as X."); AddComment(R"DOC( -The input of softmax operator is a 2-D tensor with shape N x K (N is the +Softmax Operator. + +The input of the softmax operator is a 2-D tensor with shape N x K (N is the batch_size, K is the dimension of input feature). The output tensor has the same shape as the input tensor. For each row of the input tensor, the softmax operator squashes the K-dimensional vector of arbitrary real values to a K-dimensional vector of real -values in the range [0, 1] that add up to 1. Specifically, it computes the -exponential of the given dimension and the sum of exponential values of all -the other dimensions in the K-dimensional vector input. Then the ratio of the -exponential of the given dimension and the sum of exponential values of all -the other dimensions is the output of the softmax operator. +values in the range [0, 1] that add up to 1. +It computes the exponential of the given dimension and the sum of exponential +values of all the other dimensions in the K-dimensional vector input. +Then the ratio of the exponential of the given dimension and the sum of +exponential values of all the other dimensions is the output of the softmax +operator. For each row `i` and each column `j` in input X, we have: - Y[i, j] = exp(X[i, j]) / sum_j(exp(X[i, j])) + $$Y[i, j] = \frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])}$$ )DOC"); } diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc index 50497da1b70d39d2638240dd91035c9181124af9..c6b94f5cc947ccb86315fd9058b8c57d1a996927 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/operators/softmax_with_cross_entropy_op.cc @@ -4,13 +4,13 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/softmax_with_cross_entropy_op.h" #include @@ -30,12 +30,10 @@ class SoftmaxWithCrossEntropyOpMaker "which is a 2-D tensor with shape [N x K]. 
N is the batch_size, " "and K is the class number."); AddInput("Label", - "(Tensor, default: Tensor), The ground truth which is a 2-D " - "tensor. " - "If softLabel is set to false, Label is a Tensor with shape " - "[N x 1]." - "If softLabel is set to true, Label is a Tensor " - "with shape [N x K]."); + "(Tensor) The ground truth which is a 2-D tensor. If soft_label " + "is set to false, Label is a Tensor with shape [N x 1]. If " + "soft_label is set to true, Label is a Tensor with " + "shape [N x K]."); AddOutput( "Softmax", "(Tensor, default: Tensor), A 2-D tensor with shape [N x K]. " @@ -51,32 +49,34 @@ class SoftmaxWithCrossEntropyOpMaker "the given labels as soft labels.") .SetDefault(false); AddComment(R"DOC( -Cross entropy loss with softmax are used as the output layer extensively. This +Softmax With Cross Entropy Operator. + +Cross entropy loss with softmax is used as the output layer extensively. This operator computes the softmax normalized values for each row of the input -tensor, after which cross-entropy loss is then computed. This provides a more +tensor, after which cross-entropy loss is computed. This provides a more numerically stable gradient. -Because this operators performs a softmax on logits internally, it expects -unscaled logits. Please do not call this op with the output of softmax operator, -which will produce incorrect results. +Because this operator performs a softmax on logits internally, it expects +unscaled logits. This operator should not be used with the output of +softmax operator since that would produce incorrect results. -When the attribute softLabel is set false, this operators expects mutually -exclusive hard labels, each sample in a batch is in exactly one class with -probabilities 1. Each sample in the batch with one and only one label. +When the attribute soft_label is set false, this operators expects mutually +exclusive hard labels, each sample in a batch is in exactly one class with a +probability of 1.0. Each sample in the batch will have a single label. 
-Equation: +The equation is as follows: -1) hard label (one-hot label) +1) Hard label (one-hot label, so every sample has exactly one class) -Loss_j = \f$ -\text{Logit}_{Label_j} + +$$Loss_j = \f$ -\text{Logit}_{Label_j} + \log\left(\sum_{i=0}^{K}\exp(\text{Logit}_i)\right), -j = 1, ..., K $\f +j = 1, ..., K $\f$$ -2) soft label (a distribution over all classes) +2) Soft label (each sample can have a distribution over all classes) -Loss_j = \f$ -\sum_{i=0}^{K}\text{Label}_i\left(\text{Logit}_i - +$$Loss_j = \f$ -\sum_{i=0}^{K}\text{Label}_i\left(\text{Logit}_i - \log\left(\sum_{i=0}^{K}\exp(\text{Logit}_i)\right)\right), -j = 1,...,K $\f +j = 1,...,K $\f$$ )DOC"); } @@ -196,6 +196,8 @@ REGISTER_OPERATOR(softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyOp, REGISTER_OPERATOR(softmax_with_cross_entropy_grad, ops::SoftmaxWithCrossEntropyOpGrad); REGISTER_OP_CPU_KERNEL(softmax_with_cross_entropy, - ops::SoftmaxWithCrossEntropyKernel); + ops::SoftmaxWithCrossEntropyKernel, + ops::SoftmaxWithCrossEntropyKernel); REGISTER_OP_CPU_KERNEL(softmax_with_cross_entropy_grad, - ops::SoftmaxWithCrossEntropyGradKernel); + ops::SoftmaxWithCrossEntropyGradKernel, + ops::SoftmaxWithCrossEntropyGradKernel); diff --git a/paddle/operators/softmax_with_cross_entropy_op.cu b/paddle/operators/softmax_with_cross_entropy_op.cu index 7602918bb39312db3c4d1a4064801712ef94ec72..b1faddac3fd21aaf817caf9d3e57e664f4e0e2d5 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cu +++ b/paddle/operators/softmax_with_cross_entropy_op.cu @@ -4,13 +4,13 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #define EIGEN_USE_GPU @@ -24,7 +24,7 @@ using Tensor = framework::Tensor; namespace { template __global__ void CrossEntropyGrad(T* logit_grad, const T* loss_grad, - const int* labels, const int batch_size, + const int64_t* labels, const int batch_size, const int class_num) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int sample_idx = tid / class_num; @@ -50,7 +50,7 @@ __global__ void SoftCrossEntropyGradientKernel(T* logit_grad, int ids = blockIdx.x * blockDim.x + threadIdx.x; if (ids < batch_size * class_num) { int row_ids = ids / class_num; - logit_grad[ids] = logit_grad[ids] * (loss_grad[row_ids] - labels[ids]); + logit_grad[ids] = loss_grad[row_ids] * (logit_grad[ids] - labels[ids]); } } } // namespace @@ -104,7 +104,7 @@ class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel { .stream()>>>(logit_grad_data, loss_grad_data, label_data, batch_size, class_num); } else { - const int* label_data = labels->data(); + const int64_t* label_data = labels->data(); CrossEntropyGrad<<< grid, block, 0, reinterpret_cast( context.device_context()) @@ -119,6 +119,8 @@ class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel { namespace ops = paddle::operators; REGISTER_OP_GPU_KERNEL(softmax_with_cross_entropy, - ops::SoftmaxWithCrossEntropyCUDAKernel); + ops::SoftmaxWithCrossEntropyCUDAKernel, + ops::SoftmaxWithCrossEntropyCUDAKernel); REGISTER_OP_GPU_KERNEL(softmax_with_cross_entropy_grad, - ops::SoftmaxWithCrossEntropyGradCUDAKernel); + ops::SoftmaxWithCrossEntropyGradCUDAKernel, + ops::SoftmaxWithCrossEntropyGradCUDAKernel); diff --git a/paddle/operators/softmax_with_cross_entropy_op.h b/paddle/operators/softmax_with_cross_entropy_op.h index 7f3f9e23aa9455437cfa893363b3e59a0699dbea..c4ab3f74b4b07d13957d99e01aa4868fac719f61 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.h +++ b/paddle/operators/softmax_with_cross_entropy_op.h @@ -4,13 +4,13 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #pragma once #include "paddle/framework/eigen.h" @@ -60,25 +60,25 @@ class SoftmaxWithCrossEntropyGradKernel : public framework::OpKernel { logit_grad->ShareDataWith(*context.Input("Softmax")); const int class_num = logit_grad->dims()[1]; + auto out_grad_mat = EigenMatrix::From(*out_grad); + auto logit_grad_mat = EigenMatrix::From(*logit_grad); + if (context.Attr("soft_label")) { - auto out_grad_mat = EigenMatrix::From(*out_grad); - auto logit_grad_mat = EigenMatrix::From(*logit_grad); auto lbl_mat = EigenMatrix::From(*labels); - logit_grad_mat.device(context.GetEigenDevice()) = - logit_grad_mat * - (out_grad_mat.broadcast(Eigen::DSizes(1, class_num)) - - lbl_mat); + out_grad_mat.broadcast(Eigen::DSizes(1, class_num)) * + (logit_grad_mat - lbl_mat); } else { + logit_grad_mat.device(context.GetEigenDevice()) = + logit_grad_mat * + out_grad_mat.broadcast(Eigen::DSizes(1, class_num)); + const int batch_size = logit_grad->dims()[0]; - const int* label_data = labels->data(); - const T* out_grad_data = out_grad->data(); + const int64_t* label_data = labels->data(); T* logit_grad_data = logit_grad->data(); - + const T* out_grad_data = out_grad->data(); for (int i = 0; i < batch_size; ++i) { - int index = i * class_num + label_data[i]; - logit_grad_data[index] = - out_grad_data[i] * (logit_grad_data[index] - 1.); + logit_grad_data[i * class_num + label_data[i]] -= out_grad_data[i]; } } } diff --git a/paddle/operators/split_op.cc b/paddle/operators/split_op.cc index 1ef314b77f0fdd395ddb0cecf8f29e97559cb7ca..275b25e96aa75fdbcb7275e272c49ea8d278d2c8 100644 --- a/paddle/operators/split_op.cc +++ b/paddle/operators/split_op.cc @@ -67,30 +67,38 @@ class SplitOpMaker : public framework::OpProtoAndCheckerMaker { public: SplitOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "the input tensor of split operator."); - AddOutput("Out", "the output tensors of split operator.").AsDuplicable(); + AddInput("X", "(Tensor) Input tensor of the split operator."); + AddOutput("Out", "(Tensor) Output tensors of the split operator.") + .AsDuplicable(); AddComment(R"DOC( - Split the input tensor into multiple sub-tensors. - Example: - Input = [[1,2], - [3,4], - [5,6]] - sections = [2,1] - axis = 0 - Output[0] = [[1,2], - [3,4]] - Output[1] = [[5,6]] +Split operator + +This operator splits the input tensor into multiple sub-tensors. + +Example: + Input = [[1,2], + [3,4], + [5,6]] + sections = [2,1] + axis = 0 + Output[0] = [[1,2], + [3,4]] + Output[1] = [[5,6]] )DOC"); AddAttr>("sections", - "the length for each" - "output along with the specify axis.") + "(vector) " + "the length of each output along the " + "specified axis.") .SetDefault(std::vector{}); AddAttr("num", - "number of the sub-tensors, it must evenly divide " + "(int, default 0)" + "Number of sub-tensors. 
This must evenly divide " "Input.dims()[axis]") .SetDefault(0); - AddAttr("axis", "The axis which the input will be splited on.") + AddAttr("axis", + "(int, default 0) " + "The axis which the input will be splited on.") .SetDefault(0); } }; diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc index e360c19b47eae7fc32ae66f9e4e3873bff211b04..bec2a2c18ae8da892ee7d71f45afe53c887c0f57 100644 --- a/paddle/operators/squared_l2_distance_op.cc +++ b/paddle/operators/squared_l2_distance_op.cc @@ -59,23 +59,26 @@ class SquaredL2DistanceOpMaker : public framework::OpProtoAndCheckerMaker { SquaredL2DistanceOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "Input of SquaredL2DistanceOp."); - AddInput("Y", "Target of SquaredL2DistanceOp."); + AddInput("X", "(Tensor) Input of SquaredL2DistanceOp."); + AddInput("Y", "(Tensor) Target of SquaredL2DistanceOp."); AddOutput("sub_result", - "Buffering substraction result which " + "(Tensor) Buffering subtraction result which " "will be reused in backward.") .AsIntermediate(); - AddOutput("Out", "Squared l2 distance between input and target."); + AddOutput("Out", "(Tensor) Squared l2 distance between input and target."); AddComment(R"DOC( - SquaredL2DistanceOp will cacluate the squared L2 distance for - input and target. Number of distance value equals to the - first dimension of input. First dimension of target could be equal to - input or to 1. If the first dimension of target is 1, SquaredL2DistanceOp - will broadcast target's first dimension to input's first dimension. - You can decide whether calculate the gradient of input and target. - - Both the input X and Y can carry the LoD (Level of Details) information, - or not. But the output only shares the LoD with input X. +SquaredL2Distance operator + +This operator will cacluate the squared L2 distance for the input and +the target. Number of distance value will be equal to the first dimension +of input. First dimension of the target could be equal to the input or to 1. +If the first dimension of target is 1, the operator will broadcast target's +first dimension to input's first dimension. During backward propagation, +the user can decide whether to calculate the gradient of the input or +the target or both. + +Both the input X and Y can carry the LoD (Level of Details) information. +However, the output only shares the LoD information with input X. )DOC"); } }; diff --git a/paddle/operators/squared_l2_norm_op.cc b/paddle/operators/squared_l2_norm_op.cc index 42ad87e65a85355e1b9b927dcef9ebbb88cde717..3c10e6159f44bc8c21b1e79aefaa962c7a2b64ed 100644 --- a/paddle/operators/squared_l2_norm_op.cc +++ b/paddle/operators/squared_l2_norm_op.cc @@ -52,13 +52,13 @@ class SquaredL2NormOpMaker : public framework::OpProtoAndCheckerMaker { framework::OpAttrChecker* op_checker) : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) The input of squared_l2_norm op."); - AddOutput("Out", "(Float) The output of squared_l2_norm op."); + AddOutput("Out", "(Scalar) The output of squared_l2_norm op."); AddComment(R"DOC( SquaredL2Norm Operator. Computes the squared L2 norm of a tensor. 
-Out = sum (X ** 2) +$$Out = \sum_{i} X_{i}^2$$ )DOC"); } diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc index ca36ad764c8a4cb5f6c58d3ac3d9ff4a588f3200..b1e58952fdba35183822ad2f9a51d5bcc5e6ad6a 100644 --- a/paddle/operators/sum_op.cc +++ b/paddle/operators/sum_op.cc @@ -24,10 +24,16 @@ class SumOp : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInputs("X"), "Inputs(X) should not be null"); - auto x_dims = ctx->GetInputsDim("X"); + PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of SumOp should not be null."); + if (ctx->IsRuntime() && + ctx->GetOutputsVarType("Out")[0] == + framework::VarDesc::LOD_TENSOR_ARRAY) { + return; // skip runtime infershape when is tensor array; + } + auto x_dims = ctx->GetInputsDim("X"); size_t N = x_dims.size(); PADDLE_ENFORCE_GT(N, 1, "Input tensors count should > 1."); @@ -39,19 +45,43 @@ class SumOp : public framework::OperatorWithKernel { ctx->SetOutputDim("Out", in_dim); ctx->ShareLoD("X", /*->*/ "Out"); } + + protected: + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + auto x_vars = ctx.MultiInputVar("X"); + if (x_vars[0]->IsType()) { + return framework::ToDataType( + x_vars[0]->Get().type()); + } else if (x_vars[0]->IsType()) { + return framework::ToDataType( + x_vars[0]->Get().value().type()); + } else if (x_vars[0]->IsType()) { + auto& array = x_vars[0]->Get(); + for (auto& each : array) { + if (each.numel() != 0) { + return framework::ToDataType(each.type()); + } + } + } + PADDLE_THROW("Unexpected branch. Input type is %s", + x_vars[0]->Type().name()); + } }; class SumOpMaker : public framework::OpProtoAndCheckerMaker { public: SumOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "the input tensors of sum operator.").AsDuplicable(); - AddOutput("Out", "the output tensor of sum operator."); + AddInput("X", "(vector) The input tensors of sum operator.") + .AsDuplicable(); + AddOutput("Out", "(Tensor) The output tensor of sum operator."); AddComment(R"DOC( -Sum the input tensors. +Sum operator. -All the inputs can carry the LoD (Level of Details) information, -or not. But the output only shares the LoD with the first input. +This operators sums the input tensors. All the inputs can carry the +LoD (Level of Details) information. However, the output only shares +the LoD information with the first input. 
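The element-wise behaviour documented above can be illustrated with a minimal standalone C++ sketch (not the Paddle kernel): Out[i] is the sum of X_k[i] over all inputs, and LoD handling is omitted here since, as the comment states, only the first input's LoD would be propagated.

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<std::vector<float>> inputs = {
          {1.f, 2.f, 3.f}, {10.f, 20.f, 30.f}, {100.f, 200.f, 300.f}};
      std::vector<float> out(inputs[0].size(), 0.f);
      // Accumulate every input tensor element-wise into the output.
      for (const auto& x : inputs)
        for (size_t i = 0; i < out.size(); ++i) out[i] += x[i];
      for (float v : out) std::printf("%g ", v);  // prints: 111 222 333
      std::printf("\n");
      return 0;
    }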
)DOC"); } }; @@ -61,18 +91,32 @@ class SumOpVarTypeInference : public framework::VarTypeInference { void operator()(const framework::OpDescBind& op_desc, framework::BlockDescBind* block) const override { auto& inputs = op_desc.Input("X"); - auto default_var_type = framework::VarDesc::SELECTED_ROWS; + auto var_type = framework::VarDesc::SELECTED_ROWS; bool any_input_is_lod_tensor = std::any_of( inputs.begin(), inputs.end(), [block](const std::string& name) { return block->Var(name)->GetType() == framework::VarDesc::LOD_TENSOR; }); - if (any_input_is_lod_tensor) { - default_var_type = framework::VarDesc::LOD_TENSOR; + + auto is_tensor_array = [block](const std::string& name) { + return block->Var(name)->GetType() == + framework::VarDesc::LOD_TENSOR_ARRAY; + }; + + bool any_input_is_tensor_array = + std::any_of(inputs.begin(), inputs.end(), is_tensor_array); + bool all_inputs_are_tensor_array = + std::all_of(inputs.begin(), inputs.end(), is_tensor_array); + + if (any_input_is_tensor_array) { + PADDLE_ENFORCE(all_inputs_are_tensor_array); + var_type = framework::VarDesc::LOD_TENSOR_ARRAY; + } else if (any_input_is_lod_tensor) { + var_type = framework::VarDesc::LOD_TENSOR; } auto out_var_name = op_desc.Output("Out").front(); - block->Var(out_var_name)->SetType(default_var_type); + block->Var(out_var_name)->SetType(var_type); } }; diff --git a/paddle/operators/sum_op.h b/paddle/operators/sum_op.h index ad441a598040aca71a72c9c03d477934d14e9a8b..4ca15611392b3117aa6c92cba95911eb8bebeb15 100644 --- a/paddle/operators/sum_op.h +++ b/paddle/operators/sum_op.h @@ -11,6 +11,7 @@ limitations under the License. */ #pragma once #include "paddle/framework/eigen.h" +#include "paddle/framework/lod_tensor_array.h" #include "paddle/framework/op_registry.h" #include "paddle/operators/math/math_function.h" #include "paddle/operators/math/selected_rows_functor.h" @@ -28,7 +29,7 @@ using EigenVector = framework::EigenVector; template class SumKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& context) const override { + void Compute(const framework::ExecutionContext &context) const override { auto in_vars = context.MultiInputVar("X"); int N = in_vars.size(); auto out_var = context.OutputVar("Out"); @@ -36,7 +37,7 @@ class SumKernel : public framework::OpKernel { bool in_place = out_var == in_vars[0]; if (out_var->IsType()) { - auto* out = context.Output("Out"); + auto *out = context.Output("Out"); out->mutable_data(context.GetPlace()); auto result = EigenVector::Flatten(*out); @@ -51,11 +52,11 @@ class SumKernel : public framework::OpKernel { // If in_place, just skip the first tensor for (int i = in_place ? 
1 : 0; i < N; i++) { if (in_vars[i]->IsType()) { - auto& in_t = in_vars[i]->Get(); + auto &in_t = in_vars[i]->Get(); auto in = EigenVector::Flatten(in_t); result.device(place) = result + in; } else if (in_vars[i]->IsType()) { - auto& in_t = in_vars[i]->Get(); + auto &in_t = in_vars[i]->Get(); functor(context.device_context(), in_t, out); } else { PADDLE_THROW("Variable type must be LoDTensor/SelectedRows."); @@ -63,8 +64,8 @@ class SumKernel : public framework::OpKernel { } } else if (out_var->IsType()) { PADDLE_ENFORCE(!in_place, "SelectedRows not support inplace sum now"); - auto* out = context.Output("Out"); - auto* out_value = out->mutable_value(); + auto *out = context.Output("Out"); + auto *out_value = out->mutable_value(); // Runtime InferShape size_t first_dim = 0; @@ -88,9 +89,36 @@ class SumKernel : public framework::OpKernel { offset, out); offset += in_vars[i]->Get().value().numel(); } + } else if (out_var->IsType()) { + auto &out_array = *out_var->GetMutable(); + for (size_t i = in_place ? 1 : 0; i < in_vars.size(); ++i) { + PADDLE_ENFORCE(in_vars[i]->IsType(), + "Only support all inputs are TensorArray"); + auto &in_array = in_vars[i]->Get(); + + for (size_t i = 0; i < in_array.size(); ++i) { + if (in_array[i].numel() != 0) { + if (i >= out_array.size()) { + out_array.resize(i + 1); + } + if (out_array[i].numel() == 0) { + out_array[i].CopyFrom(in_array[i], in_array[i].place(), + context.device_context()); + out_array[i].set_lod(in_array[i].lod()); + } else { + PADDLE_ENFORCE(out_array[i].lod() == in_array[i].lod()); + auto in = EigenVector::Flatten(in_array[i]); + auto result = EigenVector::Flatten(out_array[i]); + result.device(context.GetEigenDevice()) = result + in; + } + } + } + } + } else { + PADDLE_THROW("Unexpected branch, output variable type is %s", + out_var->Type().name()); } } }; - } // namespace operators } // namespace paddle diff --git a/paddle/operators/tensor_array_read_write_op.cc b/paddle/operators/tensor_array_read_write_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..50824032ca0e23b6f961928103ea4aa74b6ac23a --- /dev/null +++ b/paddle/operators/tensor_array_read_write_op.cc @@ -0,0 +1,218 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ +#include "paddle/framework/lod_tensor_array.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { +class ArrayOpBase : public framework::OperatorBase { + public: + ArrayOpBase(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + void Run(const framework::Scope &scope, + const platform::DeviceContext &dev_ctx) const override {} + + protected: + size_t GetOffset(const framework::Scope &scope, + const platform::DeviceContext &dev_ctx) const { + auto *i = scope.FindVar(Input("I")); + PADDLE_ENFORCE(i != nullptr, "I must be set"); + auto &i_tensor = i->Get(); + PADDLE_ENFORCE_EQ(i_tensor.numel(), 1); + size_t offset; + if (platform::is_gpu_place(i_tensor.place())) { + // FIXME: Avoid copy from GPU to CPU + framework::Tensor t; + t.CopyFrom(i_tensor, platform::CPUPlace(), dev_ctx); + dev_ctx.Wait(); + offset = static_cast(*t.data()); + } else { + offset = static_cast(*i_tensor.data()); + } + return offset; + } +}; + +class WriteToArrayOp : public ArrayOpBase { + public: + WriteToArrayOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : ArrayOpBase(type, inputs, outputs, attrs) {} + + void Run(const framework::Scope &scope, + const platform::DeviceContext &dev_ctx) const override { + auto *x = scope.FindVar(Input("X")); + PADDLE_ENFORCE(x != nullptr, "X must be set"); + auto &x_tensor = x->Get(); + size_t offset = GetOffset(scope, dev_ctx); + auto *out = + scope.FindVar(Output("Out"))->GetMutable(); + if (offset >= out->size()) { + out->resize(offset + 1); + } + auto *out_tensor = &out->at(offset); + out_tensor->CopyFrom(x_tensor, dev_ctx.GetPlace(), dev_ctx); + out_tensor->set_lod(x_tensor.lod()); + } +}; + +class WriteToArrayOpProtoMaker : public framework::OpProtoAndCheckerMaker { + public: + WriteToArrayOpProtoMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "(LoDTensor) the tensor will be written to tensor array"); + AddInput( + "I", + "(Tensor) the subscript index in tensor array. The number of element " + "should be 1"); + AddOutput("Out", "(TensorArray) the tensor array will be written"); + AddComment(R"DOC(Write a LoDTensor to a LoDTensor array. + +Assume T is LoDTensor, i is the subscript of the array, and A is the array. 
The +equation is + +A[i] = T +)DOC"); + } +}; + +class WriteToArrayInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *context) const override { + PADDLE_ENFORCE(context->HasInput("I"), "Must set the subscript index"); + PADDLE_ENFORCE_EQ(framework::product(context->GetInputDim("I")), 1, + "The number of element of subscript index must be 1"); + PADDLE_ENFORCE(context->HasInput("X"), NotHasXError()); + PADDLE_ENFORCE(context->HasOutput("Out"), NotHasOutError()); + context->SetOutputDim("Out", context->GetInputDim("X")); + } + + protected: + virtual const char *NotHasXError() const { return "Must set the lod tensor"; } + + virtual const char *NotHasOutError() const { + return "Must set the lod tensor array"; + } +}; + +class WriteToArrayInferVarType : public framework::VarTypeInference { + public: + void operator()(const framework::OpDescBind &op_desc, + framework::BlockDescBind *block) const override { + for (auto &out_var : op_desc.OutputArgumentNames()) { + VLOG(10) << "Set Variable " << out_var << " as LOD_TENSOR_ARRAY"; + block->Var(out_var)->SetType(framework::VarDesc::LOD_TENSOR_ARRAY); + } + } +}; + +class ReadFromArrayOp : public ArrayOpBase { + public: + ReadFromArrayOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : ArrayOpBase(type, inputs, outputs, attrs) {} + void Run(const framework::Scope &scope, + const platform::DeviceContext &dev_ctx) const override { + auto *x = scope.FindVar(Input("X")); + PADDLE_ENFORCE(x != nullptr, "X must be set"); + auto &x_array = x->Get(); + auto *out = scope.FindVar(Output("Out")); + PADDLE_ENFORCE(out != nullptr, "Out must be set"); + auto *out_tesnor = out->GetMutable(); + size_t offset = GetOffset(scope, dev_ctx); + PADDLE_ENFORCE_LT(offset, x_array.size()); + out_tesnor->CopyFrom(x_array[offset], dev_ctx.GetPlace(), dev_ctx); + out_tesnor->set_lod(x_array[offset].lod()); + } +}; + +class ReadFromArrayProtoMaker : public framework::OpProtoAndCheckerMaker { + public: + ReadFromArrayProtoMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "(TensorArray) the array will be read from."); + AddInput("I", + "(Tensor) the subscript index in tensor array. The number of " + "element should be 1"); + AddOutput("Out", "(LoDTensor) the tensor will be read from."); + AddComment(R"DOC(Read a LoDTensor from a LoDTensor Array + +Assume T is LoDTensor, i is th e subscript of the array, and A is the array. 
The +equation is + +T = A[i] +)DOC"); + } +}; + +class ReadFromArrayInferShape : public WriteToArrayInferShape { + protected: + const char *NotHasXError() const override { + return "The input array X must be set"; + } + const char *NotHasOutError() const override { + return "The output tensor out must be set"; + } +}; + +class WriteToArrayGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDescBind(); + grad_op->SetType("read_from_array"); + grad_op->SetInput("I", Input("I")); + grad_op->SetInput("X", OutputGrad("Out")); + grad_op->SetOutput("Out", InputGrad("X")); + grad_op->SetAttrMap(Attrs()); + return std::unique_ptr(grad_op); + } +}; + +class ReadFromArrayGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDescBind(); + grad_op->SetType("write_to_array"); + grad_op->SetInput("I", Input("I")); + grad_op->SetInput("X", OutputGrad("Out")); + grad_op->SetOutput("Out", InputGrad("X")); + grad_op->SetAttrMap(Attrs()); + return std::unique_ptr(grad_op); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(write_to_array, ops::WriteToArrayOp, + ops::WriteToArrayInferShape, ops::WriteToArrayOpProtoMaker, + ops::WriteToArrayGradMaker, ops::WriteToArrayInferVarType); +REGISTER_OPERATOR(read_from_array, ops::ReadFromArrayOp, + ops::ReadFromArrayInferShape, ops::ReadFromArrayProtoMaker, + ops::ReadFromArrayGradMaker); diff --git a/paddle/operators/top_k_op.cc b/paddle/operators/top_k_op.cc index ac9257259548dc4223efac239d8362f69366850c..16ae925eb5cab1c05f3bc376972cabadc4367d20 100644 --- a/paddle/operators/top_k_op.cc +++ b/paddle/operators/top_k_op.cc @@ -48,20 +48,20 @@ class TopkOpMaker : public framework::OpProtoAndCheckerMaker { public: TopkOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The input of Topk op"); - AddOutput("Out", "The output tensor of Topk op"); - AddOutput("Indices", "The indices of Topk elements of input"); - AddComment( - R"DOC(If the input is a vector (1d tensor), - finds the k largest entries in the vector - and outputs their values and indices as vectors. - Thus values[j] is the j-th largest entry in input, - and its index is indices[j]. + AddInput("X", "(Tensor) The input of Topk op"); + AddOutput("Out", "(Tensor) The output tensor of Topk op"); + AddOutput("Indices", "(Tensor) The indices of Topk elements of input"); + AddComment(R"DOC( +Top K operator - For matrices, computes the top k entries in each row. )DOC"); +If the input is a vector (1d tensor), this operator finds the k largest +entries in the vector and outputs their values and indices as vectors. +Thus values[j] is the j-th largest entry in input, and its index is indices[j]. + +For matrices, this operator computes the top k entries in each row. 
)DOC"); AddAttr("k", - "Number of top elements to look for along the last " - "dimension (along each row for matrices).") + "(int, default 1) Number of top elements to look for along " + "the last dimension (along each row for matrices).") .SetDefault(1); } }; diff --git a/paddle/operators/transpose_op.cc b/paddle/operators/transpose_op.cc index d785e57c830439ad80005d9a3d4bb77faf1ae1b9..94de3d5069017a7ca818e246ad574c4db92d8006 100644 --- a/paddle/operators/transpose_op.cc +++ b/paddle/operators/transpose_op.cc @@ -32,7 +32,7 @@ class TransposeOp : public framework::OperatorWithKernel { size_t axis_size = axis.size(); PADDLE_ENFORCE_EQ(x_rank, axis_size, - "the input tensor's rank(%d) " + "The input tensor's rank(%d) " "should be equal to the axis's size(%d)", x_rank, axis_size); @@ -64,12 +64,14 @@ class TransposeOpMaker : public framework::OpProtoAndCheckerMaker { AddOutput("Out", "(Tensor)The output tensor"); AddAttr>( "axis", - "(vector)a list of values, and the size of the list should be " + "(vector)A list of values, and the size of the list should be " "the same with the input tensor rank, the tensor will " "permute the axes according the the values given"); AddComment(R"DOC( -The Tensor will be permuted according to the axis values given. -The op is very much like the numpy.transpose function in python +Transpose Operator. + +The input tensor will be permuted according to the axis values given. +The op functions similar to how numpy.transpose works in python. For example: >> input = numpy.arange(6).reshape((2,3)) >> input @@ -83,6 +85,7 @@ For example: [2, 5]]) So, given a input tensor of shape(N, C, H, W) and the axis is {0, 2, 3, 1}, the output tensor shape will be (N, H, W, C) + )DOC"); } }; diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc index 82f9b8fbf1094bde1def83b9a1c464207b7e4669..cd22c561acb6cbd66faf1d3d6ee21779001a2795 100644 --- a/paddle/operators/uniform_random_op.cc +++ b/paddle/operators/uniform_random_op.cc @@ -74,18 +74,30 @@ class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker { UniformRandomOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : framework::OpProtoAndCheckerMaker(proto, op_checker) { - AddOutput("Out", "The output tensor of uniform random op"); - AddComment(R"DOC(Uniform random operator. -Used to initialize tensor with uniform random generator. + AddOutput("Out", "(Tensor) The output tensor of uniform random op"); + AddComment(R"DOC( +Uniform random operator. + +This operator initializes a tensor with random values sampled from a +uniform distribution. + )DOC"); - AddAttr>("shape", "the dimension of random tensor"); - AddAttr("min", "Minimum value of uniform random").SetDefault(-1.0f); - AddAttr("max", "Maximun value of uniform random").SetDefault(1.0f); + AddAttr>("shape", + "(vector) The shape of the output tensor"); + AddAttr("min", + "(float, default -1.0) " + "Minimum value of uniform random") + .SetDefault(-1.0f); + AddAttr("max", + "(float, default 1.0) " + "Maximun value of uniform random") + .SetDefault(1.0f); AddAttr("seed", - "Random seed of uniform random. " - "0 means generate a seed by system") + "(int, default 0) " + "Random seed used for generating samples. 
" + "0 means use a seed generated by the system.") .SetDefault(0); - AddAttr("data_type", "output tensor data type") + AddAttr("data_type", "(int, default 5(FP32)) Output tensor data type") .SetDefault(framework::DataType::FP32); } }; diff --git a/paddle/optimizer/CMakeLists.txt b/paddle/optimizer/CMakeLists.txt index 926fee47e1f86efa60dc40a2727edb06499bec4f..25fc35311fc63988c64a445d72fc6255e49e8d4b 100644 --- a/paddle/optimizer/CMakeLists.txt +++ b/paddle/optimizer/CMakeLists.txt @@ -1,5 +1,3 @@ -include_directories(${CMAKE_CURRENT_BINARY_DIR}) - set(OPITMIZER_SRCS adadelta_optimizer.cc adagrad_optimizer.cc @@ -9,11 +7,6 @@ set(OPITMIZER_SRCS sgd_optimizer.cc ) -add_library(paddle_optimizer STATIC ${OPITMIZER_SRCS}) -add_dependencies(paddle_optimizer paddle_proto ${external_project_dependencies}) - - -if(WITH_TESTING) - add_simple_unittest(serialization_test) - add_simple_unittest(parameter_optimizer_test) -endif() +cc_library(paddle_optimizer STATIC SRCS ${OPITMIZER_SRCS} DEPS paddle_proto glog) +cc_test(serialization_test SRCS serialization_test.cc DEPS paddle_proto) +cc_test(parameter_optimizer_test SRCS parameter_optimizer_test.cc DEPS paddle_optimizer) diff --git a/paddle/optimizer/adadelta_optimizer.cc b/paddle/optimizer/adadelta_optimizer.cc index 34913c405075ed72af30ed056f74e8b4d7482488..5cc7c47d4486c3d149c37fd6e312780f3d44eda8 100644 --- a/paddle/optimizer/adadelta_optimizer.cc +++ b/paddle/optimizer/adadelta_optimizer.cc @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + #include "adadelta_optimizer.h" #include #include diff --git a/paddle/optimizer/adadelta_optimizer.h b/paddle/optimizer/adadelta_optimizer.h index bc634ee46d60abc9ffc4a31abac5c2f8edaf7aba..6aab1ad553b15ebbd2d04c9323c5e56e1b8f60f5 100644 --- a/paddle/optimizer/adadelta_optimizer.h +++ b/paddle/optimizer/adadelta_optimizer.h @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + #pragma once #include "parameter_optimizer.h" diff --git a/paddle/optimizer/adagrad_optimizer.cc b/paddle/optimizer/adagrad_optimizer.cc index d915ffb8705eaa96bc96b8071a2c534d4d472273..c981996bab1b2e7ae5d6e2d858a73efde12e32f3 100644 --- a/paddle/optimizer/adagrad_optimizer.cc +++ b/paddle/optimizer/adagrad_optimizer.cc @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + #include #include "adagrad_optimizer.h" diff --git a/paddle/optimizer/adagrad_optimizer.h b/paddle/optimizer/adagrad_optimizer.h index b2935f8aff87f710f508c5c5757dd36526ca63f9..447b7c7547d5bad7436df6f3b3582b4a219f08c8 100644 --- a/paddle/optimizer/adagrad_optimizer.h +++ b/paddle/optimizer/adagrad_optimizer.h @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + #pragma once #include "parameter_optimizer.h" diff --git a/paddle/optimizer/adam_optimizer.cc b/paddle/optimizer/adam_optimizer.cc index 18e5896a22dc8a3c6292293fffc36ca9e3737b4c..6dc2d749708d0e2a7f36734d89eec30d4576842e 100644 --- a/paddle/optimizer/adam_optimizer.cc +++ b/paddle/optimizer/adam_optimizer.cc @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + #include "adam_optimizer.h" #include diff --git a/paddle/optimizer/adam_optimizer.h b/paddle/optimizer/adam_optimizer.h index d25cdc0731f65e9875d2fbf67783cce62d88af60..37ab53afc37a5f749a2909de12c7871ed926583f 100644 --- a/paddle/optimizer/adam_optimizer.h +++ b/paddle/optimizer/adam_optimizer.h @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + #pragma once #include "parameter_optimizer.h" diff --git a/paddle/optimizer/optimizer.cc b/paddle/optimizer/optimizer.cc index a2af139d012433214b825bd68289708098b76da8..faa23764522cef03bae1359adbf58d10ee7809ac 100644 --- a/paddle/optimizer/optimizer.cc +++ b/paddle/optimizer/optimizer.cc @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + #include "optimizer.h" #include #include @@ -6,8 +20,8 @@ #include "parameter_optimizer.h" -using namespace paddle; -using namespace paddle::optimizer; +using paddle::optimizer::ParameterOptimizer; +using paddle::optimizer::Tensor; template struct EnumToType {}; @@ -15,22 +29,21 @@ struct EnumToType {}; template struct TypeToEnum {}; -#define MATCH_ENUM_TYPE(TYPE, ENUM) \ - template <> \ - struct TypeToEnum { \ - static paddle_element_type v() { return ENUM; }; \ - static constexpr TYPE value = ENUM; \ - }; \ - template <> \ - struct EnumToType { \ - typedef TYPE Type; \ +#define MATCH_ENUM_TYPE(TYPE, ENUM) \ + template <> \ + struct TypeToEnum { \ + static paddle_element_type v() { return ENUM; } \ + static constexpr TYPE value = ENUM; \ + }; \ + template <> \ + struct EnumToType { \ + typedef TYPE Type; \ } MATCH_ENUM_TYPE(int32_t, PADDLE_ELEMENT_TYPE_INT32); MATCH_ENUM_TYPE(uint32_t, PADDLE_ELEMENT_TYPE_UINT32); MATCH_ENUM_TYPE(int64_t, PADDLE_ELEMENT_TYPE_INT64); MATCH_ENUM_TYPE(uint64_t, PADDLE_ELEMENT_TYPE_UINT64); -// TODO(zhihong): only implement below type, need to fix MATCH_ENUM_TYPE(float, PADDLE_ELEMENT_TYPE_FLOAT32); MATCH_ENUM_TYPE(double, PADDLE_ELEMENT_TYPE_FLOAT64); diff --git a/paddle/optimizer/optimizer.h b/paddle/optimizer/optimizer.h index aabf7a458dd30092ed1e522c4d88c6cfe63fcce1..e6fa12a4d250ccb078358704b0131942ea6ab039 100644 --- a/paddle/optimizer/optimizer.h +++ b/paddle/optimizer/optimizer.h @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + #pragma once #include diff --git a/paddle/optimizer/parameter_optimizer.cc b/paddle/optimizer/parameter_optimizer.cc index db0714635f9366b0404019688daf4708b4a0052f..da92c2d01cc2a27d1fadd51a338d23b01e0cb0bc 100644 --- a/paddle/optimizer/parameter_optimizer.cc +++ b/paddle/optimizer/parameter_optimizer.cc @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + #include #include "adadelta_optimizer.h" #include "adagrad_optimizer.h" diff --git a/paddle/optimizer/parameter_optimizer.h b/paddle/optimizer/parameter_optimizer.h index 8319f84e1b820adf5cc0006045f2e13dffa91797..99d0416e751c4ca6695d6ed77396e18d48fc86b8 100644 --- a/paddle/optimizer/parameter_optimizer.h +++ b/paddle/optimizer/parameter_optimizer.h @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + #pragma once #include diff --git a/paddle/optimizer/parameter_optimizer_test.cpp b/paddle/optimizer/parameter_optimizer_test.cc similarity index 97% rename from paddle/optimizer/parameter_optimizer_test.cpp rename to paddle/optimizer/parameter_optimizer_test.cc index c88fa11748716693355042d1784b33d7cfb616f1..f29e5317120642e3790a6f6c1976bdda67093a0c 100644 --- a/paddle/optimizer/parameter_optimizer_test.cpp +++ b/paddle/optimizer/parameter_optimizer_test.cc @@ -85,7 +85,7 @@ public: for (size_t i = 0; i < opts_.size(); ++i) { int s = 0; float* newp = (float*)opts_[i]->get_weight(&s); - EXPECT_EQ(s, kSize); + EXPECT_EQ(static_cast(s), kSize); for (size_t j = 0; j < kSize; ++j) { EXPECT_EQ(newp[j], (*p)[j]); } @@ -110,7 +110,7 @@ public: int s = 0; float* newp = (float*)opts_[i]->get_weight(&s); - EXPECT_EQ(s, kSize); + EXPECT_EQ(static_cast(s), kSize); for (size_t j = 0; j < kSize; ++j) { EXPECT_EQ(newp[j], (*p)[j]); } diff --git a/paddle/optimizer/serialization_test.cpp b/paddle/optimizer/serialization_test.cc similarity index 100% rename from paddle/optimizer/serialization_test.cpp rename to paddle/optimizer/serialization_test.cc diff --git a/paddle/optimizer/sgd_optimizer.cc b/paddle/optimizer/sgd_optimizer.cc index 1090419083c8b8cf60eca02791ef673287f4a9a4..c150144ac24b8375d08691a98be680b6bf5d1e7f 100644 --- a/paddle/optimizer/sgd_optimizer.cc +++ b/paddle/optimizer/sgd_optimizer.cc @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + #include "sgd_optimizer.h" #include "serialization.h" diff --git a/paddle/optimizer/sgd_optimizer.h b/paddle/optimizer/sgd_optimizer.h index 6e1a0f0d3f9ecfeb51ccb355d65985a2e6388fb0..0b1da0aa27d98e8d6a8d9fd7a1ebe355acb2a1f4 100644 --- a/paddle/optimizer/sgd_optimizer.h +++ b/paddle/optimizer/sgd_optimizer.h @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + #pragma once #include "parameter_optimizer.h" @@ -15,7 +29,6 @@ public: nesterov_(n) { if (momentum_ != 0.0) { size_t size = parameter->size(); - // TODO: fix it with align aware allocator bind to Tensor momentums_ = new Tensor(size); } } diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc index dcae426c7e231757d796c5a84cc5a1c2b0d6763b..5a1ff9b7976abbe4a37f8366181d9d1ae78ea4a0 100644 --- a/paddle/pybind/protobuf.cc +++ b/paddle/pybind/protobuf.cc @@ -97,6 +97,15 @@ namespace pybind { using namespace paddle::framework; // NOLINT +template +static py::bytes SerializeMessage(T &self) { + // Check IsInitialized in Python + std::string retv; + PADDLE_ENFORCE(self.Proto()->SerializePartialToString(&retv), + "Cannot serialize message"); + return retv; +} + // Bind Methods void BindProgramDesc(py::module &m) { py::class_(m, "ProgramDesc", "") @@ -132,17 +141,7 @@ void BindProgramDesc(py::module &m) { .def("block", &ProgramDescBind::MutableBlock, py::return_value_policy::reference) .def("num_blocks", &ProgramDescBind::Size) - .def("serialize_to_string", - [](ProgramDescBind &program_desc) -> py::bytes { - const ProgramDesc *desc = program_desc.Proto(); - PADDLE_ENFORCE(desc->IsInitialized(), - "ProgramDesc has not been initialized."); - std::string res; - PADDLE_ENFORCE( - desc->SerializeToString(&res), - "Serialize ProgramDesc Error. This could be a bug of Paddle."); - return res; - }) + .def("serialize_to_string", SerializeMessage) .def("parse_from_string", [](ProgramDescBind &program_desc, const std::string &data) { ProgramDesc *desc = program_desc.Proto(); @@ -181,16 +180,7 @@ void BindBlockDesc(py::module &m) { py::return_value_policy::reference) .def("op_size", &BlockDescBind::OpSize) .def("op", &BlockDescBind::Op, py::return_value_policy::reference) - .def("serialize_to_string", [](BlockDescBind &block_desc) -> py::bytes { - const BlockDesc *desc = block_desc.Proto(); - PADDLE_ENFORCE(desc->IsInitialized(), - "BlockDesc has not been initialized."); - std::string res; - PADDLE_ENFORCE( - desc->SerializeToString(&res), - "Serialize BlockDesc Error. 
This could be a bug of Paddle."); - return res; - }); + .def("serialize_to_string", SerializeMessage); } void BindVarDsec(py::module &m) { @@ -219,17 +209,7 @@ void BindVarDsec(py::module &m) { .def("set_lod_level", &VarDescBind::SetLoDLevel) .def("type", &VarDescBind::GetType) .def("set_type", &VarDescBind::SetType) - .def("serialize_to_string", - [](VarDescBind &var_desc) -> py::bytes { - const VarDesc *desc = var_desc.Proto(); - PADDLE_ENFORCE(desc->IsInitialized(), - "VarDesc has not been initialized."); - std::string res; - PADDLE_ENFORCE( - desc->SerializeToString(&res), - "Serialize VarDesc Error. This could be a bug of Paddle."); - return res; - }) + .def("serialize_to_string", SerializeMessage) .def("persistable", &VarDescBind::Persistable) .def("set_persistable", &VarDescBind::SetPersistable); @@ -238,7 +218,9 @@ void BindVarDsec(py::module &m) { .value("SELECTED_ROWS", VarDesc::SELECTED_ROWS) .value("FEED_MINIBATCH", VarDesc::FEED_MINIBATCH) .value("FETCH_LIST", VarDesc::FETCH_LIST) - .value("STEP_SCOPES", VarDesc::STEP_SCOPES); + .value("STEP_SCOPES", VarDesc::STEP_SCOPES) + .value("LOD_RANK_TABLE", VarDesc::LOD_RANK_TABLE) + .value("LOD_TENSOR_ARRAY", VarDesc::LOD_TENSOR_ARRAY); } void BindOpDesc(py::module &m) { @@ -272,16 +254,7 @@ void BindOpDesc(py::module &m) { .def("check_attrs", &OpDescBind::CheckAttrs) .def("infer_shape", &OpDescBind::InferShape) .def("infer_var_type", &OpDescBind::InferVarType) - .def("serialize_to_string", [](OpDescBind &op_desc) -> py::bytes { - const OpDesc *desc = op_desc.Proto(); - PADDLE_ENFORCE(desc->IsInitialized(), - "OpDesc has not been initialized."); - std::string res; - PADDLE_ENFORCE( - desc->SerializeToString(&res), - "Serialize OpDesc Error. This could be a bug of Paddle."); - return res; - }); + .def("serialize_to_string", SerializeMessage); } } // namespace pybind diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index aab08a759b094382c62feec57c6a907490331fea..0c528174b2b2b3a27869ed0083fc96b8d90e723b 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -21,7 +21,9 @@ limitations under the License. */ #include "paddle/framework/executor.h" #include "paddle/framework/feed_fetch_method.h" #include "paddle/framework/framework.pb.h" +#include "paddle/framework/lod_rank_table.h" #include "paddle/framework/lod_tensor.h" +#include "paddle/framework/lod_tensor_array.h" #include "paddle/framework/prune.h" #include "paddle/framework/selected_rows.h" #include "paddle/framework/tensor_array.h" @@ -224,11 +226,17 @@ All parameter, weight, gradient are variables in Paddle. return self.GetMutable(); }, py::return_value_policy::reference) + .def("get_lod_rank_table", + [](Variable &self) { return self.GetMutable(); }, + py::return_value_policy::reference) .def("get_selected_rows", [](Variable &self) -> SelectedRows * { return self.GetMutable(); }, py::return_value_policy::reference) + .def("get_lod_tensor_array", + [](Variable &self) { return self.GetMutable(); }, + py::return_value_policy::reference) #ifdef PADDLE_WITH_CUDA .def("get_communicator", [](Variable &self) -> platform::Communicator * { @@ -492,6 +500,32 @@ All parameter, weight, gradient are variables in Paddle. 
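The SerializeMessage template above folds the four near-identical serialize_to_string lambdas into one helper; from Python the binding is typically paired with parse_from_string or the generated protobuf classes. A rough sketch, where the framework_pb2 import path is an assumption of this sketch:

.. code-block:: python

    from paddle.v2.framework.framework import g_main_program
    # Import path of the generated protobuf module is assumed here.
    import paddle.v2.framework.proto.framework_pb2 as framework_pb2

    # Serialize the ProgramDesc through the new pybind helper ...
    protostr = g_main_program.desc.serialize_to_string()
    # ... and parse it back with the protobuf class to inspect or clone it.
    proto = framework_pb2.ProgramDesc.FromString(str(protostr))
    print(proto)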
BindVarDsec(m); BindOpDesc(m); + py::class_(m, "LodRankTable") + .def("items", [](framework::LoDRankTable &table) { + std::vector> res; + for (auto &item : table.items()) { + res.push_back({item.index, item.length}); + } + return res; + }); + + py::class_(m, "LoDTensorArray") + .def("__getitem__", + [](LoDTensorArray &self, size_t i) { return &self.at(i); }, + py::return_value_policy::reference) + .def("__len__", [](LoDTensorArray &self) { return self.size(); }) + .def("__setitem__", + [](LoDTensorArray &self, size_t i, const LoDTensor &t) { + PADDLE_ENFORCE_LT(i, self.size()); + self[i].ShareDataWith(t); + self[i].set_lod(t.lod()); + }) + .def("append", [](LoDTensorArray &self, const LoDTensor &t) { + self.emplace_back(); + self.back().ShareDataWith(t); + self.back().set_lod(t.lod()); + }); + m.def("op_support_gpu", OpSupportGPU); #ifdef PADDLE_WITH_CUDA m.def("get_cuda_device_count", platform::GetCUDADeviceCount); diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index a08716c5a559def54bb7b989f250b489f6a805a2..53e68648e6c1718cc4d4d9fe074e8deb418d3b1d 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -2,170 +2,185 @@ set -xe -# Set BASE_IMAGE according to env variables -if [[ ${WITH_GPU} == "ON" ]]; then - BASE_IMAGE="nvidia/cuda:8.0-cudnn5-runtime-ubuntu16.04" -else - BASE_IMAGE="ubuntu:16.04" -fi - -DOCKERFILE_GPU_ENV="" -DOCKERFILE_CUDNN_DSO="" -if [[ ${WITH_GPU:-OFF} == 'ON' ]]; then - DOCKERFILE_GPU_ENV="ENV LD_LIBRARY_PATH /usr/lib/x86_64-linux-gnu:${LD_LIBRARY_PATH}" - DOCKERFILE_CUDNN_DSO="RUN ln -s /usr/lib/x86_64-linux-gnu/libcudnn.so.5 /usr/lib/x86_64-linux-gnu/libcudnn.so" -fi - -mkdir -p /paddle/build -cd /paddle/build - -# build script will not fail if *.deb does not exist -rm *.deb 2>/dev/null || true -# delete previous built whl packages -rm -rf /paddle/paddle/dist 2>/dev/null || true - -cat </dev/null || true + # delete previous built whl packages + rm -rf /paddle/paddle/dist 2>/dev/null || true -if [ ${WITH_TESTING:-ON} == "ON" ] && [ ${RUN_TEST:-OFF} == "ON" ] ; then -cat < /paddle/build/Dockerfile < -ENV HOME /root +function gen_dockerfile() { + + cat <> /paddle/build/Dockerfile < /paddle/build/Dockerfile < + ENV HOME /root EOF -fi - -if [[ ${WITH_GPU} == "ON" ]]; then - NCCL_DEPS="apt-get install -y libnccl-dev &&" -else - NCCL_DEPS="" -fi - -cat >> /paddle/build/Dockerfile <> /paddle/build/Dockerfile < -#if (defined(__APPLE__) || defined(__OSX__)) && !defined(__arm__) && \ - !defined(__aarch64__) +#if defined(__APPLE__) || defined(__OSX__) int fegetexcept(void); int feenableexcept(unsigned int excepts); diff --git a/paddle/utils/arch/osx/Excepts.cpp b/paddle/utils/arch/osx/Excepts.cpp index 42ecaa06d256c9d259a20c648626605d77ce0308..ac444615786fa9f89f96504a31b2289eae7bb643 100644 --- a/paddle/utils/arch/osx/Excepts.cpp +++ b/paddle/utils/arch/osx/Excepts.cpp @@ -14,9 +14,13 @@ limitations under the License. */ #include "paddle/utils/Excepts.h" -#if (defined(__APPLE__) || defined(__OSX__)) && !defined(__arm__) && \ - !defined(__aarch64__) - +#if defined(__APPLE__) || defined(__OSX__) +#if defined(__arm__) || defined(__arm64__) +// TODO(liuyiqun): implement the arm version +int fegetexcept(void) { return -1; } +int feenableexcept(unsigned int excepts) { return -1; } +int fedisableexcept(unsigned int excepts) { return -1; } +#else int fegetexcept(void) { static fenv_t fenv; return fegetenv(&fenv) ? 
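A rough sketch of driving the LoDTensorArray bindings added above from Python; `var` stands for a core Variable that already holds a LOD_TENSOR_ARRAY (for example the output of layers.array_write fetched from a scope) and is an assumption of this sketch:

.. code-block:: python

    import numpy
    import paddle.v2.framework.core as core

    cpu = core.CPUPlace()
    t = core.LoDTensor()
    t.set(numpy.random.random(size=(10, 4)).astype('float32'), cpu)

    arr = var.get_lod_tensor_array()   # `var` is assumed to exist, see above
    arr.append(t)                      # shares data and copies the LoD, as bound above
    print(len(arr))                    # __len__
    first = arr[0]                     # __getitem__ returns a reference
    arr[0] = t                         # __setitem__ bounds-checks the index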
-1 : (fenv.__control & FE_ALL_EXCEPT); @@ -49,5 +53,5 @@ int fedisableexcept(unsigned int excepts) { return (fesetenv(&fenv) ? -1 : old_excepts); } - +#endif #endif diff --git a/paddle/utils/tests/test_StringUtils.cpp b/paddle/utils/tests/test_StringUtils.cpp index fdc914d1bcc3c74e0f05ef475069abc315bdc306..248f58a7f26e26e82b55110930964cee04fb558b 100644 --- a/paddle/utils/tests/test_StringUtils.cpp +++ b/paddle/utils/tests/test_StringUtils.cpp @@ -18,6 +18,6 @@ limitations under the License. */ TEST(StringUtil, to) { ASSERT_NEAR(paddle::str::to("12.45"), 12.45, 1e-5); - ASSERT_DEATH(paddle::str::to("12.45x23"), ".*"); - ASSERT_DEATH(paddle::str::to(""), ".*"); + ASSERT_DEATH_IF_SUPPORTED(paddle::str::to("12.45x23"), ".*"); + ASSERT_DEATH_IF_SUPPORTED(paddle::str::to(""), ".*"); } diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 7bd6d59b0096c23bb791b9b50702130057628879..32578ad7799c0a276972ccef7770c2eae8438069 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -44,6 +44,7 @@ add_custom_target(copy_paddle_pybind ALL DEPENDS ${PADDLE_SOURCE_DIR}/python/pad add_custom_command(OUTPUT ${PADDLE_PYTHON_BUILD_DIR}/.timestamp + COMMAND touch stub.cc COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_PYTHON_BUILD_DIR}/.timestamp COMMAND ${CMAKE_COMMAND} -E remove_directory ${PADDLE_PYTHON_BUILD_DIR}/lib-python diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index e88e962cff5bbfcb8be1014dbaab85568d2625ff..0e65598485d8785b3f5b2f1bc7e87f377b35792e 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -2775,9 +2775,15 @@ class NCELayer(LayerBase): @config_layer('addto') class AddToLayer(LayerBase): + layer_type = 'addto' + def __init__(self, name, inputs, bias=True, **xargs): + use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0))) + if self.layer_type == "mkldnn_addto": + config_assert(use_mkldnn, "mkldnn_addto only support MKLDNN") + self.layer_type = 'mkldnn_addto' if use_mkldnn else 'addto' super(AddToLayer, self).__init__( - name, 'addto', 0, inputs=inputs, **xargs) + name, self.layer_type, 0, inputs=inputs, **xargs) config_assert(len(inputs) > 0, 'inputs cannot be empty for AddToLayer') if len(self.inputs) > 1: @@ -2796,6 +2802,11 @@ class AddToLayer(LayerBase): self.create_bias_parameter(bias, self.config.size) +@config_layer('mkldnn_addto') +class MKLDNNAddtoLayer(AddToLayer): + layer_type = 'mkldnn_addto' + + @config_layer('agent') class AgentLayer(LayerBase): def __init__(self, name, size, device=None): diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index cc1b34df9e7cf8d17bafeb57624548de017066e9..169e201046a0d7b8c3e85f60946d8c1c762c88f4 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -143,6 +143,7 @@ __all__ = [ 'scale_shift_layer', 'img_conv3d_layer', 'resize_layer', + 'sub_seq_layer', ] @@ -252,6 +253,7 @@ class LayerType(object): SCALE_SHIFT_LAYER = 'scale_shift' RESIZE = 'resize' + SUB_SEQ_LAYER = 'subseq' @staticmethod def is_layer_type(type_name): @@ -1047,6 +1049,13 @@ def fc_layer(input, if isinstance(param_attr, collections.Sequence): assert len(input) == len(param_attr) else: + if "parameter_name" in param_attr.attr and len(input) > 1: + logger.fatal( + "When the name field of param_attr is manually specified " + "and the input is a list, the param_attr should also be a 
" + "list with each item being the param_attr for each input " + "item. If only one named param_attr is provided, all the " + "input items would share this parameter.") param_attr = [copy.deepcopy(param_attr) for _ in range(len(input))] assert isinstance(input, collections.Sequence) @@ -4877,6 +4886,13 @@ def selective_fc_layer(input, if isinstance(param_attr, collections.Sequence): assert len(input) == len(param_attr) else: + if "parameter_name" in param_attr.attr and len(input) > 1: + logger.fatal( + "When the name field of param_attr is manually specified " + "and the input is a list, the param_attr should also be a " + "list with each item being the param_attr for each input " + "item. If only one named param_attr is provided, all the " + "input items would share this parameter.") param_attr = [copy.deepcopy(param_attr) for _ in range(len(input))] assert isinstance(input, collections.Sequence) @@ -6966,3 +6982,58 @@ def resize_layer(input, size, name=None): """ Layer(name=name, type=LayerType.RESIZE, inputs=Input(input.name), size=size) return LayerOutput(name, LayerType.RESIZE, parents=[input], size=input.size) + + +@wrap_act_default(act=LinearActivation()) +@wrap_name_default('sub_seq') +def sub_seq_layer(input, offsets, sizes, act=None, bias_attr=None, name=None): + """ + sub_seq_layer will return sub-sequences from the input sequences. For each + sequence in the input sequence layer, sub_seq_layer will slice it by given + offset and size. Please notice that, number of offset value and size value + both are equal to the number of sequence in the input layer. + + .. code-block:: python + + sub_seq = sub_seq_layer(input=input_seq, offsets=offsets, sizes=sizes) + + :param name: The name of this layer. It is optional. + :type name: basestring + :param input: The input of this layer, which should be sequence. + :type input: LayerOutput + :param offsets: offset indices to slice the input sequence, which should be + sequence type. + :type offsets: LayerOutput + :param sizes: sizes of the sub-sequences, which should be sequence type. + :type sizes: LayerOutput + :param act: Layer activation, default is LinearActivation + :type act: BaseActivation. + :param bias_attr: The Bias Attribute. If the parameter is set to + False or something not type of ParameterAttribute, + no bias is defined. If the parameter is set to + True, the bias is initialized to zero. + :type bias_attr: ParameterAttribute | None | bool | Any + :return: LayerOutput object. 
+ :rtype: LayerOutput + """ + + assert isinstance(input, LayerOutput), ( + 'The first input of sub_seq_layer layer must be a PaddlePaddle layer.') + assert isinstance(offsets, LayerOutput), ( + 'The offset indices for sub_seq_layer, ' + 'must be a PaddlePaddle layer.') + assert isinstance(sizes, LayerOutput), ( + 'The sizes of sub-sequences, must be a PaddlePaddle layer.') + + Layer( + name=name, + type=LayerType.SUB_SEQ_LAYER, + inputs=[input.name, offsets.name, sizes.name], + active_type=act.name, + bias=ParamAttr.to_bias(bias_attr)) + + return LayerOutput( + name, + LayerType.SUB_SEQ_LAYER, + parents=[input, offsets, sizes], + size=input.size) diff --git a/python/paddle/trainer_config_helpers/optimizers.py b/python/paddle/trainer_config_helpers/optimizers.py index c3495ee110bfaf91a47637a52e88b3bb56dce7a9..c3cd4cf8c32e20f3ef86305489fc415397dec1b8 100644 --- a/python/paddle/trainer_config_helpers/optimizers.py +++ b/python/paddle/trainer_config_helpers/optimizers.py @@ -116,7 +116,7 @@ class AdamOptimizer(BaseSGDOptimizer): m(w, t) & = \\beta_1 m(w, t-1) + (1 - \\beta_1) \\nabla Q_i(w) \\\\ v(w, t) & = \\beta_2 v(w, t-1) + (1 - \\beta_2)(\\nabla Q_i(w)) ^2 \\\\ - w & = w - \\frac{\\eta}{\\sqrt{v(w,t) + \\epsilon}} + w & = w - \\frac{\\eta m(w, t)}{\\sqrt{v(w,t) + \\epsilon}} :param beta1: the :math:`\\beta_1` in equation. :type beta1: float diff --git a/python/paddle/utils/merge_model.py b/python/paddle/utils/merge_model.py index 48e5087cc281bd3a3d0b4a403372456ebbf39c62..421e953d2775f145800cf7179ec644697a265060 100644 --- a/python/paddle/utils/merge_model.py +++ b/python/paddle/utils/merge_model.py @@ -23,32 +23,32 @@ from paddle.v2.topology import Topology def merge_v2_model(net, param_file, output_file): - '''Integrate the model config and model parameters into one file. - + '''Merge the model config and parameters into one file. + The model configuration file describes the model structure which ends with .py. The parameters file stores the parameters of the model which ends with .tar.gz. - - @param net The output layer of the network. - @param param_file Path of the model parameters(.tar.gz) which is stored by v2 api. + + @param net The output layer of the network for inference. + @param param_file Path of the parameters (.tar.gz) which is stored by v2 api. @param output_file Path of the merged file which will be generated. 
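The check added to fc_layer and selective_fc_layer above turns a silently shared parameter into an explicit error. A small sketch of the two configurations, assuming the usual v1 config-helper imports (the ParamAttr alias and its import path are assumptions):

.. code-block:: python

    from paddle.trainer_config_helpers import data_layer, fc_layer
    from paddle.trainer_config_helpers.attrs import ParamAttr  # import path assumed

    a = data_layer(name='a', size=128)
    b = data_layer(name='b', size=128)

    # One param_attr per input: each input gets its own named weight.
    fc_ok = fc_layer(input=[a, b], size=64,
                     param_attr=[ParamAttr(name='w_a'), ParamAttr(name='w_b')])

    # A single *named* param_attr with a list input now hits logger.fatal,
    # because every input would otherwise share the same parameter.
    # fc_bad = fc_layer(input=[a, b], size=64, param_attr=ParamAttr(name='w_shared'))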
- + Usage: - from paddle.util.merge_model import merge_v2_model + from paddle.utils.merge_model import merge_v2_model # import your network configuration - from mobilenet import mobile_net - - net = mobile_net(3*224*224, 102) + from example_net import net_conf + + net = net_conf(is_predict=True) param_file = './param_pass_00000.tar.gz' output_file = './output.paddle' - + merge_v2_model(net, param_file, output_file) ''' assert isinstance(net, LayerOutput), \ - "The net should be the output of the network" + "The net should be the output of the network for inference" assert os.path.exists(param_file), \ "The model parameters file %s does not exists " % (param_file) diff --git a/python/paddle/v2/framework/backward.py b/python/paddle/v2/framework/backward.py index 6827792cb351243f926aeca5f37324dc987d6a79..678efd5d20585355a684bb2df16fdb57a69e0eeb 100644 --- a/python/paddle/v2/framework/backward.py +++ b/python/paddle/v2/framework/backward.py @@ -19,8 +19,20 @@ def append_backward_ops(loss, parameter_list=None, no_grad_set=None): :rtype: list[Variable] """ assert isinstance(loss, framework.Variable) - param_grad_map = loss.block.program.append_backward(loss, no_grad_set or - set()) + + if no_grad_set is None: + program = loss.block.program + assert isinstance(program, framework.Program) + no_grad_set = list() + for block in program.blocks: + assert isinstance(block, framework.Block) + for var in block.vars.itervalues(): + assert isinstance(var, framework.Variable) + if var.stop_gradient: + no_grad_set.append(var.name) + no_grad_set = set(no_grad_set) + + param_grad_map = loss.block.program.append_backward(loss, no_grad_set) if parameter_list is not None: parameters = parameter_list else: diff --git a/python/paddle/v2/framework/executor.py b/python/paddle/v2/framework/executor.py index 8268d0d8f5126b00365d4e5e76cd98de4c47e670..f5c833190e73a277bef2509e02c4be051768933d 100644 --- a/python/paddle/v2/framework/executor.py +++ b/python/paddle/v2/framework/executor.py @@ -1,5 +1,5 @@ import paddle.v2.framework.core as core -from paddle.v2.framework.framework import Block, Program +from paddle.v2.framework.framework import Block, Program, g_main_program g_scope = core.Scope() @@ -18,7 +18,7 @@ class Executor(object): self.executor = core.Executor(act_places) def run(self, - program, + program=None, feed=None, fetch_list=None, feed_var_name='feed', @@ -29,6 +29,9 @@ class Executor(object): if fetch_list is None: fetch_list = [] + if program is None: + program = g_main_program + if not isinstance(program, Program): raise TypeError() diff --git a/python/paddle/v2/framework/framework.py b/python/paddle/v2/framework/framework.py index 690e29db470b74ed0f7de08d824b2e3d4e8708ad..3a7d440db94b13833e05552fe1032d218ddbdac2 100644 --- a/python/paddle/v2/framework/framework.py +++ b/python/paddle/v2/framework/framework.py @@ -12,6 +12,14 @@ def unique_name(prefix): return "_".join([prefix, str(uid)]) +def _debug_string_(proto): + error_fields = list() + if not proto.IsInitialized(error_fields): + raise ValueError("{0} are not initialized\nThe message is {1}".format( + error_fields, proto)) + return proto.__str__() + + class Variable(object): def __init__(self, block, @@ -21,6 +29,7 @@ class Variable(object): dtype=None, lod_level=None, persistable=None, + stop_gradient=False, **kwargs): self.block = block @@ -89,11 +98,12 @@ class Variable(object): self.block.vars[name] = self self.op = None + self.stop_gradient = stop_gradient def __str__(self): protostr = self.desc.serialize_to_string() proto = 
framework_pb2.VarDesc.FromString(str(protostr)) - return proto.__str__() + return _debug_string_(proto) __repr__ = __str__ @@ -101,6 +111,10 @@ class Variable(object): def persistable(self): return self.desc.persistable() + @persistable.setter + def persistable(self, p): + self.desc.set_persistable(p) + @property def name(self): return self.desc.name() @@ -280,7 +294,7 @@ class Operator(object): def __str__(self): protostr = self.desc.serialize_to_string() proto = framework_pb2.OpDesc.FromString(str(protostr)) - return proto.__str__() + return _debug_string_(proto) __repr__ = __str__ @@ -337,7 +351,7 @@ class Block(object): def __str__(self): protostr = self.desc.serialize_to_string() proto = framework_pb2.BlockDesc.FromString(str(protostr)) - return proto.__str__() + return _debug_string_(proto) __repr__ = __str__ @@ -442,7 +456,7 @@ class Program(object): def __str__(self): protostr = self.desc.serialize_to_string() proto = framework_pb2.ProgramDesc.FromString(str(protostr)) - return proto.__str__() + return _debug_string_(proto) def clone(self): p = Program() @@ -546,5 +560,5 @@ class Parameter(Variable): # program is a global instance. -g_program = Program() -g_init_program = Program() +g_main_program = Program() +g_startup_program = Program() diff --git a/python/paddle/v2/framework/io.py b/python/paddle/v2/framework/io.py index f3ba719bde086f696a27b806228a8c97466a681e..5c247904a330e25b1a9f53db431947840db3f615 100644 --- a/python/paddle/v2/framework/io.py +++ b/python/paddle/v2/framework/io.py @@ -1,7 +1,7 @@ import os import cPickle as pickle -from paddle.v2.framework.framework import Program, Parameter, g_program, \ +from paddle.v2.framework.framework import Program, Parameter, g_main_program, \ Variable __all__ = [ @@ -29,13 +29,13 @@ def _clone_var_in_block_(block, var): persistable=True) -def save_vars(executor, dirname, program=None, vars=None, predicate=None): +def save_vars(executor, dirname, main_program=None, vars=None, predicate=None): """ Save variables to directory by executor. :param executor: executor that save variable :param dirname: directory path - :param program: program. If vars is None, then filter all variables in this + :param main_program: program. If vars is None, then filter all variables in this program which fit `predicate`. Default g_program. :param predicate: The Predicate describes a callable that returns a variable as a bool. If it returns true, the variables will be saved. @@ -44,15 +44,15 @@ def save_vars(executor, dirname, program=None, vars=None, predicate=None): :return: None """ if vars is None: - if program is None: - program = g_program - if not isinstance(program, Program): + if main_program is None: + main_program = g_main_program + if not isinstance(main_program, Program): raise TypeError("program should be as Program type or None") save_vars( executor, dirname=dirname, - vars=filter(predicate, program.list_vars())) + vars=filter(predicate, main_program.list_vars())) else: save_program = Program() save_block = save_program.global_block() @@ -66,37 +66,37 @@ def save_vars(executor, dirname, program=None, vars=None, predicate=None): executor.run(save_program) -def save_params(executor, dirname, program=None): +def save_params(executor, dirname, main_program=None): """ Save all parameters to directory with executor. 
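With the change to append_backward_ops above, every variable whose stop_gradient flag is set is collected into no_grad_set automatically when the caller passes none. A minimal sketch using only layers that appear in this patch:

.. code-block:: python

    import paddle.v2.framework.layers as layers
    from paddle.v2.framework.backward import append_backward_ops

    x = layers.data(name='x', shape=[13])   # data() now marks its output stop_gradient=True
    y = layers.data(name='y', shape=[13])
    y.stop_gradient = True                  # an explicit opt-out is honoured the same way

    cost = layers.mean(x=x)                 # stand-in for a real loss
    # no_grad_set is None here, so every stop_gradient variable in every block of
    # the program (x, y, ...) is added to it before the backward pass is appended.
    param_grads = append_backward_ops(cost)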
""" save_vars( executor, dirname=dirname, - program=program, + main_program=main_program, vars=None, predicate=is_parameter) -def save_persistables(executor, dirname, program=None): +def save_persistables(executor, dirname, main_program=None): """ Save all persistables to directory with executor. """ save_vars( executor, dirname=dirname, - program=program, + main_program=main_program, vars=None, predicate=is_persistable) -def load_vars(executor, dirname, program=None, vars=None, predicate=None): +def load_vars(executor, dirname, main_program=None, vars=None, predicate=None): """ Load variables from directory by executor. :param executor: executor that save variable :param dirname: directory path - :param program: program. If vars is None, then filter all variables in this + :param main_program: program. If vars is None, then filter all variables in this program which fit `predicate`. Default g_program. :param predicate: The Predicate describes a callable that returns a variable as a bool. If it returns true, the variables will be loaded. @@ -105,15 +105,15 @@ def load_vars(executor, dirname, program=None, vars=None, predicate=None): :return: None """ if vars is None: - if program is None: - program = g_program - if not isinstance(program, Program): + if main_program is None: + main_program = g_main_program + if not isinstance(main_program, Program): raise TypeError("program's type should be Program") load_vars( executor, dirname=dirname, - vars=filter(predicate, program.list_vars())) + vars=filter(predicate, main_program.list_vars())) else: load_prog = Program() load_block = load_prog.global_block() @@ -129,27 +129,33 @@ def load_vars(executor, dirname, program=None, vars=None, predicate=None): executor.run(load_prog) -def load_params(executor, dirname, program=None): +def load_params(executor, dirname, main_program=None): """ load all parameters from directory by executor. """ load_vars( - executor, dirname=dirname, program=program, predicate=is_parameter) + executor, + dirname=dirname, + main_program=main_program, + predicate=is_parameter) -def load_persistables(executor, dirname, program=None): +def load_persistables(executor, dirname, main_program=None): """ load all persistables from directory by executor. """ load_vars( - executor, dirname=dirname, program=program, predicate=is_persistable) + executor, + dirname=dirname, + main_program=main_program, + predicate=is_persistable) def save_inference_model(dirname, feeded_var_names, target_vars, executor, - program=None): + main_program=None): """ Build a model especially for inference, and save it to directory by the executor. @@ -158,20 +164,20 @@ def save_inference_model(dirname, :param feeded_var_names: Names of variables that need to be feeded data during inference :param target_vars: Variables from which we can get inference results. :param executor: executor that save inference model - :param program: original program, which will be pruned to build the inference model. + :param main_program: original program, which will be pruned to build the inference model. Default g_program. 
:return: None """ - if program is None: - program = g_program + if main_program is None: + main_program = g_main_program if not isinstance(target_vars, list): target_vars = [target_vars] if not os.path.isdir(dirname): os.makedirs(dirname) - pruned_program = program.prune(target_vars) + pruned_program = main_program.prune(target_vars) fetch_var_names = [v.name for v in target_vars] model_file_name = dirname + "/__model__" @@ -182,10 +188,10 @@ def save_inference_model(dirname, "fetch_var_names": fetch_var_names }, f, -1) - save_params(executor, dirname, program) + save_params(executor, dirname, main_program) -def load_persistables_if_exist(executor, dirname, program=None): +def load_persistables_if_exist(executor, dirname, main_program=None): filenames = next(os.walk(dirname))[2] filenames = set(filenames) @@ -198,7 +204,7 @@ def load_persistables_if_exist(executor, dirname, program=None): load_vars( executor, dirname, - program=program, + main_program=main_program, vars=None, predicate=_is_presistable_and_exist_) diff --git a/python/paddle/v2/framework/layer_helper.py b/python/paddle/v2/framework/layer_helper.py index aa7dd0b50d430352d681bac5175b84850f672c46..c38346b79fecfb2f82a60b360c505da16ecdf3c0 100644 --- a/python/paddle/v2/framework/layer_helper.py +++ b/python/paddle/v2/framework/layer_helper.py @@ -1,8 +1,8 @@ import copy import itertools -from paddle.v2.framework.framework import Variable, g_program, \ - g_init_program, unique_name, Program +from paddle.v2.framework.framework import Variable, g_main_program, \ + g_startup_program, unique_name, Program from paddle.v2.framework.initializer import ConstantInitializer, \ UniformInitializer @@ -20,23 +20,23 @@ class LayerHelper(object): return self.kwargs['name'] @property - def program(self): - prog = self.kwargs.get('program', None) + def main_program(self): + prog = self.kwargs.get('main_program', None) if prog is None: - return g_program + return g_main_program else: return prog @property - def init_program(self): - prog = self.kwargs.get('init_program', None) + def startup_program(self): + prog = self.kwargs.get('startup_program', None) if prog is None: - return g_init_program + return g_startup_program else: return prog def append_op(self, *args, **kwargs): - return self.program.current_block().append_op(*args, **kwargs) + return self.main_program.current_block().append_op(*args, **kwargs) def multiple_input(self, input_param_name='input'): inputs = self.kwargs.get(input_param_name, []) @@ -112,32 +112,35 @@ class LayerHelper(object): raise ValueError("Data Type mismatch") return dtype - def create_parameter(self, attr, shape, dtype, suffix='w'): + def create_parameter(self, attr, shape, dtype, suffix='w', + initializer=None): # Deepcopy the attr so that parameters can be shared in program attr_copy = copy.deepcopy(attr) + if initializer is not None: + attr_copy['initializer'] = initializer if attr_copy['name'] is None: attr_copy['name'] = unique_name(".".join([self.name, suffix])) - self.init_program.global_block().create_parameter( + self.startup_program.global_block().create_parameter( dtype=dtype, shape=shape, **attr_copy) - return self.program.global_block().create_parameter( + return self.main_program.global_block().create_parameter( name=attr_copy['name'], dtype=dtype, shape=shape) def create_tmp_variable(self, dtype): - return self.program.current_block().create_var( + return self.main_program.current_block().create_var( name=unique_name(".".join([self.name, 'tmp'])), dtype=dtype, persistable=False) def 
create_variable(self, *args, **kwargs): - return self.program.current_block().create_var(*args, **kwargs) + return self.main_program.current_block().create_var(*args, **kwargs) def create_global_variable(self, persistable=False, *args, **kwargs): - return self.program.global_block().create_var( + return self.main_program.global_block().create_var( *args, persistable=persistable, **kwargs) def set_variable_initializer(self, var, initializer): assert isinstance(var, Variable) - self.init_program.global_block().create_var( + self.startup_program.global_block().create_var( name=var.name, type=var.type, dtype=var.data_type, diff --git a/python/paddle/v2/framework/layers.py b/python/paddle/v2/framework/layers.py index a98b4e554f9877436381ced6a2576bbe286feb3f..917d3d938862640145591c5084c9f0bc4d7c23bf 100644 --- a/python/paddle/v2/framework/layers.py +++ b/python/paddle/v2/framework/layers.py @@ -1,8 +1,9 @@ -from paddle.v2.framework.layer_helper import LayerHelper, unique_name import paddle.v2.framework.core as core from paddle.v2.framework.framework import OpProtoHolder, Variable, Program, \ Operator -from paddle.v2.framework.initializer import ConstantInitializer +from paddle.v2.framework.initializer import ConstantInitializer, \ + NormalInitializer +from paddle.v2.framework.layer_helper import LayerHelper, unique_name import re __all__ = [ @@ -19,8 +20,8 @@ def fc(input, name=None, act=None, num_flatten_dims=1, - program=None, - init_program=None): + main_program=None, + startup_program=None): # create helper helper = LayerHelper('fc', **locals()) @@ -65,8 +66,8 @@ def embedding(input, data_type='float32', is_sparse=False, param_attr=None, - program=None, - init_program=None): + main_program=None, + startup_program=None): helper = LayerHelper('embedding', **locals()) w = helper.create_parameter( attr=helper.param_attr, shape=size, dtype=data_type) @@ -85,8 +86,8 @@ def data(name, data_type='float32', type=core.VarDesc.VarType.LOD_TENSOR, append_batch_size=True, - program=None, - init_program=None): + main_program=None, + startup_program=None): helper = LayerHelper('data', **locals()) shape = list(shape) for i in xrange(len(shape)): @@ -100,7 +101,7 @@ def data(name, shape = [-1] + shape # append batch size as -1 return helper.create_global_variable( - name=name, shape=shape, dtype=data_type, type=type) + name=name, shape=shape, dtype=data_type, type=type, stop_gradient=True) def _convert_(name): @@ -179,7 +180,7 @@ _create_op_func_('sigmoid') _create_op_func_('scale') -def cast(x, data_type, program=None): +def cast(x, data_type, main_program=None): helper = LayerHelper('cast', **locals()) out = helper.create_tmp_variable(dtype=data_type) helper.append_op( @@ -191,7 +192,7 @@ def cast(x, data_type, program=None): return out -def concat(input, axis, program=None, init_program=None): +def concat(input, axis, main_program=None, startup_program=None): helper = LayerHelper('concat', **locals()) out = helper.create_tmp_variable(dtype=helper.input_dtype()) helper.append_op( @@ -202,7 +203,7 @@ def concat(input, axis, program=None, init_program=None): return out -def sums(input, program=None, init_program=None): +def sums(input, main_program=None, startup_program=None): helper = LayerHelper('sum', **locals()) out = helper.create_tmp_variable(dtype=helper.input_dtype()) helper.append_op(type='sum', inputs={'X': input}, outputs={'Out': out}) @@ -282,8 +283,8 @@ def sequence_conv(input, padding=None, bias_attr=None, param_attr=None, - program=None, - init_program=None): + main_program=None, + 
startup_program=None): # FIXME(dzh) : want to unify the argument of python layer # function. So we ignore some unecessary attributes. # such as, padding_trainable, context_start. @@ -322,8 +323,8 @@ def conv2d(input, padding=None, bias_attr=None, param_attr=None, - program=None, - init_program=None): + main_program=None, + startup_program=None): helper = LayerHelper('conv2d', **locals()) dtype = helper.input_dtype() @@ -344,8 +345,13 @@ def conv2d(input, input_shape = input.shape filter_shape = [num_filters, num_filter_channels] + filter_size + + std = (2.0 / (filter_size[0]**2 * num_channels))**0.5 filter = helper.create_parameter( - attr=helper.param_attr, shape=filter_shape, dtype=dtype) + attr=helper.param_attr, + shape=filter_shape, + dtype=dtype, + initializer=NormalInitializer(0.0, std, 0)) pre_bias = helper.create_tmp_variable(dtype) helper.append_op( @@ -368,11 +374,13 @@ def sequence_pool(input, pool_type, **kwargs): helper = LayerHelper('sequence_pool', input=input, **kwargs) dtype = helper.input_dtype() pool_out = helper.create_tmp_variable(dtype) + max_index = helper.create_tmp_variable(dtype) helper.append_op( type="sequence_pool", - inputs={"X": [input]}, - outputs={"Out": [pool_out]}, + inputs={"X": input}, + outputs={"Out": pool_out, + "MaxIndex": max_index}, attrs={"pooltype": pool_type.upper()}) return pool_out @@ -384,8 +392,8 @@ def pool2d(input, pool_stride=[1, 1], pool_padding=[0, 0], global_pooling=False, - program=None, - init_program=None): + main_program=None, + startup_program=None): if pool_type not in ["max", "avg"]: raise ValueError( "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.", @@ -420,12 +428,12 @@ def batch_norm(input, act=None, is_test=False, momentum=0.9, - epsilon=1e05, + epsilon=1e-05, param_attr=None, bias_attr=None, data_layout='NCHW', - program=None, - init_program=None): + main_program=None, + startup_program=None): helper = LayerHelper('batch_norm', **locals()) dtype = helper.input_dtype() @@ -438,27 +446,29 @@ def batch_norm(input, else: raise ValueError("unsupported data layout:" + data_layout) - def create_persistable_var(dtype, shape, initializer=None): - name = unique_name(".".join([helper.name, "xxxx"])) - var = init_program.global_block().create_var( - dtype=dtype, shape=shape, name=name, persistable=True) - if initializer is not None: - initializer(var, var.block) - return program.global_block().create_var( - name=name, dtype=dtype, shape=shape, persistable=True) - param_shape = [channel_num] # create parameter scale = helper.create_parameter( - attr=helper.param_attr, shape=param_shape, dtype=dtype) + attr=helper.param_attr, + shape=param_shape, + dtype=dtype, + initializer=ConstantInitializer(1.0)) bias = helper.create_parameter( - attr=helper.param_attr, shape=param_shape, dtype=dtype) + attr=helper.param_attr, + shape=param_shape, + dtype=dtype, + initializer=ConstantInitializer(0.0)) + + mean = helper.create_global_variable( + dtype=input.data_type, shape=param_shape, persistable=True) + helper.set_variable_initializer( + var=mean, initializer=ConstantInitializer(0.0)) - # create input - mean = create_persistable_var(dtype, param_shape, ConstantInitializer(0.0)) - variance = create_persistable_var(dtype, param_shape, - ConstantInitializer(1.0)) + variance = helper.create_global_variable( + dtype=input.data_type, shape=param_shape, persistable=True) + helper.set_variable_initializer( + var=variance, initializer=ConstantInitializer(1.0)) # create output # mean and mean_out share the same memory @@ -499,16 +509,16 @@ class 
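conv2d above now seeds its filter with NormalInitializer(0.0, std, 0), where std follows an He/MSRA-style fan-in rule. A tiny sketch of the same computation (the concrete numbers are only illustrative):

.. code-block:: python

    # std = sqrt(2 / (k * k * C_in)) for a square k x k filter over C_in channels,
    # matching the expression used in conv2d above.
    def conv2d_filter_std(filter_size, num_channels):
        return (2.0 / (filter_size ** 2 * num_channels)) ** 0.5

    std = conv2d_filter_std(filter_size=3, num_channels=64)   # ~= 0.0589
    # conv2d then creates the filter parameter with NormalInitializer(0.0, std, 0).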
BlockGuard(object): keyword. """ - def __init__(self, program): - if not isinstance(program, Program): + def __init__(self, main_program): + if not isinstance(main_program, Program): raise TypeError("BlockGuard takes a program") - self.program = program + self.main_program = main_program def __enter__(self): - self.program.create_block() + self.main_program.create_block() def __exit__(self, exc_type, exc_val, exc_tb): - self.program.rollback() + self.main_program.rollback() if exc_type is not None: return False # re-raise exception return True @@ -518,7 +528,7 @@ class StaticRNNGuard(BlockGuard): def __init__(self, rnn): if not isinstance(rnn, StaticRNN): raise TypeError("StaticRNNGuard takes an StaticRNN") - super(StaticRNNGuard, self).__init__(rnn.helper.program) + super(StaticRNNGuard, self).__init__(rnn.helper.main_program) self.rnn = rnn def __enter__(self): @@ -554,8 +564,9 @@ class StaticRNN(object): IN_RNN_BLOCK = 1 AFTER_RNN_BLOCK = 2 - def __init__(self, name=None, program=None): - self.helper = LayerHelper("static_rnn", name=name, program=program) + def __init__(self, name=None, main_program=None): + self.helper = LayerHelper( + "static_rnn", name=name, main_program=main_program) self.memories = {} # memory map, from pre_mem.name --> MemoryLink self.inputs = [] # input variable list in current block self.outputs = [] # output variable list in parent block @@ -570,25 +581,45 @@ class StaticRNN(object): if self.status != StaticRNN.IN_RNN_BLOCK: raise ValueError("You must invoke {0} in rnn block".format(method)) - def memory(self, init=None, shape=None, dtype=None, init_value=0): + def memory(self, + init=None, + shape=None, + batch_ref=None, + init_value=0.0, + init_batch_dim_idx=0, + ref_batch_dim_idx=1): + ''' + :param init: boot memory, if not set, a shape, batch_ref must be provided + :param shape: shape of the boot memory + :param batch_ref: batch size reference variable + :param init_value: the init value of boot memory + :param init_batch_dim_idx: the index of batch size in init's dimension + :param ref_batch_dim_idx: the index of batch size in batch_ref's dimension + :return: boot memory + ''' self._assert_in_rnn_block_('memory') if init is None: - if shape is None or dtype is None: + if shape is None or batch_ref is None: raise ValueError( - "if init is None, memory at least need shape and dtype") + "if init is None, memory at least need shape and batch_ref") parent_block = self.parent_block() var_name = unique_name("@".join([self.helper.name, "memory_boot"])) boot_var = parent_block.create_var( - name=var_name, shape=shape, dtype=dtype, persistable=False) + name=var_name, + shape=shape, + dtype=batch_ref.data_type, + persistable=False) parent_block.append_op( - type="fill_constant", - inputs={}, + type="fill_constant_batch_size_like", + inputs={'Input': [batch_ref]}, outputs={'Out': [boot_var]}, attrs={ 'value': init_value, - 'shape': [40] + list(boot_var.shape[1:]), - 'data_type': boot_var.data_type + 'shape': boot_var.shape, + 'data_type': boot_var.data_type, + 'input_dim_idx': ref_batch_dim_idx, + 'output_dim_idx': init_batch_dim_idx }) return self.memory(init=boot_var) @@ -647,7 +678,7 @@ class StaticRNN(object): self.memories[mem.name].mem = var def parent_block(self): - prog = self.helper.program + prog = self.helper.main_program parent_idx = prog.current_block().parent_idx assert parent_idx >= 0 parent_block = prog.block(parent_idx) @@ -664,8 +695,8 @@ class StaticRNN(object): return self.outputs def complete_rnn_op(self): - program = self.helper.program - 
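StaticRNN.memory above can now build its boot memory from a shape plus a batch-size reference via fill_constant_batch_size_like, instead of the previously hard-coded batch of 40. A rough sketch; `step_in` and `boot_tensor` are assumed variables, and entering the rnn's step block is elided:

.. code-block:: python

    import paddle.v2.framework.layers as layers

    rnn = layers.StaticRNN()
    # ... inside the RNN's step block (guard entry elided), with `step_in` a
    # per-step input variable and `boot_tensor` a precomputed initial state ...

    # Old style: hand the memory an explicit boot tensor.
    prev = rnn.memory(init=boot_tensor)

    # New style: shape plus batch reference; dim init_batch_dim_idx of the boot
    # memory is overwritten with the size of dim ref_batch_dim_idx of step_in.
    prev = rnn.memory(shape=[1, 128], batch_ref=step_in, init_value=0.0,
                      init_batch_dim_idx=0, ref_batch_dim_idx=1)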
rnn_block = program.current_block() + main_program = self.helper.main_program + rnn_block = main_program.current_block() parent_block = self.parent_block() local_inputs = set() @@ -729,3 +760,81 @@ class StaticRNN(object): 'states': memories, 'step_block': rnn_block }) + + +def lod_rank_table(x, level=0, main_program=None): + helper = LayerHelper("lod_rank_table", **locals()) + table = helper.create_variable( + type=core.VarDesc.VarType.LOD_RANK_TABLE, + name=unique_name("lod_rank_table")) + helper.append_op( + type='lod_rank_table', + inputs={'X': x}, + outputs={'Out': table}, + attrs={'level': level}) + return table + + +def fill_constant(shape, dtype, value, main_program=None): + helper = LayerHelper("ones", **locals()) + out = helper.create_tmp_variable(dtype=dtype) + helper.append_op( + type='fill_constant', + inputs={}, + outputs={'Out': [out]}, + attrs={ + 'shape': shape, + 'data_type': out.data_type, + 'value': float(value) + }) + out.stop_gradient = True + return out + + +def ones(shape, dtype, main_program=None): + return fill_constant(value=1.0, **locals()) + + +def zeros(shape, dtype, main_program=None): + return fill_constant(value=0.0, **locals()) + + +def increment(x, value=1.0, main_program=None): + helper = LayerHelper("increment", **locals()) + tmp = helper.create_tmp_variable(dtype=x.data_type) + helper.append_op( + type='increment', + inputs={'X': [x]}, + outputs={'Out': [tmp]}, + attrs={'step': value}) + return tmp + + +def array_write(x, i, array=None, main_program=None): + helper = LayerHelper('array_write', **locals()) + if array is None: + array = helper.create_variable( + name="{0}.out".format(helper.name), + type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, + dtype=x.data_type) + helper.append_op( + type='write_to_array', + inputs={'X': [x], + 'I': [i]}, + outputs={'Out': [array]}) + return array + + +def array_read(array, i, main_program=None): + helper = LayerHelper('array_read', **locals()) + if not isinstance( + array, + Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY: + raise TypeError("array should be tensor array vairable") + out = helper.create_tmp_variable(dtype=array.data_type) + helper.append_op( + type='read_from_array', + inputs={'X': [array], + 'I': [i]}, + outputs={'Out': [out]}) + return out diff --git a/python/paddle/v2/framework/net_drawer.py b/python/paddle/v2/framework/net_drawer.py index aa30e2a6ca231667186529aec30adcdbc6efcff9..045e267c253e2485e75df3fb95cc0e591ee29ea5 100644 --- a/python/paddle/v2/framework/net_drawer.py +++ b/python/paddle/v2/framework/net_drawer.py @@ -80,7 +80,7 @@ def parse_graph(program, graph, var_dict, **kwargs): graph.edge(**draw_edge(var_dict, op, e, arg)) -def draw_graph(init_program, program, **kwargs): +def draw_graph(startup_program, main_program, **kwargs): if kwargs.has_key("graph_attr"): GRAPH_STYLE.update(kwargs[graph_attr]) if kwargs.has_key("node_attr"): @@ -101,8 +101,8 @@ def draw_graph(init_program, program, **kwargs): **kwargs) var_dict = {} - parse_graph(init_program, g, var_dict) - parse_graph(program, g, var_dict) + parse_graph(startup_program, g, var_dict) + parse_graph(main_program, g, var_dict) if filename != None: g.save() diff --git a/python/paddle/v2/framework/nets.py b/python/paddle/v2/framework/nets.py index f5a2c27676a02b953026be0893cd49b832bf2c6b..725d2fa7f5e7a862eea0ef9172a9e63858ebd0dd 100644 --- a/python/paddle/v2/framework/nets.py +++ b/python/paddle/v2/framework/nets.py @@ -10,23 +10,23 @@ def simple_img_conv_pool(input, pool_stride, act, pool_type='max', - program=None, - 
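The new helpers added to layers.py above each wrap a single op; a short sketch of how they compose (shapes are illustrative):

.. code-block:: python

    import paddle.v2.framework.layers as layers

    i = layers.zeros(shape=[1], dtype='int64')      # fill_constant wrapper
    x = layers.data(name='x', shape=[10])

    arr = layers.array_write(x=x, i=i)              # write x into a LoDTensorArray at index i
    y = layers.array_read(array=arr, i=i)           # read it back from the same index

    i = layers.increment(x=i, value=1.0)            # move the index forward by one
    table = layers.lod_rank_table(x, level=0)       # build a LOD_RANK_TABLE from x's LoD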
init_program=None): + main_program=None, + startup_program=None): conv_out = layers.conv2d( input=input, num_filters=num_filters, filter_size=filter_size, act=act, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) pool_out = layers.pool2d( input=conv_out, pool_size=pool_size, pool_type=pool_type, pool_stride=pool_stride, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) return pool_out @@ -40,8 +40,8 @@ def img_conv_group(input, conv_batchnorm_drop_rate=None, pool_stride=1, pool_type=None, - program=None, - init_program=None): + main_program=None, + startup_program=None): """ Image Convolution Group, Used for vgg net. """ @@ -71,30 +71,30 @@ def img_conv_group(input, filter_size=conv_filter_size[i], padding=conv_padding[i], act=local_conv_act, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) if conv_with_batchnorm[i]: tmp = layers.batch_norm( input=tmp, act=conv_act, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) drop_rate = conv_batchnorm_drop_rate[i] if abs(drop_rate) > 1e-5: tmp = layers.dropout( x=tmp, dropout_prob=drop_rate, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) pool_out = layers.pool2d( input=tmp, pool_size=pool_size, pool_type=pool_type, pool_stride=pool_stride, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) return pool_out @@ -103,19 +103,19 @@ def sequence_conv_pool(input, filter_size, act="sigmoid", pool_type="max", - program=None, - init_program=None): + main_program=None, + startup_program=None): conv_out = layers.sequence_conv( input=input, num_filters=num_filters, filter_size=filter_size, act=act, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) pool_out = layers.sequence_pool( input=conv_out, pool_type=pool_type, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) return pool_out diff --git a/python/paddle/v2/framework/optimizer.py b/python/paddle/v2/framework/optimizer.py index 902442297eb54df91a46db68a66e9208ece3e71c..f20865d604f68c8398f299bf8edfd020bfa4e4c5 100644 --- a/python/paddle/v2/framework/optimizer.py +++ b/python/paddle/v2/framework/optimizer.py @@ -132,7 +132,7 @@ class Optimizer(object): def create_optimization_pass(self, parameters_and_grads, loss, - init_program=None): + startup_program=None): """Add optimization operators to update gradients to variables. Args: @@ -144,7 +144,7 @@ class Optimizer(object): optimization. This will include parameter update ops, global step update ops and any other custom ops required by subclasses to manage their internal state. - :param init_program: + :param startup_program: """ # This is a default implementation of create_optimization_pass that # can be shared by most optimizers. 
This implementation assumes that @@ -156,7 +156,9 @@ class Optimizer(object): # Create any accumulators program = loss.block.program self.helper = LayerHelper( - self.__class__.__name__, program=program, init_program=init_program) + self.__class__.__name__, + main_program=program, + startup_program=startup_program) self._create_accumulators(loss.block, [p[0] for p in parameters_and_grads]) # Create any necessary tensors @@ -185,7 +187,7 @@ class Optimizer(object): def minimize(self, loss, - init_program=None, + startup_program=None, parameter_list=None, no_grad_set=None): """Add operations to minimize `loss` by updating `parameter_list`. @@ -198,7 +200,7 @@ class Optimizer(object): # Add regularization if any params_grads = append_regularization_ops(params_grads) optimize_ops = self.create_optimization_pass(params_grads, loss, - init_program) + startup_program) return optimize_ops diff --git a/python/paddle/v2/framework/tests/test_array_read_write_op.py b/python/paddle/v2/framework/tests/test_array_read_write_op.py new file mode 100644 index 0000000000000000000000000000000000000000..b2a2ff2b8213305fe039ae494fb933e65a76781a --- /dev/null +++ b/python/paddle/v2/framework/tests/test_array_read_write_op.py @@ -0,0 +1,93 @@ +import unittest +import paddle.v2.framework.core as core +import paddle.v2.framework.layers as layers +from paddle.v2.framework.executor import Executor +from paddle.v2.framework.backward import append_backward_ops +from paddle.v2.framework.framework import g_main_program +import numpy + + +class TestArrayReadWrite(unittest.TestCase): + def test_read_write(self): + x = [ + layers.data( + name='x0', shape=[100]), layers.data( + name='x1', shape=[100]), layers.data( + name='x2', shape=[100]) + ] + + for each_x in x: + each_x.stop_gradient = False + + i = layers.zeros(shape=[1], dtype='int64') + arr = layers.array_write(x=x[0], i=i) + i = layers.increment(x=i) + i.stop_gradient = True + arr = layers.array_write(x=x[1], i=i, array=arr) + i = layers.increment(x=i) + i.stop_gradient = True + arr = layers.array_write(x=x[2], i=i, array=arr) + + i = layers.zeros(shape=[1], dtype='int64') + a0 = layers.array_read(array=arr, i=i) + i = layers.increment(x=i) + i.stop_gradient = True # index should not calculate gradient + a1 = layers.array_read(array=arr, i=i) + i = layers.increment(x=i) + i.stop_gradient = True + a2 = layers.array_read(array=arr, i=i) + + mean_a0 = layers.mean(x=a0) + mean_a1 = layers.mean(x=a1) + mean_a2 = layers.mean(x=a2) + + a_sum = layers.sums(input=[mean_a0, mean_a1, mean_a2]) + + mean_x0 = layers.mean(x=x[0]) + mean_x1 = layers.mean(x=x[1]) + mean_x2 = layers.mean(x=x[2]) + + x_sum = layers.sums(input=[mean_x0, mean_x1, mean_x2]) + + scope = core.Scope() + cpu = core.CPUPlace() + + exe = Executor(cpu) + + tensor = core.LoDTensor() + tensor.set(numpy.random.random(size=(100, 100)).astype('float32'), cpu) + + outs = map(numpy.array, + exe.run(feed={'x0': tensor, + 'x1': tensor, + 'x2': tensor}, + fetch_list=[a_sum, x_sum], + scope=scope)) + self.assertEqual(outs[0], outs[1]) + + total_sum = layers.sums(input=[a_sum, x_sum]) + total_sum_scaled = layers.scale(x=total_sum, scale=1 / 6.0) + + append_backward_ops(total_sum_scaled) + + g_vars = map(g_main_program.global_block().var, + [each_x.name + "@GRAD" for each_x in x]) + g_out = [ + item.sum() + for item in map( + numpy.array, + exe.run(feed={'x0': tensor, + 'x1': tensor, + 'x2': tensor}, + fetch_list=g_vars)) + ] + g_out_sum = numpy.array(g_out).sum() + + # since our final gradient is 1 and the neural network 
are all linear + # with mean_op. + # the input gradient should also be 1 + self.assertAlmostEqual(1.0, g_out_sum, delta=0.1) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_conv2d_op.py b/python/paddle/v2/framework/tests/test_conv2d_op.py index f58b96463cf78103b2acb3d80652ef0aa988ad49..04ae7f294c27fdceaaff2e9a7ed854213e643945 100644 --- a/python/paddle/v2/framework/tests/test_conv2d_op.py +++ b/python/paddle/v2/framework/tests/test_conv2d_op.py @@ -61,25 +61,23 @@ class TestConv2dOp(OpTest): def test_check_grad(self): self.check_grad( - set(['Input', 'Filter']), 'Output', max_relative_error=0.05) + set(['Input', 'Filter']), 'Output', max_relative_error=0.02) def test_check_grad_no_filter(self): self.check_grad( ['Input'], 'Output', - max_relative_error=0.05, + max_relative_error=0.02, no_grad_set=set(['Filter'])) def test_check_grad_no_input(self): self.check_grad( ['Filter'], 'Output', - max_relative_error=0.05, + max_relative_error=0.02, no_grad_set=set(['Input'])) def init_test_case(self): - # self.groups = 1 - # self.op_type = "conv2d" self.pad = [0, 0] self.stride = [1, 1] self.dilations = [1, 1] @@ -103,6 +101,9 @@ class TestWithGroup(TestConv2dOp): self.op_type = "conv2d" +#----------------Conv2dCudnn---------------- + + class TestCudnn(TestConv2dOp): def init_group(self): self.groups = 1 diff --git a/python/paddle/v2/framework/tests/test_conv2d_transpose_op.py b/python/paddle/v2/framework/tests/test_conv2d_transpose_op.py index 999a0bdc629010d96a8db31b317ba7a65bf35773..54349c018c4a53b8767d6cd4f94d99c719dc0237 100644 --- a/python/paddle/v2/framework/tests/test_conv2d_transpose_op.py +++ b/python/paddle/v2/framework/tests/test_conv2d_transpose_op.py @@ -58,36 +58,37 @@ class TestConv2dTransposeOp(OpTest): print 'check output here for', self.op_type self.check_output() - def init_test_case(self): - self.pad = [0, 0] - self.stride = [1, 1] - self.dilations = [1, 1] - self.input_size = [2, 3, 5, 5] # NCHW - f_c = self.input_size[1] - self.filter_size = [f_c, 6, 3, 3] - - def init_op_type(self): - self.op_type = "conv2d_transpose" - def test_check_grad_no_input(self): self.check_grad( ['Filter'], 'Output', - max_relative_error=0.05, + max_relative_error=0.02, no_grad_set=set(['Input'])) def test_check_grad_no_filter(self): self.check_grad( ['Input'], 'Output', - max_relative_error=0.05, + max_relative_error=0.02, no_grad_set=set(['Filter'])) def test_check_grad(self): self.check_grad( - set(['Input', 'Filter']), 'Output', max_relative_error=0.05) + set(['Input', 'Filter']), 'Output', max_relative_error=0.02) + + def init_test_case(self): + self.pad = [0, 0] + self.stride = [1, 1] + self.dilations = [1, 1] + self.input_size = [2, 3, 5, 5] # NCHW + f_c = self.input_size[1] + self.filter_size = [f_c, 6, 3, 3] + + def init_op_type(self): + self.op_type = "conv2d_transpose" +# ------------ test_cudnn ------------ class TestCudnn(TestConv2dTransposeOp): def init_op_type(self): self.op_type = "conv2d_transpose_cudnn" diff --git a/python/paddle/v2/framework/tests/test_conv3d_op.py b/python/paddle/v2/framework/tests/test_conv3d_op.py new file mode 100644 index 0000000000000000000000000000000000000000..44c192f58d25f8ddaa38d2ba7c7c19b9a5bd7dc1 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_conv3d_op.py @@ -0,0 +1,131 @@ +import unittest +import numpy as np +from op_test import OpTest + + +def conv3d_forward_naive(input, filter, group, conv_param): + in_n, in_c, in_d, in_h, in_w = input.shape + out_c, f_c, f_d, f_h, f_w = filter.shape + 
    assert f_c * group == in_c
+    assert np.mod(out_c, group) == 0
+    sub_out_c = out_c / group
+
+    stride, pad = conv_param['stride'], conv_param['pad']
+    out_d = 1 + (in_d + 2 * pad[0] - f_d) / stride[0]
+    out_h = 1 + (in_h + 2 * pad[1] - f_h) / stride[1]
+    out_w = 1 + (in_w + 2 * pad[2] - f_w) / stride[2]
+    out = np.zeros((in_n, out_c, out_d, out_h, out_w))
+
+    input_pad = np.pad(input, ((0, ), (0, ), (pad[0], ), (pad[1], ),
+                               (pad[2], )),
+                       mode='constant',
+                       constant_values=0)
+    for d in range(out_d):
+        for i in range(out_h):
+            for j in range(out_w):
+                for g in range(group):
+                    input_pad_masked = \
+                        input_pad[:, g * f_c:(g + 1) * f_c,
+                                  d * stride[0]:d * stride[0] + f_d,
+                                  i * stride[1]:i * stride[1] + f_h,
+                                  j * stride[2]:j * stride[2] + f_w]
+                    f_sub = filter[g * sub_out_c:(g + 1) *
+                                   sub_out_c, :, :, :, :]
+                    for k in range(sub_out_c):
+                        out[:, g * sub_out_c + k, d, i, j] = \
+                            np.sum(input_pad_masked * f_sub[k, :, :, :, :],
+                                   axis=(1, 2, 3, 4))
+
+    return out
+
+
+class TestConv3dOp(OpTest):
+    def setUp(self):
+        self.init_group()
+        self.init_op_type()
+        self.init_test_case()
+
+        conv3d_param = {'stride': self.stride, 'pad': self.pad}
+        input = np.random.random(self.input_size).astype("float32")
+        filter = np.random.random(self.filter_size).astype("float32")
+        output = conv3d_forward_naive(input, filter, self.groups,
+                                      conv3d_param).astype("float32")
+
+        self.inputs = {'Input': input, 'Filter': filter}
+        self.attrs = {
+            'strides': self.stride,
+            'paddings': self.pad,
+            'groups': self.groups
+        }
+        self.outputs = {'Output': output}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(
+            set(['Input', 'Filter']), 'Output', max_relative_error=0.03)
+
+    def test_check_grad_no_filter(self):
+        self.check_grad(
+            ['Input'],
+            'Output',
+            max_relative_error=0.03,
+            no_grad_set=set(['Filter']))
+
+    def test_check_grad_no_input(self):
+        self.check_grad(
+            ['Filter'],
+            'Output',
+            max_relative_error=0.03,
+            no_grad_set=set(['Input']))
+
+    def init_test_case(self):
+        self.pad = [0, 0, 0]
+        self.stride = [1, 1, 1]
+        self.input_size = [2, 3, 4, 4, 4]  # NCDHW
+        assert np.mod(self.input_size[1], self.groups) == 0
+        f_c = self.input_size[1] / self.groups
+        self.filter_size = [6, f_c, 3, 3, 3]
+
+    def init_group(self):
+        self.groups = 1
+
+    def init_op_type(self):
+        self.op_type = "conv3d"
+
+
+class TestCase1(TestConv3dOp):
+    def init_test_case(self):
+        self.pad = [1, 1, 1]
+        self.stride = [1, 1, 1]
+        self.input_size = [2, 3, 4, 4, 4]  # NCDHW
+        assert np.mod(self.input_size[1], self.groups) == 0
+        f_c = self.input_size[1] / self.groups
+        self.filter_size = [6, f_c, 3, 3, 3]
+
+    def init_group(self):
+        self.groups = 1
+
+    def init_op_type(self):
+        self.op_type = "conv3d"
+
+
+class TestWithGroup1(TestConv3dOp):
+    def init_group(self):
+        self.groups = 3
+
+    def init_op_type(self):
+        self.op_type = "conv3d"
+
+
+class TestWithGroup2(TestCase1):
+    def init_group(self):
+        self.groups = 3
+
+    def init_op_type(self):
+        self.op_type = "conv3d"
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/v2/framework/tests/test_conv3d_transpose_op.py b/python/paddle/v2/framework/tests/test_conv3d_transpose_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..132fe7931438a30cf02e4ad2894c0838e48ffc9f
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_conv3d_transpose_op.py
@@ -0,0 +1,97 @@
+import unittest
+import numpy as np
+from op_test import OpTest
+
+
+def conv3dtranspose_forward_naive(input_, filter_,
conv3dtranspose_param): + # [2, 3, 5, 5, 5] + in_n, in_c, in_d, in_h, in_w = input_.shape + # [3, 6, 3, 3, 3] + f_c, out_c, f_d, f_h, f_w = filter_.shape + assert in_c == f_c + + stride, pad = conv3dtranspose_param['stride'], conv3dtranspose_param['pad'] + out_d = (in_d - 1) * stride[0] + f_d + out_h = (in_h - 1) * stride[1] + f_h + out_w = (in_w - 1) * stride[2] + f_w + + out = np.zeros((in_n, out_c, out_d, out_h, out_w)) + + for n in range(in_n): + for d in range(in_d): + for i in range(in_h): + for j in range(in_w): + input_masked = input_[n, :, d, i, j] # (c) + input_masked = np.reshape(input_masked, (in_c, 1, 1, 1)) + input_masked = np.tile(input_masked, (1, f_d, f_h, f_w)) + + for k in range(out_c): + tmp_out = np.sum(input_masked * filter_[:, k, :, :, :], + axis=0) + d1, d2 = d * stride[0], d * stride[0] + f_d + i1, i2 = i * stride[1], i * stride[1] + f_h + j1, j2 = j * stride[2], j * stride[2] + f_w + out[n, k, d1:d2, i1:i2, j1:j2] += tmp_out + + return out + + +class TestConv3dTransposeOp(OpTest): + def setUp(self): + # init as conv transpose + self.init_op_type() + + # [2, 3, 5, 5, 5] -> kernel [3, 6, 3, 3, 3] -> output [2, 6, 7, 7, 7] + self.init_test_case() + + conv3dtranspose_param = {'stride': self.stride, 'pad': self.pad} + input_ = np.random.random(self.input_size).astype("float32") + filter_ = np.random.random(self.filter_size).astype("float32") + output = conv3dtranspose_forward_naive( + input_, filter_, conv3dtranspose_param).astype("float32") + # print 'deconv output py', output, output.shape + + self.inputs = {'Input': input_, 'Filter': filter_} + self.attrs = { + 'strides': self.stride, + 'paddings': self.pad, + # 'dilations': self.dilations + } + self.outputs = {'Output': output} + + def test_check_output(self): + print 'check output here' + self.check_output() + + def test_check_grad(self): + self.check_grad( + set(['Input', 'Filter']), 'Output', max_relative_error=0.02) + + def test_check_grad_no_filter(self): + self.check_grad( + ['Input'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Filter'])) + + def test_check_grad_no_input(self): + self.check_grad( + ['Filter'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Input'])) + + def init_test_case(self): + self.pad = [0, 0, 0] + self.stride = [1, 1, 1] + self.dilations = [1, 1, 1] + self.input_size = [2, 3, 5, 5, 5] # NCHW + f_c = self.input_size[1] + self.filter_size = [f_c, 6, 3, 3, 3] + + def init_op_type(self): + self.op_type = "conv3d_transpose" + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_crf_decoding_op.py b/python/paddle/v2/framework/tests/test_crf_decoding_op.py new file mode 100644 index 0000000000000000000000000000000000000000..ee2b996bf430d5a0edaa0de459a937adffd9f8f6 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_crf_decoding_op.py @@ -0,0 +1,146 @@ +import unittest +import random +import numpy as np + +from op_test import OpTest + + +class CRFDecoding(object): + def __init__(self, emission_weights, transition_weights, + seq_start_positions): + assert (emission_weights.shape[0] == seq_start_positions[-1]) + self.tag_num = emission_weights.shape[1] + self.seq_num = len(seq_start_positions) - 1 + + self.seq_start_positions = seq_start_positions + self.x = emission_weights + + self.a = transition_weights[0, :] + self.b = transition_weights[1, :] + self.w = transition_weights[2:, :] + + self.track = np.zeros( + (seq_start_positions[-1], self.tag_num), dtype="int32") + self.decoded_path = np.zeros( + 
(seq_start_positions[-1], 1), dtype="int32") + + def _decode_one_sequence(self, decoded_path, x): + seq_len, tag_num = x.shape + alpha = np.zeros((seq_len, tag_num), dtype="float64") + track = np.zeros((seq_len, tag_num), dtype="int32") + + for i in range(tag_num): + alpha[0, i] = self.a[i] + x[0, i] + + for k in range(1, seq_len): + for i in range(tag_num): + max_score = -np.finfo("float64").max + max_idx = 0 + for j in range(tag_num): + score = alpha[k - 1, j] + self.w[j, i] + if score > max_score: + max_score = score + max_idx = j + alpha[k, i] = max_score + x[k, i] + track[k, i] = max_idx + + max_score = -np.finfo("float64").max + max_idx = 0 + for i in range(tag_num): + score = alpha[seq_len - 1, i] + self.b[i] + if score > max_score: + max_score = score + max_idx = i + + decoded_path[-1] = max_idx + for i in range(seq_len - 1, 0, -1): + decoded_path[i - 1] = max_idx = track[i, max_idx] + + def decode(self): + for i in range(self.seq_num): + start = self.seq_start_positions[i] + end = self.seq_start_positions[i + 1] + self._decode_one_sequence(self.decoded_path[start:end, :], + self.x[start:end, :]) + return self.decoded_path + + +class TestCRFDecodingOp1(OpTest): + """ + Compare the dynamic program with random generated parameters and inputs + with grouth truth not being given. + """ + + def set_test_data(self): + SEQ_NUM = 3 + TAG_NUM = 17 + MAX_SEQ_LEN = 10 + + lod = [[0]] + for i in range(SEQ_NUM): + lod[-1].append(lod[-1][-1] + random.randint(1, MAX_SEQ_LEN)) + emission = np.random.uniform(-1, 1, + [lod[-1][-1], TAG_NUM]).astype("float64") + transition = np.random.uniform(-0.5, 0.5, + [TAG_NUM + 2, TAG_NUM]).astype("float64") + + self.inputs = { + "Emission": (emission, lod), + "Transition": transition, + } + + decoder = CRFDecoding(emission, transition, lod[0]) + decoded_path = decoder.decode() + + self.outputs = {"ViterbiPath": decoded_path} + + def setUp(self): + self.op_type = "crf_decoding" + self.set_test_data() + + def test_check_output(self): + self.check_output() + + +class TestCRFDecodingOp2(OpTest): + """ + Compare the dynamic program with brute force computation with + ground truth being given. 
+ """ + + def setUp(self): + self.op_type = "crf_decoding" + TAG_NUM = 5 + + lod = [[0, 1, 3, 6, 10]] + transition = np.repeat( + np.arange( + TAG_NUM, dtype="float64").reshape(1, TAG_NUM), + TAG_NUM + 2, + axis=0) + emission = np.repeat( + np.arange( + TAG_NUM, dtype="float64").reshape(1, TAG_NUM), + lod[-1][-1], + axis=0) + + labels = np.random.randint( + low=0, high=TAG_NUM, size=(lod[-1][-1], 1), dtype="int32") + predicted_labels = np.ones( + (lod[-1][-1], 1), dtype="int32") * (TAG_NUM - 1) + expected_output = (labels == predicted_labels).astype("int32") + + self.inputs = { + "Emission": (emission, lod), + "Transition": transition, + "Label": (labels, lod) + } + + self.outputs = {"ViterbiPath": expected_output} + + def test_check_output(self): + self.check_output() + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_executor_and_mul.py b/python/paddle/v2/framework/tests/test_executor_and_mul.py index 35f775711167ce0d210044ab4cb382db802f39a5..c885cfbebd4b665ddf50adbc43673942dc949a0b 100644 --- a/python/paddle/v2/framework/tests/test_executor_and_mul.py +++ b/python/paddle/v2/framework/tests/test_executor_and_mul.py @@ -2,7 +2,7 @@ import unittest from paddle.v2.framework.layers import mul, data import paddle.v2.framework.core as core from paddle.v2.framework.executor import Executor -from paddle.v2.framework.framework import g_program +from paddle.v2.framework.framework import g_main_program import numpy @@ -23,7 +23,7 @@ class TestExecutor(unittest.TestCase): tensor_b = core.LoDTensor() tensor_b.set(b_np, place) exe = Executor(place) - outs = exe.run(g_program, + outs = exe.run(g_main_program, feed={'a': tensor_a, 'b': tensor_b}, fetch_list=[out]) diff --git a/python/paddle/v2/framework/tests/test_fill_constant_batch_size_like_op.py b/python/paddle/v2/framework/tests/test_fill_constant_batch_size_like_op.py index 319ae52fb38ee122808a6e6b0e44d3a3787d8e53..99de6b5d052b41499800afb6181a235da340bc15 100644 --- a/python/paddle/v2/framework/tests/test_fill_constant_batch_size_like_op.py +++ b/python/paddle/v2/framework/tests/test_fill_constant_batch_size_like_op.py @@ -21,9 +21,14 @@ class TestFillConstantBatchSizeLikeWhenSecondDimIsBatchSize(OpTest): def setUp(self): self.op_type = "fill_constant_batch_size_like" self.inputs = {'Input': np.random.random((219, 232)).astype("float32")} - self.attrs = {'value': 3.5, 'shape': [132, -1, 7], 'dim_idx': 1} - - out = np.random.random((132, 232, 7)).astype("float32") + self.attrs = { + 'value': 3.5, + 'shape': [132, -1, 7], + 'input_dim_idx': 0, + 'output_dim_idx': 1 + } + + out = np.random.random((132, 219, 7)).astype("float32") out.fill(3.5) self.outputs = {'Out': out} diff --git a/python/paddle/v2/framework/tests/test_fit_a_line.py b/python/paddle/v2/framework/tests/test_fit_a_line.py index 588e1d588227e1960e8dc6ec256505ff6af58cf8..aba1f27ad64b2e510a957736fb566c7616d2b435 100644 --- a/python/paddle/v2/framework/tests/test_fit_a_line.py +++ b/python/paddle/v2/framework/tests/test_fit_a_line.py @@ -3,43 +3,47 @@ import paddle.v2.framework.layers as layers import paddle.v2.framework.core as core import paddle.v2.framework.optimizer as optimizer -from paddle.v2.framework.framework import Program, g_program +from paddle.v2.framework.framework import Program, g_main_program from paddle.v2.framework.io import save_persistables, load_persistables from paddle.v2.framework.executor import Executor from paddle.v2.framework.evaluator import Accuracy import numpy as np -init_program = Program() -program = 
Program() +startup_program = Program() +main_program = Program() x = layers.data( name='x', shape=[13], data_type='float32', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) y_predict = layers.fc(input=x, size=1, act=None, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) y = layers.data( name='y', shape=[1], data_type='float32', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) accuracy = evaluator.Accuracy(input=y_predict, label=y) cost = layers.square_error_cost( - input=y_predict, label=y, program=program, init_program=init_program) -avg_cost = layers.mean(x=cost, program=program, init_program=init_program) + input=y_predict, + label=y, + main_program=main_program, + startup_program=startup_program) +avg_cost = layers.mean( + x=cost, main_program=main_program, startup_program=startup_program) sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001) -opts = sgd_optimizer.minimize(avg_cost, init_program) +opts = sgd_optimizer.minimize(avg_cost, startup_program) BATCH_SIZE = 20 @@ -51,13 +55,12 @@ train_reader = paddle.batch( place = core.CPUPlace() exe = Executor(place) -exe.run(init_program, feed={}, fetch_list=[]) +exe.run(startup_program, feed={}, fetch_list=[]) PASS_NUM = 100 for pass_id in range(PASS_NUM): - save_persistables(exe, "./fit_a_line.model/", program=program) - load_persistables(exe, "./fit_a_line.model/", program=program) - exe.run(accuracy.eval(), ) + save_persistables(exe, "./fit_a_line.model/", main_program=main_program) + load_persistables(exe, "./fit_a_line.model/", main_program=main_program) for data in train_reader(): x_data = np.array(map(lambda x: x[0], data)).astype("float32") y_data = np.array(map(lambda x: x[1], data)).astype("float32") @@ -69,7 +72,7 @@ for pass_id in range(PASS_NUM): tensor_y = core.LoDTensor() tensor_y.set(y_data, place) # print tensor_y.get_dims() - outs = exe.run(program, + outs = exe.run(main_program, feed={'x': tensor_x, 'y': tensor_y}, fetch_list=[avg_cost]) diff --git a/python/paddle/v2/framework/tests/test_framework_debug_str.py b/python/paddle/v2/framework/tests/test_framework_debug_str.py new file mode 100644 index 0000000000000000000000000000000000000000..8fdf8f91171ee334fac93c05a4d49056fa0e803d --- /dev/null +++ b/python/paddle/v2/framework/tests/test_framework_debug_str.py @@ -0,0 +1,13 @@ +import unittest +from paddle.v2.framework.framework import Program + + +class TestDebugStringFramework(unittest.TestCase): + def test_debug_str(self): + p = Program() + p.current_block().create_var(name='t', shape=[0, 1]) + self.assertRaises(ValueError, callableObj=p.__str__) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_gru_op.py b/python/paddle/v2/framework/tests/test_gru_op.py new file mode 100644 index 0000000000000000000000000000000000000000..b2474cff94c6c71cc62bc8e69a5d83e38d51c511 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_gru_op.py @@ -0,0 +1,156 @@ +import unittest +import numpy as np +import math +from op_test import OpTest +from test_lstm_op import identity, sigmoid, tanh, relu + + +class TestGRUOp(OpTest): + batch_size = 9 + frame_size = 5 + activate = { + 'identity': identity, + 'sigmoid': sigmoid, + 'tanh': tanh, + 'relu': relu + } + + @staticmethod + def seq_to_batch(lod, is_reverse): + idx_in_seq_list = [] + seq_starts = lod[0] + seq_lens = [] + for i in 
range(len(seq_starts) - 1): + seq_lens.append(seq_starts[i + 1] - seq_starts[i]) + sorted_seqs = sorted( + range(len(seq_lens)), lambda x, y: seq_lens[y] - seq_lens[x]) + num_batch = seq_lens[sorted_seqs[0]] + for batch_idx in range(num_batch): + idx_in_seq = [] + for i in range(len(seq_lens)): + if seq_lens[sorted_seqs[i]] <= batch_idx: + break + idx = (seq_starts[sorted_seqs[i] + 1] - 1 - batch_idx + ) if is_reverse else ( + seq_starts[sorted_seqs[i]] + batch_idx) + idx_in_seq.append(idx) + idx_in_seq_list.append(idx_in_seq) + return idx_in_seq_list + + def gru_step(self, x, h_p, w, b): + batch_size = x.shape[0] + frame_size = w.shape[0] + g = x + np.tile(b, (batch_size, 1)) + w_u_r = w.flatten()[:frame_size * frame_size * 2].reshape( + (frame_size, frame_size * 2)) + u_r = self.activate[self.attrs['gate_activation']](np.dot( + h_p, w_u_r) + g[:, :frame_size * 2]) + u = u_r[:, :frame_size] + r = u_r[:, frame_size:frame_size * 2] + r_h_p = r * h_p + w_c = w.flatten()[frame_size * frame_size * 2:].reshape( + (frame_size, frame_size)) + c = self.activate[self.attrs['activation']](np.dot(r_h_p, w_c) + + g[:, frame_size * 2:]) + g = np.hstack((u_r, c)) + h = u * c + (1 - u) * h_p + return g, r_h_p, h + + def gru(self): + input, lod = self.inputs['Input'] + w = self.inputs['Weight'] + b = self.inputs['Bias'] if self.inputs.has_key('Bias') else np.zeros( + (1, self.frame_size * 3)) + batch_gate = self.outputs['BatchGate'] + batch_reset_hidden_prev = self.outputs['BatchResetHiddenPrev'] + batch_hidden = self.outputs['BatchHidden'] + hidden = self.outputs['Hidden'] + idx_in_seq_list = self.idx_in_seq_list + h_p = self.inputs['H0'] if self.inputs.has_key('H0') else np.zeros( + (len(idx_in_seq_list[0]), self.frame_size)) + num_batch = len(idx_in_seq_list) + end_idx = 0 + for batch_idx in range(num_batch): + x = input[idx_in_seq_list[batch_idx]] + g, r_h_p, h = self.gru_step(x, h_p, w, b) + if batch_idx < (num_batch - 1): + h_p = h[:len(idx_in_seq_list[batch_idx + 1])] + start_idx = end_idx + end_idx = start_idx + len(idx_in_seq_list[batch_idx]) + batch_gate[start_idx:end_idx] = g + batch_reset_hidden_prev[start_idx:end_idx] = r_h_p + batch_hidden[start_idx:end_idx] = h + hidden[idx_in_seq_list[batch_idx]] = h + return batch_gate, batch_reset_hidden_prev, hidden + + def set_data(self): + lod = [[0, 2, 6, self.batch_size]] + self.idx_in_seq_list = self.seq_to_batch(lod, self.is_reverse) + batch_size = self.batch_size + frame_size = self.frame_size + input = np.random.rand(batch_size, frame_size * 3).astype('float64') + h0 = np.random.rand(len(self.idx_in_seq_list[0]), + frame_size).astype('float64') + weight = np.random.rand(frame_size, frame_size * 3).astype('float64') + bias = np.random.rand(1, frame_size * 3).astype('float64') + + self.inputs = { + 'Input': (input, lod), + 'H0': h0, + 'Weight': weight, + 'Bias': bias + } + + self.outputs = { + 'BatchGate': np.zeros( + (batch_size, frame_size * 3), dtype='float64'), + 'BatchResetHiddenPrev': np.zeros( + (batch_size, frame_size), dtype='float64'), + 'BatchHidden': np.zeros( + (batch_size, frame_size), dtype='float64'), + 'Hidden': np.zeros( + (batch_size, frame_size), dtype='float64') + } + + def set_confs(self): + self.is_reverse = False + self.attrs = { + 'activation': 'tanh', + 'gate_activation': 'sigmoid', + 'is_reverse': self.is_reverse + } + + def setUp(self): + self.op_type = "gru" + self.set_confs() + self.set_data() + self.gru() + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['Input', 
'H0', 'Weight', 'Bias'], ['Hidden']) + + +class TestGRUOpNoInitial(TestGRUOp): + def set_data(self): + super(TestGRUOpNoInitial, self).set_data() + self.inputs.pop('H0') + + def test_check_grad(self): + self.check_grad(['Input', 'Weight', 'Bias'], ['Hidden']) + + +class TestGRUOpReverse(TestGRUOp): + def set_confs(self): + self.is_reverse = True + self.attrs = { + 'activation': 'identity', + 'gate_activation': 'sigmoid', + 'is_reverse': self.is_reverse + } + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_huber_loss_op.py b/python/paddle/v2/framework/tests/test_huber_loss_op.py index 003e7d7ed7ccdfc48b0aa8db0a6765b5c93e7c14..a24fcbec6cc4801118ce4ef97eb4692cd2351c28 100644 --- a/python/paddle/v2/framework/tests/test_huber_loss_op.py +++ b/python/paddle/v2/framework/tests/test_huber_loss_op.py @@ -21,7 +21,8 @@ class TestHuberLossOp(OpTest): 'Y': np.random.uniform(0, 1., (samples_num, 1)).astype('float32'), } residual = self.inputs['Y'] - self.inputs['X'] - loss = np.vectorize(huber_loss_forward)(residual, delta) + loss = np.vectorize(huber_loss_forward)(residual, + delta).astype('float32') self.attrs = {'delta': delta} self.outputs = { 'Residual': residual, @@ -43,6 +44,5 @@ class TestHuberLossOp(OpTest): ['X'], 'Out', max_relative_error=0.008, no_grad_set=set('residual')) -# TODO(typhoonzero): should add this back till we fix it -#if __name__ == '__main__': -# unittest.main() +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_image_classification_layer.py b/python/paddle/v2/framework/tests/test_image_classification_layer.py index b4eda13552e60f009ec910e3d21e9d77107133a1..b1a267ec32b1c937b946bee82e41b846ebbf1288 100644 --- a/python/paddle/v2/framework/tests/test_image_classification_layer.py +++ b/python/paddle/v2/framework/tests/test_image_classification_layer.py @@ -9,8 +9,8 @@ def conv_block(input, num_filter, groups, dropouts, - program=None, - init_program=None): + main_program=None, + startup_program=None): return nets.img_conv_group( input=input, pool_size=2, @@ -21,77 +21,81 @@ def conv_block(input, conv_with_batchnorm=True, conv_batchnorm_drop_rate=dropouts, pool_type='max', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) class TestLayer(unittest.TestCase): def test_batch_norm_layer(self): - program = Program() - init_program = Program() + main_program = Program() + startup_program = Program() images = layers.data( name='pixel', shape=[3, 48, 48], data_type='float32', - program=program) + main_program=main_program) layers.batch_norm( - input=images, program=program, init_program=init_program) + input=images, + main_program=main_program, + startup_program=startup_program) - # print str(program) + # print str(main_program) def test_dropout_layer(self): - program = Program() - init_program = Program() + main_program = Program() + startup_program = Program() images = layers.data( name='pixel', shape=[3, 48, 48], data_type='float32', - program=program) + main_program=main_program) layers.dropout( x=images, dropout_prob=0.5, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) - # print str(program) + # print str(main_program) def test_img_conv_group(self): - program = Program() - init_program = Program() + main_program = Program() + startup_program = Program() images = layers.data( name='pixel', shape=[3, 48, 48], data_type='float32', - program=program, - 
init_program=init_program) - conv1 = conv_block(images, 64, 2, [0.3, 0], program, init_program) - conv2 = conv_block(conv1, 256, 3, [0.4, 0.4, 0], program, init_program) + main_program=main_program, + startup_program=startup_program) + conv1 = conv_block(images, 64, 2, [0.3, 0], main_program, + startup_program) + conv2 = conv_block(conv1, 256, 3, [0.4, 0.4, 0], main_program, + startup_program) - # print str(program) + # print str(main_program) def test_elementwise_add_with_act(self): - program = Program() - init_program = Program() + main_program = Program() + startup_program = Program() image1 = layers.data( name='pixel1', shape=[3, 48, 48], data_type='float32', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) image2 = layers.data( name='pixel2', shape=[3, 48, 48], data_type='float32', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) out = layers.elementwise_add( x=image1, y=image2, act='relu', - program=program, - init_program=init_program) - # print(program) + main_program=main_program, + startup_program=startup_program) + # print(main_program) if __name__ == '__main__': diff --git a/python/paddle/v2/framework/tests/test_image_classification_train.py b/python/paddle/v2/framework/tests/test_image_classification_train.py index 21adc7f38f8a0463fab020aab87751fe69a9b76f..a4165da9703c55ae3347123409407f0cae30856f 100644 --- a/python/paddle/v2/framework/tests/test_image_classification_train.py +++ b/python/paddle/v2/framework/tests/test_image_classification_train.py @@ -1,24 +1,23 @@ +import numpy as np import paddle.v2 as paddle +import paddle.v2.framework.core as core import paddle.v2.framework.layers as layers import paddle.v2.framework.nets as nets -import paddle.v2.framework.core as core import paddle.v2.framework.optimizer as optimizer - -from paddle.v2.framework.framework import Program, g_program from paddle.v2.framework.executor import Executor - -import numpy as np +from paddle.v2.framework.framework import g_startup_program, g_main_program +from paddle.v2.framework.initializer import XavierInitializer -def resnet_cifar10(input, depth=32, program=None, init_program=None): +def resnet_cifar10(input, depth=32, main_program=None, startup_program=None): def conv_bn_layer(input, ch_out, filter_size, stride, padding, act='relu', - program=None, - init_program=None): + main_program=None, + startup_program=None): tmp = layers.conv2d( input=input, filter_size=filter_size, @@ -27,10 +26,13 @@ def resnet_cifar10(input, depth=32, program=None, init_program=None): padding=padding, act=None, bias_attr=False, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) return layers.batch_norm( - input=tmp, act=act, program=program, init_program=init_program) + input=tmp, + act=act, + main_program=main_program, + startup_program=startup_program) def shortcut(input, ch_in, ch_out, stride, program, init_program): if ch_in != ch_out: @@ -43,16 +45,16 @@ def resnet_cifar10(input, depth=32, program=None, init_program=None): ch_in, ch_out, stride, - program=program, - init_program=init_program): + main_program=main_program, + startup_program=startup_program): tmp = conv_bn_layer( input, ch_out, 3, stride, 1, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) tmp = conv_bn_layer( tmp, ch_out, @@ -60,21 +62,22 @@ def resnet_cifar10(input, depth=32, program=None, 
init_program=None): 1, 1, act=None, - program=program, - init_program=init_program) - short = shortcut(input, ch_in, ch_out, stride, program, init_program) + main_program=main_program, + startup_program=startup_program) + short = shortcut(input, ch_in, ch_out, stride, main_program, + startup_program) return layers.elementwise_add( x=tmp, y=short, act='relu', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) def layer_warp(block_func, input, ch_in, ch_out, count, stride, program, - init_program): - tmp = block_func(input, ch_in, ch_out, stride, program, init_program) + startup_program): + tmp = block_func(input, ch_in, ch_out, stride, program, startup_program) for i in range(1, count): - tmp = block_func(tmp, ch_out, ch_out, 1, program, init_program) + tmp = block_func(tmp, ch_out, ch_out, 1, program, startup_program) return tmp assert (depth - 2) % 6 == 0 @@ -85,8 +88,8 @@ def resnet_cifar10(input, depth=32, program=None, init_program=None): filter_size=3, stride=1, padding=1, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) res1 = layer_warp( basicblock, conv1, @@ -94,8 +97,8 @@ def resnet_cifar10(input, depth=32, program=None, init_program=None): 16, n, 1, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) res2 = layer_warp( basicblock, res1, @@ -103,8 +106,8 @@ def resnet_cifar10(input, depth=32, program=None, init_program=None): 32, n, 2, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) res3 = layer_warp( basicblock, res2, @@ -112,25 +115,25 @@ def resnet_cifar10(input, depth=32, program=None, init_program=None): 64, n, 2, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) pool = layers.pool2d( input=res3, pool_size=8, pool_type='avg', pool_stride=1, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) return pool -def vgg16_bn_drop(input, program, init_program): +def vgg16_bn_drop(input, main_program=None, startup_program=None): def conv_block(input, num_filter, groups, dropouts, - program=None, - init_program=None): + main_program=None, + startup_program=None): return nets.img_conv_group( input=input, pool_size=2, @@ -141,74 +144,75 @@ def vgg16_bn_drop(input, program, init_program): conv_with_batchnorm=True, conv_batchnorm_drop_rate=dropouts, pool_type='max', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) - conv1 = conv_block(input, 64, 2, [0.3, 0], program, init_program) - conv2 = conv_block(conv1, 128, 2, [0.4, 0], program, init_program) - conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0], program, init_program) - conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0], program, init_program) - conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0], program, init_program) + conv1 = conv_block(input, 64, 2, [0.3, 0], main_program, startup_program) + conv2 = conv_block(conv1, 128, 2, [0.4, 0], main_program, startup_program) + conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0], main_program, + startup_program) + conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0], main_program, + startup_program) + conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0], main_program, + startup_program) drop = layers.dropout( - x=conv5, dropout_prob=0.5, program=program, init_program=init_program) + 
x=conv5, + dropout_prob=0.5, + main_program=main_program, + startup_program=startup_program) fc1 = layers.fc(input=drop, size=512, act=None, - program=program, - init_program=init_program) + param_attr={"initializer": XavierInitializer()}, + main_program=main_program, + startup_program=startup_program) reshape1 = layers.reshape( x=fc1, shape=list(fc1.shape + (1, 1)), - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) bn = layers.batch_norm( - input=reshape1, act='relu', program=program, init_program=init_program) + input=reshape1, + act='relu', + main_program=main_program, + startup_program=startup_program) drop2 = layers.dropout( - x=bn, dropout_prob=0.5, program=program, init_program=init_program) + x=bn, + dropout_prob=0.5, + main_program=main_program, + startup_program=startup_program) fc2 = layers.fc(input=drop2, size=512, act=None, - program=program, - init_program=init_program) + param_attr={"initializer": XavierInitializer()}, + main_program=main_program, + startup_program=startup_program) return fc2 -init_program = Program() -program = Program() - classdim = 10 data_shape = [3, 32, 32] -images = layers.data( - name='pixel', shape=data_shape, data_type='float32', program=program) - -label = layers.data( - name='label', - shape=[1], - data_type='int64', - program=program, - init_program=init_program) +images = layers.data(name='pixel', shape=data_shape, data_type='float32') +label = layers.data(name='label', shape=[1], data_type='int64') # Add neural network config # option 1. resnet -net = resnet_cifar10(images, 32, program, init_program) +# net = resnet_cifar10(images, 32) # option 2. vgg -# net = vgg16_bn_drop(images, program, init_program) +net = vgg16_bn_drop(images) # print(program) -predict = layers.fc(input=net, - size=classdim, - act='softmax', - program=program, - init_program=init_program) -cost = layers.cross_entropy( - input=predict, label=label, program=program, init_program=init_program) -avg_cost = layers.mean(x=cost, program=program, init_program=init_program) +predict = layers.fc(input=net, size=classdim, act='softmax') +cost = layers.cross_entropy(input=predict, label=label) +avg_cost = layers.mean(x=cost) +accuracy = layers.accuracy(input=predict, label=label) -sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001) -opts = sgd_optimizer.minimize(avg_cost, init_program) +# optimizer = optimizer.SGDOptimizer(learning_rate=0.001) +optimizer = optimizer.AdamOptimizer(learning_rate=0.001) +opts = optimizer.minimize(avg_cost) BATCH_SIZE = 128 PASS_NUM = 1 @@ -221,7 +225,7 @@ train_reader = paddle.batch( place = core.CPUPlace() exe = Executor(place) -exe.run(init_program, feed={}, fetch_list=[]) +exe.run(g_startup_program, feed={}, fetch_list=[]) for pass_id in range(PASS_NUM): batch_id = 0 @@ -239,14 +243,15 @@ for pass_id in range(PASS_NUM): tensor_img.set(img_data, place) tensor_y.set(y_data, place) - outs = exe.run(program, + outs = exe.run(g_main_program, feed={"pixel": tensor_img, "label": tensor_y}, - fetch_list=[avg_cost]) + fetch_list=[avg_cost, accuracy]) loss = np.array(outs[0]) + acc = np.array(outs[1]) print("pass_id:" + str(pass_id) + " batch_id:" + str(batch_id) + - " loss:" + str(loss)) + " loss:" + str(loss) + " acc:" + str(acc)) batch_id = batch_id + 1 if batch_id > 1: diff --git a/python/paddle/v2/framework/tests/test_inference_model_io.py b/python/paddle/v2/framework/tests/test_inference_model_io.py index 
e9c9cd27d9ead5b45a2e708a669035cc1ce9cb0c..d273387a35820a27f87ef4569f158db4fe670864 100644 --- a/python/paddle/v2/framework/tests/test_inference_model_io.py +++ b/python/paddle/v2/framework/tests/test_inference_model_io.py @@ -3,7 +3,7 @@ import paddle.v2.framework.layers as layers import paddle.v2.framework.core as core import paddle.v2.framework.optimizer as optimizer -from paddle.v2.framework.framework import Program, g_program +from paddle.v2.framework.framework import Program, g_main_program from paddle.v2.framework.io import save_inference_model, load_inference_model import paddle.v2.framework.executor as executor import unittest @@ -20,28 +20,28 @@ class TestBook(unittest.TestCase): name='x', shape=[2], data_type='float32', - program=program, - init_program=init_program) + main_program=program, + startup_program=init_program) y = layers.data( name='y', shape=[1], data_type='float32', - program=program, - init_program=init_program) + main_program=program, + startup_program=init_program) y_predict = layers.fc(input=x, size=1, act=None, - program=program, - init_program=init_program) + main_program=program, + startup_program=init_program) cost = layers.square_error_cost( input=y_predict, label=y, - program=program, - init_program=init_program) + main_program=program, + startup_program=init_program) avg_cost = layers.mean( - x=cost, program=program, init_program=init_program) + x=cost, main_program=program, startup_program=init_program) sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001) opts = sgd_optimizer.minimize(avg_cost, init_program) diff --git a/python/paddle/v2/framework/tests/test_layers.py b/python/paddle/v2/framework/tests/test_layers.py index 5cbe790e3f019f5dcf6b201c4744e7502141ed99..716963fb431a8aca70e1b87efaf268595af2b09e 100644 --- a/python/paddle/v2/framework/tests/test_layers.py +++ b/python/paddle/v2/framework/tests/test_layers.py @@ -1,6 +1,6 @@ import paddle.v2.framework.layers as layers import paddle.v2.framework.nets as nets -from paddle.v2.framework.framework import Program, g_program +from paddle.v2.framework.framework import Program, g_main_program import paddle.v2.framework.core as core import unittest @@ -9,15 +9,15 @@ class TestBook(unittest.TestCase): def test_fit_a_line(self): program = Program() x = layers.data( - name='x', shape=[13], data_type='float32', program=program) - y_predict = layers.fc(input=x, size=1, act=None, program=program) + name='x', shape=[13], data_type='float32', main_program=program) + y_predict = layers.fc(input=x, size=1, act=None, main_program=program) y = layers.data( - name='y', shape=[1], data_type='float32', program=program) + name='y', shape=[1], data_type='float32', main_program=program) cost = layers.square_error_cost( - input=y_predict, label=y, program=program) + input=y_predict, label=y, main_program=program) - avg_cost = layers.mean(x=cost, program=program) + avg_cost = layers.mean(x=cost, main_program=program) self.assertIsNotNone(avg_cost) program.append_backward(avg_cost) print str(program) @@ -27,26 +27,42 @@ class TestBook(unittest.TestCase): # Change g_program, so the rest layers use `g_program` images = layers.data( - name='pixel', shape=[784], data_type='float32', program=program) + name='pixel', + shape=[784], + data_type='float32', + main_program=program) label = layers.data( - name='label', shape=[1], data_type='int32', program=program) - hidden1 = layers.fc(input=images, size=128, act='relu', program=program) - hidden2 = layers.fc(input=hidden1, size=64, act='relu', program=program) + name='label', 
shape=[1], data_type='int32', main_program=program) + hidden1 = layers.fc(input=images, + size=128, + act='relu', + main_program=program) + hidden2 = layers.fc(input=hidden1, + size=64, + act='relu', + main_program=program) predict = layers.fc(input=hidden2, size=10, act='softmax', - program=program) - cost = layers.cross_entropy(input=predict, label=label, program=program) - avg_cost = layers.mean(x=cost, program=program) + main_program=program) + cost = layers.cross_entropy( + input=predict, label=label, main_program=program) + avg_cost = layers.mean(x=cost, main_program=program) self.assertIsNotNone(avg_cost) print str(program) def test_simple_conv2d(self): program = Program() images = layers.data( - name='pixel', shape=[3, 48, 48], data_type='int32', program=program) + name='pixel', + shape=[3, 48, 48], + data_type='int32', + main_program=program) layers.conv2d( - input=images, num_filters=3, filter_size=[4, 4], program=program) + input=images, + num_filters=3, + filter_size=[4, 4], + main_program=program) print str(program) @@ -57,9 +73,9 @@ class TestBook(unittest.TestCase): name='pixel', shape=[1, 28, 28], data_type='float32', - program=program) + main_program=program) label = layers.data( - name='label', shape=[1], data_type='int32', program=program) + name='label', shape=[1], data_type='int32', main_program=program) conv_pool_1 = nets.simple_img_conv_pool( input=images, filter_size=5, @@ -67,7 +83,7 @@ class TestBook(unittest.TestCase): pool_size=2, pool_stride=2, act="relu", - program=program) + main_program=program) conv_pool_2 = nets.simple_img_conv_pool( input=conv_pool_1, filter_size=5, @@ -75,14 +91,15 @@ class TestBook(unittest.TestCase): pool_size=2, pool_stride=2, act="relu", - program=program) + main_program=program) predict = layers.fc(input=conv_pool_2, size=10, act="softmax", - program=program) - cost = layers.cross_entropy(input=predict, label=label, program=program) - avg_cost = layers.mean(x=cost, program=program) + main_program=program) + cost = layers.cross_entropy( + input=predict, label=label, main_program=program) + avg_cost = layers.mean(x=cost, main_program=program) program.append_backward(avg_cost) @@ -93,58 +110,58 @@ class TestBook(unittest.TestCase): dict_size = 10000 embed_size = 32 first_word = layers.data( - name='firstw', shape=[1], data_type='int64', program=program) + name='firstw', shape=[1], data_type='int64', main_program=program) second_word = layers.data( - name='secondw', shape=[1], data_type='int64', program=program) + name='secondw', shape=[1], data_type='int64', main_program=program) third_word = layers.data( - name='thirdw', shape=[1], data_type='int64', program=program) + name='thirdw', shape=[1], data_type='int64', main_program=program) forth_word = layers.data( - name='forthw', shape=[1], data_type='int64', program=program) + name='forthw', shape=[1], data_type='int64', main_program=program) next_word = layers.data( - name='nextw', shape=[1], data_type='int64', program=program) + name='nextw', shape=[1], data_type='int64', main_program=program) embed_first = layers.embedding( input=first_word, size=[dict_size, embed_size], data_type='float32', param_attr={'name': 'shared_w'}, - program=program) + main_program=program) embed_second = layers.embedding( input=second_word, size=[dict_size, embed_size], data_type='float32', param_attr={'name': 'shared_w'}, - program=program) + main_program=program) embed_third = layers.embedding( input=third_word, size=[dict_size, embed_size], data_type='float32', param_attr={'name': 'shared_w'}, - 
program=program) + main_program=program) embed_forth = layers.embedding( input=forth_word, size=[dict_size, embed_size], data_type='float32', param_attr={'name': 'shared_w'}, - program=program) + main_program=program) concat_embed = layers.concat( input=[embed_first, embed_second, embed_third, embed_forth], axis=1, - program=program) + main_program=program) hidden1 = layers.fc(input=concat_embed, size=256, act='sigmoid', - program=program) + main_program=program) predict_word = layers.fc(input=hidden1, size=dict_size, act='softmax', - program=program) + main_program=program) cost = layers.cross_entropy( - input=predict_word, label=next_word, program=program) - avg_cost = layers.mean(x=cost, program=program) + input=predict_word, label=next_word, main_program=program) + avg_cost = layers.mean(x=cost, main_program=program) self.assertIsNotNone(avg_cost) print str(program) diff --git a/python/paddle/v2/framework/tests/test_lod_rank_table.py b/python/paddle/v2/framework/tests/test_lod_rank_table.py new file mode 100644 index 0000000000000000000000000000000000000000..2242d4391dc7c12e59f4f157b34b0525efcbc9a1 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_lod_rank_table.py @@ -0,0 +1,29 @@ +from paddle.v2.framework.layers import lod_rank_table, data +from paddle.v2.framework.executor import Executor +from paddle.v2.framework.framework import g_main_program +import paddle.v2.framework.core as core +import numpy +import unittest + + +class TestLoDRankTable(unittest.TestCase): + def test_lod_rank_table(self): + x = data(name='x', shape=[100]) + cpu = core.CPUPlace() + rank_table = lod_rank_table(x=x, level=1) + rank_table.persistable = True + exe = Executor(cpu) + scope = core.Scope() + + tensor = core.LoDTensor() + tensor.set(numpy.random.random(size=(17, 100)), cpu) + tensor.set_lod([[0, 1, 3], [0, 5, 6, 7], [0, 3, 4, 9, 10, 13, 16, 17]]) + + exe.run(g_main_program, scope=scope, feed={'x': tensor}) + var = scope.find_var(rank_table.name) + table = var.get_lod_rank_table() + self.assertEqual([(0, 5), (1, 1), (2, 1)], table.items()) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_lod_tensor_array.py b/python/paddle/v2/framework/tests/test_lod_tensor_array.py new file mode 100644 index 0000000000000000000000000000000000000000..a433bcf622b14a1d2d33b5b98d555e1a21e4b9e8 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_lod_tensor_array.py @@ -0,0 +1,38 @@ +import unittest +import paddle.v2.framework.core as core +import numpy + + +class TestLoDTensorArray(unittest.TestCase): + def test_get_set(self): + scope = core.Scope() + arr = scope.var('tmp_lod_tensor_array') + tensor_array = arr.get_lod_tensor_array() + self.assertEqual(0, len(tensor_array)) + cpu = core.CPUPlace() + for i in xrange(10): + t = core.LoDTensor() + t.set(numpy.array([i], dtype='float32'), cpu) + t.set_lod([[0, 1]]) + tensor_array.append(t) + + self.assertEqual(10, len(tensor_array)) + + for i in xrange(10): + t = tensor_array[i] + self.assertEqual(numpy.array(t), numpy.array([i], dtype='float32')) + self.assertEqual([[0, 1]], t.lod()) + + t = core.LoDTensor() + t.set(numpy.array([i + 10], dtype='float32'), cpu) + t.set_lod([[0, 2]]) + tensor_array[i] = t + t = tensor_array[i] + self.assertEqual( + numpy.array(t), numpy.array( + [i + 10], dtype='float32')) + self.assertEqual([[0, 2]], t.lod()) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_operator_desc.py 
b/python/paddle/v2/framework/tests/test_operator_desc.py index 7355f72455ca4f821c9520d97162e3e0050383af..a0bc4e0b91602cfc90f91a1e2dd4bce22c0dbf6d 100644 --- a/python/paddle/v2/framework/tests/test_operator_desc.py +++ b/python/paddle/v2/framework/tests/test_operator_desc.py @@ -1,11 +1,11 @@ import unittest -from paddle.v2.framework.framework import Variable, Program, g_program +from paddle.v2.framework.framework import Variable, Program, g_main_program import paddle.v2.framework.core as core class TestOperator(unittest.TestCase): def test_error_type(self): - block = g_program.create_block() + block = g_main_program.create_block() try: block.append_op() self.assertFail() diff --git a/python/paddle/v2/framework/tests/test_parameter.py b/python/paddle/v2/framework/tests/test_parameter.py index 1ac0cdd99f1b7c15d64ae9d2c465d5a9d563bd80..f04eb4cf27276b0f7da0793c97742ac42e4583be 100644 --- a/python/paddle/v2/framework/tests/test_parameter.py +++ b/python/paddle/v2/framework/tests/test_parameter.py @@ -1,11 +1,11 @@ import unittest -from paddle.v2.framework.framework import g_program +from paddle.v2.framework.framework import g_main_program import paddle.v2.framework.core as core class TestParameter(unittest.TestCase): def test_param(self): - b = g_program.create_block() + b = g_main_program.create_block() param = b.create_parameter( name='fc.w', shape=[784, 100], diff --git a/python/paddle/v2/framework/tests/test_positive_negative_pair_op.py b/python/paddle/v2/framework/tests/test_positive_negative_pair_op.py new file mode 100644 index 0000000000000000000000000000000000000000..f6a6c428a26dece01fe2958991edd3edf3a8266e --- /dev/null +++ b/python/paddle/v2/framework/tests/test_positive_negative_pair_op.py @@ -0,0 +1,106 @@ +import unittest +import itertools +import numpy as np +from op_test import OpTest + + +def py_pnpair_op(score, label, query, column=-1, weight=None): + # group by query id + predictions = {} + batch_size = label.shape[0] + if weight is None: + weight = np.ones(shape=(batch_size, 1)).astype('float32') + for s, l, q, w in zip(score, label, query, weight): + s, l, q, w = s[column], l[0], q[0], w[0] + if q not in predictions: + predictions[q] = [] + predictions[q].append((s, l, w)) + + # accumulate statistics + pos, neg, neu = 0, 0, 0 + for _, ranks in predictions.items(): + for e1, e2 in itertools.combinations(ranks, 2): + s1, s2, l1, l2, w1, w2 = e1[0], e2[0], e1[1], e2[1], e1[2], e2[2] + w = (w1 + w2) * 0.5 + if l1 == l2: + continue + if s1 == s2: + neu += w + elif (s1 - s2) * (l1 - l2) > 0: + pos += w + else: + neg += w + + return np.array(pos).astype('float32'), np.array(neg).astype( + 'float32'), np.array(neu).astype('float32') + + +class TestPositiveNegativePairOp(OpTest): + def setUp(self): + self.op_type = 'positive_negative_pair' + batch_size = 20 + max_query_id = 5 + score = np.random.normal(size=(batch_size, 1)).astype('float32') + label = np.random.normal(size=(batch_size, 1)).astype('float32') + query = np.array( + [np.random.randint(max_query_id) for i in range(batch_size)]) + query = np.reshape(query, newshape=(batch_size, 1)).astype('int64') + + pos, neg, neu = py_pnpair_op(score, label, query) + self.inputs = {'Score': score, 'Label': label, 'QueryID': query} + self.attrs = {'column': -1} + self.outputs = { + 'PositivePair': pos, + 'NegativePair': neg, + 'NeutralPair': neu + } + + def test_check_output(self): + self.check_output() + + +class TestPositiveNegativePairOpAccumulateWeight(OpTest): + def setUp(self): + self.op_type = 'positive_negative_pair' + batch_size 
= 20 + max_query_id = 5 + max_random_num = 2 << 15 + score_dim = 2 + score = np.random.normal(size=(batch_size, 2)).astype('float32') + label = np.random.normal(size=(batch_size, 1)).astype('float32') + weight = np.random.normal(size=(batch_size, 1)).astype('float32') + query = np.array( + [np.random.randint(max_query_id) for i in range(batch_size)]) + query = np.reshape(query, newshape=(batch_size, 1)).astype('int64') + acc_pos = np.reshape( + np.random.randint(max_random_num), newshape=(1)).astype('float32') + acc_neg = np.reshape( + np.random.randint(max_random_num), newshape=(1)).astype('float32') + acc_neu = np.reshape( + np.random.randint(max_random_num), newshape=(1)).astype('float32') + column = np.random.randint(score_dim) + + pos, neg, neu = py_pnpair_op( + score, label, query, column=column, weight=weight) + self.inputs = { + 'Score': score, + 'Label': label, + 'QueryID': query, + 'AccumulatePositivePair': acc_pos, + 'AccumulateNegativePair': acc_neg, + 'AccumulateNeutralPair': acc_neu, + 'Weight': weight + } + self.attrs = {'column': column} + self.outputs = { + 'PositivePair': pos + acc_pos, + 'NegativePair': neg + acc_neg, + 'NeutralPair': neu + acc_neu + } + + def test_check_output(self): + self.check_output() + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_program.py b/python/paddle/v2/framework/tests/test_program.py index be020573b7dcd9f8dcd0f99d654dc8b2106abb2b..7be67b6614ee3302a319289b821a214a81b6f64e 100644 --- a/python/paddle/v2/framework/tests/test_program.py +++ b/python/paddle/v2/framework/tests/test_program.py @@ -2,35 +2,35 @@ import unittest import paddle.v2.framework.core as core from paddle.v2.framework.framework import Program -from paddle.v2.framework.framework import g_program +from paddle.v2.framework.framework import g_main_program class TestProgram(unittest.TestCase): def test_program(self): - b = g_program.current_block() + b = g_main_program.current_block() self.assertEqual(-1, b.parent_idx) self.assertEqual(0, b.idx) - b = g_program.create_block() + b = g_main_program.create_block() self.assertEqual(1, b.idx) self.assertEqual(0, b.parent_idx) - b = g_program.create_block() + b = g_main_program.create_block() self.assertEqual(2, b.idx) self.assertEqual(1, b.parent_idx) - g_program.rollback() + g_main_program.rollback() - b = g_program.current_block() + b = g_main_program.current_block() self.assertEqual(1, b.idx) self.assertEqual(0, b.parent_idx) - b = g_program.create_block() + b = g_main_program.create_block() self.assertEqual(3, b.idx) self.assertEqual(1, b.parent_idx) - g_program.rollback() - b = g_program.current_block() + g_main_program.rollback() + b = g_main_program.current_block() self.assertEqual(1, b.idx) self.assertEqual(0, b.parent_idx) diff --git a/python/paddle/v2/framework/tests/test_recognize_digits_conv.py b/python/paddle/v2/framework/tests/test_recognize_digits_conv.py index 695236f3df6d34446038756055df83ebc86becd9..c3186e25b37fe112987017521daa336199085134 100644 --- a/python/paddle/v2/framework/tests/test_recognize_digits_conv.py +++ b/python/paddle/v2/framework/tests/test_recognize_digits_conv.py @@ -4,26 +4,26 @@ import paddle.v2.framework.nets as nets import paddle.v2.framework.core as core import paddle.v2.framework.optimizer as optimizer -from paddle.v2.framework.framework import Program, g_program +from paddle.v2.framework.framework import Program, g_main_program from paddle.v2.framework.executor import Executor import numpy as np -init_program = Program() -program = 
Program() +startup_program = Program() +main_program = Program() images = layers.data( name='pixel', shape=[1, 28, 28], data_type='float32', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) label = layers.data( name='label', shape=[1], data_type='int64', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) conv_pool_1 = nets.simple_img_conv_pool( input=images, filter_size=5, @@ -31,8 +31,8 @@ conv_pool_1 = nets.simple_img_conv_pool( pool_size=2, pool_stride=2, act="relu", - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) conv_pool_2 = nets.simple_img_conv_pool( input=conv_pool_1, filter_size=5, @@ -40,24 +40,30 @@ conv_pool_2 = nets.simple_img_conv_pool( pool_size=2, pool_stride=2, act="relu", - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) predict = layers.fc(input=conv_pool_2, size=10, act="softmax", - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) cost = layers.cross_entropy( - input=predict, label=label, program=program, init_program=init_program) -avg_cost = layers.mean(x=cost, program=program) + input=predict, + label=label, + main_program=main_program, + startup_program=startup_program) +avg_cost = layers.mean(x=cost, main_program=main_program) accuracy = layers.accuracy( - input=predict, label=label, program=program, init_program=init_program) + input=predict, + label=label, + main_program=main_program, + startup_program=startup_program) # optimizer = optimizer.MomentumOptimizer(learning_rate=0.1 / 128.0, # momentum=0.9) optimizer = optimizer.AdamOptimizer(learning_rate=0.01, beta1=0.9, beta2=0.999) -opts = optimizer.minimize(avg_cost, init_program) +opts = optimizer.minimize(avg_cost, startup_program) BATCH_SIZE = 50 PASS_NUM = 3 @@ -69,7 +75,7 @@ train_reader = paddle.batch( place = core.CPUPlace() exe = Executor(place) -exe.run(init_program, feed={}, fetch_list=[]) +exe.run(startup_program, feed={}, fetch_list=[]) for pass_id in range(PASS_NUM): count = 0 @@ -84,7 +90,7 @@ for pass_id in range(PASS_NUM): tensor_img.set(img_data, place) tensor_y.set(y_data, place) - outs = exe.run(program, + outs = exe.run(main_program, feed={"pixel": tensor_img, "label": tensor_y}, fetch_list=[avg_cost, accuracy]) diff --git a/python/paddle/v2/framework/tests/test_recognize_digits_mlp.py b/python/paddle/v2/framework/tests/test_recognize_digits_mlp.py index c116d1a6d359751cceac419b4a26e41746689214..076cf882160cd53f45ef291d82ba57ada843a287 100644 --- a/python/paddle/v2/framework/tests/test_recognize_digits_mlp.py +++ b/python/paddle/v2/framework/tests/test_recognize_digits_mlp.py @@ -11,14 +11,14 @@ from paddle.v2.framework.initializer import UniformInitializer import numpy as np BATCH_SIZE = 128 -init_program = Program() -program = Program() +startup_program = Program() +main_program = Program() image = layers.data( name='x', shape=[784], data_type='float32', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) param_attr = { 'name': None, @@ -30,36 +30,45 @@ param_attr = { hidden1 = layers.fc(input=image, size=128, act='relu', - program=program, - init_program=init_program, + main_program=main_program, + startup_program=startup_program, param_attr=param_attr) hidden2 = layers.fc(input=hidden1, size=64, act='relu', - program=program, 
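# Illustrative sketch (not part of the patch): the keyword rename applied
# throughout these tests. Layers that previously took `program=` and
# `init_program=` now take `main_program=` and `startup_program=`. The
# snippet below is a minimal, hypothetical usage assuming the v2 framework
# API exactly as shown in the hunks above.
import paddle.v2.framework.layers as layers
from paddle.v2.framework.framework import Program

startup_program = Program()
main_program = Program()
x = layers.data(
    name='x',
    shape=[784],
    data_type='float32',
    main_program=main_program,
    startup_program=startup_program)
predict = layers.fc(input=x,
                    size=10,
                    act='softmax',
                    main_program=main_program,
                    startup_program=startup_program)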
- init_program=init_program, + main_program=main_program, + startup_program=startup_program, param_attr=param_attr) predict = layers.fc(input=hidden2, size=10, act='softmax', - program=program, - init_program=init_program, + main_program=main_program, + startup_program=startup_program, param_attr=param_attr) label = layers.data( name='y', shape=[1], data_type='int64', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) cost = layers.cross_entropy( - input=predict, label=label, program=program, init_program=init_program) -avg_cost = layers.mean(x=cost, program=program, init_program=init_program) + input=predict, + label=label, + main_program=main_program, + startup_program=startup_program) +avg_cost = layers.mean( + x=cost, main_program=main_program, startup_program=startup_program) +accuracy = layers.accuracy( + input=predict, + label=label, + main_program=main_program, + startup_program=startup_program) optimizer = optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9) -opts = optimizer.minimize(avg_cost, init_program) +opts = optimizer.minimize(avg_cost, startup_program) train_reader = paddle.batch( paddle.reader.shuffle( @@ -69,7 +78,7 @@ train_reader = paddle.batch( place = core.CPUPlace() exe = Executor(place) -exe.run(init_program, feed={}, fetch_list=[]) +exe.run(startup_program, feed={}, fetch_list=[]) PASS_NUM = 100 for pass_id in range(PASS_NUM): @@ -84,12 +93,12 @@ for pass_id in range(PASS_NUM): tensor_y = core.LoDTensor() tensor_y.set(y_data, place) - outs = exe.run(program, + outs = exe.run(main_program, feed={'x': tensor_x, 'y': tensor_y}, - fetch_list=[avg_cost]) + fetch_list=[avg_cost, accuracy]) out = np.array(outs[0]) - + acc = np.array(outs[1]) if out[0] < 5.0: exit(0) # if avg cost less than 5.0, we think our code is good. 
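# Illustrative sketch (not part of the patch): the run pattern the renamed
# programs follow in the tests above. The startup program is executed once
# to initialize parameters, then the main program is executed per batch and
# fetched values come back as tensors convertible to numpy arrays. The names
# exe, tensor_x, tensor_y, avg_cost and accuracy are assumed from the
# surrounding test code.
exe = Executor(core.CPUPlace())
exe.run(startup_program, feed={}, fetch_list=[])
outs = exe.run(main_program,
               feed={'x': tensor_x,
                     'y': tensor_y},
               fetch_list=[avg_cost, accuracy])
out, acc = np.array(outs[0]), np.array(outs[1])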
exit(1) diff --git a/python/paddle/v2/framework/tests/test_recommender_system.py b/python/paddle/v2/framework/tests/test_recommender_system.py index 7bc3f84a935884d4b7532a848f90a4648e92896a..7e54f0d1b86461954b05d1e3c1d5f49f71ff9068 100644 --- a/python/paddle/v2/framework/tests/test_recommender_system.py +++ b/python/paddle/v2/framework/tests/test_recommender_system.py @@ -4,13 +4,13 @@ import paddle.v2.framework.nets as nets import paddle.v2.framework.core as core import paddle.v2.framework.optimizer as optimizer -from paddle.v2.framework.framework import Program, g_program +from paddle.v2.framework.framework import Program, g_main_program from paddle.v2.framework.executor import Executor import numpy as np -init_program = Program() -program = Program() +startup_program = Program() +main_program = Program() is_sparse = True use_gpu = False BATCH_SIZE = 256 @@ -26,8 +26,8 @@ def get_usr_combined_features(): name='user_id', shape=[1], data_type='int64', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) usr_emb = layers.embedding( input=uid, @@ -35,13 +35,13 @@ def get_usr_combined_features(): size=[USR_DICT_SIZE, 32], param_attr={'name': 'user_table'}, is_sparse=is_sparse, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) usr_fc = layers.fc(input=usr_emb, size=32, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) USR_GENDER_DICT_SIZE = 2 @@ -49,75 +49,75 @@ def get_usr_combined_features(): name='gender_id', shape=[1], data_type='int64', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) usr_gender_emb = layers.embedding( input=usr_gender_id, size=[USR_GENDER_DICT_SIZE, 16], param_attr={'name': 'gender_table'}, is_sparse=is_sparse, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) usr_gender_fc = layers.fc(input=usr_gender_emb, size=16, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table) usr_age_id = layers.data( name='age_id', shape=[1], data_type="int64", - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) usr_age_emb = layers.embedding( input=usr_age_id, size=[USR_AGE_DICT_SIZE, 16], is_sparse=is_sparse, param_attr={'name': 'age_table'}, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) usr_age_fc = layers.fc(input=usr_age_emb, size=16, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1 usr_job_id = layers.data( name='job_id', shape=[1], data_type="int64", - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) usr_job_emb = layers.embedding( input=usr_job_id, size=[USR_JOB_DICT_SIZE, 16], param_attr={'name': 'job_table'}, is_sparse=is_sparse, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) usr_job_fc = layers.fc(input=usr_job_emb, size=16, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) concat_embed = layers.concat( input=[usr_fc, usr_gender_fc, 
usr_age_fc, usr_job_fc], axis=1, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh", - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) return usr_combined_features @@ -130,8 +130,8 @@ def get_mov_combined_features(): name='movie_id', shape=[1], data_type='int64', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) mov_emb = layers.embedding( input=mov_id, @@ -139,13 +139,13 @@ def get_mov_combined_features(): size=[MOV_DICT_SIZE, 32], param_attr={'name': 'movie_table'}, is_sparse=is_sparse, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) mov_fc = layers.fc(input=mov_emb, size=32, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories()) @@ -153,21 +153,21 @@ def get_mov_combined_features(): name='category_id', shape=[1], data_type='int64', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) mov_categories_emb = layers.embedding( input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=is_sparse, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) mov_categories_hidden = layers.sequence_pool( input=mov_categories_emb, pool_type="sum", - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict()) @@ -175,15 +175,15 @@ def get_mov_combined_features(): name='movie_title', shape=[1], data_type='int64', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) mov_title_emb = layers.embedding( input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=is_sparse, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) mov_title_conv = nets.sequence_conv_pool( input=mov_title_emb, @@ -191,21 +191,21 @@ def get_mov_combined_features(): filter_size=3, act="tanh", pool_type="sum", - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) concat_embed = layers.concat( input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) # FIXME(dzh) : need tanh operator mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh", - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) return mov_combined_features @@ -218,24 +218,26 @@ def model(): inference = layers.cos_sim( X=usr_combined_features, Y=mov_combined_features, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) label = layers.data( name='score', shape=[1], data_type='float32', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) square_cost = layers.square_error_cost( input=inference, label=label, - program=program, - init_program=init_program) + main_program=main_program, + 
startup_program=startup_program) avg_cost = layers.mean( - x=square_cost, program=program, init_program=init_program) + x=square_cost, + main_program=main_program, + startup_program=startup_program) return avg_cost @@ -243,8 +245,8 @@ def model(): def main(): cost = model() sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.2) - opts = sgd_optimizer.minimize(cost, init_program=init_program) - block = program.block(0) + opts = sgd_optimizer.minimize(cost, startup_program=startup_program) + block = main_program.block(0) if use_gpu: place = core.GPUPlace(0) @@ -252,7 +254,7 @@ def main(): place = core.CPUPlace() exe = Executor(place) - exe.run(init_program, feed={}, fetch_list=[]) + exe.run(startup_program, feed={}, fetch_list=[]) train_reader = paddle.batch( paddle.reader.shuffle( @@ -301,7 +303,7 @@ def main(): PASS_NUM = 100 for pass_id in range(PASS_NUM): for data in train_reader(): - outs = exe.run(program, + outs = exe.run(main_program, feed=func_feed(feeding, data), fetch_list=[cost]) out = np.array(outs[0]) diff --git a/python/paddle/v2/framework/tests/test_recurrent_op.py b/python/paddle/v2/framework/tests/test_recurrent_op.py index 157befd2effa252356fdb51b273fefd4bb7ae31c..16100429dd4010eb5c9a3e8896212f39295a4c8a 100644 --- a/python/paddle/v2/framework/tests/test_recurrent_op.py +++ b/python/paddle/v2/framework/tests/test_recurrent_op.py @@ -1,9 +1,6 @@ import unittest -import logging - -from op_test import get_numeric_gradient -from paddle.v2.framework.layers import * +import paddle.v2.framework.layers as layers from paddle.v2.framework.framework import Program from paddle.v2.framework.executor import Executor from paddle.v2.framework.backward import append_backward_ops @@ -16,8 +13,8 @@ class PyRNNBase(object): self.x = np.ones(shape=input_shape).astype("float32") self.y = np.zeros(shape=output_shape).astype("float32") - def step(self): - pass + def step(self, step_id, x): + raise NotImplementedError def forward(self): for step_id in range(self.x.shape[0]): @@ -99,45 +96,47 @@ class RecurrentOpTest1(unittest.TestCase): batch_size = 1 sent_len = 1 - def init_program(self): - self.program = Program() - self.init_program = Program() + def setup_program(self): + self.main_program = Program() + self.startup_program = Program() self.p_info = { - "program": self.program, - "init_program": self.init_program + "main_program": self.main_program, + "startup_program": self.startup_program } self.place = core.CPUPlace() def setUp(self): - self.init_program() + self.setup_program() self.data_field = {"x", "h_boot"} self.input_shape = (self.sent_len, self.batch_size, self.input_dim) self.output_shape = (self.sent_len, self.batch_size, self.input_dim) self.py_rnn = PySimpleRNN1(self.input_shape, self.output_shape) - self.output = mean(x=self.create_rnn_op(), **self.p_info) + self.output = layers.mean(x=self.create_rnn_op(), **self.p_info) def create_rnn_op(self): - x = data( + x = layers.data( shape=[self.sent_len, self.batch_size, self.input_dim], data_type='float32', name='x', append_batch_size=False, **self.p_info) - h_boot = data( + x.stop_gradient = False + h_boot = layers.data( shape=[self.input_dim], data_type='float32', name='h_boot', **self.p_info) + h_boot.stop_gradient = False - rnn = StaticRNN(program=self.program) + rnn = layers.StaticRNN(main_program=self.main_program) with rnn.step(): h_pre = rnn.memory(init=h_boot) x_t = rnn.step_input(x) - h = scale( - x=elementwise_add( + h = layers.scale( + x=layers.elementwise_add( x=h_pre, y=x_t, **self.p_info), scale=self.py_rnn.scale, 
**self.p_info) @@ -153,7 +152,7 @@ class RecurrentOpTest1(unittest.TestCase): for x in self.data_field } exe = Executor(self.place) - out = exe.run(self.program, + out = exe.run(self.main_program, feed=self.feed_map, fetch_list=[self.output]) @@ -165,12 +164,14 @@ class RecurrentOpTest1(unittest.TestCase): for x in self.data_field } fetch_list = [ - self.program.global_block().var(x + "@GRAD") + self.main_program.global_block().var(x + "@GRAD") for x in self.data_field ] exe = Executor(self.place) - return exe.run(self.program, feed=self.feed_map, fetch_list=fetch_list) + return exe.run(self.main_program, + feed=self.feed_map, + fetch_list=fetch_list) def test_backward(self): self.check_forward() @@ -237,7 +238,7 @@ class RecurrentOpTest2(RecurrentOpTest1): sent_len = 2 def setUp(self): - self.init_program() + self.setup_program() self.data_field = {"x", "h_boot", "W", "U"} @@ -245,39 +246,41 @@ class RecurrentOpTest2(RecurrentOpTest1): self.output_shape = (self.sent_len, self.batch_size, self.input_dim) self.py_rnn = PySimpleRNN2(self.input_shape, self.output_shape) - self.output = mean(x=self.create_rnn_op(), **self.p_info) + self.output = layers.mean(x=self.create_rnn_op(), **self.p_info) def create_rnn_op(self): - x = data( + x = layers.data( shape=[self.sent_len, self.batch_size, self.input_dim], data_type='float32', name='x', append_batch_size=False, **self.p_info) - h_boot = data( + x.stop_gradient = False + h_boot = layers.data( shape=[self.input_dim], data_type='float32', name='h_boot', **self.p_info) + h_boot.stop_gradient = False - rnn = StaticRNN(program=self.program) + rnn = layers.StaticRNN(main_program=self.main_program) with rnn.step(): h_pre = rnn.memory(init=h_boot) x_t = rnn.step_input(x) - temp_l = fc(input=x_t, - size=self.input_dim, - param_attr={'name': 'W'}, - bias_attr=False, - **self.p_info) - temp_r = fc(input=h_pre, - size=self.input_dim, - param_attr={'name': 'U'}, - bias_attr=False, - **self.p_info) - - h = sigmoid( - x=elementwise_add( + temp_l = layers.fc(input=x_t, + size=self.input_dim, + param_attr={'name': 'W'}, + bias_attr=False, + **self.p_info) + temp_r = layers.fc(input=h_pre, + size=self.input_dim, + param_attr={'name': 'U'}, + bias_attr=False, + **self.p_info) + + h = layers.sigmoid( + x=layers.elementwise_add( x=temp_l, y=temp_r, **self.p_info), **self.p_info) @@ -287,7 +290,7 @@ class RecurrentOpTest2(RecurrentOpTest1): return rnn() -class RecurrentOpTest3(RecurrentOpTest1): +class RecurrentOpMultipleMemoryTest(RecurrentOpTest1): ''' Test RNNOp with two memories equation: @@ -304,8 +307,8 @@ class RecurrentOpTest3(RecurrentOpTest1): class PySimpleRNN3(PyRNNBase): def __init__(self, input_shape, output_shape): - super(RecurrentOpTest3.PySimpleRNN3, self).__init__(input_shape, - output_shape) + super(RecurrentOpMultipleMemoryTest.PySimpleRNN3, self).__init__( + input_shape, output_shape) seq_len, batch_size, input_dim = input_shape self.h_boot1 = np.random.normal(size=(batch_size, @@ -333,46 +336,49 @@ class RecurrentOpTest3(RecurrentOpTest1): sent_len = 2 def setUp(self): - self.init_program() + self.setup_program() self.data_field = {"x", "h_boot1", "h_boot2"} self.input_shape = (self.sent_len, self.batch_size, self.input_dim) self.output_shape = (self.sent_len, self.batch_size, self.input_dim) - self.py_rnn = RecurrentOpTest3.PySimpleRNN3(self.input_shape, - self.output_shape) + self.py_rnn = RecurrentOpMultipleMemoryTest.PySimpleRNN3( + self.input_shape, self.output_shape) - self.output = mean(x=self.create_rnn_op(), **self.p_info) + 
self.output = layers.mean(x=self.create_rnn_op(), **self.p_info) def create_rnn_op(self): - x = data( + x = layers.data( shape=[self.sent_len, self.batch_size, self.input_dim], data_type='float32', name='x', append_batch_size=False, **self.p_info) - h_boot1 = data( + x.stop_gradient = False + h_boot1 = layers.data( shape=[self.batch_size, self.input_dim], data_type='float32', name='h_boot1', append_batch_size=False, **self.p_info) - h_boot2 = data( + h_boot1.stop_gradient = False + h_boot2 = layers.data( shape=[self.batch_size, self.input_dim], data_type='float32', name='h_boot2', append_batch_size=False, **self.p_info) + h_boot2.stop_gradient = False - rnn = StaticRNN(program=self.program) + rnn = layers.StaticRNN(main_program=self.main_program) with rnn.step(): h_pre1 = rnn.memory(init=h_boot1) h_pre2 = rnn.memory(init=h_boot2) x_t = rnn.step_input(x) - mem1 = scale(x=h_pre1, scale=1.0, **self.p_info) - mem2 = scale(x=h_pre2, scale=1.0, **self.p_info) - out = sums(input=[mem1, x_t, mem2], **self.p_info) + mem1 = layers.scale(x=h_pre1, scale=1.0, **self.p_info) + mem2 = layers.scale(x=h_pre2, scale=1.0, **self.p_info) + out = layers.sums(input=[mem1, x_t, mem2], **self.p_info) rnn.update_memory(h_pre1, mem1) rnn.update_memory(h_pre2, mem2) @@ -381,5 +387,70 @@ class RecurrentOpTest3(RecurrentOpTest1): return rnn() +class RecurrentOpNoMemBootTest(RecurrentOpTest1): + ''' + Test RNNOp with two memories + equation: + mem = x + mem_pre + y = mem + vars: + - x + memories: + - mem + outputs: + - y + ''' + + class PySimpleRNN4(PyRNNBase): + def __init__(self, input_shape, output_shape): + super(RecurrentOpNoMemBootTest.PySimpleRNN4, self).__init__( + input_shape, output_shape) + men_dim = input_shape + self.mems = np.zeros(shape=men_dim).astype("float32") + + def step(self, step_id, x): + if step_id == 0: + pre_mem = np.zeros_like(x) + else: + pre_mem = self.mems[step_id - 1] + self.mems[step_id] = pre_mem + x + self.y[step_id] = self.mems[step_id] + + input_dim = 1 + batch_size = 1 + sent_len = 2 + + def setUp(self): + self.setup_program() + + self.data_field = {"x"} + + self.input_shape = (self.sent_len, self.batch_size, self.input_dim) + self.output_shape = (self.sent_len, self.batch_size, self.input_dim) + self.py_rnn = RecurrentOpNoMemBootTest.PySimpleRNN4(self.input_shape, + self.output_shape) + self.output = layers.mean(x=self.create_rnn_op(), **self.p_info) + print self.main_program + + def create_rnn_op(self): + x = layers.data( + shape=[self.sent_len, self.batch_size, self.input_dim], + data_type='float32', + name='x', + append_batch_size=False, + **self.p_info) + x.stop_gradient = False + + rnn = layers.StaticRNN(main_program=self.main_program) + with rnn.step(): + mem_pre = rnn.memory(shape=[-1, self.input_dim], batch_ref=x) + x_t = rnn.step_input(x) + mem = layers.elementwise_add(x=mem_pre, y=x_t, **self.p_info) + rnn.update_memory(mem_pre, mem) + rnn.output(mem) + + return rnn() + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/v2/framework/tests/test_seq_pool.py b/python/paddle/v2/framework/tests/test_seq_pool.py index efc4920124afb539017a3b3f211c7320da68ffef..512d8b315f29cecf79ae274dca491c240f3447a1 100644 --- a/python/paddle/v2/framework/tests/test_seq_pool.py +++ b/python/paddle/v2/framework/tests/test_seq_pool.py @@ -29,6 +29,9 @@ class TestSeqAvgPool(OpTest): self.check_output() def test_check_grad(self): + # Remove MaxIndex after check_grad is refined. 
+ self.outputs['MaxIndex'] = \ + np.zeros(self.outputs['Out'].shape).astype('int32') self.check_grad(["X"], "Out") @@ -85,31 +88,53 @@ class TestSeqSqrtPool2D(TestSeqAvgPool2D): out[i] = np.reshape(sub_x.sum(axis=0) / np.sqrt(len), (3, 17)) def test_check_grad(self): + # Remove MaxIndex after check_grad is refined. + self.outputs['MaxIndex'] = \ + np.zeros(self.outputs['Out'].shape).astype('int32') self.check_grad(["X"], "Out", max_relative_error=0.06) class TestSeqMaxPool(TestSeqAvgPool): + def set_data(self): + self.op_type = 'sequence_pool' + x = np.random.uniform(0.1, 1, [13, 23]).astype('float32') + lod = [[0, 4, 5, 8, 13]] + for i in range(4): + l = lod[0][i + 1] - lod[0][i] + x[lod[0][i] + np.random.randint(l), :] += 2.0 + + self.inputs = {'X': (x, lod)} + + out = np.zeros((4, 23)).astype('float32') + self.outputs = {'Out': out} + return x, lod, out + def compute(self, x, lod, out): self.attrs = {'pooltype': "MAX"} for i in range(4): sub_x = x[lod[0][i]:lod[0][i + 1], :] out[i] = np.amax(sub_x, axis=0) - def test_check_grad(self): - # Remove MaxPool2D from gradient check to confirm the success of CI. - return - class TestSeqMaxPool2D(TestSeqAvgPool2D): + def set_data(self): + self.op_type = 'sequence_pool' + x = np.random.uniform(0.1, 1, [13, 3, 11]).astype('float32') + lod = [[0, 4, 5, 8, 13]] + self.inputs = {'X': (x, lod)} + for i in range(4): + l = lod[0][i + 1] - lod[0][i] + x[lod[0][i] + np.random.randint(l), :] += 1.0 + + out = np.zeros((4, 3, 11)).astype('float32') + self.outputs = {'Out': out} + return x, lod, out + def compute(self, x, lod, out): self.attrs = {'pooltype': "MAX"} for i in range(4): - sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17)) - out[i] = np.reshape(np.amax(sub_x, axis=0), (3, 17)) - - def test_check_grad(self): - # Remove MaxPool2D from gradient check to confirm the success of CI. 
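# Illustrative sketch (not part of the patch): how the LoD offsets above are
# interpreted by the sequence_pool reference computation. lod[0] stores
# cumulative row offsets, so sequence i occupies rows lod[0][i]:lod[0][i+1]
# of x, and MAX pooling reduces each such slice along axis 0. Values here
# are made up for illustration.
import numpy as np

x = np.random.uniform(0.1, 1, [13, 23]).astype('float32')
lod = [[0, 4, 5, 8, 13]]                  # four sequences: lengths 4, 1, 3, 5
out = np.zeros((4, 23)).astype('float32')
for i in range(4):
    seg = x[lod[0][i]:lod[0][i + 1], :]   # rows belonging to sequence i
    out[i] = np.amax(seg, axis=0)         # "MAX" pooltype, as in the test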
- return + sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 11)) + out[i] = np.reshape(np.amax(sub_x, axis=0), (3, 11)) class TestSeqLastPool(TestSeqAvgPool): diff --git a/python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py b/python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py index f93feb20696f126423bc9412eab3b4aa41b19426..c2f07f9096c69f3d4977f9444bdd5dcda8028973 100644 --- a/python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py +++ b/python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py @@ -12,30 +12,30 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): def setUp(self): self.op_type = "softmax_with_cross_entropy" - batch_size = 3 + batch_size = 2 class_num = 37 logits = np.random.uniform(0.1, 1.0, - [batch_size, class_num]).astype("float32") + [batch_size, class_num]).astype("float64") softmax = np.apply_along_axis(stable_softmax, 1, logits) - labels = np.random.randint(0, class_num, [batch_size, 1], dtype="int32") + labels = np.random.randint(0, class_num, [batch_size, 1], dtype="int64") cross_entropy = np.asmatrix( [[-np.log(softmax[i][labels[i][0]])] for i in range(softmax.shape[0])], - dtype="float32") + dtype="float64") self.inputs = {"Logits": logits, "Label": labels} self.outputs = { - "Softmax": softmax.astype('float32'), - "Loss": cross_entropy.astype('float32') + "Softmax": softmax.astype("float64"), + "Loss": cross_entropy.astype("float64") } def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(["Logits"], "Loss", max_relative_error=0.05) + self.check_grad(["Logits"], "Loss") class TestSoftmaxWithCrossEntropyOp2(OpTest): @@ -49,19 +49,19 @@ class TestSoftmaxWithCrossEntropyOp2(OpTest): class_num = 37 logits = np.random.uniform(0.1, 1.0, - [batch_size, class_num]).astype("float32") + [batch_size, class_num]).astype("float64") softmax = np.apply_along_axis(stable_softmax, 1, logits) labels = np.random.uniform(0.1, 1.0, - [batch_size, class_num]).astype("float32") + [batch_size, class_num]).astype("float64") labels /= np.sum(labels, axis=1, keepdims=True) cross_entropy = (-labels * np.log(softmax)).sum( - axis=1, keepdims=True).astype("float32") + axis=1, keepdims=True).astype("float64") self.inputs = {"Logits": logits, "Label": labels} self.outputs = { - "Softmax": softmax.astype('float32'), - "Loss": cross_entropy.astype('float32') + "Softmax": softmax.astype("float64"), + "Loss": cross_entropy.astype("float64") } self.attrs = {"soft_label": True} @@ -69,9 +69,8 @@ class TestSoftmaxWithCrossEntropyOp2(OpTest): self.check_output() def test_check_grad(self): - self.check_grad(["Logits"], "Loss", max_relative_error=0.05) + self.check_grad(["Logits"], "Loss") if __name__ == "__main__": - exit(0) # FIXME: xe has bug unittest.main() diff --git a/python/paddle/v2/framework/tests/test_understand_sentiment_conv.py b/python/paddle/v2/framework/tests/test_understand_sentiment_conv.py index dcbb34ccfcff65086dff1cb1ffd859c4c1e0d7ca..eb377e9264b6031e9bf484a90b7c2b39442407f1 100644 --- a/python/paddle/v2/framework/tests/test_understand_sentiment_conv.py +++ b/python/paddle/v2/framework/tests/test_understand_sentiment_conv.py @@ -4,7 +4,7 @@ import paddle.v2.framework.nets as nets import paddle.v2.framework.core as core import paddle.v2.framework.optimizer as optimizer -from paddle.v2.framework.framework import Program, g_program, g_init_program +from paddle.v2.framework.framework import Program, g_main_program, g_startup_program from paddle.v2.framework.executor import 
Executor import numpy as np @@ -70,7 +70,7 @@ def main(): place = core.CPUPlace() exe = Executor(place) - exe.run(g_init_program) + exe.run(g_startup_program) for pass_id in xrange(PASS_NUM): for data in train_data(): @@ -82,7 +82,7 @@ def main(): tensor_label = core.LoDTensor() tensor_label.set(label, place) - outs = exe.run(g_program, + outs = exe.run(g_main_program, feed={"words": tensor_words, "label": tensor_label}, fetch_list=[cost, acc]) diff --git a/python/paddle/v2/framework/tests/test_variable.py b/python/paddle/v2/framework/tests/test_variable.py index c670ca19afbd778747303cb002666aa2a5e62c37..03115f10a5a494424c6f8310c544c569be818e5b 100644 --- a/python/paddle/v2/framework/tests/test_variable.py +++ b/python/paddle/v2/framework/tests/test_variable.py @@ -1,5 +1,5 @@ import unittest -from paddle.v2.framework.framework import Variable, g_program, Program +from paddle.v2.framework.framework import Variable, g_main_program, Program import paddle.v2.framework.core as core import numpy as np @@ -18,7 +18,7 @@ class TestVariable(unittest.TestCase): self.assertRaises(ValueError, lambda: convert("int8")) def test_var(self): - b = g_program.current_block() + b = g_main_program.current_block() w = b.create_var( dtype="float64", shape=[784, 100], lod_level=0, name="fc.w") self.assertNotEqual(str(w), "") diff --git a/python/paddle/v2/framework/tests/test_word2vec.py b/python/paddle/v2/framework/tests/test_word2vec.py index 2aaf8d6a2b2023416ed8daf93d9a252bd4b0b05c..6c3a448ec79717da6b81f51d9a4dccf038539b79 100644 --- a/python/paddle/v2/framework/tests/test_word2vec.py +++ b/python/paddle/v2/framework/tests/test_word2vec.py @@ -3,13 +3,13 @@ import paddle.v2.framework.layers as layers import paddle.v2.framework.core as core import paddle.v2.framework.optimizer as optimizer -from paddle.v2.framework.framework import Program, g_program +from paddle.v2.framework.framework import Program, g_main_program from paddle.v2.framework.executor import Executor import numpy as np -init_program = Program() -program = Program() +startup_program = Program() +main_program = Program() embed_size = 32 hidden_size = 256 @@ -24,32 +24,32 @@ first_word = layers.data( name='firstw', shape=[1], data_type='int64', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) second_word = layers.data( name='secondw', shape=[1], data_type='int64', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) third_word = layers.data( name='thirdw', shape=[1], data_type='int64', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) forth_word = layers.data( name='forthw', shape=[1], data_type='int64', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) next_word = layers.data( name='nextw', shape=[1], data_type='int64', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) embed_first = layers.embedding( input=first_word, @@ -57,16 +57,16 @@ embed_first = layers.embedding( data_type='float32', is_sparse=is_sparse, param_attr={'name': 'shared_w'}, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) embed_second = layers.embedding( input=second_word, size=[dict_size, embed_size], data_type='float32', is_sparse=is_sparse, param_attr={'name': 'shared_w'}, - program=program, - 
init_program=init_program) + main_program=main_program, + startup_program=startup_program) embed_third = layers.embedding( input=third_word, @@ -74,42 +74,43 @@ embed_third = layers.embedding( data_type='float32', is_sparse=is_sparse, param_attr={'name': 'shared_w'}, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) embed_forth = layers.embedding( input=forth_word, size=[dict_size, embed_size], data_type='float32', is_sparse=is_sparse, param_attr={'name': 'shared_w'}, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) concat_embed = layers.concat( input=[embed_first, embed_second, embed_third, embed_forth], axis=1, - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) hidden1 = layers.fc(input=concat_embed, size=hidden_size, act='sigmoid', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) predict_word = layers.fc(input=hidden1, size=dict_size, act='softmax', - program=program, - init_program=init_program) + main_program=main_program, + startup_program=startup_program) cost = layers.cross_entropy( input=predict_word, label=next_word, - program=program, - init_program=init_program) -avg_cost = layers.mean(x=cost, program=program, init_program=init_program) + main_program=main_program, + startup_program=startup_program) +avg_cost = layers.mean( + x=cost, main_program=main_program, startup_program=startup_program) sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001) -opts = sgd_optimizer.minimize(avg_cost, init_program) +opts = sgd_optimizer.minimize(avg_cost, startup_program) train_reader = paddle.batch( paddle.dataset.imikolov.train(word_dict, N), batch_size) @@ -117,7 +118,7 @@ train_reader = paddle.batch( place = core.CPUPlace() exe = Executor(place) -exe.run(init_program, feed={}, fetch_list=[]) +exe.run(startup_program, feed={}, fetch_list=[]) PASS_NUM = 100 for pass_id in range(PASS_NUM): for data in train_reader(): @@ -145,7 +146,7 @@ for pass_id in range(PASS_NUM): next_tensor = core.LoDTensor() next_tensor.set(next_data, place) - outs = exe.run(program, + outs = exe.run(main_program, feed={ 'firstw': first_tensor, 'secondw': second_tensor, diff --git a/python/paddle/v2/optimizer.py b/python/paddle/v2/optimizer.py index 29f0945eb4c88eab8fa9ee83f455190dfd473aa4..94d706b1d6289a7bffbdfb161c35d44c78fdf46f 100644 --- a/python/paddle/v2/optimizer.py +++ b/python/paddle/v2/optimizer.py @@ -11,11 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -""" -Optimizers(update equation) for SGD method. - -TODO(yuyang18): Complete comments. -""" import paddle.trainer_config_helpers.config_parser_utils as config_parser_utils import paddle.trainer_config_helpers.optimizers as v1_optimizers @@ -101,32 +96,37 @@ class Optimizer(object): class Momentum(Optimizer): """ - SGD Optimizer. - - SGD is an optimization method, trying to find a neural network that - minimize the "cost/error" of it by iteration. In paddle's implementation - SGD Optimizer is synchronized, which means all gradients will be wait to - calculate and reduced into one gradient, then do optimize operation. + Momentum Optimizer. 
- The neural network consider the learning problem of minimizing an objective - function, that has the form of a sum + When sparse=False, the momentum update formula is as follows: .. math:: - Q(w) = \\sum_{i}^{n} Q_i(w) + v_{t} &= k * v_{t-1} - \\gamma_t / (g_{t} + \\lambda w_{t-1}) \\\\ + w_{t} &= w_{t-1} + v_{t} \\\\ - The value of function Q sometimes is the cost of neural network (Mean - Square Error between prediction and label for example). The function Q is - parametrised by w, the weight/bias of neural network. And weights is what to - be learned. The i is the i-th observation in (trainning) data. + where, :math:`k` is momentum, :math:`\\lambda` is decay rate, + :math:`\\gamma_t` is learning rate at the t'th iteration. + :math:`w_{t}` is the weight as the t'th iteration. + And the :math:`v_{t}` is the history momentum variable. - So, the SGD method will optimize the weight by + When sparse=True, the update scheme: .. math:: - w = w - \\eta \\nabla Q(w) = w - \\eta \\sum_{i}^{n} \\nabla Q_i(w) - - where :math:`\\eta` is learning rate. And :math:`n` is batch size. + \\alpha_t &= \\alpha_{t-1} / k \\\\ + \\beta_t &= \\beta_{t-1} / (1 + \\lambda \\gamma_t) \\\\ + u_t &= u_{t-1} - \\alpha_t \\gamma_t g_t \\\\ + v_t &= v_{t-1} + \\tau_{t-1} \\alpha_t \\gamma_t g_t \\\\ + \\tau_t &= \\tau_{t-1} + \\beta_t / \\alpha_t + + where :math:`k` is momentum, :math:`\\lambda` is decay rate, + :math:`\\gamma_t` is learning rate at the t'th iteration. + + :param momentum: the momentum factor. + :type momentum: float + :param sparse: with sparse support or not, False by default. + :type sparse: bool """ def __init__(self, momentum=None, sparse=False, **kwargs): @@ -146,7 +146,7 @@ class Adam(Optimizer): m(w, t) & = \\beta_1 m(w, t-1) + (1 - \\beta_1) \\nabla Q_i(w) \\\\ v(w, t) & = \\beta_2 v(w, t-1) + (1 - \\beta_2)(\\nabla Q_i(w)) ^2 \\\\ - w & = w - \\frac{\\eta}{\\sqrt{v(w,t) + \\epsilon}} + w & = w - \\frac{\\eta m(w, t)}{\\sqrt{v(w,t) + \\epsilon}} :param beta1: the :math:`\\beta_1` in equation. :type beta1: float diff --git a/python/setup.py.in b/python/setup.py.in index 87b3823e52604b889cdee76bc696a1ae9b9de802..5348c2d8d7e9b5adc5fe93e2943bef149ba047cc 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -1,4 +1,4 @@ -from setuptools import setup, Distribution +from setuptools import setup, Distribution, Extension class BinaryDistribution(Distribution): def has_ext_modules(foo): return True @@ -41,6 +41,7 @@ setup(name='paddlepaddle', description='Parallel Distributed Deep Learning', install_requires=setup_requires, packages=packages, + ext_modules=[Extension('_foo', ['stub.cc'])], package_data={ 'paddle.v2.master': ['libpaddle_master.so'], 'paddle.v2.framework': ['core.so'], @@ -54,6 +55,5 @@ setup(name='paddlepaddle', 'py_paddle': '${PADDLE_SOURCE_DIR}/paddle/py_paddle' }, scripts=paddle_bins, - distclass=BinaryDistribution, data_files=[(paddle_rt_lib_dir, paddle_rt_libs)] )
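# Illustrative sketch (not part of the patch): one step of the corrected Adam
# update documented above, w = w - eta * m / sqrt(v + eps), using the first
# and second moment accumulators m and v. Constants and values are made up
# for illustration; no bias correction is applied, matching the docstring.
import numpy as np

beta1, beta2, eta, eps = 0.9, 0.999, 0.01, 1e-8
w = np.array([0.5, -0.3])
m = np.zeros_like(w)
v = np.zeros_like(w)
g = np.array([0.2, -0.1])                  # gradient of the objective at w
m = beta1 * m + (1 - beta1) * g
v = beta2 * v + (1 - beta2) * g ** 2
w = w - eta * m / np.sqrt(v + eps)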