Commit 90e05a4b authored by peterzhang2029

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into add_bn_eq

......@@ -36,8 +36,7 @@ include(simd)
################################ Configurations #######################################
option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_FOUND})
option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND})
option(WITH_MKLDNN "Compile PaddlePaddle with mkl-dnn support." ${AVX_FOUND})
option(WITH_MKLML "Compile PaddlePaddle with mklml package." ${AVX_FOUND})
option(WITH_MKL "Compile PaddlePaddle with MKL support." ${AVX_FOUND})
option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON)
option(WITH_TESTING "Compile PaddlePaddle with unit testing" ON)
option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON)
......@@ -82,10 +81,8 @@ if(ANDROID OR IOS)
"Disable PYTHON when cross-compiling for Android and iOS" FORCE)
set(WITH_RDMA OFF CACHE STRING
"Disable RDMA when cross-compiling for Android and iOS" FORCE)
set(WITH_MKLDNN OFF CACHE STRING
"Disable MKLDNN when cross-compiling for Android and iOS" FORCE)
set(WITH_MKLML OFF CACHE STRING
"Disable MKLML package when cross-compiling for Android and iOS" FORCE)
set(WITH_MKL OFF CACHE STRING
"Disable MKL when cross-compiling for Android and iOS" FORCE)
# Compile PaddlePaddle mobile inference library
if (NOT WITH_C_API)
......@@ -111,6 +108,14 @@ else()
set(THIRD_PARTY_BUILD_TYPE Release)
endif()
set(WITH_MKLML ${WITH_MKL})
if (WITH_MKL AND AVX2_FOUND)
set(WITH_MKLDNN ON)
else()
message(STATUS "AVX2 intrinsics not found; disabling MKL-DNN")
set(WITH_MKLDNN OFF)
endif()
########################################################################################
include(external/mklml) # download mklml package
......@@ -158,14 +163,15 @@ set(EXTERNAL_LIBS
)
if(WITH_GPU)
list(APPEND EXTERNAL_LIBS ${CUDA_LIBRARIES} ${CUDA_rt_LIBRARY})
if(NOT WITH_DSO)
list(APPEND EXTERNAL_LIBS ${CUDNN_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_curand_LIBRARY} ${NCCL_LIBRARY})
endif(NOT WITH_DSO)
include(cuda)
endif(WITH_GPU)
if(WITH_MKLML)
list(APPEND EXTERNAL_LIBS ${MKLML_IOMP_LIB})
endif()
if(WITH_MKLDNN)
list(APPEND EXTERNAL_LIBS ${MKLDNN_LIB} ${MKLDNN_IOMP_LIB})
list(APPEND EXTERNAL_LIBS ${MKLDNN_LIB})
endif()
if(USE_NNPACK)
......
......@@ -12,11 +12,11 @@ Machine:
System: CentOS release 6.3 (Final), Docker 1.12.1.
PaddlePaddle: paddlepaddle/paddle:latest (TODO: will rerun after 0.11.0)
- MKL-DNN tag v0.10
- MKLML 2018.0.20170720
PaddlePaddle: paddlepaddle/paddle:latest (for MKLML and MKL-DNN), paddlepaddle/paddle:latest-openblas (for OpenBLAS)
- MKL-DNN tag v0.11
- MKLML 2018.0.1.20171007
- OpenBLAS v0.2.20
(TODO: will rerun after 0.11.0)
On each machine, we will test and compare the performance of training on a single node using MKL-DNN / MKLML / OpenBLAS respectively.
......@@ -31,15 +31,26 @@ Input image size - 3 * 224 * 224, Time: images/second
| BatchSize | 64 | 128 | 256 |
|--------------|-------| -----| --------|
| OpenBLAS | 7.82 | 8.62 | 10.34 |
| MKLML | 11.02 | 12.86 | 15.33 |
| MKL-DNN | 27.69 | 28.8 | 29.27 |
| OpenBLAS | 7.80 | 9.00 | 10.80 |
| MKLML | 12.12 | 13.70 | 16.18 |
| MKL-DNN | 28.46 | 29.83 | 30.44 |
chart on batch size 128
TBD
- ResNet-50
| BatchSize | 64 | 128 | 256 |
|--------------|-------| ------| -------|
| OpenBLAS | 25.22 | 25.68 | 27.12 |
| MKLML | 32.52 | 31.89 | 33.12 |
| MKL-DNN | 81.69 | 82.35 | 84.08 |
chart on batch size 128
TBD
- ResNet
- GoogLeNet
### Laptop
......
......@@ -5,6 +5,7 @@ height = 224
width = 224
num_class = 1000
batch_size = get_config_arg('batch_size', int, 128)
use_gpu = get_config_arg('use_gpu', bool, True)
args = {'height': height, 'width': width, 'color': True, 'num_class': num_class}
define_py_data_sources2(
......@@ -16,6 +17,8 @@ settings(
learning_method=MomentumOptimizer(0.9),
regularization=L2Regularization(0.0005 * batch_size))
conv_projection = conv_projection if use_gpu else img_conv_layer
def inception2(name, input, channels, \
filter1,
filter3R, filter3,
......@@ -138,7 +141,7 @@ def inception(name, input, channels, \
cat = concat_layer(
name=name,
input=[cov1, cov3, cov5, covprj],
bias_attr=True,
bias_attr=True if use_gpu else False,
act=ReluActivation())
return cat
......
......@@ -40,6 +40,7 @@ fi
for use_mkldnn in True False; do
for batchsize in 64 128 256; do
train vgg 19 $batchsize $use_mkldnn
train resnet 50 $batchsize $use_mkldnn
train googlenet v1 $batchsize $use_mkldnn
done
done
......@@ -76,27 +76,14 @@ else()
include_directories(${CUDA_TOOLKIT_INCLUDE})
endif(NOT WITH_GPU)
if(WITH_MKLDNN)
add_definitions(-DPADDLE_USE_MKLDNN)
if (WITH_MKLML AND MKLDNN_IOMP_DIR)
message(STATUS "Enable Intel OpenMP at ${MKLDNN_IOMP_DIR}")
set(OPENMP_FLAGS "-fopenmp")
set(CMAKE_C_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
set(CMAKE_CXX_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OPENMP_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OPENMP_FLAGS}")
else()
find_package(OpenMP)
if(OPENMP_FOUND)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
else()
message(WARNING "Cannot find OpenMP. "
"Some performance features in MKLDNN may not be available.")
endif()
endif()
endif(WITH_MKLDNN)
if (WITH_MKLML AND MKLML_IOMP_LIB)
message(STATUS "Enable Intel OpenMP with ${MKLML_IOMP_LIB}")
set(OPENMP_FLAGS "-fopenmp")
set(CMAKE_C_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
set(CMAKE_CXX_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OPENMP_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OPENMP_FLAGS}")
endif()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SIMD_FLAG}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SIMD_FLAG}")
......
if(NOT WITH_GPU)
return()
endif()
set(paddle_known_gpu_archs "30 35 50 52 60 61 70")
set(paddle_known_gpu_archs7 "30 35 50 52")
set(paddle_known_gpu_archs8 "30 35 50 52 60 61")
######################################################################################
# A function for automatic detection of GPUs installed (if autodetection is enabled)
# Usage:
# detect_installed_gpus(out_variable)
function(detect_installed_gpus out_variable)
if(NOT CUDA_gpu_detect_output)
set(cufile ${PROJECT_BINARY_DIR}/detect_cuda_archs.cu)
file(WRITE ${cufile} ""
"#include <cstdio>\n"
"int main() {\n"
" int count = 0;\n"
" if (cudaSuccess != cudaGetDeviceCount(&count)) return -1;\n"
" if (count == 0) return -1;\n"
" for (int device = 0; device < count; ++device) {\n"
" cudaDeviceProp prop;\n"
" if (cudaSuccess == cudaGetDeviceProperties(&prop, device))\n"
" std::printf(\"%d.%d \", prop.major, prop.minor);\n"
" }\n"
" return 0;\n"
"}\n")
execute_process(COMMAND "${CUDA_NVCC_EXECUTABLE}" "-ccbin=${CUDA_HOST_COMPILER}"
"--run" "${cufile}"
WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/CMakeFiles/"
RESULT_VARIABLE nvcc_res OUTPUT_VARIABLE nvcc_out
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
if(nvcc_res EQUAL 0)
# only keep the last line of nvcc_out
STRING(REGEX REPLACE ";" "\\\\;" nvcc_out "${nvcc_out}")
STRING(REGEX REPLACE "\n" ";" nvcc_out "${nvcc_out}")
list(GET nvcc_out -1 nvcc_out)
string(REPLACE "2.1" "2.1(2.0)" nvcc_out "${nvcc_out}")
set(CUDA_gpu_detect_output ${nvcc_out} CACHE INTERNAL "Returned GPU architectures from detect_installed_gpus tool" FORCE)
endif()
endif()
if(NOT CUDA_gpu_detect_output)
message(STATUS "Automatic GPU detection failed. Building for all known architectures.")
set(${out_variable} ${paddle_known_gpu_archs} PARENT_SCOPE)
else()
set(${out_variable} ${CUDA_gpu_detect_output} PARENT_SCOPE)
endif()
endfunction()
########################################################################
# Function for selecting GPU arch flags for nvcc based on CUDA_ARCH_NAME
# Usage:
# select_nvcc_arch_flags(out_variable)
function(select_nvcc_arch_flags out_variable)
# List of arch names
set(archs_names "Kepler" "Maxwell" "Pascal" "Volta" "All" "Manual")
set(archs_name_default "All")
if(NOT CMAKE_CROSSCOMPILING)
list(APPEND archs_names "Auto")
endif()
# set CUDA_ARCH_NAME strings (so it will be seen as a dropdown list in the CMake GUI)
set(CUDA_ARCH_NAME ${archs_name_default} CACHE STRING "Select target NVIDIA GPU architecture.")
set_property( CACHE CUDA_ARCH_NAME PROPERTY STRINGS "" ${archs_names} )
mark_as_advanced(CUDA_ARCH_NAME)
# verify CUDA_ARCH_NAME value
if(NOT ";${archs_names};" MATCHES ";${CUDA_ARCH_NAME};")
string(REPLACE ";" ", " archs_names "${archs_names}")
message(FATAL_ERROR "Only ${archs_names} architecture names are supported.")
endif()
if(${CUDA_ARCH_NAME} STREQUAL "Manual")
set(CUDA_ARCH_BIN ${paddle_known_gpu_archs} CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
set(CUDA_ARCH_PTX "50" CACHE STRING "Specify 'virtual' PTX architectures to build PTX intermediate code for")
mark_as_advanced(CUDA_ARCH_BIN CUDA_ARCH_PTX)
else()
unset(CUDA_ARCH_BIN CACHE)
unset(CUDA_ARCH_PTX CACHE)
endif()
if(${CUDA_ARCH_NAME} STREQUAL "Kepler")
set(cuda_arch_bin "30 35")
elseif(${CUDA_ARCH_NAME} STREQUAL "Maxwell")
set(cuda_arch_bin "50")
elseif(${CUDA_ARCH_NAME} STREQUAL "Pascal")
set(cuda_arch_bin "60 61")
elseif(${CUDA_ARCH_NAME} STREQUAL "Volta")
set(cuda_arch_bin "70")
elseif(${CUDA_ARCH_NAME} STREQUAL "All")
set(cuda_arch_bin ${paddle_known_gpu_archs})
elseif(${CUDA_ARCH_NAME} STREQUAL "Auto")
detect_installed_gpus(cuda_arch_bin)
else() # (${CUDA_ARCH_NAME} STREQUAL "Manual")
set(cuda_arch_bin ${CUDA_ARCH_BIN})
endif()
# remove dots and convert to lists
string(REGEX REPLACE "\\." "" cuda_arch_bin "${cuda_arch_bin}")
string(REGEX REPLACE "\\." "" cuda_arch_ptx "${CUDA_ARCH_PTX}")
string(REGEX MATCHALL "[0-9()]+" cuda_arch_bin "${cuda_arch_bin}")
string(REGEX MATCHALL "[0-9]+" cuda_arch_ptx "${cuda_arch_ptx}")
list(REMOVE_DUPLICATES cuda_arch_bin)
list(REMOVE_DUPLICATES cuda_arch_ptx)
set(nvcc_flags "")
set(nvcc_archs_readable "")
# Tell NVCC to add binaries for the specified GPUs
foreach(arch ${cuda_arch_bin})
if(arch MATCHES "([0-9]+)\\(([0-9]+)\\)")
# User explicitly specified PTX for the concrete BIN
list(APPEND nvcc_flags -gencode arch=compute_${CMAKE_MATCH_2},code=sm_${CMAKE_MATCH_1})
list(APPEND nvcc_archs_readable sm_${CMAKE_MATCH_1})
else()
# User didn't explicitly specify PTX for the concrete BIN, we assume PTX=BIN
list(APPEND nvcc_flags -gencode arch=compute_${arch},code=sm_${arch})
list(APPEND nvcc_archs_readable sm_${arch})
endif()
endforeach()
# Tell NVCC to add PTX intermediate code for the specified architectures
foreach(arch ${cuda_arch_ptx})
list(APPEND nvcc_flags -gencode arch=compute_${arch},code=compute_${arch})
list(APPEND nvcc_archs_readable compute_${arch})
endforeach()
string(REPLACE ";" " " nvcc_archs_readable "${nvcc_archs_readable}")
set(${out_variable} ${nvcc_flags} PARENT_SCOPE)
set(${out_variable}_readable ${nvcc_archs_readable} PARENT_SCOPE)
endfunction()
message(STATUS "CUDA detected: " ${CUDA_VERSION})
if (${CUDA_VERSION} LESS 7.0)
set(paddle_known_gpu_archs ${paddle_known_gpu_archs})
elseif (${CUDA_VERSION} LESS 8.0) # CUDA 7.x
set(paddle_known_gpu_archs ${paddle_known_gpu_archs7})
list(APPEND CUDA_NVCC_FLAGS "-D_MWAITXINTRIN_H_INCLUDED")
list(APPEND CUDA_NVCC_FLAGS "-D__STRICT_ANSI__")
elseif (${CUDA_VERSION} LESS 9.0) # CUDA 8.x
set(paddle_known_gpu_archs ${paddle_known_gpu_archs8})
list(APPEND CUDA_NVCC_FLAGS "-D_MWAITXINTRIN_H_INCLUDED")
list(APPEND CUDA_NVCC_FLAGS "-D__STRICT_ANSI__")
# CUDA 8 may complain that sm_20 is no longer supported. Suppress the
# warning for now.
list(APPEND CUDA_NVCC_FLAGS "-Wno-deprecated-gpu-targets")
endif()
include_directories(${CUDA_INCLUDE_DIRS})
list(APPEND EXTERNAL_LIBS ${CUDA_LIBRARIES} ${CUDA_rt_LIBRARY})
if(NOT WITH_DSO)
list(APPEND EXTERNAL_LIBS ${CUDNN_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_curand_LIBRARY} ${NCCL_LIBRARY})
endif(NOT WITH_DSO)
# setting nvcc arch flags
select_nvcc_arch_flags(NVCC_FLAGS_EXTRA)
list(APPEND CUDA_NVCC_FLAGS ${NVCC_FLAGS_EXTRA})
message(STATUS "Added CUDA NVCC flags for: ${NVCC_FLAGS_EXTRA_readable}")
# Set C++11 support
set(CUDA_PROPAGATE_HOST_FLAGS OFF)
# Release/Debug flags set by cmake. Such as -O3 -g -DNDEBUG etc.
# So, don't set these flags here.
list(APPEND CUDA_NVCC_FLAGS "-std=c++11")
list(APPEND CUDA_NVCC_FLAGS "--use_fast_math")
list(APPEND CUDA_NVCC_FLAGS "-Xcompiler -fPIC")
# Set --expt-relaxed-constexpr to suppress Eigen warnings
list(APPEND CUDA_NVCC_FLAGS "--expt-relaxed-constexpr")
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_DEBUG})
elseif(CMAKE_BUILD_TYPE STREQUAL "Release")
list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELEASE})
elseif(CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo")
list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELWITHDEBINFO})
elseif(CMAKE_BUILD_TYPE STREQUAL "MinSizeRel")
list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_MINSIZEREL})
endif()
mark_as_advanced(CUDA_BUILD_CUBIN CUDA_BUILD_EMULATION CUDA_VERBOSE_BUILD)
mark_as_advanced(CUDA_SDK_ROOT_DIR CUDA_SEPARABLE_COMPILATION)
......@@ -40,10 +40,9 @@ INCLUDE_DIRECTORIES(${MKLDNN_INC_DIR})
IF(${CBLAS_PROVIDER} STREQUAL "MKLML")
SET(MKLDNN_DEPENDS ${MKLML_PROJECT})
SET(MKLDNN_MKLROOT ${MKLML_ROOT})
SET(MKLDNN_IOMP_LIB ${MKLML_IOMP_LIB})
SET(MKLDNN_IOMP_DIR ${MKLML_LIB_DIR})
MESSAGE(STATUS "Build MKLDNN with ${MKLDNN_MKLROOT}")
MESSAGE(STATUS "Build MKLDNN with MKLML ${MKLML_ROOT}")
ELSE()
MESSAGE(FATAL_ERROR "MKLML should be enabled when building MKLDNN")
ENDIF()
SET(MKLDNN_CFLAG "${CMAKE_C_FLAGS} -Wno-error=strict-overflow")
......@@ -57,15 +56,16 @@ ExternalProject_Add(
PREFIX ${MKLDNN_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${MKLDNN_INSTALL_DIR}
CMAKE_ARGS -DMKLROOT=${MKLDNN_MKLROOT}
CMAKE_ARGS -DMKLROOT=${MKLML_ROOT}
CMAKE_ARGS -DCMAKE_C_FLAGS=${MKLDNN_CFLAG}
CMAKE_ARGS -DCMAKE_CXX_FLAGS=${MKLDNN_CXXFLAG}
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${MKLDNN_INSTALL_DIR}
-DMKLROOT:PATH=${MKLDNN_MKLROOT}
-DMKLROOT:PATH=${MKLML_ROOT}
)
ADD_LIBRARY(mkldnn SHARED IMPORTED GLOBAL)
SET_PROPERTY(TARGET mkldnn PROPERTY IMPORTED_LOCATION ${MKLDNN_LIB})
ADD_DEPENDENCIES(mkldnn ${MKLDNN_PROJECT})
MESSAGE(STATUS "Mkldnn library: ${MKLDNN_LIB}")
MESSAGE(STATUS "MKLDNN library: ${MKLDNN_LIB}")
add_definitions(-DPADDLE_USE_MKLDNN)
LIST(APPEND external_project_dependencies mkldnn)
......@@ -149,58 +149,3 @@ endforeach()
foreach(flag ${GPU_COMMON_FLAGS})
safe_set_nvflag(${flag})
endforeach()
set(CUDA_PROPAGATE_HOST_FLAGS OFF)
# Release/Debug flags set by cmake. Such as -O3 -g -DNDEBUG etc.
# So, don't set these flags here.
LIST(APPEND CUDA_NVCC_FLAGS -std=c++11)
LIST(APPEND CUDA_NVCC_FLAGS --use_fast_math)
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
LIST(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_DEBUG})
elseif(CMAKE_BUILD_TYPE STREQUAL "Release")
LIST(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELEASE})
elseif(CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo")
LIST(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELWITHDEBINFO})
elseif(CMAKE_BUILD_TYPE STREQUAL "MinSizeRel")
LIST(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_MINSIZEREL})
endif()
function(specify_cuda_arch cuda_version cuda_arch)
if(${cuda_version} VERSION_GREATER "8.0")
foreach(capability 61 62)
if(${cuda_arch} STREQUAL ${capability})
list(APPEND __arch_flags " -gencode arch=compute_${cuda_arch},code=sm_${cuda_arch}")
endif()
endforeach()
elseif(${cuda_version} VERSION_GREATER "7.0" AND ${cuda_arch} STREQUAL "53")
list(APPEND __arch_flags " -gencode arch=compute_${cuda_arch},code=sm_${cuda_arch}")
endif()
endfunction()
# Common gpu architectures: Kepler, Maxwell
foreach(capability 30 35 50)
list(APPEND __arch_flags " -gencode arch=compute_${capability},code=sm_${capability}")
endforeach()
if (CUDA_VERSION VERSION_GREATER "7.0" OR CUDA_VERSION VERSION_EQUAL "7.0")
list(APPEND __arch_flags " -gencode arch=compute_52,code=sm_52")
endif()
# Modern gpu architectures: Pascal
if (CUDA_VERSION VERSION_GREATER "8.0" OR CUDA_VERSION VERSION_EQUAL "8.0")
list(APPEND __arch_flags " -gencode arch=compute_60,code=sm_60")
list(APPEND CUDA_NVCC_FLAGS --expt-relaxed-constexpr)
endif()
# Custom gpu architecture
set(CUDA_ARCH)
if(CUDA_ARCH)
specify_cuda_arch(${CUDA_VERSION} ${CUDA_ARCH})
endif()
set(CUDA_NVCC_FLAGS ${__arch_flags} ${CUDA_NVCC_FLAGS})
......@@ -115,8 +115,8 @@ function(link_paddle_exe TARGET_NAME)
target_link_libraries(${TARGET_NAME} log)
endif(ANDROID)
if(WITH_MKLDNN AND WITH_MKLML AND MKLDNN_IOMP_DIR)
target_link_libraries(${TARGET_NAME} "-L${MKLDNN_IOMP_DIR} -liomp5 -Wl,--as-needed")
if(WITH_MKLML AND MKLML_LIB_DIR AND MKLML_IOMP_LIB)
target_link_libraries(${TARGET_NAME} "-L${MKLML_LIB_DIR} -liomp5 -Wl,--as-needed")
endif()
add_dependencies(${TARGET_NAME} ${external_project_dependencies})
......
......@@ -335,6 +335,16 @@ bilinear_interp
.. autoclass:: paddle.v2.layer.bilinear_interp
:noindex:
dot_prod
---------
.. autoclass:: paddle.v2.layer.dot_prod
:noindex:
out_prod
--------
.. autoclass:: paddle.v2.layer.out_prod
:noindex:
power
-----
.. autoclass:: paddle.v2.layer.power
......@@ -372,6 +382,11 @@ cos_sim
.. autoclass:: paddle.v2.layer.cos_sim
:noindex:
l2_distance
-----------
.. autoclass:: paddle.v2.layer.l2_distance
:noindex:
trans
-----
.. autoclass:: paddle.v2.layer.trans
......
......@@ -36,13 +36,13 @@ Figure 1. PaddlePaddle on IA.
We roughly divide the integration plan into the following aspects.
### CMake
We will add a `WITH_MKLDNN` option to `CMakeLists.txt`; when it is set to `ON`, building the MKL-DNN functionality is enabled, and OpenMP is enabled automatically to improve MKL-DNN's performance.
We will add a `WITH_MKL` switch to `CMakeLists.txt` for the user; it is the master switch controlling both `WITH_MKLML` and `WITH_MKLDNN`.
We will also introduce a `WITH_MKLML` option to choose whether to use the MKLML package shipped with MKL-DNN. This package can be used independently of MKL-DNN, but enabling MKLML together with MKL-DNN is recommended to achieve the best performance.
When `WITH_MKL` is ON, MKLML is enabled and serves as PaddlePaddle's CBLAS and LAPACK library, and Intel OpenMP is enabled to improve MKLML's performance. If the system supports the AVX2 instruction set or above, MKL-DNN is enabled as well.
Therefore, we will create `mkldnn.cmake` and `mklml.cmake` files under the `cmake/external` directory; they download the corresponding packages while PaddlePaddle is being built and place them in PaddlePaddle's third-party directory.
When `WITH_MKL` is OFF, both MKLML and MKL-DNN are disabled.
**Note**: when `WITH_MKLML=ON`, this package is preferred as PaddlePaddle's CBLAS and LAPACK library, so the logic in `cmake/cblas.cmake` is adjusted slightly.
Therefore, we will create `mkldnn.cmake` and `mklml.cmake` files under the `cmake/external` directory; they download the corresponding packages while PaddlePaddle is being built and place them in PaddlePaddle's third-party directory.
### Layers
All MKL-DNN related C++ layers will be stored, following PaddlePaddle's directory structure, in
......
......@@ -34,7 +34,7 @@ There are two ways to build the PaddlePaddle documentation.
cd TO_YOUR_PADDLE_CLONE_PATH
mkdir -p build
cd build
cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKLDNN=OFF -DWITH_MKLML=OFF -DWITH_DOC=ON
cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON
make gen_proto_py
make paddle_docs paddle_docs_cn
......
......@@ -513,19 +513,14 @@ ParamGradInfoMap AppendBackward(
const int root_block_idx = 0;
auto root_block = program_desc.MutableBlock(root_block_idx);
// insert fill one op for target
// TODO(qiao) add some check to the target.
std::string fill_one_op_out = GradVarName(target.Name());
std::vector<int64_t> target_shape_desc = target.Shape();
std::vector<int> target_shape;
std::transform(target_shape_desc.begin(), target_shape_desc.end(),
std::back_inserter(target_shape),
[](int64_t dim) { return static_cast<int>(dim); });
bool is_scalar = target.Shape() == std::vector<int64_t>{1};
PADDLE_ENFORCE(is_scalar, "target should be scalar");
VLOG(3) << "backward from loss=" << target.Name()
<< " data_type=" << target.GetDataType();
std::unique_ptr<OpDescBind> fill_one_op(
new OpDescBind("fill_constant", {}, {{"Out", {fill_one_op_out}}},
{{"shape", target_shape},
{{"shape", std::vector<int>{1}},
{"value", static_cast<float>(1.0)},
{"data_type", target.GetDataType()}}));
// infer var type of fill_one_op
......
......@@ -508,6 +508,7 @@ TEST(Backward, simple_single_op) {
op->SetOutput("Out", {"out"});
auto target = f::VarDescBind("out");
target.SetShape({1});
auto var_to_grad = AppendBackward(program, target, {});
ASSERT_EQ(block->AllOps().size(), 3UL);
......@@ -544,6 +545,7 @@ TEST(Backward, default_attribute) {
op->CheckAttrs();
auto target = f::VarDescBind("out");
target.SetShape({1});
AppendBackward(program, target, {});
ASSERT_EQ(block->AllOps().size(), 3UL);
......@@ -581,6 +583,7 @@ TEST(Backward, simple_mult_op) {
op3->SetOutput("Out", {"out3"});
auto target = f::VarDescBind("out3");
target.SetShape({1});
size_t forward_len = block->AllOps().size();
auto var_to_grad = AppendBackward(program, target, {});
......@@ -670,6 +673,7 @@ TEST(Backward, intermedia_var_no_grad) {
op4->SetOutput("Out", {"out4"});
auto target = f::VarDescBind("out4");
target.SetShape({1});
size_t forward_len = block->AllOps().size();
auto var_to_grad = AppendBackward(program, target, {"out3"});
......@@ -730,6 +734,7 @@ TEST(Backward, var_no_grad) {
op2->SetOutput("Z", {"z2"});
auto target = f::VarDescBind("z2");
target.SetShape({1});
size_t forward_len = block->AllOps().size();
auto var_to_grad = AppendBackward(program, target, {"z1"});
......@@ -810,6 +815,7 @@ TEST(Backward, shared_var) {
op3->SetOutput("Out", {"out3"});
auto target = f::VarDescBind("out3");
target.SetShape({1});
size_t forward_len = block->AllOps().size();
auto var_to_grad = AppendBackward(program, target, {});
......@@ -888,6 +894,7 @@ TEST(Backward, half_backward) {
op1->SetOutput("Out", {"out"});
auto target = f::VarDescBind("out");
target.SetShape({1});
size_t forward_len = block->AllOps().size();
auto var_to_grad = AppendBackward(program, target, {"b"});
f::OpDescBind *fill_op = block->AllOps()[forward_len];
......
......@@ -46,6 +46,8 @@ inline std::type_index ToTypeIndex(DataType type) {
return typeid(int);
case DataType::INT64:
return typeid(int64_t);
case DataType::BOOL:
return typeid(bool);
default:
PADDLE_THROW("Not support type %d", type);
}
......@@ -66,6 +68,9 @@ inline void VisitDataType(DataType type, Visitor visitor) {
case DataType::INT64:
visitor.template operator()<int64_t>();
break;
case DataType::BOOL:
visitor.template operator()<bool>();
break;
default:
PADDLE_THROW("Not supported");
}
......
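For reference, the visitor contract that `VisitDataType` above relies on, shown as a standalone sketch (the enum, dispatcher, and `SizeOfVisitor` here are illustrative stand-ins, not PaddlePaddle APIs):

#include <cstdint>
#include <cstdio>

enum class ToyDataType { FP32, INT64, BOOL };

struct SizeOfVisitor {  // hypothetical visitor: reports sizeof(T) picked at runtime
  template <typename T>
  void operator()() const {
    std::printf("element size: %zu bytes\n", sizeof(T));
  }
};

template <typename Visitor>
void ToyVisitDataType(ToyDataType type, Visitor visitor) {
  switch (type) {
    case ToyDataType::FP32:  visitor.template operator()<float>();   break;
    case ToyDataType::INT64: visitor.template operator()<int64_t>(); break;
    case ToyDataType::BOOL:  visitor.template operator()<bool>();    break;  // the case added here
  }
}

int main() {
  ToyVisitDataType(ToyDataType::BOOL, SizeOfVisitor{});  // prints "element size: 1 bytes"
  return 0;
}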
......@@ -73,7 +73,6 @@ if(MOBILE_INFERENCE)
list(REMOVE_ITEM GSERVER_SOURCES
dataproviders/DataProvider.cpp
dataproviders/MultiDataProvider.cpp
dataproviders/ProtoDataProvider.cpp
dataproviders/PyDataProvider2.cpp
dataproviders/PyDataProvider.cpp)
......
......@@ -212,6 +212,37 @@ Error __must_check backward(Argument& act) {
}
END_DEFINE_ACTIVATION(sequence_softmax)
/**
* @brief SoftSign Activation.
* \f[
* f(z) = \frac{z}{1 + |z|}
* \f]
*/
BEGIN_DEFINE_ACTIVATION(softsign)
private:
MatrixPtr denominator_;
Error __must_check forward(Argument& act) {
size_t height = act.value->getHeight();
size_t width = act.value->getWidth();
Matrix::resizeOrCreate(
denominator_, height, width, false, useGpu(act.deviceId));
denominator_->assign(*act.value);
denominator_->abs2();
denominator_->add(1.);
act.value->dotDiv(*act.value, *denominator_);
return Error();
}
Error __must_check backward(Argument& act) {
denominator_->square2();
denominator_->scalarDiv(*denominator_, 1.);
act.grad->dotMul(*act.grad, *denominator_);
return Error();
}
END_DEFINE_ACTIVATION(softsign)
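As a sanity check on the softsign math above (a standalone sketch using plain doubles, independent of the Matrix API): forward caches denominator_ = 1 + |z| and computes f(z) = z / (1 + |z|); backward squares the cached denominator and takes its reciprocal via scalarDiv, i.e. it multiplies the incoming gradient by f'(z) = 1 / (1 + |z|)^2.

#include <cassert>
#include <cmath>
#include <cstdio>

int main() {
  for (double z : {-2.0, -0.5, 0.0, 0.5, 2.0}) {
    double denom = 1.0 + std::fabs(z);
    double f = z / denom;                     // forward: f(z) = z / (1 + |z|)
    double analytic = 1.0 / (denom * denom);  // backward factor: 1 / (1 + |z|)^2
    const double eps = 1e-6;                  // central-difference check
    double fp = (z + eps) / (1.0 + std::fabs(z + eps));
    double fm = (z - eps) / (1.0 + std::fabs(z - eps));
    assert(std::fabs((fp - fm) / (2 * eps) - analytic) < 1e-5);
    std::printf("z=%5.2f  f=%6.3f  f'=%6.3f\n", z, f, analytic);
  }
  return 0;
}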
/**
* @brief Relu Activation.
* forward. y = max(0, z)
......
......@@ -16,8 +16,8 @@ limitations under the License. */
#include <unistd.h>
#include <algorithm>
#include "ProtoDataProvider.h"
#include "paddle/utils/Logging.h"
#include "paddle/utils/Stat.h"
#include "paddle/utils/StringUtil.h"
#include "paddle/utils/Util.h"
......@@ -164,8 +164,6 @@ DataProvider* DataProvider::create(const DataConfig& config,
REGISTER_DATA_PROVIDER(simple, SimpleDataProvider);
REGISTER_DATA_PROVIDER(dummy, DummyDataProvider);
REGISTER_DATA_PROVIDER(proto, ProtoDataProvider);
REGISTER_DATA_PROVIDER(proto_sequence, ProtoSequenceDataProvider);
int64_t DataProvider::getNextBatch(int64_t size, DataBatch* batch) {
int64_t batchSize = doubleBuffer_ ? getNextBatchFromBuffer(size, batch)
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "ProtoDataProvider.h"
#include <algorithm>
#include <fstream>
#include <istream>
#include "paddle/utils/StringUtil.h"
#include "paddle/utils/Util.h"
#include "DataProviderGroup.h"
#include "paddle/utils/Logging.h"
DEFINE_double(memory_threshold_on_load_data,
1.0,
"stop loading data when memory is not sufficient");
namespace paddle {
REGISTER_DATA_PROVIDER(proto_group, DataProviderGroup<ProtoDataProvider>);
REGISTER_DATA_PROVIDER(proto_sequence_group,
DataProviderGroup<ProtoSequenceDataProvider>);
ProtoDataProvider::ProtoDataProvider(const DataConfig& config,
bool useGpu,
bool loadDataAll)
: DataProvider(config, useGpu), sampleNums_(0), currentSequenceIndex_(0) {
if (loadDataAll) {
loadData(config_.files());
}
}
void ProtoDataProvider::loadData(const std::vector<std::string>& fileList) {
for (auto& file : fileList) {
if (FLAGS_memory_threshold_on_load_data < 1.0) {
double memUsage = getMemoryUsage();
if (memUsage > FLAGS_memory_threshold_on_load_data) {
LOG(INFO) << "memUsage is " << memUsage << ", > "
<< FLAGS_memory_threshold_on_load_data
<< " therefore SKIP ALL REMAINING files.";
break;
}
}
LOG(INFO) << "load data file " << file;
loadDataFile(file);
}
if (sequenceStartPositions_.size() == sampleNums_) {
// This means that each sample is one sequence
shuffledSequenceIds_.swap(sequenceStartPositions_);
} else {
sequenceStartPositions_.push_back(sampleNums_);
shuffledSequenceIds_.reserve(sequenceStartPositions_.size() - 1);
for (size_t i = 0; i < sequenceStartPositions_.size() - 1; ++i) {
shuffledSequenceIds_.push_back(i);
}
}
LOG(INFO) << "read done, num of instance=" << sampleNums_;
showDataStats();
}
void ProtoDataProvider::loadData(const std::string& fileName) {
std::vector<std::string> fileList;
loadFileList(fileName, fileList);
loadData(fileList);
}
void ProtoDataProvider::checkDataHeader(const DataHeader& header) {
if (header_.slot_defs_size()) {
// header_ is already set. Need to check consistency.
CHECK_EQ(header_.slot_defs_size(), header.slot_defs_size())
<< "Different header";
for (int i = 0; i < header.slot_defs_size(); ++i) {
CHECK_EQ(header_.slot_defs(i).type(), header.slot_defs(i).type());
CHECK_EQ(header_.slot_defs(i).dim(), header.slot_defs(i).dim());
}
return;
}
// header_ is not set before
CHECK(header.slot_defs_size()) << "Invalid header: no slot is defined";
int i;
for (i = 0; i < header.slot_defs_size(); ++i) {
if (header.slot_defs(i).type() == SlotDef::INDEX ||
header.slot_defs(i).type() == SlotDef::VAR_MDIM_INDEX) {
break;
}
constexpr int kBufLen = 100;
char buf[kBufLen];
snprintf(buf, kBufLen, "slot%d_nnz", i);
nnzStats_.push_back(getStat(buf));
}
numVecSlots_ = i;
// Check that INDEX slots are after VECTOR slots
for (int i = numVecSlots_; i < header.slot_defs_size(); ++i) {
CHECK(header.slot_defs(i).type() == SlotDef::INDEX ||
header.slot_defs(i).type() == SlotDef::VAR_MDIM_INDEX);
}
slots_.clear();
slots_.reserve(header.slot_defs_size());
for (int i = 0; i < header.slot_defs_size(); ++i) {
slots_.emplace_back();
slots_.back().type = header.slot_defs(i).type();
slots_.back().dim = header.slot_defs(i).dim();
if (SlotDef::VECTOR_SPARSE_NON_VALUE == header.slot_defs(i).type() ||
SlotDef::VECTOR_SPARSE_VALUE == header.slot_defs(i).type()) {
slots_.back().indices.push_back(0);
}
}
header_ = header;
}
void ProtoDataProvider::checkSample(const DataSample& sample) {
CHECK_EQ(numVecSlots_, sample.vector_slots_size());
CHECK(header_.slot_defs_size() == numVecSlots_ + sample.id_slots_size() ||
header_.slot_defs_size() == numVecSlots_ + sample.var_id_slots_size());
for (int i = 0; i < numVecSlots_; ++i) {
uint32_t dim = header_.slot_defs(i).dim();
switch (header_.slot_defs(i).type()) {
case SlotDef::VECTOR_DENSE: {
CHECK_EQ(static_cast<int>(dim), sample.vector_slots(i).values_size());
CHECK_EQ(0, sample.vector_slots(i).ids_size());
break;
}
case SlotDef::VECTOR_SPARSE_NON_VALUE: {
if (0 == sample.vector_slots(i).ids_size()) {
break;
}
CHECK_LT(0, sample.vector_slots(i).ids_size());
CHECK_EQ(0, sample.vector_slots(i).values_size());
auto maxId = *std::max_element(sample.vector_slots(i).ids().begin(),
sample.vector_slots(i).ids().end());
CHECK_GT(dim, maxId);
break;
}
case SlotDef::VECTOR_SPARSE_VALUE: {
if (0 == sample.vector_slots(i).ids_size()) {
CHECK_EQ(0, sample.vector_slots(i).values_size());
break;
}
CHECK_LT(0, sample.vector_slots(i).values_size());
CHECK_GE(static_cast<int>(dim), sample.vector_slots(i).values_size());
CHECK_EQ(sample.vector_slots(i).values_size(),
sample.vector_slots(i).ids_size());
auto maxId = *std::max_element(sample.vector_slots(i).ids().begin(),
sample.vector_slots(i).ids().end());
CHECK_GT(dim, maxId);
break;
}
case SlotDef::VAR_MDIM_DENSE: {
if (static_cast<int>(dim) != 0) {
CHECK_EQ(static_cast<int>(dim), sample.vector_slots(i).values_size());
if (sample.vector_slots(i).dims_size() != 0) {
int totalDim = sample.vector_slots(i).dims(0);
for (int j = 1; j < sample.vector_slots(i).dims_size(); ++j) {
totalDim *= sample.vector_slots(i).dims(j);
}
CHECK_EQ(static_cast<int>(dim), totalDim);
}
} else {
CHECK_NE(sample.vector_slots(i).dims_size(), 0);
int totalDim = sample.vector_slots(i).dims(0);
for (int j = 1; j < sample.vector_slots(i).dims_size(); ++j) {
totalDim *= sample.vector_slots(i).dims(j);
}
CHECK_EQ(totalDim, sample.vector_slots(i).values_size());
}
break;
}
case SlotDef::STRING: {
CHECK_EQ(static_cast<int>(1), sample.vector_slots(i).strs_size());
CHECK_EQ(0, sample.vector_slots(i).ids_size());
CHECK_EQ(0, sample.vector_slots(i).values_size());
break;
}
default:
LOG(FATAL) << "BUG: Should not reach here";
}
}
for (int i = numVecSlots_; i < header_.slot_defs_size(); ++i) {
if (header_.slot_defs(i).type() != SlotDef::VAR_MDIM_INDEX) {
uint32_t id = sample.id_slots(i - numVecSlots_);
if (id == -1U) continue;
CHECK_LT(id, header_.slot_defs(i).dim());
} else {
for (int j = 0; j < sample.var_id_slots(i - numVecSlots_).ids_size();
++j) {
uint32_t id = sample.var_id_slots(i - numVecSlots_).ids(j);
CHECK_LT(id, header_.slot_defs(i).dim());
}
}
}
}
void ProtoDataProvider::loadDataFile(const std::string& fileName) {
std::ifstream is(fileName);
CHECK(is) << "Fail to open " << fileName;
bool dataCompression = str::endsWith(fileName, ".gz");
std::unique_ptr<ProtoReader> reader(new ProtoReader(&is, dataCompression));
CHECK(reader) << "Fail to create proto data input stream";
DataHeader header;
CHECK(reader->read(&header));
checkDataHeader(header);
DataSample sample;
do {
if (!reader->read(&sample)) {
break;
}
checkSample(sample);
if (sample.is_beginning()) {
sequenceStartPositions_.push_back(sampleNums_);
}
fillSlots(sample);
++sampleNums_;
} while (true);
CHECK(is.eof()) << "Fail to read file";
reader.reset(nullptr);
is.close();
}
// checkSample has been done before, so no check here
void ProtoDataProvider::fillSlots(const DataSample& sample) {
for (size_t i = 0; i < slots_.size(); ++i) {
auto& slot = slots_[i];
int dim = slot.dim;
switch (slot.type) {
case SlotDef::VECTOR_DENSE: {
size_t oldSize = slot.denseData.size();
slot.denseData.resize(oldSize + dim);
const float* values = sample.vector_slots(i).values().data();
#ifdef PADDLE_TYPE_DOUBLE
std::copy(values, values + dim, slot.denseData.begin() + oldSize);
#else
memcpy(slot.denseData.data() + oldSize, values, sizeof(real) * dim);
#endif
break;
}
case SlotDef::VECTOR_SPARSE_NON_VALUE: {
int slotSize = sample.vector_slots(i).ids_size();
int subSlotSize = 0;
int id = 0; // the slot id
// find whether this vector_slots has a subseq. If it has no subseq,
// subSlotSize stays 0.
for (id = 0; id < sample.subseq_slots_size(); id++) {
if (sample.subseq_slots(id).slot_id() == i) {
subSlotSize = sample.subseq_slots(id).lens_size();
break;
}
}
if (subSlotSize && slot.subIndices.size() == 0UL) {
// If has subSeq, the first element of subIndices = 0.
slot.subIndices.push_back(0);
}
if (slotSize == 0UL) {
// if has no id, new indices = old indices.
slot.indices.push_back(slot.indices.back());
// if has subSeq, new subIndices = old subIndices.
if (slot.subIndices.size()) {
slot.subIndices.push_back(slot.subIndices.back());
}
break;
}
slot.sparseNonValueData.resize(slot.indices.back() + slotSize);
const unsigned int* ids = sample.vector_slots(i).ids().data();
memcpy(slot.sparseNonValueData.data() + slot.indices.back(),
ids,
sizeof(*ids) * slotSize);
slot.indices.push_back(slot.indices.back() + slotSize);
if (subSlotSize) {
for (int ii = 0; ii < subSlotSize; ++ii) {
slot.subIndices.push_back(slot.subIndices.back() +
sample.subseq_slots(id).lens(ii));
}
}
break;
}
case SlotDef::VECTOR_SPARSE_VALUE: {
if (0 == sample.vector_slots(i).ids_size()) {
slot.indices.push_back(slot.indices.back());
break;
}
int slotSize = sample.vector_slots(i).ids_size();
slot.sparseFloatValueData.resize(slot.indices.back() + slotSize);
const unsigned int* ids = sample.vector_slots(i).ids().data();
const float* values = sample.vector_slots(i).values().data();
for (int ii = 0; ii < slotSize; ++ii) {
slot.sparseFloatValueData[slot.indices.back() + ii].col = ids[ii];
slot.sparseFloatValueData[slot.indices.back() + ii].value =
values[ii];
}
slot.indices.push_back(slot.indices.back() + slotSize);
break;
}
case SlotDef::INDEX: {
slot.indexData.push_back(sample.id_slots(i - numVecSlots_));
break;
}
case SlotDef::VAR_MDIM_DENSE: {
size_t oldSize = slot.varDenseData.size();
slot.varDenseData.resize(oldSize + 1);
size_t varDim = sample.vector_slots(i).values_size();
slot.varDenseData[oldSize].data.resize(varDim);
const float* values = sample.vector_slots(i).values().data();
#ifdef PADDLE_TYPE_DOUBLE
std::copy(
values, values + varDim, slot.varDenseData[oldSize].data.data());
#else
memcpy(slot.varDenseData[oldSize].data.data(),
values,
sizeof(real) * varDim);
#endif
slot.varDenseData[oldSize].dims.resize(
sample.vector_slots(i).dims_size());
memcpy(slot.varDenseData[oldSize].dims.data(),
sample.vector_slots(i).dims().data(),
sizeof(uint32_t) * sample.vector_slots(i).dims_size());
break;
}
case SlotDef::VAR_MDIM_INDEX: {
size_t oldSize = slot.varIndices.size();
slot.varIndices.resize(oldSize + 1);
size_t varDim = sample.var_id_slots(i - numVecSlots_).ids_size();
slot.varIndices[oldSize].resize(varDim);
memcpy(slot.varIndices[oldSize].data(),
sample.var_id_slots(i - numVecSlots_).ids().data(),
sizeof(uint32_t) * varDim);
break;
}
case SlotDef::STRING: {
slot.strData.push_back(sample.vector_slots(i).strs(0));
break;
}
}
}
}
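The sparse branches above append in CSR style: slot.indices holds per-sample row offsets (seeded with a single 0 in checkDataHeader) and sparseNonValueData holds the concatenated column ids; an empty sample simply repeats the previous offset. A toy standalone sketch of that append rule, with plain vectors instead of ProtoSlot:

#include <cstdio>
#include <vector>

int main() {
  std::vector<long> indices = {0};  // row offsets, seeded with 0
  std::vector<unsigned> ids;        // concatenated sparse column ids
  const std::vector<std::vector<unsigned>> samples = {{1, 4, 7}, {}, {2, 3}};
  for (const auto& s : samples) {
    ids.insert(ids.end(), s.begin(), s.end());
    indices.push_back(indices.back() + static_cast<long>(s.size()));
  }
  for (size_t row = 0; row + 1 < indices.size(); ++row)
    std::printf("sample %zu: ids[%ld, %ld)\n", row, indices[row], indices[row + 1]);
  return 0;  // prints ranges [0,3), [3,3), [3,5)
}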
void ProtoDataProvider::showDataStats() {
std::ostringstream oss;
for (size_t i = 0; i < slots_.size(); ++i) {
auto& slot = slots_[i];
if (slot.type == SlotDef::VECTOR_SPARSE_NON_VALUE) {
size_t nnz = slot.sparseNonValueData.size();
oss << "slot" << i << ":avgNNZ=" << ((double)nnz / sampleNums_) << "; ";
} else if (slot.type == SlotDef::VECTOR_SPARSE_VALUE) {
size_t nnz = slot.sparseFloatValueData.size();
oss << "slot" << i << ":avgNNZ=" << ((double)nnz / sampleNums_) << "; ";
}
}
LOG(INFO) << oss.str();
}
void ProtoDataProvider::reset() {
currentSequenceIndex_ = 0;
if (!skipShuffle_) {
shuffle();
}
DataProvider::reset();
}
void ProtoDataProvider::shuffle() {
std::shuffle(shuffledSequenceIds_.begin(),
shuffledSequenceIds_.end(),
ThreadLocalRandomEngine::get());
}
/*
Loop through sequences starting from currentSequenceIndex_
for at most size samples. For each sequence ranging from [begin, end),
op(begin, end) will be called.
return the number of sequences scanned
*/
template <class Op>
int64_t ProtoDataProvider::sequenceLoop(Op op, int64_t size) {
int64_t sz = 0;
size_t i;
size_t sequenceCount = shuffledSequenceIds_.size();
if (usageRatio_ < 1.0f) {
sequenceCount = static_cast<int64_t>(sequenceCount * usageRatio_);
}
for (i = currentSequenceIndex_; i < sequenceCount; ++i) {
size_t id = shuffledSequenceIds_[i];
int64_t begin = sequenceStartPositions_[id];
int64_t end = sequenceStartPositions_[id + 1];
int64_t len = end - begin;
if (sz + len > size && sz > 0) break;
sz += len;
op(begin, end);
}
return i - currentSequenceIndex_;
}
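A standalone sketch of the batching rule implemented by sequenceLoop (toy offsets, not the real provider state): whole sequences are consumed until the next one would overflow the size budget, but at least one sequence is always taken.

#include <cstdio>
#include <vector>

int main() {
  const std::vector<int> starts = {0, 3, 5, 9, 10};  // 4 sequences
  const int size = 6;                                // batch budget in samples
  int sz = 0, scanned = 0;
  for (size_t id = 0; id + 1 < starts.size(); ++id) {
    int len = starts[id + 1] - starts[id];
    if (sz + len > size && sz > 0) break;  // same guard as sequenceLoop
    sz += len;
    ++scanned;
    std::printf("take sequence %zu: [%d, %d)\n", id, starts[id], starts[id + 1]);
  }
  std::printf("scanned=%d samples=%d\n", scanned, sz);  // scanned=2 samples=5
  return 0;
}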
/*
Loop through sequences starting from currentSequenceIndex_
for at most size samples. For each sample of each sequence at position
pos, op(pos) will be called.
return the number of sequences scanned
*/
template <class Op>
int64_t ProtoDataProvider::sampleLoop(Op op, int64_t size) {
if (iidData()) {
size = std::min<int64_t>(sampleNums_ - currentSequenceIndex_, size);
for (int64_t i = currentSequenceIndex_; i < currentSequenceIndex_ + size;
++i) {
size_t pos = shuffledSequenceIds_[i];
op(pos);
}
return size;
} else {
auto f = [op](int64_t begin, int64_t end) {
for (int64_t pos = begin; pos < end; ++pos) {
op(pos);
}
};
return sequenceLoop(f, size);
}
}
/*
Loop through sub-sequences starting from currentSequenceIndex_
for at most size samples. For each sample of each sub-sequence at position
pos, op(pos) will be called.
return the number of sub-sequences scanned
*/
template <class Op>
int64_t ProtoDataProvider::subSampleLoop(Op op, int64_t size, int slot) {
CHECK(iidData()) << "subSampleLoop only accepts iid data";
size = std::min<int64_t>(sampleNums_ - currentSequenceIndex_, size);
int subSize = 0;
for (int64_t i = currentSequenceIndex_; i < currentSequenceIndex_ + size;
++i) {
size_t pos = shuffledSequenceIds_[i];
int64_t* indexs = slots_[slot].indices.data();
int64_t* subIndexs = slots_[slot].subIndices.data();
int64_t subSeqStart = 0;
int64_t subSeqEnd = 0;
for (int j = 0; j < (int)slots_[slot].subIndices.size(); j++) {
if (subIndexs[j] == indexs[pos]) {
subSeqStart = j;
if (subIndexs[pos] == subIndexs[pos + 1]) {
subSeqEnd = j + 1;
break;
}
} else if (subIndexs[j] == indexs[pos + 1]) {
subSeqEnd = j;
break;
}
}
for (int j = subSeqStart; j < subSeqEnd; j++) {
op(j);
}
subSize += subSeqEnd - subSeqStart;
}
return subSize;
}
int64_t ProtoDataProvider::getNextBatchInternal(int64_t size,
DataBatch* batch) {
int64_t numSequences = 0; // actual number of sequences in the batch
// the number of sequences scanned, including those skipped because too long
int64_t numScannedSeqs = 0;
std::lock_guard<RWLock> guard(lock_);
if (iidData()) {
size = std::min<int64_t>(getSize() - currentSequenceIndex_, size);
numScannedSeqs = numSequences = size;
} else {
int64_t sz = 0;
auto op = [&sz, &numSequences](int64_t begin, int64_t end) {
++numSequences;
sz += end - begin;
};
numScannedSeqs = sequenceLoop(op, size);
VLOG_IF(1, numScannedSeqs > numSequences)
<< numScannedSeqs - numSequences
<< " sequences are skipped because longer than " << size;
size = sz;
}
if (size <= 0) return 0;
DataBatch& cpuBatch = *cpuBatch_;
std::vector<Argument>& cpuArguments = cpuBatch.getStreams();
cpuBatch.setSize(size);
cpuArguments.resize(header_.slot_defs_size());
if (!iidData()) {
ICpuGpuVector::resizeOrCreate(cpuArguments[0].sequenceStartPositions,
numSequences + 1,
/* useGpu= */ false);
int* buf = cpuArguments[0].sequenceStartPositions->getMutableData(false);
int pos = 0;
int i = 0;
auto op = [buf, &pos, &i](int64_t begin, int64_t end) {
buf[i] = pos;
pos += end - begin;
++i;
};
sequenceLoop(op, size);
buf[i] = size;
for (size_t slot = 1; slot < cpuArguments.size(); ++slot) {
cpuArguments[slot].sequenceStartPositions =
cpuArguments[0].sequenceStartPositions;
}
}
for (int slot = 0; slot < header_.slot_defs_size(); ++slot) {
size_t dim = header_.slot_defs(slot).dim();
SlotDef::SlotType slotType = header_.slot_defs(slot).type();
std::vector<int64_t> dataPos;
dataPos.reserve(size);
auto op = [this, &dataPos](int64_t pos) { dataPos.push_back(pos); };
sampleLoop(op, size);
switch (slotType) {
case SlotDef::VECTOR_DENSE: {
Matrix::resizeOrCreate(cpuArguments[slot].value,
size,
dim,
false, // trans = false
false); // useGpu = false
real* buf = cpuArguments[slot].value->getData();
for (int i = 0; i < size; ++i) {
memcpy(buf + i * dim,
slots_[slot].denseData.data() + dataPos[i] * dim,
sizeof(real) * dim);
}
break;
}
case SlotDef::VECTOR_SPARSE_NON_VALUE: {
if (!(cpuArguments[slot].value)) {
cpuArguments[slot].value =
Matrix::createSparseMatrix(size,
dim,
size /*DEFAULT_AVG_WIDTH = 1*/,
NO_VALUE,
SPARSE_CSR,
false,
useGpu_);
}
auto mat = cpuArguments[slot].value;
mat->resize(size, dim);
if (std::dynamic_pointer_cast<GpuSparseMatrix>(mat)) {
std::dynamic_pointer_cast<GpuSparseMatrix>(mat)->copyFrom(
dataPos.data(),
slots_[slot].indices.data(),
slots_[slot].sparseNonValueData.data(),
HPPL_STREAM_1);
} else if (std::dynamic_pointer_cast<CpuSparseMatrix>(mat)) {
std::dynamic_pointer_cast<CpuSparseMatrix>(mat)->copyFrom(
dataPos.data(),
slots_[slot].indices.data(),
slots_[slot].sparseNonValueData.data());
} else {
LOG(FATAL) << "Not Supported";
}
size_t numElements = 0;
for (auto pos : dataPos) {
numElements +=
slots_[slot].indices[pos + 1] - slots_[slot].indices[pos];
}
nnzStats_[slot]->addSample(numElements);
break;
}
case SlotDef::VECTOR_SPARSE_VALUE: {
if (!(cpuArguments[slot].value)) {
cpuArguments[slot].value =
Matrix::createSparseMatrix(size,
dim,
size /*DEFAULT_AVG_WIDTH = 1*/,
FLOAT_VALUE,
SPARSE_CSR,
false,
useGpu_);
}
auto mat = cpuArguments[slot].value;
mat->resize(size, dim);
if (std::dynamic_pointer_cast<GpuSparseMatrix>(mat)) {
std::dynamic_pointer_cast<GpuSparseMatrix>(mat)->copyFrom(
dataPos.data(),
slots_[slot].indices.data(),
slots_[slot].sparseFloatValueData.data(),
HPPL_STREAM_1);
} else if (std::dynamic_pointer_cast<CpuSparseMatrix>(mat)) {
std::dynamic_pointer_cast<CpuSparseMatrix>(mat)->copyFrom(
dataPos.data(),
slots_[slot].indices.data(),
slots_[slot].sparseFloatValueData.data());
} else {
LOG(FATAL) << "Not Supported";
}
break;
}
case SlotDef::INDEX: {
IVector::resizeOrCreate(cpuArguments[slot].ids,
size,
/* useGpu= */ false);
int* buf = cpuArguments[slot].ids->getData();
for (int i = 0; i < size; ++i) {
buf[i] = slots_[slot].indexData[dataPos[i]];
}
break;
}
case SlotDef::VAR_MDIM_DENSE: {
CHECK_EQ(size, 1);
auto mat = cpuArguments[slot].value;
size_t totalDim = slots_[slot].varDenseData[dataPos[0]].data.size();
CHECK_EQ(slots_[slot].varDenseData[dataPos[0]].dims.size(), size_t(3));
size_t height, width, depth, oldWidth;
/* dims[2] is depth, will be changed to dims[0] in future */
depth = slots_[slot].varDenseData[dataPos[0]].dims[2];
height = slots_[slot].varDenseData[dataPos[0]].dims[1];
width = slots_[slot].varDenseData[dataPos[0]].dims[0];
oldWidth = width;
/* process the undesirable sample */
if (oldWidth < height) {
width = height;
}
cpuArguments[slot].setFrameHeight(height);
cpuArguments[slot].setFrameWidth(width);
if (oldWidth < height) {
totalDim = width * height * depth;
}
Matrix::resizeOrCreate(cpuArguments[slot].value,
size,
totalDim,
false, // trans = false
false); // useGpu = false
real* buf = cpuArguments[slot].value->getData();
cpuArguments[slot].value->zeroMem();
if (oldWidth < height) {
real* srcBuf = slots_[slot].varDenseData[dataPos[0]].data.data();
for (size_t i = 0; i < depth; i++) {
for (size_t j = 0; j < height; j++) {
for (size_t k = 0; k < oldWidth; k++) {
buf[i * height * width + j * width + k] =
srcBuf[i * height * oldWidth + j * oldWidth + k];
}
}
}
} else {
memcpy(buf,
slots_[slot].varDenseData[dataPos[0]].data.data(),
sizeof(real) * totalDim);
}
ICpuGpuVector::resizeOrCreate(cpuArguments[slot].sequenceStartPositions,
size + 1, /* size == 1 currently */
/* useGpu= */ false);
int* bufStarts =
cpuArguments[slot].sequenceStartPositions->getMutableData(false);
bufStarts[0] = 0;
bufStarts[1] = 1;
break;
}
case SlotDef::VAR_MDIM_INDEX: {
CHECK_EQ(size, 1);
size_t totalDim = slots_[slot].varIndices[dataPos[0]].size();
IVector::resizeOrCreate(cpuArguments[slot].ids,
totalDim,
/* useGpu= */ false);
int* buf = cpuArguments[slot].ids->getData();
memcpy(buf,
slots_[slot].varIndices[dataPos[0]].data(),
sizeof(int) * totalDim);
ICpuGpuVector::resizeOrCreate(cpuArguments[slot].sequenceStartPositions,
size + 1, /* size == 1 currently */
/* useGpu= */ false);
int* bufStarts =
cpuArguments[slot].sequenceStartPositions->getMutableData(false);
bufStarts[0] = 0;
/* we expand the convolutional feature map to a sequence data,
* so there should be corresponding sequence labels */
bufStarts[1] = totalDim;
break;
}
case SlotDef::STRING: {
if (cpuArguments[slot].strs) {
cpuArguments[slot].strs->resize(size);
} else {
cpuArguments[slot].strs =
std::make_shared<std::vector<std::string>>(size);
}
for (int i = 0; i < size; ++i) {
(*cpuArguments[slot].strs)[i] = slots_[slot].strData[dataPos[i]];
}
break;
}
}
}
if (useGpu_) {
std::vector<Argument>& cpuArguments = cpuBatch.getStreams();
DataBatch& gpuBatch = *gpuBatch_;
std::vector<Argument>& gpuArguments = gpuBatch.getStreams();
gpuArguments.resize(cpuArguments.size());
gpuBatch.setSize(size);
for (int i = 0; i < header_.slot_defs_size(); ++i) {
SlotDef::SlotType slotType = header_.slot_defs(i).type();
if (SlotDef::VECTOR_SPARSE_VALUE == slotType ||
SlotDef::VECTOR_SPARSE_NON_VALUE == slotType) {
gpuArguments[i] = cpuArguments[i];
gpuArguments[i].sequenceStartPositions =
cpuArguments[i].sequenceStartPositions;
} else {
gpuArguments[i].resizeAndCopyFrom(
cpuArguments[i], useGpu_, HPPL_STREAM_1);
}
}
hl_stream_synchronize(HPPL_STREAM_1);
*batch = gpuBatch;
} else {
*batch = cpuBatch;
}
currentSequenceIndex_ += numScannedSeqs;
return batch->getSize();
}
ProtoSequenceDataProvider::ProtoSequenceDataProvider(const DataConfig& config,
bool useGpu,
bool loadDataAll)
: ProtoDataProvider(config, useGpu, loadDataAll) {}
int64_t ProtoSequenceDataProvider::getNextBatchInternal(int64_t size,
DataBatch* batch) {
CHECK(iidData()) << "ProtoSequenceDataProvider only accepts iid data";
int64_t numSequences = 0; // actual number of sequences in the batch
// the number of sequences scanned, including those skipped because too long
int64_t numScannedSeqs = 0;
std::lock_guard<RWLock> guard(lock_);
size = std::min<int64_t>(getSize() - currentSequenceIndex_, size);
numScannedSeqs = numSequences = size;
if (size <= 0) return 0;
DataBatch& cpuBatch = *cpuBatch_;
std::vector<Argument>& cpuArguments = cpuBatch.getStreams();
cpuBatch.setSize(size);
cpuArguments.resize(header_.slot_defs_size());
for (int slot = 0; slot < header_.slot_defs_size(); ++slot) {
SlotDef::SlotType slotType = header_.slot_defs(slot).type();
std::vector<int64_t> dataPos;
dataPos.reserve(size);
auto op = [this, &dataPos](int64_t pos) { dataPos.push_back(pos); };
sampleLoop(op, size);
// current slot: sequenceStartPositions
ICpuGpuVector::resizeOrCreate(cpuArguments[slot].sequenceStartPositions,
size + 1,
/* useGpu= */ false);
switch (slotType) {
case SlotDef::VECTOR_SPARSE_VALUE:
case SlotDef::VAR_MDIM_DENSE:
case SlotDef::VAR_MDIM_INDEX: {
LOG(FATAL) << "ProtoSequenceDataProvider only support"
<< " VECTOR_DENSE, VECTOR_SPARSE_NON_VALUE and INDEX slots";
break;
}
case SlotDef::VECTOR_SPARSE_NON_VALUE: {
// copy to IDS, not value
// pointers used in current slot
sparse_non_value_t* data = slots_[slot].sparseNonValueData.data();
int64_t* indexs = slots_[slot].indices.data();
int64_t* seqs = dataPos.data();
// current slot: we need `size` instances; what is the total length?
int totalFeatureInCurrentSlot = 0;
for (int ins = 0; ins < size; ins++) {
int64_t currInsId = seqs[ins];
totalFeatureInCurrentSlot +=
indexs[currInsId + 1] - indexs[currInsId];
// special: if current instance has NO feature in current slot
if (indexs[currInsId + 1] == indexs[currInsId]) {
totalFeatureInCurrentSlot++;
}
}
// done
// current slot: ids
IVector::resizeOrCreate(cpuArguments[slot].ids,
totalFeatureInCurrentSlot,
/* useGpu= */ false);
// where to write
int* currPosOfArgumentId = cpuArguments[slot].ids->getData();
int* currPosOfArgumentSeqStart =
cpuArguments[slot].sequenceStartPositions->getMutableData(false);
int allSequenceLength = 0;
currPosOfArgumentSeqStart[0] = 0;
// for each instance, copy data and fill sequence positions
for (int instance = 0; instance < size; instance++) {
int64_t currInstanceId = seqs[instance];
int64_t currInstanceLength =
indexs[currInstanceId + 1] - indexs[currInstanceId];
sparse_non_value_t* currInstanceData = data + indexs[currInstanceId];
// write sequenceStartPositions
allSequenceLength += currInstanceLength;
currPosOfArgumentSeqStart[instance + 1] = allSequenceLength;
// copy features
for (int featCopier = 0; featCopier < currInstanceLength;
featCopier++) {
currPosOfArgumentId[featCopier] = currInstanceData[featCopier].col;
}
currPosOfArgumentId += currInstanceLength;
// special: if current instance has NO feature in current slot
if (currInstanceLength == 0) {
allSequenceLength++;
currPosOfArgumentSeqStart[instance + 1] = allSequenceLength;
currPosOfArgumentId[0] = -1;
currPosOfArgumentId++;
}
// done
}
if (slots_[slot].subIndices.size()) {
std::vector<int64_t> dataSubPos;
auto op = [this, &dataSubPos](int64_t pos) {
dataSubPos.push_back(pos);
};
int subSize = subSampleLoop(op, size, slot);
ICpuGpuVector::resizeOrCreate(
cpuArguments[slot].subSequenceStartPositions, subSize + 1, false);
int* currPosOfArgumentSubSeqStart =
cpuArguments[slot].subSequenceStartPositions->getMutableData(
false);
int64_t* subSeqs = dataSubPos.data();
int64_t* subIndexs = slots_[slot].subIndices.data();
int allSubSequenceLength = 0;
currPosOfArgumentSubSeqStart[0] = 0;
// for each instance, compute sub-sequence number
for (int instance = 0; instance < subSize; instance++) {
int64_t currSubInstanceId = subSeqs[instance];
int64_t currSubInstanceLength =
subIndexs[currSubInstanceId + 1] - subIndexs[currSubInstanceId];
// write subSequenceStartPositions
allSubSequenceLength += currSubInstanceLength;
currPosOfArgumentSubSeqStart[instance + 1] = allSubSequenceLength;
// special: if current instance has NO feature in current slot
if (currSubInstanceLength == 0) {
allSubSequenceLength++;
currPosOfArgumentSubSeqStart[instance + 1] = allSubSequenceLength;
}
}
cpuArguments[slot].checkSubset();
}
break;
}
case SlotDef::INDEX: {
// label slot
IVector::resizeOrCreate(cpuArguments[slot].ids,
size,
/* useGpu= */ false);
// fill labels
int* buf = cpuArguments[slot].ids->getData();
for (int i = 0; i < size; ++i) {
buf[i] = slots_[slot].indexData[dataPos[i]];
}
// label HAS sequence structure
cpuArguments[slot].sequenceStartPositions->fillSequence(false);
break;
}
case SlotDef::VECTOR_DENSE: {
// copy values
size_t dim = header_.slot_defs(slot).dim();
Matrix::resizeOrCreate(cpuArguments[slot].value,
size,
dim,
false, // trans = false
false); // useGpu = false
real* buf = cpuArguments[slot].value->getData();
for (int i = 0; i < size; ++i) {
memcpy(buf + i * dim,
slots_[slot].denseData.data() + dataPos[i] * dim,
sizeof(real) * dim);
}
// sequence structure
cpuArguments[slot].sequenceStartPositions->fillSequence(false);
break;
}
default: { LOG(FATAL) << "should not reach here"; }
}
}
if (useGpu_) {
std::vector<Argument>& cpuArguments = cpuBatch.getStreams();
DataBatch& gpuBatch = *gpuBatch_;
std::vector<Argument>& gpuArguments = gpuBatch.getStreams();
gpuArguments.resize(cpuArguments.size());
gpuBatch.setSize(size);
for (size_t i = 0; i < cpuArguments.size(); ++i) {
gpuArguments[i].resizeAndCopyFrom(
cpuArguments[i], useGpu_, HPPL_STREAM_1);
}
hl_stream_synchronize(HPPL_STREAM_1);
*batch = gpuBatch;
} else {
*batch = cpuBatch;
}
currentSequenceIndex_ += numScannedSeqs;
return batch->getSize();
}
} // namespace paddle
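One subtlety in the sparse-to-sequence conversion above, shown as a toy standalone sketch: an instance with no features in a sparse slot still occupies one sequence step, filled with id -1, so that sequenceStartPositions stays strictly increasing.

#include <cstdio>
#include <vector>

int main() {
  const std::vector<std::vector<int>> inst = {{3, 7}, {}, {5}};  // instance 1 is empty
  std::vector<int> ids;
  std::vector<int> seqStart = {0};
  for (const auto& s : inst) {
    if (s.empty()) {
      ids.push_back(-1);  // placeholder step for an empty instance
    } else {
      ids.insert(ids.end(), s.begin(), s.end());
    }
    seqStart.push_back(static_cast<int>(ids.size()));
  }
  for (size_t i = 0; i + 1 < seqStart.size(); ++i)
    std::printf("instance %zu: steps [%d, %d)\n", i, seqStart[i], seqStart[i + 1]);
  return 0;  // prints [0,2), [2,3), [3,4)
}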
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "DataFormat.pb.h"
#include "paddle/utils/Stat.h"
#include "DataProvider.h"
#include "ProtoReader.h"
namespace paddle {
/**
* @brief Provides data from a protobuf data file, with each sample
* specified by a proto message
*
* DataSample defined in DataFormat.proto.
*
* The file format is
*
* header
*
* sample1
*
* sample2
*
* ...
*
* sampleN
*
* @note: In the data file, each message is prefixed with its length.
* The reading/writing of the protobuf messages is implemented in ProtoReader.h
*/
class ProtoDataProvider : public DataProvider {
public:
ProtoDataProvider(const DataConfig& config,
bool useGpu,
bool loadDataAll = true);
virtual void reset();
/**
* @note this size includes the sequences which are skipped because they
* are longer than the batch size.
*/
virtual int64_t getSize() {
int64_t size = sampleNums_;
if (usageRatio_ < 1.0f) {
size = static_cast<int64_t>(size * usageRatio_);
}
return size;
}
virtual void shuffle();
void loadData(const std::vector<std::string>& fileList);
virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch);
protected:
/**
* @brief load protobuf data from a list of files
* @param[in] fileName file name of a file which contains
* a list of file names
*/
void loadData(const std::string& fileName);
/**
* @brief load protobuf data from file
* @param[in] fileName data file name
*/
void loadDataFile(const std::string& fileName);
/** @brief check data header of each data sample
* @param[in] header data header read from protobuf data
*/
void checkDataHeader(const DataHeader& header);
/**
* @brief fill protobuf data into slot_,
* slot_ is a vector of ProtoSlot in memory.
* @param[in] sample data sample read from protobuf data
*/
void fillSlots(const DataSample& sample);
/**
* @brief return true if each sample is one sequence, i.e., independent
* of other samples.
*/
inline bool iidData() const { return sequenceStartPositions_.empty(); }
/**
* @brief check that sample is consistent with header_
*/
void checkSample(const DataSample& sample);
template <class Op>
int64_t sequenceLoop(Op op, int64_t size);
template <class Op>
int64_t sampleLoop(Op op, int64_t size);
template <class Op>
int64_t subSampleLoop(Op op, int64_t size, int slot);
void showDataStats();
protected:
struct ProtoVarSlot {
std::vector<real> data;
std::vector<int> dims;
};
struct ProtoSlot {
SlotDef::SlotType type;
int dim;
std::vector<int> indexData;
std::vector<real> denseData;
std::vector<sparse_non_value_t> sparseNonValueData;
std::vector<sparse_float_value_t> sparseFloatValueData;
std::vector<int64_t> indices;
std::vector<int64_t> subIndices;
std::vector<ProtoVarSlot> varDenseData;
std::vector<std::vector<int>> varIndices;
std::vector<std::string> strData;
};
DataHeader header_;
int numVecSlots_;
std::vector<ProtoSlot> slots_;
size_t sampleNums_;
/**
* The starting position of each sequence in samples.
* The last element should be num of samples.
* If empty, each sample is one sequence.
*/
std::vector<size_t> sequenceStartPositions_;
int64_t currentSequenceIndex_;
// The size should be the number of sequences.
std::vector<size_t> shuffledSequenceIds_;
ThreadLocalD<DataBatch> cpuBatch_;
ThreadLocalD<DataBatch> gpuBatch_;
RWLock lock_;
std::vector<StatPtr> nnzStats_; // stats for the number of non-zero entries
};
/**
* @brief Special use for Proto data: instances should contain
* sparse-non-value slots and a label.
*
* @note ProtoSequenceDataProvider treats each SPARSE SLOT as a SEQUENCE
*/
class ProtoSequenceDataProvider : public ProtoDataProvider {
public:
ProtoSequenceDataProvider(const DataConfig& config,
bool useGpu,
bool loadDataAll = true);
~ProtoSequenceDataProvider() {}
virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch);
};
} // namespace paddle
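For orientation, a standalone sketch of consuming the length-prefixed record stream described in the class comment (one header message, then samples until EOF). The fixed 4-byte length prefix and the file name are assumptions for illustration only; the real framing and .gz handling live in ProtoReader.h, which this commit does not touch.

#include <cstdint>
#include <fstream>
#include <string>

// Assumed framing for illustration: [uint32 length][payload bytes]...
static bool readRecord(std::istream& is, std::string* payload) {
  uint32_t len = 0;
  if (!is.read(reinterpret_cast<char*>(&len), sizeof(len))) return false;
  payload->resize(len);
  return len == 0 || static_cast<bool>(is.read(&(*payload)[0], len));
}

int main() {
  std::ifstream is("data.bin", std::ios::binary);  // hypothetical file name
  std::string record;
  size_t n = 0;
  while (readRecord(is, &record)) ++n;  // record 0: DataHeader; the rest: DataSamples
  return n > 0 ? 0 : 1;
}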
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "Layer.h"
#include "paddle/math/Matrix.h"
#include "paddle/utils/Logging.h"
#include "paddle/utils/Stat.h"
namespace paddle {
/**
* @brief A layer for computing the dot product of two vectors.
* Input1: vector (batchSize * dim)
* Input2: vector (batchSize * dim)
* Output: a matrix: (batchSize * 1)
*/
class DotProdLayer : public Layer {
public:
explicit DotProdLayer(const LayerConfig& config) : Layer(config) {}
~DotProdLayer() {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
};
REGISTER_LAYER(dot_prod, DotProdLayer);
bool DotProdLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
Layer::init(layerMap, parameterMap);
CHECK_EQ(inputLayers_.size(), 2U);
CHECK_EQ(1UL, getSize())
<< "The output dimensionality of this layer should be fixed to 1.";
return true;
}
void DotProdLayer::forward(PassType passType) {
Layer::forward(passType);
MatrixPtr inV0 = getInputValue(0);
MatrixPtr inV1 = getInputValue(1);
size_t batchSize = inV0->getHeight();
CHECK_EQ(inV1->getHeight(), batchSize);
CHECK_EQ(inV0->getWidth(), inV1->getWidth());
{
REGISTER_TIMER_INFO("FwResetTimer", getName().c_str());
reserveOutput(batchSize, 1);
}
MatrixPtr outV = getOutputValue();
{
REGISTER_TIMER_INFO("FwDotProdTimer", getName().c_str());
outV->sumOfProducts(*inV0, *inV1, 1, 0);
}
}
void DotProdLayer::backward(const UpdateCallback& callback) {
MatrixPtr inV0 = getInputValue(0);
MatrixPtr inV1 = getInputValue(1);
MatrixPtr outG = getOutputGrad();
MatrixPtr inG0 = getInputGrad(0);
MatrixPtr inG1 = getInputGrad(1);
{
REGISTER_TIMER_INFO("BwDotProdTimer", getName().c_str());
if (inG0) {
inG0->addRowScale(0, *inV1, *outG);
}
if (inG1) {
inG1->addRowScale(0, *inV0, *outG);
}
}
}
} // namespace paddle
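A minimal trainer-config sketch of how the dot_prod layer registered above
could be used. The Python helper name dot_prod_layer and its keyword
arguments are assumptions; only the layer type "dot_prod" and the
(batchSize * 1) output shape come from the C++ code above.

from paddle.trainer_config_helpers import *

vec_a = data_layer(name='vec_a', size=32)
vec_b = data_layer(name='vec_b', size=32)
# Hypothetical helper name; each output row holds the dot product of the
# corresponding rows of vec_a and vec_b.
similarity = dot_prod_layer(input1=vec_a, input2=vec_b)
outputs(similarity)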
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "L2DistanceLayer.h"
#include "paddle/utils/Logging.h"
#include "paddle/utils/Stat.h"
namespace paddle {
REGISTER_LAYER(l2_distance, L2DistanceLayer);
bool L2DistanceLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
/* Initialize the basic parent class */
Layer::init(layerMap, parameterMap);
CHECK_EQ(inputLayers_.size(), 2UL) << "The L2DistanceLayer accepts two and "
<< "only two inputs.";
CHECK_EQ(getSize(), 1UL) << "The output dimensionality of L2DistanceLayer "
<< "is fixed to be 1.";
return true;
}
void L2DistanceLayer::forward(PassType passType) {
Layer::forward(passType);
const auto inV1 = getInputValue(0);
const auto inV2 = getInputValue(1);
CHECK(inV1 && inV2);
CHECK_EQ(inV1->getHeight(), inV2->getHeight())
<< "The height of two inputs of this layer must be the same.";
CHECK_EQ(inV1->getWidth(), inV2->getWidth())
<< "The width of two inputs of this layer must be the same.";
int batchSize = inV1->getHeight();
int outputDim = getSize();
{
REGISTER_TIMER_INFO("L2DistanceFwAtvTimer", getName().c_str());
reserveOutput(batchSize, outputDim);
auto outV = getOutputValue();
CHECK(outV) << "The output matrix should not be null.";
Matrix::resizeOrCreate(
inputSub_, inV1->getHeight(), inV1->getWidth(), false, useGpu_);
inputSub_->assign(*inV1);
inputSub_->sub(*inV2);
outV->sumOfProducts(*inputSub_, *inputSub_, 1, 0);
outV->sqrt2(*outV);
}
}
void L2DistanceLayer::backward(const UpdateCallback& callback) {
const auto outG = getOutputGrad();
const auto outV = getOutputValue();
CHECK(outG && outV);
auto inGrad1 = getInputGrad(0);
auto inGrad2 = getInputGrad(1);
{
REGISTER_TIMER_INFO("L2DistanceBpAtvTimer", getName().c_str());
if (inGrad1 || inGrad2) {
outV->scalarDiv(*outV, 1.);
outV->dotMul(*outG, *outV);
}
if (inGrad1) inGrad1->addRowScale(0, *inputSub_, *outV);
if (inGrad2) {
inputSub_->mulScalar(-1.);
inGrad2->addRowScale(0, *inputSub_, *outV);
}
}
}
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "Layer.h"
#include "paddle/math/Matrix.h"
namespace paddle {
/**
* @brief The layer calculates the l2 distance between two input vectors.
* \f[
* f(\bf{x}, \bf{y}) = \sqrt{\sum_{i=1}^D (x_i - y_i)^2}
* \f]
*
* - Input1: A vector (batchSize * dataDim)
* - Input2: A vector (batchSize * dataDim)
* - Output: A vector (batchSize * 1)
*
* The configuration api is: l2_distance_layer.
*/
class L2DistanceLayer : public Layer {
public:
explicit L2DistanceLayer(const LayerConfig& config) : Layer(config) {}
~L2DistanceLayer() {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
private:
// Store the result of subtracting Input2 from Input1 in forward computation,
// which will be reused in backward computation.
MatrixPtr inputSub_;
};
} // namespace paddle
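A minimal trainer-config sketch for the l2_distance layer declared above.
The header names the config api l2_distance_layer; the keyword arguments
x and y used here are assumptions.

from paddle.trainer_config_helpers import *

left = data_layer(name='left', size=128)
right = data_layer(name='right', size=128)
# Output is (batchSize * 1): the Euclidean distance between each row pair.
distance = l2_distance_layer(x=left, y=right)
outputs(distance)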
......@@ -38,12 +38,13 @@ bool MKLDNNAddtoLayer::init(const LayerMap& layerMap,
}
void MKLDNNAddtoLayer::reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) {
int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) {
CHECK_EQ(layerSize_, getSize()) << "this layer size can not be changed";
reshapeInput(bs, ih, iw);
ic = inputLayers_[0]->getSize() / ih / iw;
CHECK_EQ((size_t)ic * ih * iw, inputLayers_[0]->getSize());
CHECK_EQ(inputElemenCnt_, (size_t)bs * ic * ih * iw);
CHECK_EQ(inputLayers_[0]->getOutputValue()->getElementCnt(),
(size_t)bs * ic * ih * iw);
for (size_t i = 0; i < inputLayers_.size(); i++) {
CHECK_EQ(int64_t(bs), inputLayers_[i]->getOutput().getBatchSize());
CHECK_EQ(layerSize_, inputLayers_[i]->getSize());
......@@ -57,47 +58,43 @@ void MKLDNNAddtoLayer::reshape(
}
void MKLDNNAddtoLayer::resetFwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) {
resetFwdBuffers(inVals_, bias, out);
in = inVals_[0];
resetFwdBuffers(inputs, biasVal_, out);
std::shared_ptr<sum::primitive_desc> fwdPD;
std::shared_ptr<sum::primitive_desc> biasPD;
resetFwdPD(fwdPD, biasPD, inVals_, bias, out);
resetFwdPD(fwdPD, biasPD, inputs, biasVal_, out);
resetFwdPipeline(pipeline, fwdPD, biasPD, inVals_, bias, out);
resetFwdPipeline(pipeline, fwdPD, biasPD, inputs, biasVal_, out);
}
void MKLDNNAddtoLayer::resetBwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) {
resetBwdBuffers(inGrads_, bias, out);
in = inGrads_[0];
resetBwdBuffers(inputs, biasGrad_, out);
// backward only needs to share the output grad with the input grads
for (size_t i = 0; i < inGrads_.size(); i++) {
if (inGrads_[i] != nullptr) {
inGrads_[i] = out;
inputLayers_[i]->getOutputGrad()->setData(inGrads_[i]->getData());
for (size_t i = 0; i < inputs.size(); i++) {
if (inputs[i] != nullptr) {
inputs[i] = out;
inputLayers_[i]->getOutputGrad()->setData(inputs[i]->getData());
}
}
// backward bias
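// The bias gradient is the output grad summed over the batch; it is built
// as an MKL-DNN sum primitive over the bs_ per-sample slices held in
// grads_, each with unit scale.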
bwdBias_ = nullptr;
if (bias) {
if (biasGrad_) {
std::vector<float> scales(bs_, 1.0);
std::vector<memory::primitive_desc> srcPDs(bs_, bias->getPrimitiveDesc());
auto biasPD = sum::primitive_desc(bias->getMemoryDesc(), scales, srcPDs);
std::vector<memory::primitive_desc> srcPDs(bs_,
biasGrad_->getPrimitiveDesc());
auto biasPD =
sum::primitive_desc(biasGrad_->getMemoryDesc(), scales, srcPDs);
std::vector<primitive::at> srcs;
for (size_t i = 0; i < grads_.size(); ++i) {
srcs.push_back(*(grads_[i]));
}
bwdBias_.reset(new sum(biasPD, srcs, *bias));
bwdBias_.reset(new sum(biasPD, srcs, *biasGrad_));
pipeline.push_back(*bwdBias_);
}
}
......@@ -208,7 +205,7 @@ void MKLDNNAddtoLayer::resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
inputs.resize(inputLayers_.size());
for (size_t i = 0; i < inputs.size(); i++) {
resetInGrad(inputs[i], inVal_->getPrimitiveDesc(), i);
resetInGrad(inputs[i], inVals_[i]->getPrimitiveDesc(), i);
CHECK_PRIMITIVE_DESC_EQ(inputs[i], out->getPrimitiveDesc());
}
......
......@@ -26,9 +26,6 @@ namespace paddle {
*/
class MKLDNNAddtoLayer : public MKLDNNLayer {
protected:
std::vector<MKLDNNMatrixPtr> inVals_;
std::vector<MKLDNNMatrixPtr> inGrads_;
// layer size == ic * ih * iw == oc * oh * ow, and cannot be changed
size_t layerSize_;
......@@ -50,52 +47,19 @@ public:
const ParameterMap& parameterMap) override;
void reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override;
int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) override;
void resetFwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) override;
void resetBwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) override;
void updateWeights(const UpdateCallback& callback) override;
void printValueFormat() override {
for (size_t i = 0; i < inVals_.size(); ++i) {
VLOG(MKLDNN_FMTS) << i << " input: " << inVals_[i]->getFormat() << " >>>";
}
if (outVal_) {
VLOG(MKLDNN_FMTS) << outVal_->getFormat() << " >>> ";
}
if (extOutVal_) {
VLOG(MKLDNN_FMTS) << extOutVal_->getFormat();
}
}
void printGradFormat() override {
if (extOutGrad_) {
VLOG(MKLDNN_FMTS) << extOutGrad_->getFormat();
}
if (outGrad_) {
VLOG(MKLDNN_FMTS) << outGrad_->getFormat() << " <<< ";
}
for (size_t i = 0; i < inGrads_.size(); ++i) {
VLOG(MKLDNN_FMTS) << i << " input: " << inGrads_[i]->getFormat() << "<<<";
}
}
protected:
/**
* Forward functions: reset buffers(inputs, output, bias),
* reset primitive descriptor,
* reset pipeline.
*/
void resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
......@@ -110,17 +74,10 @@ protected:
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
/**
* Backward functions: reset buffers(inputs, output, bias)
*/
void resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
/**
* prepare for bias
*/
void prepareBias(MKLDNNMatrixPtr& bias,
const MatrixPtr& biasMat,
const MKLDNNMatrixPtr& out,
......
......@@ -116,21 +116,20 @@ void MKLDNNBatchNormLayer::calMovingMeanAndVar() {
}
void MKLDNNBatchNormLayer::reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) {
int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) {
reshapeInput(bs, ih, iw);
oh = ih;
ow = iw;
// ic_ and oc can not be changed
CHECK_EQ(inputElemenCnt_ / bs / ih / iw, (size_t)ic)
CHECK_EQ((size_t)ic,
inputLayers_[0]->getOutputValue()->getElementCnt() / bs / ih / iw)
<< "Input channel can not be changed";
reshapeOutput(oh, ow);
resizeOutput(bs, oc * oh * ow);
}
void MKLDNNBatchNormLayer::resetFwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) {
// In training phase, it will always calculate mean and var,
// so useGlobalStats must be false.
......@@ -140,25 +139,23 @@ void MKLDNNBatchNormLayer::resetFwd(std::vector<primitive>& pipeline,
useGlobalStats_ = false;
}
resetFwdBuffers(in, wgt, out);
resetFwdBuffers(inputs[0], wgtVal_, out);
resetFwdPD(fwdPD_, in, wgt, out);
resetFwdPD(fwdPD_, inputs[0], wgtVal_, out);
resetFwdPipeline(pipeline, fwdPD_, in, wgt, out);
resetFwdPipeline(pipeline, fwdPD_, inputs[0], wgtVal_, out);
}
void MKLDNNBatchNormLayer::resetBwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) {
std::shared_ptr<bn_bwd::primitive_desc> pd;
resetBwdBuffers(in, wgt, out);
resetBwdBuffers(inputs[0], wgtGrad_, out);
resetBwdPD(pd, in, wgt, out);
resetBwdPD(pd, inputs[0], wgtGrad_, out);
resetBwdPipeline(pipeline, pd, in, wgt, out);
resetBwdPipeline(pipeline, pd, inputs[0], wgtGrad_, out);
}
void MKLDNNBatchNormLayer::forward(PassType passType) {
......@@ -260,9 +257,9 @@ void MKLDNNBatchNormLayer::resetFwdPipeline(
void MKLDNNBatchNormLayer::resetBwdBuffers(MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& out) {
CHECK(inVal_ && outVal_);
CHECK(inVals_[0] && outVal_);
resetOutGrad(out, outVal_->getPrimitiveDesc());
resetInGrad(in, inVal_->getPrimitiveDesc());
resetInGrad(in, inVals_[0]->getPrimitiveDesc());
if (gradScaleShift_) {
CHECK(wgtVal_);
resetWithMatrix(wgt, gradScaleShift_, wgtVal_->getPrimitiveDesc());
......@@ -297,11 +294,12 @@ void MKLDNNBatchNormLayer::resetBwdPipeline(
if (pd == nullptr) {
return;
}
CHECK(inVal_);
CHECK(inVals_[0]);
bwdData_.reset(
wgt && wgtVal_
? new bn_bwd(*pd, *inVal_, *mean_, *var_, *out, *wgtVal_, *in, *wgt)
: new bn_bwd(*pd, *inVal_, *mean_, *var_, *out, *in));
? new bn_bwd(
*pd, *inVals_[0], *mean_, *var_, *out, *wgtVal_, *in, *wgt)
: new bn_bwd(*pd, *inVals_[0], *mean_, *var_, *out, *in));
pipeline.push_back(*bwdData_);
}
......
......@@ -74,18 +74,14 @@ public:
void forward(PassType passType) override;
void reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override;
int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) override;
void resetFwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) override;
void resetBwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) override;
void updateWeights(const UpdateCallback& callback) override;
......@@ -99,11 +95,7 @@ protected:
* moving = moving * AvgFraction + local * (1 - AvgFraction)
*/
void calMovingMeanAndVar();
/**
* Forward functions: reset buffers(input, weight, output),
* reset primitive descriptor,
* reset pipeline.
*/
void resetFwdBuffers(MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& out);
......@@ -116,12 +108,6 @@ protected:
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& out);
/**
* Backward functions: reset buffers(input, weight, output),
* reset primitive descriptor,
* reset pipeline.
*/
void resetBwdBuffers(MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& out);
......
/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "MKLDNNConcatLayer.h"
using namespace mkldnn; // NOLINT
typedef memory::format format;
namespace paddle {
REGISTER_LAYER(mkldnn_concat, MKLDNNConcatLayer);
bool MKLDNNConcatLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
if (!MKLDNNLayer::init(layerMap, parameterMap)) {
return false;
}
CHECK_GT(inputLayers_.size(), 1UL);
CHECK(!biasParameter_);
return true;
}
void MKLDNNConcatLayer::reshape(
int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) {
reshapeInput(bs, ih, iw);
ic = inputLayers_[0]->getSize() / ih / iw;
CHECK_EQ((size_t)ic * ih * iw, inputLayers_[0]->getSize());
CHECK_EQ(inputLayers_[0]->getOutputValue()->getElementCnt(),
(size_t)bs * ic * ih * iw);
CHECK_GT(inputLayers_.size(), 1UL);
channels_.resize(inputLayers_.size());
channels_[0] = ic;
oc = ic;
for (size_t i = 1; i < inputLayers_.size(); i++) {
int batchsize, height, width;
reshapeInput(batchsize, height, width, i);
CHECK_EQ(bs, batchsize);
CHECK_EQ(ih, height);
CHECK_EQ(iw, width);
channels_[i] = inputLayers_[i]->getSize() / height / width;
CHECK_EQ((size_t)channels_[i] * height * width, inputLayers_[i]->getSize());
oc += channels_[i];
}
oh = ih;
ow = iw;
reshapeOutput(oh, ow);
resizeOutput(bs, oc * oh * ow);
}
void MKLDNNConcatLayer::resetFwd(std::vector<primitive>& pipeline,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) {
resetFwdBuffers(inputs, out);
std::shared_ptr<concat::primitive_desc> fwdPD;
resetFwdPD(fwdPD, inputs, out);
resetFwdPipeline(pipeline, fwdPD, inputs, out);
}
void MKLDNNConcatLayer::resetBwd(std::vector<primitive>& pipeline,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) {
resetBwdBuffers(inputs, out);
resetBwdPipeline(pipeline, bwds_, inputs, out);
}
void MKLDNNConcatLayer::resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) {
inputs.resize(inputLayers_.size());
bool has8c = false, has16c = false, hasnc = false;
for (size_t i = 0; i < inputs.size(); i++) {
resetInValue(inputs[i], nullptr, i, channels_[i]);
CHECK(inputs[i]);
auto dm = inputs[i]->getDims();
// input formats may differ, but the number of dimensions must match
CHECK(i == 0 || dm.size() == inputs[0]->getDims().size());
CHECK_EQ(bs_, dm[0]);
CHECK_EQ(channels_[i], dm[1]);
if (dm.size() > 2) {
CHECK_EQ(ih_, dm[2]);
CHECK_EQ(iw_, dm[3]);
}
if (inputs[i]->getFormat() == format::nc) {
hasnc = true;
}
if (inputs[i]->getFormat() == format::nChw8c) {
has8c = true;
}
if (inputs[i]->getFormat() == format::nChw16c) {
has16c = true;
}
}
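// Choose the output format: prefer a blocked channel format (nChw16c or
// nChw8c) seen among the inputs when oc_ is divisible by the block size;
// fall back to nc when an input is nc (1x1 spatial), and nchw otherwise.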
format outFmt;
if (has16c && oc_ % 16 == 0) {
outFmt = format::nChw16c;
} else if (has8c && oc_ % 8 == 0) {
outFmt = format::nChw8c;
} else if (hasnc) {
CHECK(oh_ == 1 && ow_ == 1);
outFmt = format::nc;
} else {
outFmt = format::nchw;
}
memory::dims outDims =
hasnc ? memory::dims{bs_, oc_} : memory::dims{bs_, oc_, oh_, ow_};
auto outPD = MKLDNNMatrix::createPrimitiveDesc(outDims, outFmt, engine_);
resetOutValue(out, outPD);
}
void MKLDNNConcatLayer::resetFwdPD(std::shared_ptr<concat::primitive_desc>& pd,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr out) {
std::vector<memory::primitive_desc> srcPDs;
for (size_t i = 0; i < inputs.size(); i++) {
srcPDs.push_back(inputs[i]->getPrimitiveDesc());
}
CHECK(out);
pd.reset(new concat::primitive_desc(out->getMemoryDesc(), axis_, srcPDs));
CHECK_PRIMITIVE_DESC_EQ(out, pd->dst_primitive_desc());
}
void MKLDNNConcatLayer::resetFwdPipeline(
std::vector<primitive>& pipeline,
std::shared_ptr<concat::primitive_desc>& pd,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) {
std::vector<primitive::at> srcs;
for (size_t i = 0; i < inputs.size(); i++) {
srcs.push_back(*(inputs[i]));
}
fwd_.reset(new concat(*pd, srcs, *out));
pipeline.push_back(*fwd_);
}
void MKLDNNConcatLayer::resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) {
CHECK(outVal_);
resetOutGrad(out, outVal_->getPrimitiveDesc());
CHECK(out);
inputs.resize(inputLayers_.size());
for (size_t i = 0; i < inputs.size(); i++) {
CHECK(inVals_[i]);
resetInGrad(inputs[i], inVals_[i]->getPrimitiveDesc(), i);
CHECK_PRIMITIVE_DESC_EQ(inputs[i], inVals_[i]->getPrimitiveDesc());
}
}
void MKLDNNConcatLayer::resetBwdPipeline(
std::vector<mkldnn::primitive>& pipeline,
std::vector<std::shared_ptr<mkldnn::primitive>>& prims,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) {
// reset the backward primitives
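// Backward concat needs no dedicated primitive: one reorder per input
// copies the matching sub-view of the output grad back into that input
// grad, with offsets advanced along the concat axis.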
memory::dims offsets = {0, 0, 0, 0};
prims.resize(inputs.size());
CHECK_EQ(inputs.size(), channels_.size());
for (size_t i = 0; i < inputs.size(); i++) {
auto viewPD = view::primitive_desc(
out->getPrimitiveDesc(), inputs[i]->getDims(), offsets);
auto bwdPD = reorder::primitive_desc(viewPD.dst_primitive_desc(),
inputs[i]->getPrimitiveDesc());
prims[i].reset(new reorder(bwdPD, *out, *(inputs[i])));
offsets[axis_] += channels_[i];
// push to pipeline
pipeline.push_back(*prims[i]);
}
}
} // namespace paddle
/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "MKLDNNLayer.h"
#include "mkldnn.hpp"
namespace paddle {
/**
* @brief A concatenation layer implemented as a subclass of MKLDNNLayer.
*
* The config file api is mkldnn_concat.
*/
class MKLDNNConcatLayer : public MKLDNNLayer {
protected:
std::vector<std::shared_ptr<mkldnn::primitive>> bwds_;
// input channel numbers
std::vector<int> channels_;
// concat dimension in MKL-DNN:
// if axis_ == 0, concatenate along the batch dimension
// if axis_ == 1, concatenate along the channel dimension (default)
int axis_;
public:
explicit MKLDNNConcatLayer(const LayerConfig& config)
: MKLDNNLayer(config), axis_(1) {}
~MKLDNNConcatLayer() {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
void reshape(
int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) override;
void resetFwd(std::vector<mkldnn::primitive>& pipeline,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) override;
void resetBwd(std::vector<mkldnn::primitive>& pipeline,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) override;
void printSizeInfo() override {
CHECK_EQ(channels_.size(), inputLayers_.size());
for (size_t i = 0; i < channels_.size(); ++i) {
VLOG(MKLDNN_SIZES) << "Input " << i << ", " << inputLayers_[i]->getName()
<< ": " << bs_ << ", " << channels_[i] << ", " << ih_
<< ", " << iw_;
}
VLOG(MKLDNN_SIZES) << "Output: " << bs_ << ", " << oc_ << ", " << oh_
<< ", " << ow_;
}
size_t keepCondition() {
// reset when the total element size of all inputs changed
size_t totalSize = inputLayers_[0]->getOutputValue()->getElementCnt();
for (size_t i = 1; i < inputLayers_.size(); ++i) {
totalSize += inputLayers_[i]->getOutputValue()->getElementCnt();
}
return totalSize;
}
protected:
void resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out);
void resetFwdPD(std::shared_ptr<mkldnn::concat::primitive_desc>& pd,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr out);
void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
std::shared_ptr<mkldnn::concat::primitive_desc>& pd,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out);
void resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out);
void resetBwdPipeline(std::vector<mkldnn::primitive>& pipeline,
std::vector<std::shared_ptr<mkldnn::primitive>>& prims,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out);
};
} // namespace paddle
......@@ -90,7 +90,7 @@ void MKLDNNConvLayer::convertWeightsToPaddle() {
}
void MKLDNNConvLayer::reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) {
int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) {
reshapeInput(bs, ih, iw);
// cal output sizes
......@@ -105,21 +105,17 @@ void MKLDNNConvLayer::reshape(
}
void MKLDNNConvLayer::resetFwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) {
resetFwdPD(fwdPD_);
resetFwdBuffers(fwdPD_, in, wgt, bias, out);
resetFwdBuffers(fwdPD_, inputs[0], wgtVal_, biasVal_, out);
resetFwdPipeline(pipeline, fwdPD_, in, wgt, bias, out);
resetFwdPipeline(pipeline, fwdPD_, inputs[0], wgtVal_, biasVal_, out);
}
void MKLDNNConvLayer::resetBwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) {
std::shared_ptr<conv_bwdWgt::primitive_desc> bwdWgtPD;
std::shared_ptr<conv_bwdData::primitive_desc> bwdDataPD;
......@@ -128,9 +124,10 @@ void MKLDNNConvLayer::resetBwd(std::vector<primitive>& pipeline,
resetBwdDataPD(bwdDataPD);
resetBwdBuffers(bwdWgtPD, bwdDataPD, in, wgt, bias, out);
resetBwdBuffers(bwdWgtPD, bwdDataPD, inputs[0], wgtGrad_, biasGrad_, out);
resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgt, bias, out);
resetBwdPipeline(
pipeline, bwdWgtPD, bwdDataPD, inputs[0], wgtGrad_, biasGrad_, out);
}
void MKLDNNConvLayer::updateWeights(const UpdateCallback& callback) {
......@@ -236,14 +233,14 @@ void MKLDNNConvLayer::resetBwdWgtPD(
loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR);
// create backward weight using input, output and weight value memory desc
CHECK(inVal_) << "Should have internal input value";
CHECK(inVals_[0]) << "Should have internal input value";
CHECK(outVal_) << "Should have internal output value";
CHECK(wgtVal_) << "Should have weight value";
algorithm algo = algorithm::convolution_direct;
padding_kind padKind = padding_kind::zero;
auto bwdWgtDesc = biasVal_ != nullptr
? conv_bwdWgt::desc(algo,
inVal_->getMemoryDesc(),
inVals_[0]->getMemoryDesc(),
wgtVal_->getMemoryDesc(),
biasVal_->getMemoryDesc(),
outVal_->getMemoryDesc(),
......@@ -252,7 +249,7 @@ void MKLDNNConvLayer::resetBwdWgtPD(
padR,
padKind)
: conv_bwdWgt::desc(algo,
inVal_->getMemoryDesc(),
inVals_[0]->getMemoryDesc(),
wgtVal_->getMemoryDesc(),
outVal_->getMemoryDesc(),
strides,
......@@ -260,7 +257,7 @@ void MKLDNNConvLayer::resetBwdWgtPD(
padR,
padKind);
pd.reset(new conv_bwdWgt::primitive_desc(bwdWgtDesc, engine_, *fwdPD_));
CHECK_PRIMITIVE_DESC_EQ(inVal_, pd->src_primitive_desc());
CHECK_PRIMITIVE_DESC_EQ(inVals_[0], pd->src_primitive_desc());
CHECK_PRIMITIVE_DESC_EQ(
outVal_,
pd->diff_dst_primitive_desc(),
......@@ -280,12 +277,12 @@ void MKLDNNConvLayer::resetBwdDataPD(
memory::dims wgtDims, biasDims, strides, dilations, padL, padR;
loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR);
CHECK(inVal_) << "Should have internal input value";
CHECK(inVals_[0]) << "Should have internal input value";
CHECK(outVal_) << "Should have internal output value";
// create backward data using input and output value memory desc
// but using weight memory desc with any format
auto bwdDataDesc = conv_bwdData::desc(algorithm::convolution_direct,
inVal_->getMemoryDesc(),
inVals_[0]->getMemoryDesc(),
MKLDNNMatrix::createMemoryDesc(wgtDims),
outVal_->getMemoryDesc(),
strides,
......@@ -294,7 +291,7 @@ void MKLDNNConvLayer::resetBwdDataPD(
padding_kind::zero);
pd.reset(new conv_bwdData::primitive_desc(bwdDataDesc, engine_, *fwdPD_));
CHECK_PRIMITIVE_DESC_EQ(
inVal_,
inVals_[0],
pd->diff_src_primitive_desc(),
"primitive desc of in value and grad should be equal");
CHECK_PRIMITIVE_DESC_EQ(
......@@ -346,12 +343,12 @@ void MKLDNNConvLayer::resetBwdPipeline(
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
CHECK(inVal_);
CHECK(inVals_[0]);
// add bwdWgt handle
if (bias) {
bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVal_, *out, *wgt, *bias));
bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVals_[0], *out, *wgt, *bias));
} else {
bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVal_, *out, *wgt));
bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVals_[0], *out, *wgt));
}
pipeline.push_back(*bwdWgt_);
......
......@@ -69,18 +69,14 @@ public:
const ParameterMap& parameterMap) override;
void reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override;
int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) override;
void resetFwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) override;
void resetBwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) override;
void updateWeights(const UpdateCallback& callback) override;
......@@ -107,48 +103,26 @@ protected:
mkldnn::memory::dims& padL,
mkldnn::memory::dims& padR);
/**
* reset the forward primitive descriptor.
*/
void resetFwdPD(std::shared_ptr<conv_fwd::primitive_desc>& pd);
/**
* reset the MKLDNNMatrix buffers used in forward.
*/
void resetFwdBuffers(std::shared_ptr<conv_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
/**
* reset the forward pipeline.
*/
void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
std::shared_ptr<conv_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
/**
* reset the backward weight primitive descriptor.
*/
void resetBwdWgtPD(std::shared_ptr<conv_bwdWgt::primitive_desc>& pd);
/**
* reset the backward data primitive descriptor.
*/
void resetBwdDataPD(std::shared_ptr<conv_bwdData::primitive_desc>& pd);
/**
* reset the MKLDNNMatrix buffers used in backward.
*/
void resetBwdBuffers(std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
/**
* reset the backward pipeline.
*/
void resetBwdPipeline(std::vector<mkldnn::primitive>& pipeline,
std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
......
......@@ -74,7 +74,7 @@ void MKLDNNFcLayer::convertWeightsToPaddle() {
}
void MKLDNNFcLayer::reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) {
int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) {
reshapeInput(bs, ih, iw);
CHECK_EQ(iLayerSize_, inputLayers_[0]->getSize());
......@@ -87,32 +87,29 @@ void MKLDNNFcLayer::reshape(
}
void MKLDNNFcLayer::resetFwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) {
resetFwdBuffers(in, wgt, bias, out);
resetFwdBuffers(inputs[0], wgtVal_, biasVal_, out);
resetFwdPD(fwdPD_, in, wgt, bias, out);
resetFwdPD(fwdPD_, inputs[0], wgtVal_, biasVal_, out);
resetFwdPipeline(pipeline, fwdPD_, in, wgt, bias, out);
resetFwdPipeline(pipeline, fwdPD_, inputs[0], wgtVal_, biasVal_, out);
}
void MKLDNNFcLayer::resetBwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) {
std::shared_ptr<fc_bwdWgt::primitive_desc> bwdWgtPD;
std::shared_ptr<fc_bwdData::primitive_desc> bwdDataPD;
resetBwdBuffers(in, wgt, bias, out);
resetBwdBuffers(inputs[0], wgtGrad_, biasGrad_, out);
resetBwdWgtPD(bwdWgtPD, wgt, bias, out);
resetBwdWgtPD(bwdWgtPD, wgtGrad_, biasGrad_, out);
resetBwdDataPD(bwdDataPD, in, out);
resetBwdDataPD(bwdDataPD, inputs[0], out);
resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgt, bias, out);
resetBwdPipeline(
pipeline, bwdWgtPD, bwdDataPD, inputs[0], wgtGrad_, biasGrad_, out);
}
void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) {
......@@ -193,9 +190,9 @@ void MKLDNNFcLayer::resetBwdBuffers(MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
CHECK(inVal_ && outVal_);
CHECK(inVals_[0] && outVal_);
resetOutGrad(out, outVal_->getPrimitiveDesc());
resetInGrad(in, inVal_->getPrimitiveDesc());
resetInGrad(in, inVals_[0]->getPrimitiveDesc());
CHECK(wgtVal_);
resetWithMatrix(wgt, weight_->getWGrad(), wgtVal_->getPrimitiveDesc());
......@@ -212,14 +209,15 @@ void MKLDNNFcLayer::resetBwdWgtPD(
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
CHECK(inVal_);
fc_bwdWgt::desc bwdWgtDesc = bias ? fc_bwdWgt::desc(inVal_->getMemoryDesc(),
wgt->getMemoryDesc(),
bias->getMemoryDesc(),
out->getMemoryDesc())
: fc_bwdWgt::desc(inVal_->getMemoryDesc(),
wgt->getMemoryDesc(),
out->getMemoryDesc());
CHECK(inVals_[0]);
fc_bwdWgt::desc bwdWgtDesc =
bias ? fc_bwdWgt::desc(inVals_[0]->getMemoryDesc(),
wgt->getMemoryDesc(),
bias->getMemoryDesc(),
out->getMemoryDesc())
: fc_bwdWgt::desc(inVals_[0]->getMemoryDesc(),
wgt->getMemoryDesc(),
out->getMemoryDesc());
pd.reset(new fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, *fwdPD_));
}
......@@ -245,11 +243,11 @@ void MKLDNNFcLayer::resetBwdPipeline(
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
CHECK(inVal_);
CHECK(inVals_[0]);
if (bias) {
bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVal_, *out, *wgt, *bias));
bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVals_[0], *out, *wgt, *bias));
} else {
bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVal_, *out, *wgt));
bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVals_[0], *out, *wgt));
}
pipeline.push_back(*bwdWgt_);
......
......@@ -52,18 +52,14 @@ public:
const ParameterMap& parameterMap) override;
void reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override;
int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) override;
void resetFwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) override;
void resetBwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) override;
void updateWeights(const UpdateCallback& callback) override;
......@@ -73,11 +69,6 @@ public:
void convertWeightsToPaddle() override;
protected:
/**
* Forward functions: reset buffers(input, output, weight and bias),
* reset primitive descriptor,
* reset pipeline.
*/
void resetFwdBuffers(MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
......@@ -93,13 +84,6 @@ protected:
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
/**
* Backward functions: reset buffers(input, output, weight and bias),
* reset primitive descriptor for backward weight,
* reset primitive descriptor for backward data,
* reset pipeline.
*/
void resetBwdBuffers(MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
......
......@@ -21,8 +21,8 @@ namespace paddle {
bool MKLDNNLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
CHECK(FLAGS_use_mkldnn) << "MkldnnLayers only support use_mkldnn."
<< "Please set WITH_MKLDNN=ON "
CHECK(FLAGS_use_mkldnn) << "MKLDNNLayers only support use_mkldnn."
<< "Please set WITH_MKL=ON "
<< "and set use_mkldnn=True";
CHECK(!useGpu_) << "Do not support GPU yet";
......@@ -48,31 +48,20 @@ void MKLDNNLayer::forward(PassType passType) {
REGISTER_TIMER_INFO("mkldnn_FwdTimer", getName().c_str());
CHECK(!inputLayers_.empty());
copySeqInfoToOutputs();
size_t elemenCnt = inputLayers_[0]->getOutputValue()->getElementCnt();
if (inputElemenCnt_ != elemenCnt) {
if (condition_ != keepCondition()) {
VLOG(MKLDNN_BASE) << getName() << " reset mkldnn forward";
// reset when input total sizes changed, not only the batchsize
inputElemenCnt_ = elemenCnt;
pipelineFwd_.clear();
condition_ = keepCondition();
reshape(bs_, ic_, ih_, iw_, oc_, oh_, ow_);
// all cpu device output grad or value share output's
printSizeInfo();
// the output_.value and output_.grad are shared with CPU device
shareCPUDevice();
resetFwd(pipelineFwd_, inVal_, wgtVal_, biasVal_, outVal_);
// MKLDNNLayer output value should be MKLDNNMatrix
// so external output value is necessary.
// Then external input value is not necessary,
// since input may be mkldnn internal buffer.
CHECK(extOutVal_) << "external output value is necessary";
output_.value = std::dynamic_pointer_cast<Matrix>(extOutVal_);
CHECK(inVal_ && outVal_) << "internal memories are necessary";
if (cvtInVal_) {
pipelineFwd_.insert(pipelineFwd_.begin(), *cvtInVal_);
}
if (cvtOutVal_) {
pipelineFwd_.push_back(*cvtOutVal_);
}
pipelineFwd_.clear();
inVals_.resize(inputLayers_.size(), nullptr);
extInVals_.resize(inputLayers_.size(), nullptr);
cvtInVals_.resize(inputLayers_.size(), nullptr);
resetFwd(pipelineFwd_, inVals_, outVal_);
prepareValueConversions(pipelineFwd_);
convertWeightsFromPaddle();
printSizeInfo();
printValueFormat();
needResetBwd_ = true;
}
......@@ -80,8 +69,8 @@ void MKLDNNLayer::forward(PassType passType) {
if (inputLayers_[0]->getType() == "data" && inputLayers_.size() == 1) {
// Update input value data when input layer is "data" type,
// since the input value data address might be changed.
CHECK(extInVal_);
extInVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
CHECK(extInVals_[0]);
extInVals_[0]->setData(getInputValue(0, CPU_DEVICE)->getData());
}
if (!outputOnlyMKLDNN_) {
......@@ -99,22 +88,13 @@ void MKLDNNLayer::backward(const UpdateCallback& callback) {
if (needResetBwd_) {
VLOG(MKLDNN_BASE) << getName() << " reset mkldnn backward";
pipelineBwd_.clear();
inGrads_.resize(inputLayers_.size(), nullptr);
extInGrads_.resize(inputLayers_.size(), nullptr);
cvtInGrads_.resize(inputLayers_.size(), nullptr);
pipelineMergeGrad_.clear();
mergeGrad_ = nullptr;
resetBwd(pipelineBwd_, inGrad_, wgtGrad_, biasGrad_, outGrad_);
// external output grad is not necessary
// since output may be mkldnn internal buffer or merge them directly.
CHECK(outGrad_) << "internal output grad is necessary";
if (extOutGrad_) {
CHECK_EQ(extOutGrad_->getData(), output_.grad->getData())
<< "the external buffer should share the same data with output_.grad";
}
if (cvtOutGrad_) {
pipelineBwd_.insert(pipelineBwd_.begin(), *cvtOutGrad_);
}
if (cvtInGrad_) {
pipelineBwd_.push_back(*cvtInGrad_);
}
resetBwd(pipelineBwd_, inGrads_, outGrad_);
prepareGradConversions(pipelineBwd_);
printGradFormat();
needResetBwd_ = false;
}
......@@ -138,8 +118,11 @@ void MKLDNNLayer::backward(const UpdateCallback& callback) {
}
}
void MKLDNNLayer::reshapeInput(int& batchsize, int& height, int& width) {
const Argument& input = inputLayers_[0]->getOutput();
void MKLDNNLayer::reshapeInput(int& batchsize,
int& height,
int& width,
size_t idx) {
const Argument& input = inputLayers_[idx]->getOutput();
batchsize = input.getBatchSize();
int h = input.getFrameHeight();
int w = input.getFrameWidth();
......@@ -173,27 +156,30 @@ void MKLDNNLayer::resetWithMatrix(MKLDNNMatrixPtr& dnn,
void MKLDNNLayer::resetInValue(
MKLDNNMatrixPtr& in,
const std::shared_ptr<memory::primitive_desc>& intPD,
size_t inputIdx) {
cvtInVal_ = nullptr;
extInVal_ = nullptr;
size_t idx,
int inputChannel) {
cvtInVals_[idx] = nullptr;
extInVals_[idx] = nullptr;
in = nullptr;
CHECK_GT(bs_ * ic_ * ih_ * iw_, 0);
inputChannel = inputChannel == 0 ? ic_ : inputChannel;
CHECK_GT(bs_ * inputChannel * ih_ * iw_, 0);
auto extPD = MKLDNNMatrix::createPrimitiveDesc(
{bs_, ic_, ih_, iw_}, format::nchw, engine_);
const MatrixPtr& inMat = inputLayers_[inputIdx]->getOutputValue();
extInVal_ = std::dynamic_pointer_cast<MKLDNNMatrix>(inMat);
CHECK_EQ(inputIsOnlyMKLDNN(), extInVal_ != nullptr);
if (extInVal_ == nullptr || extInVal_->getFormat() == format::nc) {
extInVal_ = MKLDNNMatrix::create(extPD, inMat);
{bs_, inputChannel, ih_, iw_}, format::nchw, engine_);
const MatrixPtr& inMat = inputLayers_[idx]->getOutputValue();
extInVals_[idx] = std::dynamic_pointer_cast<MKLDNNMatrix>(inMat);
CHECK_EQ(inputIsOnlyMKLDNN(), extInVals_[idx] != nullptr);
if (extInVals_[idx] == nullptr ||
extInVals_[idx]->getFormat() == format::nc) {
extInVals_[idx] = MKLDNNMatrix::create(extPD, inMat);
}
in = extInVal_;
in = extInVals_[idx];
if (nullptr == intPD || in->getPrimitiveDesc() == *intPD) {
return;
}
// need create reorder
in = MKLDNNMatrix::create(*intPD);
cvtInVal_ = MKLDNNMatrix::createReorder(extInVal_, in);
CHECK(cvtInVal_) << "should not be empty";
cvtInVals_[idx] = MKLDNNMatrix::createReorder(extInVals_[idx], in);
CHECK(cvtInVals_[idx]) << "should not be empty";
}
void MKLDNNLayer::resetOutValue(MKLDNNMatrixPtr& out,
......@@ -215,11 +201,11 @@ void MKLDNNLayer::resetOutValue(MKLDNNMatrixPtr& out,
void MKLDNNLayer::resetInGrad(MKLDNNMatrixPtr& in,
memory::primitive_desc intPD,
size_t inputIdx) {
cvtInGrad_ = nullptr;
extInGrad_ = nullptr;
size_t idx) {
cvtInGrads_[idx] = nullptr;
extInGrads_[idx] = nullptr;
in = nullptr;
LayerPtr& input = inputLayers_[inputIdx];
LayerPtr& input = inputLayers_[idx];
if (input->getOutputGrad() == nullptr) {
// no input grad needed
return;
......@@ -234,23 +220,25 @@ void MKLDNNLayer::resetInGrad(MKLDNNMatrixPtr& in,
in = MKLDNNMatrix::create(intPD, inMat);
Argument& arg = input->getOutput(this->getName());
arg.grad = std::dynamic_pointer_cast<Matrix>(in);
CHECK_PRIMITIVE_DESC_EQ(inVal_, intPD);
CHECK_PRIMITIVE_DESC_EQ(inVals_[idx], intPD);
if (inputIsOnlyMKLDNN()) {
return;
}
extInGrad_ = in;
if (isPaddleFormat(extInGrad_->getFormat())) {
extInGrads_[idx] = in;
if (isPaddleFormat(extInGrads_[idx]->getFormat())) {
return;
}
// need create reorder
CHECK(extInVal_ != nullptr && isPaddleFormat(extInVal_->getFormat()))
CHECK(extInVals_[idx] != nullptr &&
isPaddleFormat(extInVals_[idx]->getFormat()))
<< "should have external input value and the format must be nchw(nc)";
extInGrad_ = MKLDNNMatrix::create(extInVal_->getPrimitiveDesc(), inMat);
CHECK_PRIMITIVE_DESC_EQ(inVal_, intPD);
extInGrads_[idx] =
MKLDNNMatrix::create(extInVals_[idx]->getPrimitiveDesc(), inMat);
CHECK_PRIMITIVE_DESC_EQ(inVals_[idx], intPD);
in = MKLDNNMatrix::create(intPD);
cvtInGrad_ = MKLDNNMatrix::createReorder(in, extInGrad_);
CHECK(cvtInGrad_);
cvtInGrads_[idx] = MKLDNNMatrix::createReorder(in, extInGrads_[idx]);
CHECK(cvtInGrads_[idx]);
}
void MKLDNNLayer::resetOutGrad(MKLDNNMatrixPtr& out,
......
......@@ -34,15 +34,16 @@ typedef std::shared_ptr<MKLDNNLayer> MKLDNNLayerPtr;
*/
class MKLDNNLayer : public Layer {
protected:
// input value element count
size_t inputElemenCnt_;
// batch size
int bs_;
// their sizes are always from the first input layer
// input image channel, height and width
int ic_, ih_, iw_;
// output image channel, height and width
int oc_, oh_, ow_;
// the condition under which the forward pass needs to be reset
size_t condition_;
// the backward pass also needs a reset after the forward handles are reset
bool needResetBwd_;
......@@ -67,18 +68,18 @@ protected:
* When all layers are mkldnn layers, they could save internal data.
*/
// below MKLDNNMatrix buffers are all internal buffers
MKLDNNMatrixPtr inVal_;
MKLDNNMatrixPtr inGrad_;
std::vector<MKLDNNMatrixPtr> inVals_;
std::vector<MKLDNNMatrixPtr> inGrads_;
MKLDNNMatrixPtr outVal_;
MKLDNNMatrixPtr outGrad_;
// below are external value and grad
MKLDNNMatrixPtr extInVal_;
MKLDNNMatrixPtr extInGrad_;
std::vector<MKLDNNMatrixPtr> extInVals_;
std::vector<MKLDNNMatrixPtr> extInGrads_;
MKLDNNMatrixPtr extOutVal_;
MKLDNNMatrixPtr extOutGrad_;
// convert handle between external and internal buffers
std::shared_ptr<mkldnn::reorder> cvtInVal_;
std::shared_ptr<mkldnn::reorder> cvtInGrad_;
std::vector<std::shared_ptr<mkldnn::reorder>> cvtInVals_;
std::vector<std::shared_ptr<mkldnn::reorder>> cvtInGrads_;
std::shared_ptr<mkldnn::reorder> cvtOutVal_;
std::shared_ptr<mkldnn::reorder> cvtOutGrad_;
......@@ -102,14 +103,7 @@ protected:
public:
explicit MKLDNNLayer(const LayerConfig& config)
: Layer(config),
inputElemenCnt_(0),
bs_(0),
ic_(0),
ih_(0),
iw_(0),
oc_(0),
oh_(0),
ow_(0),
condition_(0),
needResetBwd_(true),
outputOnlyMKLDNN_(false),
engine_(mkldnn::engine::cpu, 0),
......@@ -125,31 +119,28 @@ public:
virtual void backward(const UpdateCallback& callback);
/**
* reshape the input image sizes
* and reset output image and buffer size
* output channel can not be changed
* reshape the input and output channels and image sizes
* and reset output buffer size
*/
virtual void reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) = 0;
int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) = 0;
/**
* reset the mkldnn forward primitive and memories
* only called when the input size changes
* weight and bias buffers should be covered by the child class itself
*/
virtual void resetFwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) = 0;
/**
* reset the mkldnn backward primitive and memories
* only called when needed
* weight and bias buffers should be covered by the child class itself
*/
virtual void resetBwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) = 0;
/**
......@@ -175,10 +166,19 @@ public:
void addOutputArgument(int deviceId) { Layer::addOutputArgument(deviceId); }
protected:
/**
* Some layers may have their own condition for resetting the forward pass.
* The function returns a value; the forward pass is reset whenever it changes.
*/
inline virtual size_t keepCondition() {
// reset when the first input's element count changes, not only the batch size
return inputLayers_[0]->getOutputValue()->getElementCnt();
}
/**
* reshape the input image sizes and input batchsize
*/
void reshapeInput(int& batchsize, int& height, int& width);
void reshapeInput(int& batchsize, int& height, int& width, size_t idx = 0);
/**
* reshape output image sizes
......@@ -196,11 +196,13 @@ protected:
/**
* reset input value from input MKLDNNMatrix and internal primitive desc.
* reset both internal and external buffer and create reorder if necessary.
* the input channel count may differ between inputs in concat.
*/
void resetInValue(
MKLDNNMatrixPtr& in,
const std::shared_ptr<mkldnn::memory::primitive_desc>& intPD = nullptr,
size_t inputIdx = 0);
size_t idx = 0,
int inputChannel = 0);
/**
* reset output value from internal primitive desc.
......@@ -215,7 +217,7 @@ protected:
*/
void resetInGrad(MKLDNNMatrixPtr& in,
mkldnn::memory::primitive_desc intPD,
size_t inputIdx = 0);
size_t idx = 0);
/**
* reset output grad from internal primitive desc.
......@@ -293,17 +295,19 @@ protected:
* print the mkldnn memory format of value
*/
virtual void printValueFormat() {
if (extInVal_) {
VLOG(MKLDNN_FMTS) << extInVal_->getFormat() << " >>> ";
}
if (inVal_) {
VLOG(MKLDNN_FMTS) << inVal_->getFormat() << " >>>";
for (size_t i = 0; i < inVals_.size(); ++i) {
if (!inVals_[i]) {
continue;
}
VLOG(MKLDNN_FMTS) << "Input " << i << ", " << inputLayers_[i]->getName()
<< ": " << (extInVals_[i] ? extInVals_[i]->getFormat()
: inVals_[i]->getFormat())
<< " >>> " << inVals_[i]->getFormat() << " >>>";
}
if (outVal_) {
VLOG(MKLDNN_FMTS) << outVal_->getFormat() << " >>> ";
}
if (extOutVal_) {
VLOG(MKLDNN_FMTS) << extOutVal_->getFormat();
VLOG(MKLDNN_FMTS) << outVal_->getFormat() << " >>> "
<< (extOutVal_ ? extOutVal_->getFormat()
: outVal_->getFormat());
}
if (wgtVal_) {
VLOG(MKLDNN_FMTS) << "Weight value format: " << wgtVal_->getFormat();
......@@ -317,17 +321,19 @@ protected:
* print the mkldnn memory format of grad
*/
virtual void printGradFormat() {
if (extOutGrad_) {
VLOG(MKLDNN_FMTS) << extOutGrad_->getFormat();
}
if (outGrad_) {
VLOG(MKLDNN_FMTS) << outGrad_->getFormat() << " <<< ";
VLOG(MKLDNN_FMTS) << outGrad_->getFormat() << " <<< "
<< (extOutGrad_ ? extOutGrad_->getFormat()
: outGrad_->getFormat());
}
if (inGrad_) {
VLOG(MKLDNN_FMTS) << inGrad_->getFormat() << " <<<";
}
if (extInGrad_) {
VLOG(MKLDNN_FMTS) << extInGrad_->getFormat() << " <<< ";
for (size_t i = 0; i < inGrads_.size(); ++i) {
if (!inGrads_[i]) {
continue;
}
VLOG(MKLDNN_FMTS) << "Input " << i << ", " << inputLayers_[i]->getName()
<< ": " << (extInGrads_[i] ? extInGrads_[i]->getFormat()
: inGrads_[i]->getFormat())
<< " <<< " << inGrads_[i]->getFormat() << " <<<";
}
if (wgtGrad_) {
VLOG(MKLDNN_FMTS) << "Weight grad format: " << wgtGrad_->getFormat();
......@@ -434,6 +440,41 @@ private:
outputOtherDevice_[i].cpuSequenceDims = output_.cpuSequenceDims;
}
}
void prepareValueConversions(std::vector<mkldnn::primitive>& pipeline) {
// The MKLDNNLayer output value must be an MKLDNNMatrix, so an external
// output value is necessary; an external input value is not, since the
// input may be an mkldnn internal buffer.
CHECK(extOutVal_) << "external output value is necessary";
output_.value = std::dynamic_pointer_cast<Matrix>(extOutVal_);
CHECK(inVals_[0] && outVal_) << "internal memories are necessary";
for (size_t i = 0; i < cvtInVals_.size(); ++i) {
if (cvtInVals_[i]) {
pipeline.insert(pipeline.begin(), *cvtInVals_[i]);
}
}
if (cvtOutVal_) {
pipeline.push_back(*cvtOutVal_);
}
}
void prepareGradConversions(std::vector<mkldnn::primitive>& pipeline) {
// An external output grad is not necessary, since the output may be an
// mkldnn internal buffer or the grads may be merged directly.
CHECK(outGrad_) << "internal output grad is necessary";
if (extOutGrad_) {
CHECK_EQ(extOutGrad_->getData(), output_.grad->getData())
<< "the external buffer should share the same data with output_.grad";
}
if (cvtOutGrad_) {
pipeline.insert(pipeline.begin(), *cvtOutGrad_);
}
for (size_t i = 0; i < cvtInGrads_.size(); ++i) {
if (cvtInGrads_[i]) {
pipeline.push_back(*cvtInGrads_[i]);
}
}
}
};
} // namespace paddle
......@@ -58,10 +58,11 @@ bool MKLDNNPoolLayer::init(const LayerMap& layerMap,
}
void MKLDNNPoolLayer::reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) {
int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) {
reshapeInput(bs, ih, iw);
// ic_ and oc can not be changed
CHECK_EQ(inputElemenCnt_ / bs / ih / iw, (size_t)ic)
CHECK_EQ((size_t)ic,
inputLayers_[0]->getOutputValue()->getElementCnt() / bs / ih / iw)
<< "Input channel can not be changed";
// cal output sizes
......@@ -74,29 +75,25 @@ void MKLDNNPoolLayer::reshape(
}
void MKLDNNPoolLayer::resetFwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) {
resetFwdBuffers(in, out);
resetFwdBuffers(inputs[0], out);
resetFwdPD(fwdPD_, in, out);
resetFwdPD(fwdPD_, inputs[0], out);
resetFwdPipeline(pipeline, fwdPD_, in, out);
resetFwdPipeline(pipeline, fwdPD_, inputs[0], out);
}
void MKLDNNPoolLayer::resetBwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) {
std::shared_ptr<pool_bwd::primitive_desc> pd;
resetBwdBuffers(in, out);
resetBwdBuffers(inputs[0], out);
resetBwdPD(pd, in, out);
resetBwdPD(pd, inputs[0], out);
resetBwdPipeline(pipeline, pd, in, out);
resetBwdPipeline(pipeline, pd, inputs[0], out);
}
void MKLDNNPoolLayer::resetFwdBuffers(MKLDNNMatrixPtr& in,
......@@ -151,9 +148,9 @@ void MKLDNNPoolLayer::resetFwdPipeline(
void MKLDNNPoolLayer::resetBwdBuffers(MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& out) {
CHECK(inVal_ && outVal_);
CHECK(inVals_[0] && outVal_);
resetOutGrad(out, outVal_->getPrimitiveDesc());
resetInGrad(in, inVal_->getPrimitiveDesc());
resetInGrad(in, inVals_[0]->getPrimitiveDesc());
}
void MKLDNNPoolLayer::resetBwdPD(std::shared_ptr<pool_bwd::primitive_desc>& pd,
......
......@@ -53,18 +53,14 @@ public:
const ParameterMap& parameterMap) override;
void reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override;
int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) override;
void resetFwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) override;
void resetBwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
std::vector<MKLDNNMatrixPtr>& inputs,
MKLDNNMatrixPtr& out) override;
void printSizeInfo() override {
......@@ -75,11 +71,6 @@ public:
}
protected:
/**
* Forward functions: reset buffers(input, output),
* reset primitive descriptor,
* reset pipeline.
*/
void resetFwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out);
void resetFwdPD(std::shared_ptr<pool_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr in,
......@@ -88,12 +79,6 @@ protected:
std::shared_ptr<pool_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& out);
/**
* Backward functions: reset buffers(input, output),
* reset primitive descriptor,
* reset pipeline.
*/
void resetBwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out);
void resetBwdPD(std::shared_ptr<pool_bwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
......
......@@ -29,7 +29,7 @@ gserver_test(test_KmaxSeqScore)
gserver_test(test_Expand)
gserver_test(test_MaxPoolingWithMaskOutput)
########## test_Mkldnn layers and activations ##########
########## test_MKLDNN layers and activations ##########
if(WITH_MKLDNN)
add_unittest_without_exec(test_MKLDNN
test_MKLDNN.cpp
......@@ -62,17 +62,6 @@ if(NOT WITH_DOUBLE AND NOT MOBILE_INFERENCE)
endif()
if(NOT MOBILE_INFERENCE)
################### test_ProtoDataProvider ############
add_unittest_without_exec(test_ProtoDataProvider
test_ProtoDataProvider.cpp)
# test_ProtoDataProvider creates a directory with the same name,
# so mkdir fails when WORKING_DIRECTORY is the default directory.
add_test(NAME test_ProtoDataProvider
COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_ProtoDataProvider
WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
################## test_Evaluator #######################
add_unittest(test_Evaluator
test_Evaluator.cpp)
......@@ -110,3 +99,24 @@ add_test(NAME test_PyDataProvider2
COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/paddle/gserver/tests:${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_PyDataProvider2
WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle
)
################# test_CompareSparse ##################
add_unittest_without_exec(test_CompareSparse
test_CompareSparse.cpp)
if(NOT ON_TRAVIS)
add_test(NAME test_CompareSparse
COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d
${PADDLE_SOURCE_DIR}/python:${PADDLE_SOURCE_DIR}/paddle/gserver/tests
./.set_port.sh -p port -n 6
${CMAKE_CURRENT_BINARY_DIR}/test_CompareSparse
WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/)
endif()
################ test_CompareTwoNets ######################
add_unittest_without_exec(test_CompareTwoNets
test_CompareTwoNets.cpp)
add_test(NAME test_CompareTwoNets
COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d
${PADDLE_SOURCE_DIR}/python:${PADDLE_SOURCE_DIR}/paddle/gserver/tests
${CMAKE_CURRENT_BINARY_DIR}/test_CompareTwoNets
WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/)
......@@ -23,7 +23,7 @@ limitations under the License. */
namespace paddle {
/**
* @brief test the functionality of Mkldnnlayers
* @brief test the functionality of MKLDNNLayers and MKLDNNActivations
* refer to paddle original function
*/
class MKLDNNTester {
......
./test_ProtoDataProvider/data1.bin
./test_ProtoDataProvider/data2.bin
./test_ProtoDataProvider/data1.bin.gz
./test_ProtoDataProvider/data2.bin.gz
#!/usr/bin/env python
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
......@@ -14,27 +15,50 @@
from paddle.trainer_config_helpers import *
################################### Data Configuration ###################################
TrainData(ProtoData(files = "trainer/tests/mnist.list"))
################################### Algorithm Configuration ###################################
settings(batch_size = 1000,
learning_method = MomentumOptimizer(momentum=0.5, sparse=False))
################################### Network Configuration ###################################
data = data_layer(name ="input", size=784)
######################## data source ################################
dict_path = 'gserver/tests/Sequence/tour_dict_phrase.dict'
dict_file = dict()
for line_count, line in enumerate(open(dict_path, "r")):
dict_file[line.strip()] = line_count
fc1 = fc_layer(input=data, size=800,
bias_attr=True,
act=SigmoidActivation())
define_py_data_sources2(
train_list='gserver/tests/Sequence/train.list',
test_list=None,
module='sequenceGen',
obj='process',
args={"dict_file": dict_file})
fc2 = fc_layer(input=fc1, size=800,
bias_attr=True,
act=SigmoidActivation())
settings(batch_size=5)
######################## network configure ################################
dict_dim = len(open(dict_path, 'r').readlines())
word_dim = 128
hidden_dim = 256
label_dim = 3
sparse_update = get_config_arg("sparse_update", bool, False)
output = fc_layer(input=[fc1, fc2], size=10,
bias_attr=True,
act=SoftmaxActivation())
data = data_layer(name="word", size=dict_dim)
lbl = data_layer(name ="label", size=1)
emb = embedding_layer(
input=data,
size=word_dim,
param_attr=ParamAttr(sparse_update=sparse_update))
cost = classification_cost(input=output, label=lbl)
outputs(cost)
with mixed_layer(size=hidden_dim * 4) as lstm_input:
lstm_input += full_matrix_projection(input=emb)
lstm = lstmemory(
input=lstm_input,
act=TanhActivation(),
gate_act=SigmoidActivation(),
state_act=TanhActivation())
lstm_last = last_seq(input=lstm)
with mixed_layer(
size=label_dim, act=SoftmaxActivation(), bias_attr=True) as output:
output += full_matrix_projection(input=lstm_last)
outputs(
classification_cost(
input=output, label=data_layer(
name="label", size=1)))
#!/usr/bin/env python
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
......@@ -14,27 +15,42 @@
from paddle.trainer_config_helpers import *
################################### Data Configuration ###################################
TrainData(ProtoData(files = "trainer/tests/mnist.list"))
################################### Algorithm Configuration ###################################
settings(batch_size = 1000,
learning_method = MomentumOptimizer(momentum=0.5, sparse=False))
################################### Network Configuration ###################################
data = data_layer(name ="input", size=784)
######################## data source ################################
dict_path = 'gserver/tests/Sequence/tour_dict_phrase.dict'
dict_file = dict()
for line_count, line in enumerate(open(dict_path, "r")):
dict_file[line.strip()] = line_count
fc1 = fc_layer(input=data, size=800,
bias_attr=True,
act=SigmoidActivation())
define_py_data_sources2(
train_list='gserver/tests/Sequence/train.list',
test_list=None,
module='sequenceGen',
obj='process',
args={"dict_file": dict_file})
fc2 = fc_layer(input=fc1, size=800,
bias_attr=True,
act=SigmoidActivation())
settings(batch_size=5)
######################## network configure ################################
dict_dim = len(open(dict_path, 'r').readlines())
word_dim = 128
hidden_dim = 128
label_dim = 3
output = fc_layer(input=[fc1, fc2], size=10,
bias_attr=True,
act=SoftmaxActivation())
# This config is designed to be equivalent to sequence_recurrent_group.py
lbl = data_layer(name ="label", size=1)
data = data_layer(name="word", size=dict_dim)
cost = classification_cost(input=output, label=lbl)
outputs(cost)
emb = embedding_layer(
input=data, size=word_dim, param_attr=ParamAttr(name="emb"))
recurrent = recurrent_layer(input=emb, bias_attr=False, act=SoftmaxActivation())
recurrent_last = last_seq(input=recurrent)
with mixed_layer(
size=label_dim, act=SoftmaxActivation(), bias_attr=True) as output:
output += full_matrix_projection(input=recurrent_last)
outputs(
classification_cost(
input=output, label=data_layer(
name="label", size=1)))
#!/usr/bin/env python
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *
######################## data source ################################
dict_path = 'gserver/tests/Sequence/tour_dict_phrase.dict'
dict_file = dict()
for line_count, line in enumerate(open(dict_path, "r")):
dict_file[line.strip()] = line_count
define_py_data_sources2(
train_list='gserver/tests/Sequence/train.list',
test_list=None,
module='sequenceGen',
obj='process',
args={"dict_file": dict_file})
settings(batch_size=5)
######################## network configure ################################
dict_dim = len(open(dict_path, 'r').readlines())
word_dim = 128
hidden_dim = 128
label_dim = 3
# This config is designed to be equivalent to sequence_recurrent.py
data = data_layer(name="word", size=dict_dim)
emb = embedding_layer(
input=data, size=word_dim, param_attr=ParamAttr(name="emb"))
def step(y):
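# One recurrence step; assuming the same math as recurrent_layer:
#   state_t = softmax(emb_t + W * state_{t-1})
# The parameter name below matches the weight created by recurrent_layer in
# sequence_recurrent.py, so both configs share identical parameters.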
mem = memory(name="rnn_state", size=hidden_dim)
with mixed_layer(
name="rnn_state",
size=hidden_dim,
bias_attr=False,
act=SoftmaxActivation()) as out:
out += identity_projection(input=y)
out += full_matrix_projection(
input=mem, param_attr=ParamAttr(name="___recurrent_layer_0__"))
return out
recurrent = recurrent_group(name="rnn", step=step, input=emb)
recurrent_last = last_seq(input=recurrent)
with mixed_layer(
size=label_dim, act=SoftmaxActivation(), bias_attr=True) as output:
output += full_matrix_projection(input=recurrent_last)
outputs(
classification_cost(
input=output, label=data_layer(
name="label", size=1)))
......@@ -22,8 +22,7 @@ limitations under the License. */
using namespace paddle; // NOLINT
using namespace std; // NOLINT
static const string& configFile1 =
"trainer/tests/sample_trainer_config_compare_sparse.conf";
static const string& configFile1 = "gserver/tests/sequence_lstm.conf";
DECLARE_bool(use_gpu);
DECLARE_string(config);
......
......@@ -30,8 +30,6 @@ DECLARE_bool(use_gpu);
DECLARE_string(config);
DECLARE_string(nics);
DEFINE_string(config_file_a, "", "config of one network to compare");
DEFINE_string(config_file_b, "", "config of another network to compare");
DEFINE_bool(need_high_accuracy,
false,
"whether to run in double precision");
......@@ -42,6 +40,10 @@ DEFINE_double(
DECLARE_bool(thread_local_rand_use_global_seed);
DECLARE_int32(seed);
static const string& config_file_a = "gserver/tests/sequence_recurrent.py";
static const string& config_file_b =
"gserver/tests/sequence_recurrent_group.py";
struct ComData {
vector<Argument> outArgs;
vector<ParameterPtr> parameters;
......@@ -66,6 +68,7 @@ void calcGradient(ComData& data, const string configFile) {
DataBatch dataBatch;
int32_t batchSize = trainer.getConfig().opt_config().batch_size();
trainer.getDataProvider()->reset();
trainer.getDataProvider()->setSkipShuffle();
trainer.getDataProvider()->getNextBatch(batchSize, &dataBatch);
......@@ -167,11 +170,11 @@ void compareGradient(ComData& comDataA, ComData& comDataB) {
TEST(Trainer, create) {
ComData dataA;
calcGradient(dataA, FLAGS_config_file_a);
calcGradient(dataA, config_file_a);
LOG(INFO) << "\n\nforwardBackward of Network A is finished\n\n";
ComData dataB;
calcGradient(dataB, FLAGS_config_file_b);
calcGradient(dataB, config_file_b);
LOG(INFO) << "\n\nforwardBackward of the Network B is finished\n\n";
compareGradient(dataA, dataB);
......
......@@ -583,6 +583,7 @@ TEST(Layer, maxoutLayer) {
testLayerGrad(config, "maxout", 10, false, useGpu);
}
}
void testFcLayer(string format, size_t nnz) {
TestConfig config;
config.biasSize = 1024;
......@@ -1081,6 +1082,21 @@ TEST(Layer, InterpolationLayer) {
}
}
TEST(Layer, DotProdLayer) {
TestConfig config;
config.layerConfig.set_type("dot_prod");
config.layerConfig.set_size(1);
config.inputDefs.push_back({INPUT_DATA, "layer_0", 10, 0});
config.layerConfig.add_inputs();
config.inputDefs.push_back({INPUT_DATA, "layer_1", 10, 0});
config.layerConfig.add_inputs();
for (auto useGpu : {false, true}) {
testLayerGrad(config, "dot_prod", 10, false, useGpu);
}
}
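For reference, a minimal NumPy sketch of the computation this test is assumed to exercise: a row-wise dot product of two equally sized inputs, one scalar per sample (dot_prod_ref is an illustrative name, not part of Paddle):

import numpy as np

def dot_prod_ref(x, y):
    # Row-wise dot product: out[i] = sum_k x[i, k] * y[i, k].
    return np.sum(x * y, axis=1, keepdims=True)

x = np.random.rand(10, 10)  # batch size and input width from the test config
y = np.random.rand(10, 10)
assert dot_prod_ref(x, y).shape == (10, 1)  # layer size is 1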
TEST(Layer, OuterProdLayer) {
TestConfig config;
config.layerConfig.set_type("out_prod");
......@@ -2429,6 +2445,25 @@ TEST(Layer, ScaleSubRegionLayer) {
}
}
TEST(Layer, L2DistanceLayer) {
TestConfig config;
config.layerConfig.set_type("l2_distance");
config.layerConfig.set_size(1);
config.biasSize = 0;
const size_t input_dim = 27;
const size_t batch_size = 11;
config.inputDefs.push_back({INPUT_DATA, "layer_0", input_dim, 0});
config.inputDefs.push_back({INPUT_DATA, "layer_1", input_dim, 0});
config.layerConfig.add_inputs();
config.layerConfig.add_inputs();
for (auto useGpu : {false, true}) {
testLayerGrad(config, "l2_distance", batch_size, false, useGpu);
}
}
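Likewise, a hedged NumPy reference for l2_distance, assuming it emits the row-wise Euclidean distance between its two inputs (dimensions taken from the test above; l2_distance_ref is an illustrative name):

import numpy as np

def l2_distance_ref(a, b):
    # out[i] = sqrt(sum_k (a[i, k] - b[i, k])^2), one scalar per sample.
    return np.sqrt(np.sum((a - b) ** 2, axis=1, keepdims=True))

a = np.random.rand(11, 27)  # batch_size = 11, input_dim = 27 as in the test
b = np.random.rand(11, 27)
assert l2_distance_ref(a, b).shape == (11, 1)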
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
initMain(argc, argv);
......
......@@ -313,6 +313,47 @@ TEST(MKLDNNLayer, AddtoLayer) {
testAddtoLayer({4, 12, 1, 1}, 3);
}
static void getMKLDNNConcatConfig(TestConfig& cfg,
const std::vector<testImageDesc>& inputs) {
CHECK_GE(inputs.size(), 2UL) << "at least two inputs";
int oc = inputs[0].ic;
for (size_t i = 1; i < inputs.size(); ++i) {
CHECK_EQ(inputs[i].bs, inputs[0].bs);
CHECK_EQ(inputs[i].ih, inputs[0].ih);
CHECK_EQ(inputs[i].iw, inputs[0].iw);
oc += inputs[i].ic;
}
cfg.biasSize = 0;
cfg.layerConfig.set_type("mkldnn_concat");
cfg.layerConfig.set_size(oc * inputs[0].ih * inputs[0].iw);
cfg.layerConfig.set_active_type("relu");
for (size_t i = 0; i < inputs.size(); ++i) {
std::stringstream ss;
ss << "layer_" << i;
cfg.inputDefs.push_back(
{INPUT_DATA,
ss.str(),
(size_t)(inputs[i].ic) * inputs[i].ih * inputs[i].iw,
0});
LayerInputConfig* input = cfg.layerConfig.add_inputs();
ImageConfig* img_conf = input->mutable_image_conf();
img_conf->set_channels(inputs[i].ic);
img_conf->set_img_size_y(inputs[i].ih);
img_conf->set_img_size(inputs[i].iw);
}
}
void testConcatLayer(const std::vector<testImageDesc>& inputs) {
TestConfig dnnConfig;
getMKLDNNConcatConfig(dnnConfig, inputs);
RUN_MKLDNN_TEST_LAYER(dnnConfig, "concat", inputs[0])
}
TEST(MKLDNNLayer, ConcatLayer) {
testConcatLayer({{64, 128, 1, 1}, {64, 32, 1, 1}, {64, 64, 1, 1}});
testConcatLayer({{32, 100, 8, 8}, {32, 10, 8, 8}});
}
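The config helper above only adds channels: batch, height, and width must match, and the output channel count is the sum of the inputs'. A small NumPy check of that shape arithmetic, using the first test case's shapes:

import numpy as np

# Shapes are (bs, ic, ih, iw), matching testImageDesc in the first test case.
parts = [np.zeros((64, 128, 1, 1)), np.zeros((64, 32, 1, 1)), np.zeros((64, 64, 1, 1))]
out = np.concatenate(parts, axis=1)  # concatenate along the channel axis
assert out.shape == (64, 128 + 32 + 64, 1, 1)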
void testActivation(std::string actType, const testImageDesc& pm) {
// TODO(TJ): remove me when paddle supports elu activation
if (actType == "mkldnn_elu") {
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "paddle/gserver/dataproviders/ProtoDataProvider.h"
#include "paddle/utils/Util.h"
#include "paddle/testing/TestUtil.h"
using namespace std; // NOLINT
std::vector<string> protoFiles{
"./test_ProtoDataProvider/data1.bin", "./test_ProtoDataProvider/data2.bin",
};
std::vector<string> protoFilesCompressed{
"./test_ProtoDataProvider/data1.bin.gz",
"./test_ProtoDataProvider/data2.bin.gz",
};
const char* kTestDir = "./test_ProtoDataProvider";
const char kProtoFileList[] = "gserver/tests/proto_files.txt";
const char kProtoFileListCompressed[] =
"gserver/tests/proto_files_compressed.txt";
const int kSparseMatrixDim = 1024;
using namespace paddle; // NOLINT
void prepareData(DataBatch* batch,
const int* numPerSlotType,
bool iid,
bool useGpu) {
batch->clear();
int64_t size = uniformRandom(100) + 10;
batch->setSize(size);
ICpuGpuVectorPtr sequenceStartPositions;
ICpuGpuVectorPtr subSequenceStartPositions;
if (!iid) {
int numSeqs = uniformRandom(10) + 1;
sequenceStartPositions =
ICpuGpuVector::create(numSeqs + 1, /* useGpu= */ false);
int* buf = sequenceStartPositions->getMutableData(false);
subSequenceStartPositions =
ICpuGpuVector::create(numSeqs + 1, /* useGpu= */ false);
int* subBuf = subSequenceStartPositions->getMutableData(false);
int64_t pos = 0;
int maxLen = 2 * size / numSeqs;
for (int i = 0; i < numSeqs; ++i) {
int len =
uniformRandom(min<int64_t>(maxLen, size - pos - numSeqs + i)) + 1;
buf[i] = pos;
subBuf[i] = pos;
pos += len;
VLOG(1) << " len=" << len;
}
buf[numSeqs] = size;
subBuf[numSeqs] = size;
}
vector<Argument>& arguments = batch->getStreams();
for (int i = 0; i < numPerSlotType[SlotDef::VECTOR_DENSE]; ++i) {
int64_t dim = rand() % 10 + 4; // NOLINT rand_r
MatrixPtr mat = Matrix::create(size, dim, /* trans= */ false, false);
mat->randomizeUniform();
Argument arg;
arg.value = mat;
arg.sequenceStartPositions = sequenceStartPositions;
arguments.push_back(arg);
}
for (int i = 0; i < numPerSlotType[SlotDef::VECTOR_SPARSE_NON_VALUE]; ++i) {
MatrixPtr mat =
makeRandomSparseMatrix(size, kSparseMatrixDim, false, useGpu);
Argument arg;
arg.value = mat;
arg.sequenceStartPositions = sequenceStartPositions;
arg.subSequenceStartPositions = subSequenceStartPositions;
arguments.push_back(arg);
}
for (int i = 0; i < numPerSlotType[SlotDef::VECTOR_SPARSE_VALUE]; ++i) {
MatrixPtr mat =
makeRandomSparseMatrix(size, kSparseMatrixDim, true, useGpu);
Argument arg;
arg.value = mat;
arg.sequenceStartPositions = sequenceStartPositions;
arguments.push_back(arg);
}
for (int i = 0; i < numPerSlotType[SlotDef::STRING]; ++i) {
int64_t dim = rand() % 10 + 4; // NOLINT rand_r
SVectorPtr vec = std::make_shared<std::vector<std::string>>();
for (int j = 0; j < size; ++j) {
vec->push_back(randStr(dim));
}
Argument arg;
arg.strs = vec;
arg.sequenceStartPositions = sequenceStartPositions;
arguments.push_back(arg);
}
for (int i = 0; i < numPerSlotType[SlotDef::INDEX]; ++i) {
int64_t dim = rand() % 10 + 4; // NOLINT rand_r
IVectorPtr vec = IVector::create(size, /* useGpu= */ false);
int* buf = vec->getData();
for (int j = 0; j < size; ++j) {
buf[j] = uniformRandom(dim);
}
Argument arg;
arg.ids = vec;
arg.sequenceStartPositions = sequenceStartPositions;
arguments.push_back(arg);
}
}
inline int getSlotDim(const Argument& arg) {
if (arg.value) {
return arg.value->getWidth();
} else if (arg.ids) {
return arg.ids->getMax() + 1;
} else if (arg.strs) {
return 1;
}
LOG(FATAL) << "Invalid argument";
return 0;
}
inline SlotDef::SlotType getSlotType(const Argument& arg) {
if (arg.value) {
auto& m = *arg.value;
auto& type = typeid(m);
if (type == typeid(CpuMatrix) || type == typeid(GpuMatrix)) {
return SlotDef::VECTOR_DENSE;
}
if (type == typeid(CpuSparseMatrix)) {
auto valueType =
std::dynamic_pointer_cast<CpuSparseMatrix>(arg.value)->getValueType();
if (NO_VALUE == valueType) {
return SlotDef::VECTOR_SPARSE_NON_VALUE;
} else {
return SlotDef::VECTOR_SPARSE_VALUE;
}
}
if (type == typeid(GpuSparseMatrix)) {
auto valueType =
std::dynamic_pointer_cast<GpuSparseMatrix>(arg.value)->getValueType();
if (NO_VALUE == valueType) {
return SlotDef::VECTOR_SPARSE_NON_VALUE;
} else {
return SlotDef::VECTOR_SPARSE_VALUE;
}
}
LOG(FATAL) << "Unknown matrix type";
}
if (arg.ids) return SlotDef::INDEX;
if (arg.strs) return SlotDef::STRING;
LOG(FATAL) << "Invalid argument";
return SlotDef::VECTOR_DENSE;
}
void getColRow(const Argument& arg,
int64_t pos,
bool useGpu,
int* colNum,
const int** rowCols,
const real** rowValues) {
SlotDef::SlotType type = getSlotType(arg);
GpuSparseMatrixPtr matGpu;
CpuSparseMatrixPtr matCpu;
if (useGpu) {
matGpu = dynamic_pointer_cast<GpuSparseMatrix>(arg.value);
ASSERT_TRUE(matGpu != NULL);
} else {
matCpu = dynamic_pointer_cast<CpuSparseMatrix>(arg.value);
ASSERT_TRUE(matCpu != NULL);
}
*colNum = useGpu ? matGpu->getColNum(pos) : matCpu->getColNum(pos);
*rowCols = useGpu ? matGpu->getRowCols(pos) : matCpu->getRowCols(pos);
if (type == SlotDef::VECTOR_SPARSE_VALUE) {
*rowValues = useGpu ? matGpu->getRowValues(pos) : matCpu->getRowValues(pos);
} else {
*rowValues = NULL;
}
}
void makeSample(const vector<Argument>& arguments,
int64_t pos,
bool isBeginning,
DataSample* sample,
bool useGpu) {
sample->set_is_beginning(isBeginning);
int slotid = 0;
for (auto& arg : arguments) {
SlotDef::SlotType type = getSlotType(arg);
int64_t dim = getSlotDim(arg);
switch (type) {
case SlotDef::VECTOR_DENSE: {
VectorSlot* vecSlot = sample->add_vector_slots();
auto values = vecSlot->mutable_values();
values->Reserve(dim);
for (int i = 0; i < dim; ++i) {
values->AddAlreadyReserved(
static_cast<float>(arg.value->getElement(pos, i)));
}
break;
}
case SlotDef::INDEX: {
sample->add_id_slots(arg.ids->get(pos));
break;
}
case SlotDef::VECTOR_SPARSE_NON_VALUE: {
VectorSlot* vecSlot = sample->add_vector_slots();
auto ids = vecSlot->mutable_ids();
int colNum;
const int* rowCols;
const real* rowValues; // nullptr
getColRow(arg, pos, useGpu, &colNum, &rowCols, &rowValues);
ids->Reserve(colNum);
for (int i = 0; i < colNum; ++i) {
ids->AddAlreadyReserved(rowCols[i]);
}
SubseqSlot* subseqSlot = sample->add_subseq_slots(); // subseq
subseqSlot->set_slot_id(slotid);
auto lens = subseqSlot->mutable_lens();
lens->Add(colNum);
break;
}
case SlotDef::VECTOR_SPARSE_VALUE: {
VectorSlot* vecSlot = sample->add_vector_slots();
auto values = vecSlot->mutable_values();
auto ids = vecSlot->mutable_ids();
int colNum;
const int* rowCols;
const real* rowValues;
getColRow(arg, pos, useGpu, &colNum, &rowCols, &rowValues);
ids->Reserve(colNum);
values->Reserve(colNum);
for (int i = 0; i < colNum; ++i) {
ids->AddAlreadyReserved(rowCols[i]);
values->AddAlreadyReserved(rowValues[i]);
}
break;
}
case SlotDef::VAR_MDIM_DENSE:
case SlotDef::VAR_MDIM_INDEX: {
LOG(FATAL) << "Not implemented";
break;
}
case SlotDef::STRING: {
VectorSlot* vecSlot = sample->add_vector_slots();
vecSlot->add_strs((*arg.strs)[pos]);
break;
}
}
slotid++;
}
}
void writeData(const DataBatch& batch, bool useGpu, bool dataCompression) {
DataHeader header;
const vector<Argument>& arguments = batch.getStreams();
for (auto& argument : arguments) {
SlotDef* slotDef = header.add_slot_defs();
slotDef->set_type(getSlotType(argument));
slotDef->set_dim(getSlotDim(argument));
}
VLOG(1) << "header=" << header.DebugString();
int64_t totalSeqs = batch.getNumSequences();
int64_t seq = 0;
ICpuGpuVectorPtr sequenceStartPositions = arguments[0].sequenceStartPositions;
int64_t numWritten = 0;
vector<string> curProtoFiles =
dataCompression ? protoFilesCompressed : protoFiles;
for (size_t i = 0; i < curProtoFiles.size(); ++i) {
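// Distribute totalSeqs across the files as evenly as possible: file i gets
// totalSeqs*(i+1)/n - totalSeqs*i/n sequences (integer division).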
int64_t numSeqs = totalSeqs * (i + 1) / curProtoFiles.size() -
totalSeqs * i / curProtoFiles.size();
ofstream os(curProtoFiles[i]);
CHECK(os) << "Fail to open " << curProtoFiles[i];
unique_ptr<ProtoWriter> writer(new ProtoWriter(&os, dataCompression));
CHECK(writer->write(header));
for (int j = 0; j < numSeqs; ++j, ++seq) {
int64_t begin = seq;
int64_t end = seq + 1;
if (sequenceStartPositions) {
begin = sequenceStartPositions->getElement(seq);
end = sequenceStartPositions->getElement(seq + 1);
}
for (int pos = begin; pos < end; ++pos) {
DataSample sample;
makeSample(arguments, pos, pos == begin, &sample, useGpu);
CHECK(writer->write(sample));
++numWritten;
}
}
writer.reset(nullptr);
os.close();
}
CHECK_EQ(arguments[0].getBatchSize(), numWritten);
}
// check that the sample at pos1 in args1 is the same as the sample at pos2 in args2
void checkSample(const vector<Argument>& args1,
int64_t pos1,
const vector<Argument>& args2,
int64_t pos2,
bool useGpu) {
EXPECT_EQ(args1.size(), args2.size());
VLOG(1) << " pos1=" << pos1 << " pos2=" << pos2;
for (size_t i = 0; i < args1.size(); ++i) {
auto type = getSlotType(args1[i]);
int dim = getSlotDim(args1[i]);
EXPECT_EQ(type, getSlotType(args2[i]));
if (type == SlotDef::INDEX) {
EXPECT_GE(dim, getSlotDim(args2[i]));
} else {
EXPECT_EQ(dim, getSlotDim(args2[i]));
}
switch (type) {
case SlotDef::VECTOR_DENSE: {
for (int j = 0; j < dim; ++j) {
EXPECT_EQ(static_cast<float>(args1[i].value->getElement(pos1, j)),
static_cast<float>(args2[i].value->getElement(pos2, j)));
}
break;
}
case SlotDef::INDEX: {
EXPECT_EQ(args1[i].ids->get(pos1), args2[i].ids->get(pos2));
break;
}
case SlotDef::VECTOR_SPARSE_NON_VALUE:
case SlotDef::VECTOR_SPARSE_VALUE: {
int colNum1, colNum2;
const int *rowCols1, *rowCols2;
const real *rowValues1, *rowValues2;
getColRow(args1[i], pos1, useGpu, &colNum1, &rowCols1, &rowValues1);
getColRow(args2[i], pos2, useGpu, &colNum2, &rowCols2, &rowValues2);
EXPECT_EQ(colNum1, colNum2);
for (int j = 0; j < colNum1; ++j) {
EXPECT_EQ(rowCols1[j], rowCols2[j]);
if (type == SlotDef::VECTOR_SPARSE_VALUE) {
EXPECT_EQ(rowValues1[j], rowValues2[j]);
}
}
break;
}
case SlotDef::VAR_MDIM_DENSE:
case SlotDef::VAR_MDIM_INDEX: {
LOG(FATAL) << "Not implemented";
break;
}
case SlotDef::STRING: {
EXPECT_EQ((*args1[i].strs)[pos1], (*args2[i].strs)[pos2]);
break;
}
}
}
}
void testProtoDataProvider(int* numPerSlotType,
bool iid,
bool async,
bool useGpu,
bool dataCompression,
int numConstantSlots = 0) {
mkDir(kTestDir);
DataBatch data;
prepareData(&data, numPerSlotType, iid, useGpu);
writeData(data, useGpu, dataCompression);
DataConfig config;
config.set_type("proto");
config.set_files(dataCompression ? kProtoFileListCompressed : kProtoFileList);
config.set_async_load_data(async);
for (int i = 0; i < numConstantSlots; ++i) {
config.add_constant_slots(i + 11);
MatrixPtr w = Matrix::create(data.getSize(),
1,
/* trans= */ false,
/* useGpu= */ false);
w->assign(config.constant_slots(i));
data.appendData(w);
}
unique_ptr<DataProvider> dataProvider(DataProvider::create(config, useGpu));
dataProvider->setSkipShuffle();
EXPECT_EQ(data.getSize(), dataProvider->getSize());
int64_t batchSize = 10;
DataBatch batch;
size_t seq1 = 0;
vector<Argument>& args1 = data.getStreams();
ICpuGpuVectorPtr sequenceStartPositions1 = args1[0].sequenceStartPositions;
dataProvider->reset();
while (dataProvider->getNextBatch(batchSize, &batch) > 0) {
CHECK_EQ(data.getNumStreams(), batch.getNumStreams());
vector<Argument>& args2 = batch.getStreams();
ICpuGpuVectorPtr sequenceStartPositions2 = args2[0].sequenceStartPositions;
for (auto& arg : args2) {
EXPECT_EQ(iid, !arg.sequenceStartPositions);
}
size_t numSeqs = batch.getNumSequences();
VLOG(1) << "numSeqs=" << numSeqs;
for (size_t seq2 = 0; seq2 < numSeqs; ++seq1, ++seq2) {
int64_t begin1 = seq1;
int64_t end1 = seq1 + 1;
if (sequenceStartPositions1) {
begin1 = sequenceStartPositions1->getElement(seq1);
end1 = sequenceStartPositions1->getElement(seq1 + 1);
EXPECT_LT(seq1, sequenceStartPositions1->getSize() - 1);
}
int64_t begin2 = seq2;
int64_t end2 = seq2 + 1;
if (sequenceStartPositions2) {
begin2 = sequenceStartPositions2->getElement(seq2);
end2 = sequenceStartPositions2->getElement(seq2 + 1);
}
VLOG(1) << " begin1=" << begin1 << " end1=" << end1
<< " begin2=" << begin2 << " end2=" << end2;
EXPECT_EQ(end1 - begin1, end2 - begin2);
for (int i = 0; i < end1 - begin1; ++i) {
checkSample(args1, begin1 + i, args2, begin2 + i, useGpu);
}
}
}
EXPECT_EQ(seq1, (size_t)data.getNumSequences());
rmDir(kTestDir);
}
TEST(ProtoDataProvider, test) {
int numSlotsArray[] = {0, 3};
int numTwoArray[] = {0, 1};
int numSlotsArraySize = sizeof(numSlotsArray) / sizeof(numSlotsArray[0]);
const int numSlot = 5;
int combination[numSlot] = {0};
int k = numSlot - 1;
while (k >= 0) {
int numDenseVecSlots = numSlotsArray[combination[0]];
int numSparseNonValueVecSlots = numSlotsArray[combination[1]];
int numSparseValueVectorSlots = numSlotsArray[combination[2]];
int numStrSlots = numSlotsArray[combination[3]];
int numIdSlots = numSlotsArray[combination[4]];
// while loop: advance the combination counter to the next case
k = numSlot - 1;
while (k >= 0) {
if (combination[k] < (numSlotsArraySize - 1)) {
++combination[k];
break;
} else {
combination[k] = 0;
--k;
}
}
if (numDenseVecSlots + numSparseNonValueVecSlots +
numSparseValueVectorSlots + numStrSlots + numIdSlots <
1)
continue;
for (int iid : numTwoArray) {
for (int async : numTwoArray) {
for (int useGpu : numTwoArray) {
for (int dataCompression : numTwoArray) {
if (async && useGpu) {
// Currently in async mode, useGpu is not supported
continue;
}
#ifndef PADDLE_WITH_CUDA
if (useGpu) {
continue;
}
#endif
LOG(INFO) << " numDenseVecSlots=" << numDenseVecSlots
<< " numSparseNonValueVecSlots="
<< numSparseNonValueVecSlots
<< " numSparseValueVectorSlots="
<< numSparseValueVectorSlots
<< " numStrSlots=" << numStrSlots
<< " numIdSlots=" << numIdSlots << " iid=" << iid
<< " async=" << async << " useGpu=" << useGpu
<< " dataCompression=" << dataCompression;
int numPerSlotType[SlotDef::SlotType_ARRAYSIZE] = {0};
numPerSlotType[SlotDef::VECTOR_DENSE] = numDenseVecSlots;
numPerSlotType[SlotDef::VECTOR_SPARSE_NON_VALUE] =
numSparseNonValueVecSlots;
numPerSlotType[SlotDef::VECTOR_SPARSE_VALUE] =
numSparseValueVectorSlots;
numPerSlotType[SlotDef::INDEX] = numIdSlots;
numPerSlotType[SlotDef::STRING] = numStrSlots;
testProtoDataProvider(
numPerSlotType, iid, async, useGpu, dataCompression);
} // end for (int dataCompression : numTwoArray)
} // end for (int useGpu : numTwoArray)
} // end for (int async : numTwoArray)
} // end for (int iid : numTwoArray)
} // end for (while, traverse all slots)
}
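The hand-rolled counter above walks every assignment of {0, 3} to the five slot types. A sketch of the same traversal in Python, for readers following the loop logic (illustrative only):

import itertools

num_slots_array = [0, 3]
for combo in itertools.product(range(len(num_slots_array)), repeat=5):
    counts = [num_slots_array[c] for c in combo]
    if sum(counts) < 1:
        continue  # skip the all-empty case, exactly as the C++ loop does
    # counts = slot numbers for dense, sparse-non-value, sparse-value,
    # string, and id slots of one testProtoDataProvider invocation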
TEST(ProtoDataProvider, constant_slots) {
int numSlotsArray[] = {0, 3};
int numTwoArray[] = {0, 1};
for (int numDenseVecSlots : numSlotsArray) {
for (int numSparseNonValueVecSlots : numSlotsArray) {
if (numDenseVecSlots + numSparseNonValueVecSlots < 1) continue;
for (int numConstantSlots : {1, 2}) {
for (int useGpu : numTwoArray) {
for (int dataCompression : numTwoArray) {
#ifndef PADDLE_WITH_CUDA
if (useGpu) {
continue;
}
#endif
LOG(INFO) << " numDenseVecSlots=" << numDenseVecSlots
<< " numSparseNonValueVecSlots="
<< numSparseNonValueVecSlots
<< " numConstantSlogs=" << numConstantSlots
<< " useGpu=" << useGpu
<< " dataCompression=" << dataCompression;
int numPerSlotType[SlotDef::SlotType_ARRAYSIZE] = {0};
numPerSlotType[SlotDef::VECTOR_DENSE] = numDenseVecSlots;
numPerSlotType[SlotDef::VECTOR_SPARSE_NON_VALUE] =
numSparseNonValueVecSlots;
numPerSlotType[SlotDef::VECTOR_SPARSE_VALUE] = 1;
numPerSlotType[SlotDef::INDEX] = 1;
testProtoDataProvider(numPerSlotType,
/* iid= */ true,
/* async= */ false,
useGpu,
dataCompression,
numConstantSlots);
} // end for (int dataCompression : numTwoArray)
} // end for (int useGpu : numTwoArray)
} // end for (int numConstantSlots : {1, 2})
} // end for (int numSparseNonValueVecSlots : numSlotsArray)
} // end for (int numDenseVecSlots : numSlotsArray)
}
void checkSampleSequence(const vector<Argument>& args1,
const vector<Argument>& args2,
int64_t offset,
int64_t numSeqs,
bool useGpu) {
// check that the numbers of slots are equal
EXPECT_EQ(args1.size(), args2.size());
for (size_t i = 0; i < args1.size(); i++) {
auto type = getSlotType(args1[i]);
// check for args2: sequenceStartPositions vs numSeqs
// (1) size
EXPECT_EQ(args2[i].sequenceStartPositions->getSize(), (size_t)numSeqs + 1);
// (2) content
auto checkArgContent = [&](const Argument& args, int numSeqs) {
for (int j = 0; j <= numSeqs; j++) {
int start_pos = args.sequenceStartPositions->getElement(j);
EXPECT_EQ(start_pos, j);
}
};
switch (type) {
case SlotDef::INDEX: {
// args1: for label
checkArgContent(args2[i], numSeqs);
// check for args2: ids are equal to args1[offset]
// (1) size
EXPECT_EQ(args2[i].ids->getSize(), (size_t)numSeqs);
// (2) content
for (int j = 0; j < numSeqs; j++) {
EXPECT_EQ(args2[i].ids->get(j), args1[i].ids->get(offset + j));
}
break;
}
case SlotDef::VECTOR_SPARSE_NON_VALUE: {
// args1: for sparse_non_value
// args2 should put the sparse indices in ids
int colNum1;
const int* rowCols1;
const real* rowValues1; // nullptr
int totalLength = 0;
for (int j = 0; j < numSeqs; j++) {
getColRow(
args1[i], offset + j, useGpu, &colNum1, &rowCols1, &rowValues1);
// (1) lengths
EXPECT_EQ(totalLength,
args2[i].sequenceStartPositions->getElement(j));
EXPECT_EQ(totalLength,
args2[i].subSequenceStartPositions->getElement(j));
// (2) content
for (int k = 0; k < colNum1; k++) {
EXPECT_EQ(rowCols1[k], args2[i].ids->get(totalLength + k));
}
totalLength += colNum1;
if (colNum1 == 0) {
// Special case: a "-1" is put into ids when the column num is
// zero; see ProtoSequenceDataProvider::getNextBatchInternal.
EXPECT_EQ(-1, args2[i].ids->get(totalLength));
totalLength++;
}
}
EXPECT_EQ(totalLength,
args2[i].sequenceStartPositions->getElement(numSeqs));
EXPECT_EQ(totalLength,
args2[i].subSequenceStartPositions->getElement(numSeqs));
break;
}
case SlotDef::VECTOR_DENSE: {
// args1: for dense vector
checkArgContent(args2[i], numSeqs);
// check for args2: values are equal to args1[offset]
// (1) size
EXPECT_EQ(args2[i].value->getHeight(), (size_t)numSeqs);
EXPECT_EQ(args2[i].value->getWidth(), (size_t)getSlotDim(args1[i]));
// (2) content
for (int j = 0; j < numSeqs; j++) {
for (size_t k = 0; k < args2[i].value->getWidth(); k++) {
EXPECT_EQ(
static_cast<float>(args1[i].value->getElement(j + offset, k)),
static_cast<float>(args2[i].value->getElement(j, k)));
}
}
break;
}
default: { EXPECT_EQ(true, false) << "should not reach here"; }
}
}
}
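A compact sketch of the sparse-non-value flattening these checks assume: each row contributes its column indices to ids, and an empty row is represented by a single -1 placeholder (matching the comment that cites ProtoSequenceDataProvider::getNextBatchInternal):

def flatten_sparse_rows(rows):
    # rows: per-sample lists of column indices; returns (ids, start_positions).
    ids, starts = [], [0]
    for cols in rows:
        ids.extend(cols if cols else [-1])  # -1 marks an empty row
        starts.append(len(ids))
    return ids, starts

assert flatten_sparse_rows([[2, 5], [], [7]]) == ([2, 5, -1, 7], [0, 2, 3, 4])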
void testProtoSequenceDataProvider(int* numPerSlotType,
bool async,
bool useGpu) {
mkDir(kTestDir);
DataBatch data;
prepareData(&data,
numPerSlotType,
/* iid */ true,
useGpu);
writeData(data, useGpu, /* dataCompression */ false);
DataConfig config;
config.set_type("proto_sequence");
config.set_files(kProtoFileList);
config.set_async_load_data(async);
unique_ptr<DataProvider> dataProvider(DataProvider::create(config, useGpu));
dataProvider->setSkipShuffle();
EXPECT_EQ(data.getSize(), dataProvider->getSize());
int64_t batchSize = 10;
DataBatch batch;
vector<Argument>& args1 = data.getStreams();
ICpuGpuVectorPtr sequenceStartPositions1 = args1[0].sequenceStartPositions;
dataProvider->reset();
size_t args1Offset = 0;
while (dataProvider->getNextBatch(batchSize, &batch) > 0) {
CHECK_EQ(data.getNumStreams(), batch.getNumStreams());
vector<Argument>& args2 = batch.getStreams();
ICpuGpuVectorPtr sequenceStartPositions2 = args2[0].sequenceStartPositions;
for (auto& arg : args1) {
// args1 should not have sequence info
EXPECT_EQ(true, !arg.sequenceStartPositions);
}
for (auto& arg : args2) {
// args2 should have sequence info
EXPECT_NE(true, !arg.sequenceStartPositions);
}
size_t numSeqs = batch.getNumSequences();
checkSampleSequence(args1, args2, args1Offset, numSeqs, useGpu);
args1Offset += numSeqs;
}
EXPECT_EQ(args1Offset, (size_t)data.getNumSequences());
rmDir(kTestDir);
}
TEST(ProtoSequenceDataProvider, test) {
int numSlotsArray[] = {0, 3};
int numTwoArray[] = {0, 1};
for (int numSparseNonValueVecSlots : numSlotsArray) {
for (int numIdSlots : numSlotsArray) {
for (int numDenseVecSlots : numSlotsArray) {
if (numDenseVecSlots + numSparseNonValueVecSlots + numIdSlots < 1)
continue;
for (int async : numTwoArray) {
for (int useGpu : numTwoArray) {
if (async && useGpu) {
// Currently in async mode, useGpu is not supported
continue;
}
#ifndef PADDLE_WITH_CUDA
if (useGpu) {
continue;
}
#endif
LOG(INFO) << " numDenseVecSlots=" << numDenseVecSlots
<< " numSparseNonValueVecSlots="
<< numSparseNonValueVecSlots
<< " numIdSlots=" << numIdSlots << " async=" << async
<< " useGpu=" << useGpu;
int numPerSlotType[SlotDef::SlotType_ARRAYSIZE] = {0};
numPerSlotType[SlotDef::VECTOR_DENSE] = numDenseVecSlots;
numPerSlotType[SlotDef::VECTOR_SPARSE_NON_VALUE] =
numSparseNonValueVecSlots;
numPerSlotType[SlotDef::INDEX] = numIdSlots;
testProtoSequenceDataProvider(numPerSlotType, async, useGpu);
} // end for (int useGpu : numTwoArray)
} // end for (int async : numTwoArray)
} // end for (int numDenseVecSlots : numSlotsArray)
} // end for (int numIdSlots : numSlotsArray)
} // end for (int numSparseNonValueVecSlots : numSlotsArray)
}
......@@ -61,6 +61,18 @@ function(op_library TARGET)
set(pybind_flag 1)
endif()
if ("${TARGET}" STREQUAL "compare_op")
set(pybind_flag 1)
file(APPEND ${pybind_file} "USE_OP(less_than);\nUSE_OP(equal);\n")
endif()
# conv_op contains several operators
if ("${TARGET}" STREQUAL "conv_op")
set(pybind_flag 1)
# It's enough to add just one operator to pybind
file(APPEND ${pybind_file} "USE_OP(conv2d);\n")
endif()
# pool_op contains several operators
if ("${TARGET}" STREQUAL "pool_op")
set(pybind_flag 1)
......@@ -68,23 +80,23 @@ function(op_library TARGET)
file(APPEND ${pybind_file} "USE_OP(pool2d);\n")
endif()
if ("${TARGET}" STREQUAL "compare_op")
# pool_cudnn_op contains several operators
if ("${TARGET}" STREQUAL "pool_cudnn_op")
set(pybind_flag 1)
file(APPEND ${pybind_file} "USE_OP(less_than);\nUSE_OP(equal);\n")
# It's enough to add just one operator to pybind
file(APPEND ${pybind_file} "USE_OP(pool2d_cudnn);\n")
endif()
# pool_with_index_op contains several operators
if ("${TARGET}" STREQUAL "pool_with_index_op")
if ("${TARGET}" STREQUAL "logical_op")
set(pybind_flag 1)
# It's enough to add just one operator to pybind
file(APPEND ${pybind_file} "USE_OP(max_pool2d_with_index);\n")
file(APPEND ${pybind_file} "USE_OP(logical_and);\n")
endif()
# conv_op contains several operators
if ("${TARGET}" STREQUAL "conv_op")
# pool_with_index_op contains several operators
if ("${TARGET}" STREQUAL "pool_with_index_op")
set(pybind_flag 1)
# It's enough to add just one operator to pybind
file(APPEND ${pybind_file} "USE_OP(conv2d);\n")
file(APPEND ${pybind_file} "USE_OP(max_pool2d_with_index);\n")
endif()
# conv_transpose_op contains several operators
......@@ -93,12 +105,12 @@ function(op_library TARGET)
# It's enough to add just one operator to pybind
file(APPEND ${pybind_file} "USE_OP(conv2d_transpose);\n")
endif()
# pool_cudnn_op contains several operators
if ("${TARGET}" STREQUAL "pool_cudnn_op")
# conv_transpose_cudnn_op contains two operators
if ("${TARGET}" STREQUAL "conv_transpose_cudnn_op")
set(pybind_flag 1)
# It's enough to add just one operator to pybind
file(APPEND ${pybind_file} "USE_OP(pool2d_cudnn);\n")
file(APPEND ${pybind_file} "USE_OP(conv2d_transpose_cudnn);\n")
endif()
# save_restore_op contains several operators
......@@ -172,6 +184,7 @@ set(DEPS_OPS
sequence_softmax_op
sum_op
pool_op
maxout_op
pool_with_index_op
conv_op
conv_transpose_op
......@@ -198,6 +211,7 @@ op_library(sgd_op DEPS selected_rows_functor)
op_library(adagrad_op DEPS selected_rows_functor)
op_library(conv_op DEPS vol2col)
op_library(pool_op DEPS pooling)
op_library(maxout_op DEPS maxouting)
op_library(pool_with_index_op DEPS pooling)
op_library(lod_rank_table_op SRCS lod_rank_table_op.cc DEPS lod_rank_table)
op_library(lod_tensor_to_array_op SRCS lod_tensor_to_array_op.cc DEPS lod_rank_table_op)
......
......@@ -98,7 +98,6 @@ $y = \max(x, 0)$
}
};
template <typename AttrType>
class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker {
public:
LeakyReluOpMaker(framework::OpProto *proto,
......@@ -106,8 +105,7 @@ class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker {
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of LeakyRelu operator");
AddOutput("Y", "Output of LeakyRelu operator");
AddAttr<AttrType>("alpha", "The small negative slope")
.SetDefault(static_cast<AttrType>(0.02f));
AddAttr<float>("alpha", "The small negative slope").SetDefault(0.02f);
AddComment(R"DOC(
LeakyRelu Activation Operator.
......@@ -117,7 +115,6 @@ $y = \max(x, \alpha * x)$
}
};
template <typename AttrType>
class SoftShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
public:
SoftShrinkOpMaker(framework::OpProto *proto,
......@@ -125,8 +122,7 @@ class SoftShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of Softshrink operator");
AddOutput("Y", "Output of Softshrink operator");
AddAttr<AttrType>("lambda", "non-negative offset")
.SetDefault(static_cast<AttrType>(0.5f));
AddAttr<float>("lambda", "non-negative offset").SetDefault(0.5f);
AddComment(R"DOC(
Softshrink Activation Operator.
......@@ -173,7 +169,6 @@ $$y = x - \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$
}
};
template <typename AttrType>
class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
public:
HardShrinkOpMaker(framework::OpProto *proto,
......@@ -181,8 +176,8 @@ class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of HardShrink operator");
AddOutput("Y", "Output of HardShrink operator");
AddAttr<AttrType>("threshold", "The value of threshold for HardShrink")
.SetDefault(static_cast<AttrType>(0.5));
AddAttr<float>("threshold", "The value of threshold for HardShrink")
.SetDefault(0.5f);
AddComment(R"DOC(
HardShrink Activation Operator.
......@@ -308,17 +303,16 @@ $$y = \frac{x}{1 + |x|}$$
}
};
template <typename AttrType>
class BReluOpMaker : public framework::OpProtoAndCheckerMaker {
public:
BReluOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of BRelu operator");
AddOutput("Y", "Output of BRelu operator");
AddAttr<AttrType>("t_min", "The min marginal value of BRelu")
.SetDefault(static_cast<AttrType>(0));
AddAttr<AttrType>("t_max", "The max marginal value of BRelu")
.SetDefault(static_cast<AttrType>(24));
AddAttr<float>("t_min", "The min marginal value of BRelu")
.SetDefault(static_cast<float>(0));
AddAttr<float>("t_max", "The max marginal value of BRelu")
.SetDefault(static_cast<float>(24));
AddComment(R"DOC(
BRelu Activation Operator.
......@@ -328,7 +322,6 @@ $y = \max(\min(x, t_{min}), t_{max})$
}
};
template <typename AttrType>
class SoftReluOpMaker : public framework::OpProtoAndCheckerMaker {
public:
SoftReluOpMaker(framework::OpProto *proto,
......@@ -336,8 +329,8 @@ class SoftReluOpMaker : public framework::OpProtoAndCheckerMaker {
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of SoftRelu operator");
AddOutput("Y", "Output of SoftRelu operator");
AddAttr<AttrType>("threshold", "The threshold value of SoftRelu")
.SetDefault(static_cast<AttrType>(40));
AddAttr<float>("threshold", "The threshold value of SoftRelu")
.SetDefault(40.0f);
AddComment(R"DOC(
SoftRelu Activation Operator.
......@@ -347,15 +340,13 @@ $y = \ln(1 + \exp(\max(\min(x, threshold), threshold))$
}
};
template <typename AttrType>
class ELUOpMaker : public framework::OpProtoAndCheckerMaker {
public:
ELUOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of ELU operator");
AddOutput("Y", "Output of ELU operator");
AddAttr<AttrType>("alpha", "The alpha value of ELU")
.SetDefault(static_cast<AttrType>(1.0f));
AddAttr<float>("alpha", "The alpha value of ELU").SetDefault(1.0f);
AddComment(R"DOC(
ELU Activation Operator.
......@@ -368,15 +359,14 @@ $y = \max(0, x) + \min(0, \alpha * (e^x - 1))$
}
};
template <typename AttrType>
class Relu6OpMaker : public framework::OpProtoAndCheckerMaker {
public:
Relu6OpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of Relu6 operator");
AddOutput("Y", "Output of Relu6 operator");
AddAttr<AttrType>("threshold", "The threshold value of Relu6")
.SetDefault(static_cast<AttrType>(6));
AddAttr<float>("threshold", "The threshold value of Relu6")
.SetDefault(6.0f);
AddComment(R"DOC(
Relu6 Activation Operator.
......@@ -386,15 +376,13 @@ $y = \min(\max(0, x), 6)$
}
};
template <typename AttrType>
class PowOpMaker : public framework::OpProtoAndCheckerMaker {
public:
PowOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of Pow operator");
AddOutput("Y", "Output of Pow operator");
AddAttr<AttrType>("factor", "The exponential factor of Pow")
.SetDefault(static_cast<AttrType>(1));
AddAttr<float>("factor", "The exponential factor of Pow").SetDefault(1.0f);
AddComment(R"DOC(
Pow Activation Operator.
......@@ -404,17 +392,16 @@ $y = x^{factor}$
}
};
template <typename AttrType>
class STanhOpMaker : public framework::OpProtoAndCheckerMaker {
public:
STanhOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of STanh operator");
AddOutput("Y", "Output of STanh operator");
AddAttr<AttrType>("scale_a", "The scale parameter of a for the input")
.SetDefault(static_cast<AttrType>(2 / 3));
AddAttr<AttrType>("scale_b", "The scale parameter of b for the input")
.SetDefault(static_cast<AttrType>(1.7159));
AddAttr<float>("scale_a", "The scale parameter of a for the input")
.SetDefault(2.0f / 3.0f);
AddAttr<float>("scale_b", "The scale parameter of b for the input")
.SetDefault(1.7159f);
AddComment(R"DOC(
STanh Activation Operator.
......@@ -424,7 +411,6 @@ $$y = b * \frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}$$
}
};
template <typename AttrType>
class ThresholdedReluOpMaker : public framework::OpProtoAndCheckerMaker {
public:
ThresholdedReluOpMaker(framework::OpProto *proto,
......@@ -432,8 +418,8 @@ class ThresholdedReluOpMaker : public framework::OpProtoAndCheckerMaker {
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of ThresholdedRelu operator");
AddOutput("Y", "Output of ThresholdedRelu operator");
AddAttr<AttrType>("threshold", "The threshold location of activation")
.SetDefault(static_cast<AttrType>(1.0));
AddAttr<float>("threshold", "The threshold location of activation")
.SetDefault(1.0f);
AddComment(R"DOC(
ThresholdedRelu Activation Operator.
......@@ -448,7 +434,6 @@ $$
}
};
template <typename AttrType>
class HardSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
public:
HardSigmoidOpMaker(framework::OpProto *proto,
......@@ -456,10 +441,10 @@ class HardSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of HardSigmoid operator");
AddOutput("Y", "Output of HardSigmoid operator");
AddAttr<AttrType>("slope", "Slope for linear approximation of sigmoid")
.SetDefault(static_cast<AttrType>(0.2));
AddAttr<AttrType>("offset", "Offset for linear approximation of sigmoid")
.SetDefault(static_cast<AttrType>(0.5));
AddAttr<float>("slope", "Slope for linear approximation of sigmoid")
.SetDefault(0.2f);
AddAttr<float>("offset", "Offset for linear approximation of sigmoid")
.SetDefault(0.5f);
AddComment(R"DOC(
HardSigmoid Activation Operator.
......@@ -499,7 +484,7 @@ REGISTER_OP(tanh, ops::ActivationOp, ops::TanhOpMaker, tanh_grad,
REGISTER_OP(tanh_shrink, ops::ActivationOp, ops::TanhShrinkOpMaker,
tanh_shrink_grad, ops::ActivationOpGrad);
REGISTER_OP(softshrink, ops::ActivationOp, ops::SoftShrinkOpMaker<float>,
REGISTER_OP(softshrink, ops::ActivationOp, ops::SoftShrinkOpMaker,
softshrink_grad, ops::ActivationOpGrad);
REGISTER_OP(sqrt, ops::ActivationOp, ops::SqrtOpMaker, sqrt_grad,
......@@ -523,35 +508,34 @@ REGISTER_OP(softplus, ops::ActivationOp, ops::SoftplusOpMaker, softplus_grad,
REGISTER_OP(softsign, ops::ActivationOp, ops::SoftsignOpMaker, softsign_grad,
ops::ActivationOpGrad);
REGISTER_OP(brelu, ops::ActivationOp, ops::BReluOpMaker<float>, brelu_grad,
REGISTER_OP(brelu, ops::ActivationOp, ops::BReluOpMaker, brelu_grad,
ops::ActivationOpGrad);
REGISTER_OP(leaky_relu, ops::ActivationOp, ops::LeakyReluOpMaker<float>,
REGISTER_OP(leaky_relu, ops::ActivationOp, ops::LeakyReluOpMaker,
leaky_relu_grad, ops::ActivationOpGrad);
REGISTER_OP(soft_relu, ops::ActivationOp, ops::SoftReluOpMaker<float>,
soft_relu_grad, ops::ActivationOpGrad);
REGISTER_OP(soft_relu, ops::ActivationOp, ops::SoftReluOpMaker, soft_relu_grad,
ops::ActivationOpGrad);
REGISTER_OP(elu, ops::ActivationOp, ops::ELUOpMaker<float>, elu_grad,
REGISTER_OP(elu, ops::ActivationOp, ops::ELUOpMaker, elu_grad,
ops::ActivationOpGrad);
REGISTER_OP(relu6, ops::ActivationOp, ops::Relu6OpMaker<float>, relu6_grad,
REGISTER_OP(relu6, ops::ActivationOp, ops::Relu6OpMaker, relu6_grad,
ops::ActivationOpGrad);
REGISTER_OP(pow, ops::ActivationOp, ops::PowOpMaker<float>, pow_grad,
REGISTER_OP(pow, ops::ActivationOp, ops::PowOpMaker, pow_grad,
ops::ActivationOpGrad);
REGISTER_OP(stanh, ops::ActivationOp, ops::STanhOpMaker<float>, stanh_grad,
REGISTER_OP(stanh, ops::ActivationOp, ops::STanhOpMaker, stanh_grad,
ops::ActivationOpGrad);
REGISTER_OP(hard_shrink, ops::ActivationOp, ops::HardShrinkOpMaker<float>,
REGISTER_OP(hard_shrink, ops::ActivationOp, ops::HardShrinkOpMaker,
hard_shrink_grad, ops::ActivationOpGrad);
REGISTER_OP(thresholded_relu, ops::ActivationOp,
ops::ThresholdedReluOpMaker<float>, thresholded_relu_grad,
ops::ActivationOpGrad);
REGISTER_OP(thresholded_relu, ops::ActivationOp, ops::ThresholdedReluOpMaker,
thresholded_relu_grad, ops::ActivationOpGrad);
REGISTER_OP(hard_sigmoid, ops::ActivationOp, ops::HardSigmoidOpMaker<float>,
REGISTER_OP(hard_sigmoid, ops::ActivationOp, ops::HardSigmoidOpMaker,
hard_sigmoid_grad, ops::ActivationOpGrad);
#define REGISTER_ACTIVATION_CPU_KERNEL(act_type, functor, grad_functor) \
......
......@@ -109,4 +109,5 @@ paramOut = param + paramUpdate$$
namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(adadelta, ops::AdadeltaOp, ops::AdadeltaOpMaker);
REGISTER_OP_CPU_KERNEL(
adadelta, ops::AdadeltaOpKernel<paddle::platform::CPUPlace, float>);
adadelta, ops::AdadeltaOpKernel<paddle::platform::CPUPlace, float>,
ops::AdadeltaOpKernel<paddle::platform::CPUPlace, double>);
......@@ -17,4 +17,5 @@
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(
adadelta, ops::AdadeltaOpKernel<paddle::platform::GPUPlace, float>);
adadelta, ops::AdadeltaOpKernel<paddle::platform::GPUPlace, float>,
ops::AdadeltaOpKernel<paddle::platform::GPUPlace, double>);
......@@ -33,8 +33,8 @@ class AdadeltaOpKernel : public framework::OpKernel<T> {
avg_squared_grad_out_tensor->mutable_data<T>(ctx.GetPlace());
avg_squared_update_out_tensor->mutable_data<T>(ctx.GetPlace());
float rho = ctx.Attr<float>("rho");
float epsilon = ctx.Attr<float>("epsilon");
T rho = static_cast<T>(ctx.Attr<float>("rho"));
T epsilon = static_cast<T>(ctx.Attr<float>("epsilon"));
auto param = framework::EigenVector<T>::Flatten(
*ctx.Input<framework::Tensor>("Param"));
......
......@@ -14,8 +14,8 @@
#define EIGEN_USE_GPU
#include "paddle/operators/adagrad_op.h"
#include "paddle/operators/math/selected_rows_functor.h"
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/selected_rows_functor.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
......@@ -134,8 +134,8 @@ struct SparseAdagradFunctor<platform::GPUPlace, T> {
T, 256><<<grid2, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(grad_merge_data, grad_merge->rows().data(),
lr, param_data,
moment_data, grad_width, epsilon);
lr, param_data, moment_data, grad_width,
epsilon);
}
};
......
......@@ -127,4 +127,5 @@ paramOut = param - learningRate * moment_1/ ($\sqrt{(moment_2)} + \epsilon)$$
namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(adam, ops::AdamOp, ops::AdamOpMaker);
REGISTER_OP_CPU_KERNEL(adam,
ops::AdamOpKernel<paddle::platform::CPUPlace, float>);
ops::AdamOpKernel<paddle::platform::CPUPlace, float>,
ops::AdamOpKernel<paddle::platform::CPUPlace, double>);
......@@ -17,4 +17,5 @@
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(adam,
ops::AdamOpKernel<paddle::platform::GPUPlace, float>);
ops::AdamOpKernel<paddle::platform::GPUPlace, float>,
ops::AdamOpKernel<paddle::platform::GPUPlace, double>);
......@@ -31,9 +31,9 @@ class AdamOpKernel : public framework::OpKernel<T> {
moment1_out_tensor->mutable_data<T>(ctx.GetPlace());
moment2_out_tensor->mutable_data<T>(ctx.GetPlace());
float beta1 = ctx.Attr<float>("beta1");
float beta2 = ctx.Attr<float>("beta2");
float epsilon = ctx.Attr<float>("epsilon");
T beta1 = static_cast<T>(ctx.Attr<float>("beta1"));
T beta2 = static_cast<T>(ctx.Attr<float>("beta2"));
T epsilon = static_cast<T>(ctx.Attr<float>("epsilon"));
auto param = framework::EigenVector<T>::Flatten(
*ctx.Input<framework::Tensor>("Param"));
......
......@@ -126,4 +126,5 @@ division by 0 error.
namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(adamax, ops::AdamaxOp, ops::AdamaxOpMaker);
REGISTER_OP_CPU_KERNEL(adamax,
ops::AdamaxOpKernel<paddle::platform::CPUPlace, float>);
ops::AdamaxOpKernel<paddle::platform::CPUPlace, float>,
ops::AdamaxOpKernel<paddle::platform::CPUPlace, double>);
......@@ -17,4 +17,5 @@
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(adamax,
ops::AdamaxOpKernel<paddle::platform::GPUPlace, float>);
ops::AdamaxOpKernel<paddle::platform::GPUPlace, float>,
ops::AdamaxOpKernel<paddle::platform::GPUPlace, double>);
......@@ -31,9 +31,9 @@ class AdamaxOpKernel : public framework::OpKernel<T> {
moment_out_tensor->mutable_data<T>(ctx.GetPlace());
inf_norm_out_tensor->mutable_data<T>(ctx.GetPlace());
float beta1 = ctx.Attr<float>("beta1");
float beta2 = ctx.Attr<float>("beta2");
float epsilon = ctx.Attr<float>("epsilon");
T beta1 = static_cast<T>(ctx.Attr<float>("beta1"));
T beta2 = static_cast<T>(ctx.Attr<float>("beta2"));
T epsilon = static_cast<T>(ctx.Attr<float>("epsilon"));
auto param = framework::EigenVector<T>::Flatten(
*ctx.Input<framework::Tensor>("Param"));
......
......@@ -139,7 +139,7 @@ bool BeamSearch::NextItemSet(std::vector<BeamSearch::Item> *items) {
items->reserve(framework::product(ids.dims()));
for (size_t offset = abs_lod[lod_level_][sent_offset_];
offset < abs_lod[lod_level_][sent_offset_ + 1]; offset++) {
for (int d = 0; d < instance_dim; d++) {
for (size_t d = 0; d < instance_dim; d++) {
const size_t dim_offset = offset * instance_dim + d;
items->emplace_back(offset, ids_data[dim_offset],
scores_data[dim_offset]);
......
......@@ -40,7 +40,8 @@ REGISTER_OP(conv_cudnn, ops::ConvOp, ops::CudnnConvOpMaker, conv_cudnn_grad,
ops::ConvOpGrad);
REGISTER_OP_CPU_KERNEL(conv_cudnn,
ops::GemmConvKernel<paddle::platform::CPUPlace, float>);
ops::GemmConvKernel<paddle::platform::CPUPlace, float>,
ops::GemmConvKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP_CPU_KERNEL(
conv_cudnn_grad,
ops::GemmConvGradKernel<paddle::platform::CPUPlace, float>);
conv_cudnn_grad, ops::GemmConvGradKernel<paddle::platform::CPUPlace, float>,
ops::GemmConvGradKernel<paddle::platform::CPUPlace, double>);
......@@ -226,9 +226,8 @@ class CudnnConvGradOpKernel : public framework::OpKernel<T> {
T alpha = 1.0f, beta = 0.0f;
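// cuDNN blends results as y = alpha * op(x) + beta * y; with beta == 0 the
// output buffer is fully overwritten, so pre-zeroing the gradients is not
// needed.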
if (input_grad) {
T* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
auto t = framework::EigenVector<T>::Flatten(*input_grad);
t.device(ctx.GetEigenDevice<platform::GPUPlace>()) =
t.constant(static_cast<T>(0));
// Because beta is zero, it is unnecessary to reset input_grad.
for (int i = 0; i < groups; i++) {
PADDLE_ENFORCE(platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, cudnn_filter_desc,
......@@ -241,9 +240,8 @@ class CudnnConvGradOpKernel : public framework::OpKernel<T> {
// ------------------- cudnn conv backward filter ---------------------
if (filter_grad) {
T* filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace());
auto t = framework::EigenVector<T>::Flatten(*filter_grad);
t.device(ctx.GetEigenDevice<platform::GPUPlace>()) =
t.constant(static_cast<T>(0));
// Because beta is zero, it is unnecessary to reset filter_grad.
for (int i = 0; i < groups; i++) {
PADDLE_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, cudnn_input_desc, input_data + i * group_offset_in,
......@@ -261,6 +259,8 @@ class CudnnConvGradOpKernel : public framework::OpKernel<T> {
} // namespace operators
} // namespace paddle
REGISTER_OP_GPU_KERNEL(conv_cudnn, paddle::operators::CudnnConvOpKernel<float>);
REGISTER_OP_GPU_KERNEL(conv_cudnn, paddle::operators::CudnnConvOpKernel<float>,
paddle::operators::CudnnConvOpKernel<double>);
REGISTER_OP_GPU_KERNEL(conv_cudnn_grad,
paddle::operators::CudnnConvGradOpKernel<float>);
paddle::operators::CudnnConvGradOpKernel<float>,
paddle::operators::CudnnConvGradOpKernel<double>);
......@@ -225,11 +225,15 @@ REGISTER_OP(conv3d, ops::ConvOp, ops::Conv3DOpMaker, conv3d_grad,
ops::ConvOpGrad);
REGISTER_OP_CPU_KERNEL(conv2d,
ops::GemmConvKernel<paddle::platform::CPUPlace, float>);
ops::GemmConvKernel<paddle::platform::CPUPlace, float>,
ops::GemmConvKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP_CPU_KERNEL(
conv2d_grad, ops::GemmConvGradKernel<paddle::platform::CPUPlace, float>);
conv2d_grad, ops::GemmConvGradKernel<paddle::platform::CPUPlace, float>,
ops::GemmConvGradKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP_CPU_KERNEL(conv3d,
ops::GemmConvKernel<paddle::platform::CPUPlace, float>);
ops::GemmConvKernel<paddle::platform::CPUPlace, float>,
ops::GemmConvKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP_CPU_KERNEL(
conv3d_grad, ops::GemmConvGradKernel<paddle::platform::CPUPlace, float>);
conv3d_grad, ops::GemmConvGradKernel<paddle::platform::CPUPlace, float>,
ops::GemmConvGradKernel<paddle::platform::CPUPlace, double>);
......@@ -17,11 +17,15 @@
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(conv2d,
ops::GemmConvKernel<paddle::platform::GPUPlace, float>);
ops::GemmConvKernel<paddle::platform::GPUPlace, float>,
ops::GemmConvKernel<paddle::platform::GPUPlace, double>);
REGISTER_OP_GPU_KERNEL(
conv2d_grad, ops::GemmConvGradKernel<paddle::platform::GPUPlace, float>);
conv2d_grad, ops::GemmConvGradKernel<paddle::platform::GPUPlace, float>,
ops::GemmConvGradKernel<paddle::platform::GPUPlace, double>);
REGISTER_OP_GPU_KERNEL(conv3d,
ops::GemmConvKernel<paddle::platform::GPUPlace, float>);
ops::GemmConvKernel<paddle::platform::GPUPlace, float>,
ops::GemmConvKernel<paddle::platform::GPUPlace, double>);
REGISTER_OP_GPU_KERNEL(
conv3d_grad, ops::GemmConvGradKernel<paddle::platform::GPUPlace, float>);
conv3d_grad, ops::GemmConvGradKernel<paddle::platform::GPUPlace, float>,
ops::GemmConvGradKernel<paddle::platform::GPUPlace, double>);
......@@ -23,7 +23,24 @@ class CudnnConv2DTransposeOpMaker : public Conv2DTransposeOpMaker {
framework::OpAttrChecker* op_checker)
: Conv2DTransposeOpMaker(proto, op_checker) {
AddAttr<std::vector<int>>("dilations", "dilations of convolution operator.")
.SetDefault(std::vector<int>{1, 1});
.SetDefault({1, 1});
AddAttr<int>("workspace_size_MB",
"workspace size for cudnn, in MB, "
"workspace is a section of GPU memory which will be "
"allocated/freed each time the operator runs, larger "
"workspace size can increase performance but also requires "
"better hardward. This size should be carefully setted.")
.SetDefault(4096);
}
};
class CudnnConv3DTransposeOpMaker : public Conv3DTransposeOpMaker {
public:
CudnnConv3DTransposeOpMaker(framework::OpProto* proto,
framework::OpAttrChecker* op_checker)
: Conv3DTransposeOpMaker(proto, op_checker) {
AddAttr<std::vector<int>>("dilations", "dilations of convolution operator.")
.SetDefault({1, 1, 1});
AddAttr<int>("workspace_size_MB",
"workspace size for cudnn, in MB, "
"workspace is a section of GPU memory which will be "
......@@ -44,7 +61,22 @@ REGISTER_OP(conv2d_transpose_cudnn, ops::ConvTransposeOp,
REGISTER_OP_CPU_KERNEL(
conv2d_transpose_cudnn,
ops::GemmConvTransposeKernel<paddle::platform::CPUPlace, float>);
ops::GemmConvTransposeKernel<paddle::platform::CPUPlace, float>,
ops::GemmConvTransposeKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP_CPU_KERNEL(
conv2d_transpose_cudnn_grad,
ops::GemmConvTransposeGradKernel<paddle::platform::CPUPlace, float>);
ops::GemmConvTransposeGradKernel<paddle::platform::CPUPlace, float>,
ops::GemmConvTransposeGradKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP(conv3d_transpose_cudnn, ops::ConvTransposeOp,
ops::CudnnConv3DTransposeOpMaker, conv3d_transpose_cudnn_grad,
ops::ConvTransposeOpGrad);
REGISTER_OP_CPU_KERNEL(
conv3d_transpose_cudnn,
ops::GemmConvTransposeKernel<paddle::platform::CPUPlace, float>,
ops::GemmConvTransposeKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP_CPU_KERNEL(
conv3d_transpose_cudnn_grad,
ops::GemmConvTransposeGradKernel<paddle::platform::CPUPlace, float>,
ops::GemmConvTransposeGradKernel<paddle::platform::CPUPlace, double>);
......@@ -54,15 +54,21 @@ class CudnnConvTransposeOpKernel : public framework::OpKernel<T> {
ScopedTensorDescriptor output_desc;
ScopedFilterDescriptor filter_desc;
ScopedConvolutionDescriptor conv_desc;
DataLayout layout = DataLayout::kNCHW;
DataLayout layout;
if (strides.size() == 2U) {
layout = DataLayout::kNCHW;
} else {
layout = DataLayout::kNCDHW;
}
// N, M, H, W
// (N, M, H, W) or (N, M, D, H, W)
cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, framework::vectorize2int(input->dims()));
// N, C, O_h, O_w
// (N, C, O_h, O_w) or (N, C, O_d, O_h, O_w)
cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
layout, framework::vectorize2int(output->dims()));
// M, C, K_h, K_w
// (M, C, K_h, K_w) or (M, C, K_d, K_h, K_w)
cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>(
layout, framework::vectorize2int(filter->dims()));
cudnnConvolutionDescriptor_t cudnn_conv_desc =
......@@ -136,13 +142,13 @@ class CudnnConvTransposeGradOpKernel : public framework::OpKernel<T> {
ScopedConvolutionDescriptor conv_desc;
DataLayout layout = DataLayout::kNCHW;
// Input: (N, M, H, W)
// Input: (N, M, H, W) or (N, M, D, H, W)
cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, framework::vectorize2int(input->dims()));
// Output: (N, C, O_H, O_W)
// Output: (N, C, O_h, O_w) or (N, C, O_d, O_h, O_w)
cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
layout, framework::vectorize2int(output_grad->dims()));
// Filter (M, C, K_H, K_W)
// Filter: (M, C, K_h, K_w) or (M, C, K_d, K_h, K_w)
cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>(
layout, framework::vectorize2int(filter->dims()));
......@@ -200,8 +206,7 @@ class CudnnConvTransposeGradOpKernel : public framework::OpKernel<T> {
T alpha = 1.0f, beta = 0.0f;
if (input_grad) {
T* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
math::set_constant(ctx.device_context(), input_grad, 0);
// Because beta is zero, it is unnecessary to reset input_grad.
PADDLE_ENFORCE(platform::dynload::cudnnConvolutionForward(
handle, &alpha, cudnn_output_desc, output_grad_data,
cudnn_filter_desc, filter_data, cudnn_conv_desc, data_algo,
......@@ -212,8 +217,7 @@ class CudnnConvTransposeGradOpKernel : public framework::OpKernel<T> {
// ------------------- cudnn conv backward filter ---------------------
if (filter_grad) {
T* filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace());
math::set_constant(ctx.device_context(), filter_grad, 0);
// Because beta is zero, it is unnecessary to reset filter_grad.
// Gradient with respect to the filter
PADDLE_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, cudnn_output_desc, output_grad_data, cudnn_input_desc,
......@@ -231,6 +235,15 @@ class CudnnConvTransposeGradOpKernel : public framework::OpKernel<T> {
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(conv2d_transpose_cudnn,
ops::CudnnConvTransposeOpKernel<float>);
ops::CudnnConvTransposeOpKernel<float>,
ops::CudnnConvTransposeOpKernel<double>);
REGISTER_OP_GPU_KERNEL(conv2d_transpose_cudnn_grad,
ops::CudnnConvTransposeGradOpKernel<float>);
ops::CudnnConvTransposeGradOpKernel<float>,
ops::CudnnConvTransposeGradOpKernel<double>);
REGISTER_OP_GPU_KERNEL(conv3d_transpose_cudnn,
ops::CudnnConvTransposeOpKernel<float>,
ops::CudnnConvTransposeOpKernel<double>);
REGISTER_OP_GPU_KERNEL(conv3d_transpose_cudnn_grad,
ops::CudnnConvTransposeGradOpKernel<float>,
ops::CudnnConvTransposeGradOpKernel<double>);
......@@ -30,11 +30,6 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const {
std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
for (size_t i = 0; i < paddings.size(); ++i) {
PADDLE_ENFORCE_EQ(paddings[i], 0,
"No Padding allowed in conv transpose op.");
}
PADDLE_ENFORCE(in_dims.size() == 4 || in_dims.size() == 5,
"ConvTransposeOp intput should be 4-D or 5-D tensor.");
PADDLE_ENFORCE_EQ(in_dims.size(), filter_dims.size(),
......@@ -52,7 +47,7 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const {
std::vector<int64_t> output_shape({in_dims[0], filter_dims[1]});
for (size_t i = 0; i < strides.size(); ++i) {
output_shape.push_back((in_dims[i + 2] - 1) * strides[i] +
output_shape.push_back((in_dims[i + 2] - 1) * strides[i] - 2 * paddings[i] +
filter_dims[i + 2]);
}
ctx->SetOutputDim("Output", framework::make_ddim(output_shape));
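// Worked example of the padded shape rule (illustrative, not from the
// source): per spatial dim, out = (in - 1) * stride - 2 * padding + kernel,
// e.g. in = 7, stride = 2, padding = 1, kernel = 4 gives
// (7 - 1) * 2 - 2 + 4 = 14; padding = 0 recovers the previous rule.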
......@@ -190,17 +185,21 @@ REGISTER_OP(conv2d_transpose, ops::ConvTransposeOp, ops::Conv2DTransposeOpMaker,
REGISTER_OP_CPU_KERNEL(
conv2d_transpose,
ops::GemmConvTransposeKernel<paddle::platform::CPUPlace, float>);
ops::GemmConvTransposeKernel<paddle::platform::CPUPlace, float>,
ops::GemmConvTransposeKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP_CPU_KERNEL(
conv2d_transpose_grad,
ops::GemmConvTransposeGradKernel<paddle::platform::CPUPlace, float>);
ops::GemmConvTransposeGradKernel<paddle::platform::CPUPlace, float>,
ops::GemmConvTransposeGradKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP(conv3d_transpose, ops::ConvTransposeOp, ops::Conv3DTransposeOpMaker,
conv3d_transpose_grad, ops::ConvTransposeOpGrad);
REGISTER_OP_CPU_KERNEL(
conv3d_transpose,
ops::GemmConvTransposeKernel<paddle::platform::CPUPlace, float>);
ops::GemmConvTransposeKernel<paddle::platform::CPUPlace, float>,
ops::GemmConvTransposeKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP_CPU_KERNEL(
conv3d_transpose_grad,
ops::GemmConvTransposeGradKernel<paddle::platform::CPUPlace, float>);
ops::GemmConvTransposeGradKernel<paddle::platform::CPUPlace, float>,
ops::GemmConvTransposeGradKernel<paddle::platform::CPUPlace, double>);
......@@ -18,14 +18,18 @@ namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(
conv2d_transpose,
ops::GemmConvTransposeKernel<paddle::platform::GPUPlace, float>);
ops::GemmConvTransposeKernel<paddle::platform::GPUPlace, float>,
ops::GemmConvTransposeKernel<paddle::platform::GPUPlace, double>);
REGISTER_OP_GPU_KERNEL(
conv2d_transpose_grad,
ops::GemmConvTransposeGradKernel<paddle::platform::GPUPlace, float>);
ops::GemmConvTransposeGradKernel<paddle::platform::GPUPlace, float>,
ops::GemmConvTransposeGradKernel<paddle::platform::GPUPlace, double>);
REGISTER_OP_GPU_KERNEL(
conv3d_transpose,
ops::GemmConvTransposeKernel<paddle::platform::GPUPlace, float>);
ops::GemmConvTransposeKernel<paddle::platform::GPUPlace, float>,
ops::GemmConvTransposeKernel<paddle::platform::GPUPlace, double>);
REGISTER_OP_GPU_KERNEL(
conv3d_transpose_grad,
ops::GemmConvTransposeGradKernel<paddle::platform::GPUPlace, float>);
ops::GemmConvTransposeGradKernel<paddle::platform::GPUPlace, float>,
ops::GemmConvTransposeGradKernel<paddle::platform::GPUPlace, double>);
......@@ -62,7 +62,6 @@ class GemmConvTransposeKernel : public framework::OpKernel<T> {
Tensor* output = context.Output<Tensor>("Output");
std::vector<int> strides = context.Attr<std::vector<int>>("strides");
// Actually, no paddings and groups allowed in conv transpose.
std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
// TODO(Zhuoyuan): Paddings can be added in future.
// groups will always be disabled in conv2d_transpose.
......@@ -148,8 +147,8 @@ class GemmConvTransposeKernel : public framework::OpKernel<T> {
} else if (filter_shape_vec.size() == 3) {
// col2vol: col_matrix -> dy
// from (c * k_d * k_h * k_w, d * h * w) to (c, o_d, o_h, o_w)
col2vol(context.device_context(), col, dilations, strides,
std::vector<int>{0, 0, 0}, &output_batch);
col2vol(context.device_context(), col, dilations, strides, paddings,
&output_batch);
}
}
}
......@@ -173,7 +172,6 @@ class GemmConvTransposeGradKernel : public framework::OpKernel<T> {
if ((!input_grad) && (!filter_grad)) return;
std::vector<int> strides = context.Attr<std::vector<int>>("strides");
// Actually, no paddings and groups allowed in conv transpose.
std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
const int batch_size = static_cast<int>(input->dims()[0]);
......
......@@ -24,8 +24,17 @@
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using Tensor = framework::Tensor;
template <typename Place, typename T>
inline void ReorderInitState(const platform::DeviceContext& ctx,
const framework::Tensor& src, const size_t* index,
framework::Tensor* dst, bool indexed_src) {
math::CopyMatrixRowsFunctor<Place, T> row_shuffle;
dst->mutable_data<T>(src.dims(), ctx.GetPlace());
row_shuffle(ctx, src, index, *dst, indexed_src);
}
template <typename Place, typename T>
class GRUKernel : public framework::OpKernel<T> {
......@@ -33,7 +42,6 @@ class GRUKernel : public framework::OpKernel<T> {
void BatchCompute(const framework::ExecutionContext& context) const {
auto* input = context.Input<LoDTensor>("Input");
auto* h0 = context.Input<Tensor>("H0");
const T* h0_data = h0 ? h0->data<T>() : nullptr;
auto* weight = context.Input<Tensor>("Weight");
const T* weight_data = weight->data<T>();
auto* bias = context.Input<Tensor>("Bias");
......@@ -66,7 +74,18 @@ class GRUKernel : public framework::OpKernel<T> {
gru_value.gateWeight = const_cast<T*>(weight_data);
gru_value.stateWeight =
const_cast<T*>(weight_data + 2 * frame_size * frame_size);
gru_value.prevOutValue = const_cast<T*>(h0_data);
Tensor ordered_h0;
const size_t* order = batch_gate->lod()[2].data();
if (h0) {
// Since batch computing for GRU reorders the input sequences by
// length, the initial hidden state has to be reordered as well.
ReorderInitState<Place, T>(context.device_context(), *h0, order,
&ordered_h0, true);
gru_value.prevOutValue = ordered_h0.data<T>();
} else {
gru_value.prevOutValue = nullptr;
}
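// Illustrative sketch (an assumed example, not from the source): for three
// sequences of lengths {2, 3, 1}, sequence2batch sorts them longest-first,
// so order = {1, 0, 2} and the gather above yields
// ordered_h0 = {h0[1], h0[0], h0[2]}.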
auto batch_starts = batch_gate->lod()[0];
size_t num_batch = batch_starts.size() - 1;
for (size_t n = 0; n < num_batch; n++) {
......@@ -102,7 +121,6 @@ class GRUGradKernel : public framework::OpKernel<T> {
public:
void BatchCompute(const framework::ExecutionContext& context) const {
auto* h0 = context.Input<Tensor>("H0");
const T* h0_data = h0 ? h0->data<T>() : nullptr;
auto* weight = context.Input<Tensor>("Weight");
const T* weight_data = weight->data<T>();
auto* batch_gate = context.Input<LoDTensor>("BatchGate");
......@@ -135,6 +153,17 @@ class GRUGradKernel : public framework::OpKernel<T> {
zero(dev_ctx, &batch_gate_grad, static_cast<T>(0.0));
zero(dev_ctx, &batch_reset_hidden_prev_grad, static_cast<T>(0.0));
Tensor ordered_h0, ordered_h0_grad;
const size_t* order = batch_gate->lod()[2].data();
if (h0) {
ReorderInitState<Place, T>(context.device_context(), *h0, order,
&ordered_h0, true);
}
if (h0_grad) {
ordered_h0_grad.mutable_data<T>(h0_grad->dims(), context.GetPlace());
zero(context.device_context(), &ordered_h0_grad, static_cast<T>(0.0));
}
bool is_reverse = context.Attr<bool>("is_reverse");
batch_hidden_grad.set_lod(batch_hidden->lod());
to_batch(dev_ctx, *hidden_grad, batch_hidden_grad, false, is_reverse);
......@@ -176,14 +205,9 @@ class GRUGradKernel : public framework::OpKernel<T> {
batch_reset_hidden_prev_grad.Slice(bstart, bend);
gru_grad.resetOutputGrad = reset_hidden_prev_grad_t.data<T>();
if (n == 0) {
gru_value.prevOutValue = const_cast<T*>(h0_data);
if (h0_grad) {
T* h0_grad_data = h0_grad->mutable_data<T>(context.GetPlace());
zero(dev_ctx, h0_grad, static_cast<T>(0.0));
gru_grad.prevOutGrad = h0_grad_data;
} else {
gru_grad.prevOutGrad = nullptr;
}
gru_value.prevOutValue = h0 ? ordered_h0.data<T>() : nullptr;
gru_grad.prevOutGrad =
h0 && h0_grad ? ordered_h0_grad.data<T>() : nullptr;
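// At the first batch step the initial-state gradient lands in
// ordered_h0_grad; it is scattered back into h0_grad (undoing the
// length-based reorder) once the batch loop finishes.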
} else {
int bstart_pre = static_cast<int>(batch_starts[n - 1]);
Tensor hidden_prev_t = batch_hidden->Slice(bstart_pre, bstart);
......@@ -208,6 +232,10 @@ class GRUGradKernel : public framework::OpKernel<T> {
math::ColwiseSum<Place, T> col_sum;
col_sum(dev_ctx, batch_gate_grad, bias_grad);
}
if (h0 && h0_grad) {
ReorderInitState<Place, T>(context.device_context(), ordered_h0_grad,
order, h0_grad, false);
}
}
void Compute(const framework::ExecutionContext& context) const override {
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/logical_op.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
template <typename OpComment>
class BinaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker {
public:
BinaryLogicalOpProtoMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
OpComment comment;
AddInput("X",
string::Sprintf("(LoDTensor) Left hand operand of %s operator",
comment.type));
AddInput("Y",
string::Sprintf("(LoDTensor) Right hand operand of %s operator",
comment.type));
AddOutput("Out", string::Sprintf(
"(LoDTensor) n-dim bool tensor. Each element is %s",
comment.equation));
AddComment(string::Sprintf(R"DOC(%s Operator
It operates element-wise on X and Y and returns Out. X, Y and Out are N-dim boolean tensors.
Each element of Out is calculated by %s
)DOC",
comment.type, comment.equation));
}
};
template <typename OpComment>
class UnaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker {
public:
UnaryLogicalOpProtoMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
OpComment comment;
AddInput("X", string::Sprintf("(LoDTensor) Operand of %s operator",
comment.type));
AddOutput("Out", string::Sprintf(
"(LoDTensor) n-dim bool tensor. Each element is %s",
comment.equation));
AddComment(string::Sprintf(R"DOC(%s Operator
It operates element-wise on X and returns Out. X and Out are N-dim boolean tensors.
Each element of Out is calculated by %s
)DOC",
comment.type, comment.equation));
}
};
template <typename OpComment>
class BinaryLogicalOpInferShape : public framework::InferShapeBase {
public:
void operator()(framework::InferShapeContext *context) const override {
OpComment comment;
PADDLE_ENFORCE(context->HasInput("X"),
"Input(X) of %s operator must not be null", comment.type);
PADDLE_ENFORCE(context->HasInput("Y"),
"Input(Y) of %s operator must not be null", comment.type);
auto dim_x = context->GetInputDim("X");
auto dim_y = context->GetInputDim("Y");
PADDLE_ENFORCE_EQ(framework::product(dim_x), framework::product(dim_y),
"The number of elements in X and Y should be same");
context->SetOutputDim("Out", context->GetInputDim("X"));
context->ShareLoD("X", "Out");
}
};
template <typename OpComment>
class UnaryLogicalOpInferShape : public framework::InferShapeBase {
public:
void operator()(framework::InferShapeContext *context) const override {
OpComment comment;
PADDLE_ENFORCE(context->HasInput("X"),
"Input(X) of %s operator must not be null", comment.type);
auto dim_x = context->GetInputDim("X");
context->SetOutputDim("Out", context->GetInputDim("X"));
context->ShareLoD("X", "Out");
}
};
class LogicalOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
framework::OpKernelType GetKernelType(
const framework::ExecutionContext &ctx) const override {
framework::OpKernelType kt = OperatorWithKernel::GetKernelType(ctx);
// LogicalOp kernel's device type is decided by input tensor place
kt.place_ = ctx.Input<framework::LoDTensor>("X")->place();
return kt;
}
};
} // namespace operators
} // namespace paddle
#define REGISTER_BINARY_LOGICAL_OP(op_type, _equation) \
struct _##op_type##Comment { \
static char type[]; \
static char equation[]; \
}; \
char _##op_type##Comment::type[]{#op_type}; \
char _##op_type##Comment::equation[]{_equation}; \
REGISTER_OPERATOR( \
op_type, ::paddle::operators::LogicalOp, \
::paddle::operators::BinaryLogicalOpProtoMaker<_##op_type##Comment>, \
::paddle::operators::BinaryLogicalOpInferShape<_##op_type##Comment>, \
::paddle::framework::EmptyGradOpMaker);
#define REGISTER_UNARY_LOGICAL_OP(op_type, _equation) \
struct _##op_type##Comment { \
static char type[]; \
static char equation[]; \
}; \
char _##op_type##Comment::type[]{#op_type}; \
char _##op_type##Comment::equation[]{_equation}; \
REGISTER_OPERATOR( \
op_type, ::paddle::operators::LogicalOp, \
::paddle::operators::UnaryLogicalOpProtoMaker<_##op_type##Comment>, \
::paddle::operators::UnaryLogicalOpInferShape<_##op_type##Comment>, \
::paddle::framework::EmptyGradOpMaker);
REGISTER_BINARY_LOGICAL_OP(logical_and, "Out = X && Y");
REGISTER_BINARY_LOGICAL_KERNEL(logical_and, CPU,
paddle::operators::LogicalAndFunctor);
REGISTER_BINARY_LOGICAL_OP(logical_or, "Out = X || Y");
REGISTER_BINARY_LOGICAL_KERNEL(logical_or, CPU,
paddle::operators::LogicalOrFunctor);
REGISTER_UNARY_LOGICAL_OP(logical_not, "Out = !X");
REGISTER_UNARY_LOGICAL_KERNEL(logical_not, CPU,
paddle::operators::LogicalNotFunctor);
REGISTER_BINARY_LOGICAL_OP(logical_xor, "Out = (X || Y) && !(X && Y)");
REGISTER_BINARY_LOGICAL_KERNEL(logical_xor, CPU,
paddle::operators::LogicalXorFunctor);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/logical_op.h"
REGISTER_BINARY_LOGICAL_KERNEL(logical_and, GPU,
paddle::operators::LogicalAndFunctor);
REGISTER_BINARY_LOGICAL_KERNEL(logical_or, GPU,
paddle::operators::LogicalOrFunctor);
REGISTER_UNARY_LOGICAL_KERNEL(logical_not, GPU,
paddle::operators::LogicalNotFunctor);
REGISTER_BINARY_LOGICAL_KERNEL(logical_xor, GPU,
paddle::operators::LogicalXorFunctor);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <math.h>
#include <type_traits>
#include "paddle/framework/op_registry.h"
#include "paddle/platform/transform.h"
namespace paddle {
namespace operators {
template <typename T>
struct LogicalAndFunctor {
using ELEM_TYPE = T;
HOSTDEVICE bool operator()(const T& a, const T& b) const { return a && b; }
};
template <typename T>
struct LogicalOrFunctor {
using ELEM_TYPE = T;
HOSTDEVICE bool operator()(const T& a, const T& b) const { return a || b; }
};
template <typename T>
struct LogicalNotFunctor {
using ELEM_TYPE = T;
HOSTDEVICE bool operator()(const T& a) const { return !a; }
};
template <typename T>
struct LogicalXorFunctor {
using ELEM_TYPE = T;
HOSTDEVICE bool operator()(const T& a, const T& b) const {
return (a || b) && !(a && b);
}
};
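// A minimal host-side check of the functors above (an illustrative,
// standalone sketch, not part of this file; it assumes only the public
// header below):
#include <cassert>
#include "paddle/operators/logical_op.h"
int main() {
  paddle::operators::LogicalXorFunctor<bool> logical_xor;
  // xor is true iff exactly one operand is true: (a || b) && !(a && b)
  assert(logical_xor(true, false) && logical_xor(false, true));
  assert(!logical_xor(true, true) && !logical_xor(false, false));
  return 0;
}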
template <typename Place, typename Functor>
class BinaryLogicalOpKernel
: public framework::OpKernel<typename Functor::ELEM_TYPE> {
public:
void Compute(const framework::ExecutionContext& context) const override {
using T = typename Functor::ELEM_TYPE;
auto* x = context.Input<framework::Tensor>("X");
auto* y = context.Input<framework::Tensor>("Y");
auto* out = context.Output<framework::Tensor>("Out");
Functor binary_func;
platform::Transform<Place> trans;
trans(context.device_context(), x->data<T>(), x->data<T>() + x->numel(),
y->data<T>(), out->mutable_data<bool>(context.GetPlace()),
binary_func);
}
};
template <typename Place, typename Functor>
class UnaryLogicalOpKernel
: public framework::OpKernel<typename Functor::ELEM_TYPE> {
public:
void Compute(const framework::ExecutionContext& context) const override {
using T = typename Functor::ELEM_TYPE;
auto* x = context.Input<framework::Tensor>("X");
auto* out = context.Output<framework::Tensor>("Out");
Functor unary_func;
platform::Transform<Place> trans;
trans(context.device_context(), x->data<T>(), x->data<T>() + x->numel(),
out->mutable_data<bool>(context.GetPlace()), unary_func);
}
};
} // namespace operators
} // namespace paddle
#define REGISTER_BINARY_LOGICAL_KERNEL(op_type, dev, functor) \
REGISTER_OP_##dev##_KERNEL( \
op_type, ::paddle::operators::BinaryLogicalOpKernel< \
::paddle::platform::dev##Place, functor<bool>>);
#define REGISTER_UNARY_LOGICAL_KERNEL(op_type, dev, functor) \
REGISTER_OP_##dev##_KERNEL( \
op_type, ::paddle::operators::UnaryLogicalOpKernel< \
::paddle::platform::dev##Place, functor<bool>>);
add_subdirectory(detail)
if(WITH_GPU)
nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc im2col.cu DEPS cblas device_context)
nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc im2col.cu DEPS cblas device_context framework_proto)
nv_test(math_function_gpu_test SRCS math_function_test.cu DEPS math_function tensor)
nv_library(selected_rows_functor SRCS selected_rows_functor.cc selected_rows_functor.cu DEPS selected_rows math_function)
nv_test(selected_rows_functor_gpu_test SRCS selected_rows_functor_test.cu DEPS selected_rows_functor)
......@@ -14,8 +14,9 @@ if(WITH_GPU)
nv_library(sequence2batch SRCS sequence2batch.cc sequence2batch.cu DEPS device_context)
nv_library(lstm_compute SRCS lstm_compute.cc lstm_compute.cu DEPS device_context activation_functions)
nv_library(gru_compute SRCS gru_compute.cc gru_compute.cu DEPS device_context activation_functions math_function)
nv_library(maxouting SRCS maxouting.cc maxouting.cu DEPS device_context)
else()
cc_library(math_function SRCS math_function.cc im2col.cc DEPS cblas device_context)
cc_library(math_function SRCS math_function.cc im2col.cc DEPS cblas device_context framework_proto)
cc_library(selected_rows_functor SRCS selected_rows_functor.cc DEPS selected_rows math_function)
cc_library(softmax SRCS softmax.cc DEPS device_context)
cc_library(cross_entropy SRCS cross_entropy.cc DEPS device_context)
......@@ -26,6 +27,7 @@ else()
cc_library(sequence2batch SRCS sequence2batch.cc DEPS device_context)
cc_library(lstm_compute SRCS lstm_compute.cc DEPS device_context activation_functions)
cc_library(gru_compute SRCS gru_compute.cc DEPS device_context activation_functions math_function)
cc_library(maxouting SRCS maxouting.cc DEPS device_context)
endif()
cc_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor)
......
......@@ -119,8 +119,8 @@ __global__ void col2im(int n, const T* data_col, int im_height, int im_width,
if (index < n) {
T val = 0;
int w = index % im_width;
int h = (index / im_width) % im_height;
int w = index % im_width + padding_width;
int h = (index / im_width) % im_height + padding_height;
int c = index / (im_width * im_height);
// compute the start and end of the output
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/maxouting.h"
namespace paddle {
namespace operators {
namespace math {
// All tensors are in NCHW format, and groups must be greater than 1.
template <typename T>
class MaxOutFunctor<platform::CPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input,
framework::Tensor * output,
int groups) {
const int batch_size = input.dims()[0];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output->dims()[1];
int fea_size = input_height * input_width;
// c_size means the output size of each sample
int c_size = fea_size * output_channels;
const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
for (int i = 0; i < batch_size; ++i) {
int new_bindex = c_size * i;
for (int c = 0; c < output_channels; ++c) {
int new_cindex = fea_size * c;
for (int f = 0; f < fea_size; ++f) {
T ele = static_cast<T>(-FLT_MAX);
for (int ph = 0; ph < groups; ++ph) {
T x = input_data[(new_bindex + new_cindex) * groups + ph * fea_size + f];
ele = ele > x ? ele : x;
}
output_data[new_bindex + new_cindex + f] = ele;
}
}
}
}
};
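// Indexing note (illustrative, not from the source): output channel c takes
// the max over input channels c * groups + ph for ph in [0, groups), i.e.
// with groups = 2 and 4 input channels,
//   out[n][0] = max(in[n][0], in[n][1]), out[n][1] = max(in[n][2], in[n][3])
// element-wise over the H x W feature map.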
template <class T>
class MaxOutGradFunctor<platform::CPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input,
framework::Tensor * input_grad,
const framework::Tensor& output,
const framework::Tensor& output_grad,
int groups) {
const int batch_size = input.dims()[0];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output.dims()[1];
int fea_size = input_height * input_width;
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
for (int i = 0; i < batch_size; ++i) {
int blen = fea_size * output_channels * i;
for (int c = 0; c < output_channels; ++c) {
int clen = fea_size * c;
for (int f = 0; f < fea_size; ++f) {
int input_idx0 = (blen + clen) * groups + f;
bool continue_match = true;
int output_idx = blen + clen + f;
for (int g = 0; g < groups && continue_match; ++g) {
int input_idx = input_idx0 + fea_size * g;
if (input_data[input_idx] == output_data[output_idx]) {
input_grad_data[input_idx] += output_grad_data[output_idx];
continue_match = false;
}
}
}
}
}
}
};
template class MaxOutGradFunctor<platform::CPUPlace, float>;
template class MaxOutGradFunctor<platform::CPUPlace, double>;
template class MaxOutFunctor<platform::CPUPlace, float>;
template class MaxOutFunctor<platform::CPUPlace, double>;
} // namespace math
} // namespace operators
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/maxouting.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
__global__ void KernelMaxOut(const int nthreads, const T* input_data,
const int channels,
const int input_height, const int input_width,
int groups, T* output_data) {
const int size = input_height * input_width * channels / groups;
const int feat_len = input_height * input_width;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
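// Grid-stride loop: each thread starts at its global index and advances by
// the total number of launched threads, so any grid size covers nthreads.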
for (int i = index; i < nthreads; i += offset) {
int batch_idx = i / size;
int batch_offset = i % size;
int channel_idx = batch_offset / feat_len;
int feat_idx = batch_offset % feat_len;
int data_idx =
(batch_idx * size + channel_idx * feat_len) * groups + feat_idx;
T ele = static_cast<T>(-FLT_MAX);
for (int g = 0; g < groups; ++g) {
T x = input_data[data_idx + g * feat_len];
ele = ele > x ? ele : x;
}
output_data[i] = ele;
}
}
template <typename T>
__global__ void KernelMaxoutGrad(
const int nthreads, const T* input_data, const T* output_data,
const T* output_grad, T* input_grad, const int channels,
const int input_height, const int input_width, int groups) {
const int size = input_height * input_width * channels / groups;
const int feat_len = input_height * input_width;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (int i = index; i < nthreads; i += offset) {
int batch_idx = i / size;
int batch_offset = i % size;
int channel_idx = batch_offset / feat_len;
int feat_idx = batch_offset % feat_len;
int data_idx =
(batch_idx * size + channel_idx * feat_len) * groups + feat_idx;
int max_index = -1;
for (int g = 0; g < groups; ++g) {
if (input_data[data_idx + g * feat_len] == output_data[i]) {
max_index = data_idx + g * feat_len;
break;
}
}
if (max_index != -1) {
// Index with the grid-stride loop variable i, not the thread's first
// index, so every element reads its own upstream gradient.
input_grad[max_index] += output_grad[i];
}
}
}
/*
* All tensors are in NCHW format.
*/
template <typename T>
class MaxOutFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor * output,
int groups) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
int nthreads = output->numel();
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelMaxOut<
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(nthreads, input_data, input_channels,
input_height, input_width, groups,
output_data);
}
};
/*
* All tensors are in NCHW format.
*/
template <typename T>
class MaxOutGradFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input,
framework::Tensor * input_grad,
const framework::Tensor& output,
const framework::Tensor& output_grad,
int groups) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output.dims()[1];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads = output.numel();
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelMaxoutGrad<
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
nthreads, input_data, output_data, output_grad_data, input_grad_data,
input_channels, input_height, input_width, groups);
}
};
template class MaxOutGradFunctor<platform::GPUPlace, float>;
template class MaxOutGradFunctor<platform::GPUPlace, double>;
template class MaxOutFunctor<platform::GPUPlace, float>;
template class MaxOutFunctor<platform::GPUPlace, double>;
} // namespace math
} // namespace operators
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/tensor.h"
#include "paddle/platform/device_context.h"
#include "paddle/platform/hostdevice.h"
namespace paddle {
namespace operators {
namespace math {
#define FLT_MAX __FLT_MAX__
template <typename Place, typename T>
class MaxOutFunctor {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor * output,
int groups);
};
template <typename Place, class T>
class MaxOutGradFunctor {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input,
framework::Tensor * input_grad,
const framework::Tensor& output,
const framework::Tensor& output_grad, int groups);
};
} // namespace math
} // namespace operators
} // namespace paddle
......@@ -498,8 +498,8 @@ template class Pool3dGradFunctor<
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T>
class MaxPool2dWithIndexFunctor<platform::CPUPlace, T> {
template <typename T1, typename T2>
class MaxPool2dWithIndexFunctor<platform::CPUPlace, T1, T2> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, std::vector<int>& ksize,
......@@ -520,9 +520,9 @@ class MaxPool2dWithIndexFunctor<platform::CPUPlace, T> {
const int input_stride = input_height * input_width;
const int output_stride = output_height * output_width;
const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
T* mask_data = mask->mutable_data<T>(context.GetPlace());
const T1* input_data = input.data<T1>();
T1* output_data = output->mutable_data<T1>(context.GetPlace());
T2* mask_data = mask->mutable_data<T2>(context.GetPlace());
for (int i = 0; i < batch_size; i++) {
for (int c = 0; c < output_channels; ++c) {
......@@ -535,7 +535,7 @@ class MaxPool2dWithIndexFunctor<platform::CPUPlace, T> {
int wend = std::min(wstart + ksize_width, input_width);
wstart = std::max(wstart, 0);
T ele = static_cast<T>(-FLT_MAX);
T1 ele = static_cast<T1>(-FLT_MAX);
int index = -1;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
......@@ -563,8 +563,8 @@ class MaxPool2dWithIndexFunctor<platform::CPUPlace, T> {
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T>
class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, T> {
template <typename T1, typename T2>
class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, T1, T2> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& output_grad,
......@@ -580,9 +580,9 @@ class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, T> {
const int input_stride = input_height * input_width;
const int output_stride = output_height * output_width;
const T* mask_data = mask.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
const T2* mask_data = mask.data<T2>();
const T1* output_grad_data = output_grad.data<T1>();
T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace());
for (int n = 0; n < batch_size; ++n) {
for (int c = 0; c < output_channels; ++c) {
......@@ -602,18 +602,18 @@ class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, T> {
}
};
template class MaxPool2dWithIndexFunctor<platform::CPUPlace, float>;
template class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, float>;
template class MaxPool2dWithIndexFunctor<platform::CPUPlace, double>;
template class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, double>;
template class MaxPool2dWithIndexFunctor<platform::CPUPlace, float, int>;
template class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, float, int>;
template class MaxPool2dWithIndexFunctor<platform::CPUPlace, double, int>;
template class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, double, int>;
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename T>
class MaxPool3dWithIndexFunctor<platform::CPUPlace, T> {
template <typename T1, typename T2>
class MaxPool3dWithIndexFunctor<platform::CPUPlace, T1, T2> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, std::vector<int>& ksize,
......@@ -639,9 +639,9 @@ class MaxPool3dWithIndexFunctor<platform::CPUPlace, T> {
const int input_stride = input_depth * input_height * input_width;
const int output_stride = output_depth * output_height * output_width;
const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
T* mask_data = mask->mutable_data<T>(context.GetPlace());
const T1* input_data = input.data<T1>();
T1* output_data = output->mutable_data<T1>(context.GetPlace());
T2* mask_data = mask->mutable_data<T2>(context.GetPlace());
for (int i = 0; i < batch_size; i++) {
for (int c = 0; c < output_channels; ++c) {
......@@ -659,7 +659,7 @@ class MaxPool3dWithIndexFunctor<platform::CPUPlace, T> {
wstart = std::max(wstart, 0);
int output_idx = (pd * output_height + ph) * output_width + pw;
T ele = static_cast<T>(-FLT_MAX);
T1 ele = static_cast<T1>(-FLT_MAX);
int index = -1;
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
......@@ -691,8 +691,8 @@ class MaxPool3dWithIndexFunctor<platform::CPUPlace, T> {
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename T>
class MaxPool3dWithIndexGradFunctor<platform::CPUPlace, T> {
template <typename T1, typename T2>
class MaxPool3dWithIndexGradFunctor<platform::CPUPlace, T1, T2> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& output_grad,
......@@ -710,9 +710,9 @@ class MaxPool3dWithIndexGradFunctor<platform::CPUPlace, T> {
const int input_stride = input_depth * input_height * input_width;
const int output_stride = output_depth * output_height * output_width;
const T* mask_data = mask.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
const T2* mask_data = mask.data<T2>();
const T1* output_grad_data = output_grad.data<T1>();
T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace());
for (int n = 0; n < batch_size; ++n) {
for (int c = 0; c < output_channels; ++c) {
......@@ -735,10 +735,10 @@ class MaxPool3dWithIndexGradFunctor<platform::CPUPlace, T> {
}
};
template class MaxPool3dWithIndexFunctor<platform::CPUPlace, float>;
template class MaxPool3dWithIndexGradFunctor<platform::CPUPlace, float>;
template class MaxPool3dWithIndexFunctor<platform::CPUPlace, double>;
template class MaxPool3dWithIndexGradFunctor<platform::CPUPlace, double>;
template class MaxPool3dWithIndexFunctor<platform::CPUPlace, float, int>;
template class MaxPool3dWithIndexGradFunctor<platform::CPUPlace, float, int>;
template class MaxPool3dWithIndexFunctor<platform::CPUPlace, double, int>;
template class MaxPool3dWithIndexGradFunctor<platform::CPUPlace, double, int>;
} // namespace math
} // namespace operators
} // namespace paddle
......@@ -658,13 +658,13 @@ template class Pool3dGradFunctor<
template class Pool3dGradFunctor<
platform::GPUPlace, paddle::operators::math::AvgPoolGrad<double>, double>;
template <typename T>
template <typename T1, typename T2>
__global__ void KernelMaxPool2dWithIdx(
const int nthreads, const T* input_data, const int channels,
const int nthreads, const T1* input_data, const int channels,
const int input_height, const int input_width, const int output_height,
const int output_width, const int ksize_height, const int ksize_width,
const int stride_height, const int stride_width, const int padding_height,
const int padding_width, T* output_data, T* mask_data) {
const int padding_width, T1* output_data, T2* mask_data) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
......@@ -681,7 +681,7 @@ __global__ void KernelMaxPool2dWithIdx(
wstart = max(wstart, 0);
input_data += (batch_idx * channels + c) * input_height * input_width;
T ele = -FLT_MAX;
T1 ele = -FLT_MAX;
int max_index = -1;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
......@@ -697,13 +697,13 @@ __global__ void KernelMaxPool2dWithIdx(
}
}
template <typename T>
template <typename T1, typename T2>
__global__ void KernelMaxPool2DWithIdxGrad(
const int nthreads, const T* output_grad, const T* mask_data,
const int nthreads, const T1* output_grad, const T2* mask_data,
const int channels, const int input_height, const int input_width,
const int output_height, const int output_width, const int ksize_height,
const int ksize_width, const int stride_height, const int stride_width,
const int padding_height, const int padding_width, T* input_grad) {
const int padding_height, const int padding_width, T1* input_grad) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset = index % input_width;
......@@ -724,7 +724,7 @@ __global__ void KernelMaxPool2DWithIdxGrad(
int pw_end =
min((w_offset + padding_width) / stride_width + 1, output_width);
T gradient = 0;
T1 gradient = 0;
int input_current_featuremap_idx = h_offset * input_width + w_offset;
int output_idx =
(batch_idx * channels + c_offset) * output_height * output_width;
......@@ -746,8 +746,8 @@ __global__ void KernelMaxPool2DWithIdxGrad(
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T>
class MaxPool2dWithIndexFunctor<platform::GPUPlace, T> {
template <typename T1, typename T2>
class MaxPool2dWithIndexFunctor<platform::GPUPlace, T1, T2> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, std::vector<int>& ksize,
......@@ -767,9 +767,9 @@ class MaxPool2dWithIndexFunctor<platform::GPUPlace, T> {
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
T* mask_data = mask->mutable_data<T>(context.GetPlace());
const T1* input_data = input.data<T1>();
T1* output_data = output->mutable_data<T1>(context.GetPlace());
T2* mask_data = mask->mutable_data<T2>(context.GetPlace());
int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
......@@ -777,9 +777,9 @@ class MaxPool2dWithIndexFunctor<platform::GPUPlace, T> {
dim3 grid(blocks, 1);
KernelMaxPool2dWithIdx<
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
T1, T2><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
nthreads, input_data, input_channels, input_height, input_width,
output_height, output_width, ksize_height, ksize_width, stride_height,
stride_width, padding_height, padding_width, output_data, mask_data);
......@@ -791,8 +791,8 @@ class MaxPool2dWithIndexFunctor<platform::GPUPlace, T> {
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T>
class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, T> {
template <typename T1, typename T2>
class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, T1, T2> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& output_grad,
......@@ -812,9 +812,9 @@ class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, T> {
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* mask_data = mask.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
const T2* mask_data = mask.data<T2>();
const T1* output_grad_data = output_grad.data<T1>();
T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace());
int nthreads = batch_size * input_channels * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
......@@ -822,30 +822,30 @@ class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, T> {
dim3 grid(blocks, 1);
KernelMaxPool2DWithIdxGrad<
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(nthreads, output_grad_data, mask_data,
input_channels, input_height, input_width,
output_height, output_width, ksize_height,
ksize_width, stride_height, stride_width,
padding_height, padding_width, input_grad_data);
T1, T2><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
nthreads, output_grad_data, mask_data, input_channels, input_height,
input_width, output_height, output_width, ksize_height, ksize_width,
stride_height, stride_width, padding_height, padding_width,
input_grad_data);
}
};
template class MaxPool2dWithIndexFunctor<platform::GPUPlace, float>;
template class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, float>;
template class MaxPool2dWithIndexFunctor<platform::GPUPlace, double>;
template class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, double>;
template class MaxPool2dWithIndexFunctor<platform::GPUPlace, float, int>;
template class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, float, int>;
template class MaxPool2dWithIndexFunctor<platform::GPUPlace, double, int>;
template class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, double, int>;
template <typename T>
template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdx(
const int nthreads, const T* input_data, const int channels,
const int nthreads, const T1* input_data, const int channels,
const int input_depth, const int input_height, const int input_width,
const int output_depth, const int output_height, const int output_width,
const int ksize_depth, const int ksize_height, const int ksize_width,
const int stride_depth, const int stride_height, const int stride_width,
const int padding_depth, const int padding_height, const int padding_width,
T* output_data, T* mask_data) {
T1* output_data, T2* mask_data) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
......@@ -865,7 +865,7 @@ __global__ void KernelMaxPool3DWithIdx(
hstart = max(hstart, 0);
wstart = max(wstart, 0);
T ele = -FLT_MAX;
T1 ele = -FLT_MAX;
int max_index = -1;
input_data +=
(batch_idx * channels + c) * input_depth * input_height * input_width;
......@@ -885,15 +885,15 @@ __global__ void KernelMaxPool3DWithIdx(
}
}
template <typename T>
template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdxGrad(
const int nthreads, const T* output_grad, const T* mask, const int channels,
const int input_depth, const int input_height, const int input_width,
const int output_depth, const int output_height, const int output_width,
const int ksize_depth, const int ksize_height, const int ksize_width,
const int stride_depth, const int stride_height, const int stride_width,
const int padding_depth, const int padding_height, const int padding_width,
T* input_grad) {
const int nthreads, const T1* output_grad, const T2* mask,
const int channels, const int input_depth, const int input_height,
const int input_width, const int output_depth, const int output_height,
const int output_width, const int ksize_depth, const int ksize_height,
const int ksize_width, const int stride_depth, const int stride_height,
const int stride_width, const int padding_depth, const int padding_height,
const int padding_width, T1* input_grad) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset = index % input_width;
......@@ -922,7 +922,7 @@ __global__ void KernelMaxPool3DWithIdxGrad(
int pw_end =
min((w_offset + padding_width) / stride_width + 1, output_width);
T gradient = 0;
T1 gradient = 0;
int input_current_feature_map_idx =
(d_offset * input_height + h_offset) * input_width + w_offset;
int output_idx = (batch_idx * channels + c_offset) * output_depth *
......@@ -949,8 +949,8 @@ __global__ void KernelMaxPool3DWithIdxGrad(
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename T>
class MaxPool3dWithIndexFunctor<platform::GPUPlace, T> {
template <typename T1, typename T2>
class MaxPool3dWithIndexFunctor<platform::GPUPlace, T1, T2> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, std::vector<int>& ksize,
......@@ -975,9 +975,9 @@ class MaxPool3dWithIndexFunctor<platform::GPUPlace, T> {
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
T* mask_data = mask->mutable_data<T>(context.GetPlace());
const T1* input_data = input.data<T1>();
T1* output_data = output->mutable_data<T1>(context.GetPlace());
T2* mask_data = mask->mutable_data<T2>(context.GetPlace());
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
......@@ -986,9 +986,9 @@ class MaxPool3dWithIndexFunctor<platform::GPUPlace, T> {
dim3 grid(blocks, 1);
KernelMaxPool3DWithIdx<
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
T1, T2><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
nthreads, input_data, input_channels, input_depth, input_height,
input_width, output_depth, output_height, output_width, ksize_depth,
ksize_height, ksize_width, stride_depth, stride_height, stride_width,
......@@ -1001,8 +1001,8 @@ class MaxPool3dWithIndexFunctor<platform::GPUPlace, T> {
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename T>
class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, T> {
template <typename T1, typename T2>
class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, T1, T2> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& output_grad,
......@@ -1027,9 +1027,9 @@ class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, T> {
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* output_grad_data = output_grad.data<T>();
const T* mask_data = mask.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
const T1* output_grad_data = output_grad.data<T1>();
const T2* mask_data = mask.data<T2>();
T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace());
int nthreads =
batch_size * input_channels * input_depth * input_height * input_width;
......@@ -1038,9 +1038,9 @@ class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, T> {
dim3 grid(blocks, 1);
KernelMaxPool3DWithIdxGrad<
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
T1, T2><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
nthreads, output_grad_data, mask_data, input_channels, input_depth,
input_height, input_width, output_depth, output_height, output_width,
ksize_depth, ksize_height, ksize_width, stride_depth, stride_height,
......@@ -1049,10 +1049,10 @@ class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, T> {
}
};
template class MaxPool3dWithIndexFunctor<platform::GPUPlace, float>;
template class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, float>;
template class MaxPool3dWithIndexFunctor<platform::GPUPlace, double>;
template class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, double>;
template class MaxPool3dWithIndexFunctor<platform::GPUPlace, float, int>;
template class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, float, int>;
template class MaxPool3dWithIndexFunctor<platform::GPUPlace, double, int>;
template class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, double, int>;
} // namespace math
} // namespace operators
......
......@@ -153,7 +153,7 @@ class MaxPool3dGradFunctor {
* In pool2d, all tensors are in NCHW format. In pool3d, all tensors are in
* NCDHW format.
*/
template <typename Place, typename T>
template <typename Place, typename T1, typename T2>
class MaxPool2dWithIndexFunctor {
public:
void operator()(const platform::DeviceContext& context,
......@@ -162,7 +162,7 @@ class MaxPool2dWithIndexFunctor {
framework::Tensor* output, framework::Tensor* mask);
};
template <typename Place, typename T>
template <typename Place, typename T1, typename T2>
class MaxPool2dWithIndexGradFunctor {
public:
void operator()(const platform::DeviceContext& context,
......@@ -172,7 +172,7 @@ class MaxPool2dWithIndexGradFunctor {
framework::Tensor* input_grad);
};
template <typename Place, typename T>
template <typename Place, typename T1, typename T2>
class MaxPool3dWithIndexFunctor {
public:
void operator()(const platform::DeviceContext& context,
......@@ -181,7 +181,7 @@ class MaxPool3dWithIndexFunctor {
framework::Tensor* output, framework::Tensor* mask);
};
template <typename Place, typename T>
template <typename Place, typename T1, typename T2>
class MaxPool3dWithIndexGradFunctor {
public:
void operator()(const platform::DeviceContext& context,
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. */
#include "paddle/operators/maxout_op.h"
namespace paddle {
namespace operators {
using framework::Tensor;
class MaxOutOpMaker : public framework::OpProtoAndCheckerMaker {
public:
MaxOutOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X",
"(Tensor) The input tensor of maxout operator. "
"The format of input tensor is NCHW. Where N is batch size, C is the "
"number of channels, H and W is the height and width of feature.");
AddOutput("Out",
"(Tensor) The output tensor of maxout operator."
"The format of output tensor is also NCHW."
"Where N is batch size, C is "
"the number of channels, H and W is the height and "
"width of feature.");
AddAttr<int>(
"groups",
R"DOC("Specifies how many groups the input tensor will be split"
"in the channel dimension. And the number of output channel is "
"the number of channels divided by groups.."
)DOC");
AddComment(R"DOC(
Assume the input shape is (N, Ci, H, W) and the output shape is
(N, Co, H, W); then `Co = Ci / groups`.
math:
y_{si+j} = \max_k x_{gsi + sk + j}
g = groups
s = input.size / num_channels
0 \le i < num_channels / groups
0 \le j < s
0 \le k < groups
Please refer to the papers:
- Maxout Networks: http://www.jmlr.org/proceedings/papers/v28/goodfellow13.pdf
- Multi-digit Number Recognition from Street View \
Imagery using Deep Convolutional Neural Networks: \
https://arxiv.org/pdf/1312.6082v4.pdf
)DOC");
}
};
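// Worked example (illustrative, not from the source): input shape
// (N, Ci, H, W) = (1, 6, 8, 8) with groups = 3 gives Co = 6 / 3 = 2, so Out
// has shape (1, 2, 8, 8); each output channel is the element-wise max over
// its 3 consecutive input channels.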
class MaxOutOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of MaxoutOp"
"should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of MaxoutOp should not be null.");
auto in_x_dims = ctx->GetInputDim("X");
int groups = ctx->Attrs().Get<int>("groups");
// groups must be greater than 1
PADDLE_ENFORCE_GT(groups, 1,
"Attr(groups) of MaxOutOp should be greater than 1.");
std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1] / groups});
output_shape.push_back(in_x_dims[2]);
output_shape.push_back(in_x_dims[3]);
ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
}
};
class MaxOutOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
"Input(X@GRAD) should not be null.");
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(maxout, ops::MaxOutOp, ops::MaxOutOpMaker, maxout_grad,
ops::MaxOutOpGrad);
REGISTER_OP_CPU_KERNEL(maxout,
ops::MaxOutKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(maxout_grad,
ops::MaxOutGradKernel<paddle::platform::CPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/maxout_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(maxout,
ops::MaxOutKernel<paddle::platform::GPUPlace, float>,
ops::MaxOutKernel<paddle::platform::GPUPlace, double>);
REGISTER_OP_GPU_KERNEL(maxout_grad,
ops::MaxOutGradKernel<paddle::platform::GPUPlace, float>,
ops::MaxOutGradKernel<paddle::platform::GPUPlace, double>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/maxouting.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename Place, typename T>
class MaxOutKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const Tensor* in_x = context.Input<Tensor>("X");
Tensor* out = context.Output<Tensor>("Out");
int groups = context.template Attr<int>("groups");
math::MaxOutFunctor<Place, T> maxout_forward;
maxout_forward(context.device_context(), *in_x, out, groups);
}
};
template <typename Place, typename T>
class MaxOutGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const Tensor* in_x = context.Input<Tensor>("X");
const Tensor* out = context.Input<Tensor>("Out");
const Tensor* out_grad =
context.Input<Tensor>(framework::GradVarName("Out"));
Tensor* in_x_grad = context.Output<Tensor>(framework::GradVarName("X"));
int groups = context.template Attr<int>("groups");
auto& device_ctx = context.device_context();
math::SetConstant<Place, T> zero;
if (in_x_grad) {
in_x_grad->mutable_data<T>(context.GetPlace());
zero(device_ctx, in_x_grad, static_cast<T>(0.0));
math::MaxOutGradFunctor<Place, T> maxout_backward;
maxout_backward(context.device_context(), *in_x, in_x_grad, *out,
*out_grad, groups);
}
}
};
} // namespace operators
} // namespace paddle
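For reference, a NumPy sketch of the gradient routing that math::MaxOutGradFunctor is expected to perform (an illustration of the assumed semantics, not the actual functor): the upstream gradient flows only to the channel that won the forward max, which is why MaxOutGradKernel zero-fills in_x_grad first.

```python
import numpy as np

def maxout_backward(x, dout, groups):
    # dout has shape (N, C // groups, H, W); scatter each upstream
    # gradient to the argmax channel of its group, zeros elsewhere.
    n, c, h, w = x.shape
    xr = x.reshape(n, c // groups, groups, h, w)
    winners = xr.argmax(axis=2, keepdims=True)  # first max wins ties
    dx = np.zeros_like(xr)
    np.put_along_axis(dx, winners, dout[:, :, None], axis=2)
    return dx.reshape(x.shape)
```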
......@@ -20,6 +20,18 @@ REGISTER_OP(pool2d_cudnn, ops::PoolOp, ops::Pool2dOpMaker, pool2d_cudnn_grad,
ops::PoolOpGrad);
REGISTER_OP_CPU_KERNEL(pool2d_cudnn,
ops::PoolKernel<paddle::platform::CPUPlace, float>,
ops::PoolKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP_CPU_KERNEL(pool2d_cudnn_grad,
ops::PoolGradKernel<paddle::platform::CPUPlace, float>,
ops::PoolGradKernel<paddle::platform::CPUPlace, double>)
REGISTER_OP(pool3d_cudnn, ops::PoolOp, ops::Pool3dOpMaker, pool3d_cudnn_grad,
ops::PoolOpGrad);
REGISTER_OP_CPU_KERNEL(pool3d_cudnn,
ops::PoolKernel<paddle::platform::CPUPlace, float>,
ops::PoolKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP_CPU_KERNEL(pool3d_cudnn_grad,
ops::PoolGradKernel<paddle::platform::CPUPlace, float>,
ops::PoolGradKernel<paddle::platform::CPUPlace, double>)
......@@ -52,7 +52,13 @@ class PoolCudnnOpKernel : public framework::OpKernel<T> {
ScopedTensorDescriptor input_desc;
ScopedTensorDescriptor output_desc;
ScopedPoolingDescriptor pool_desc;
DataLayout layout;
if (strides.size() == 2U) {
layout = DataLayout::kNCHW;
} else {
layout = DataLayout::kNCDHW;
}
cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, framework::vectorize2int(input->dims()));
......@@ -112,7 +118,13 @@ class PoolCudnnGradOpKernel : public framework::OpKernel<T> {
ScopedTensorDescriptor input_desc;
ScopedTensorDescriptor output_desc;
ScopedPoolingDescriptor pool_desc;
DataLayout layout;
if (strides.size() == 2U) {
layout = DataLayout::kNCHW;
} else {
layout = DataLayout::kNCDHW;
}
cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, framework::vectorize2int(input->dims()));
......@@ -135,8 +147,7 @@ class PoolCudnnGradOpKernel : public framework::OpKernel<T> {
if (input_grad) {
T *input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
// Because beta is zero, it is unnecessary to reset input_grad.
PADDLE_ENFORCE(platform::dynload::cudnnPoolingBackward(
handle, cudnn_pool_desc, &alpha, cudnn_output_desc, output_data,
......@@ -151,5 +162,12 @@ class PoolCudnnGradOpKernel : public framework::OpKernel<T> {
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(pool2d_cudnn, ops::PoolCudnnOpKernel<float>,
ops::PoolCudnnOpKernel<double>);
REGISTER_OP_GPU_KERNEL(pool2d_cudnn_grad, ops::PoolCudnnGradOpKernel<float>,
ops::PoolCudnnGradOpKernel<double>);
REGISTER_OP_GPU_KERNEL(pool3d_cudnn, ops::PoolCudnnOpKernel<float>,
ops::PoolCudnnOpKernel<double>);
REGISTER_OP_GPU_KERNEL(pool3d_cudnn_grad, ops::PoolCudnnGradOpKernel<float>,
ops::PoolCudnnGradOpKernel<double>);
......@@ -217,14 +217,18 @@ REGISTER_OP(pool2d, ops::PoolOp, ops::Pool2dOpMaker, pool2d_grad,
ops::PoolOpGrad);
REGISTER_OP_CPU_KERNEL(pool2d,
ops::PoolKernel<paddle::platform::CPUPlace, float>,
ops::PoolKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP_CPU_KERNEL(pool2d_grad,
ops::PoolGradKernel<paddle::platform::CPUPlace, float>,
ops::PoolGradKernel<paddle::platform::CPUPlace, double>)
REGISTER_OP(pool3d, ops::PoolOp, ops::Pool3dOpMaker, pool3d_grad,
ops::PoolOpGrad);
REGISTER_OP_CPU_KERNEL(pool3d,
ops::PoolKernel<paddle::platform::CPUPlace, float>,
ops::PoolKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP_CPU_KERNEL(pool3d_grad,
ops::PoolGradKernel<paddle::platform::CPUPlace, float>,
ops::PoolGradKernel<paddle::platform::CPUPlace, double>);
......@@ -17,11 +17,15 @@ limitations under the License. */
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(pool2d,
ops::PoolKernel<paddle::platform::GPUPlace, float>,
ops::PoolKernel<paddle::platform::GPUPlace, double>);
REGISTER_OP_GPU_KERNEL(pool2d_grad,
ops::PoolGradKernel<paddle::platform::GPUPlace, float>,
ops::PoolGradKernel<paddle::platform::GPUPlace, double>);
REGISTER_OP_GPU_KERNEL(pool3d,
ops::PoolKernel<paddle::platform::GPUPlace, float>,
ops::PoolKernel<paddle::platform::GPUPlace, double>);
REGISTER_OP_GPU_KERNEL(pool3d_grad,
ops::PoolGradKernel<paddle::platform::GPUPlace, float>,
ops::PoolGradKernel<paddle::platform::GPUPlace, double>);
......@@ -29,11 +29,11 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"X(Input) of Pooling should not be null.");
"Input(X) of Pooling should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Out(Output) of Pooling should not be null.");
"Output(Out) of Pooling should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Mask"),
"Mask(Output) of Pooling should not be null.");
"Output(Mask) of Pooling should not be null.");
auto in_x_dims = ctx->GetInputDim("X");
......@@ -67,6 +67,14 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
ctx->SetOutputDim("Mask", framework::make_ddim(output_shape));
}
protected:
framework::OpKernelType GetKernelType(
const framework::ExecutionContext &ctx) const override {
return framework::OpKernelType(
framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
ctx.device_context());
}
};
class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel {
......@@ -80,6 +88,14 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel {
"Input(X@GRAD) should not be null.");
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
}
protected:
framework::OpKernelType GetKernelType(
const framework::ExecutionContext &ctx) const override {
return framework::OpKernelType(
framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
ctx.device_context());
}
};
class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
......@@ -116,7 +132,7 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
// TypedAttrChecker don't support vector type.)
AddAttr<bool>(
"global_pooling",
"(bool, default false) Whether to use the global pooling. "
"(bool, default:false) Whether to use the global pooling. "
"If global_pooling = true, ksize and paddings will be ignored.")
.SetDefault(false);
AddAttr<std::vector<int>>("strides",
......@@ -126,7 +142,7 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
// TypedAttrChecker don't support vector type.)
AddAttr<std::vector<int>>(
"paddings",
"(vector<int>, defalut {0, 0}), paddings(height, width) of pooling "
"(vector<int>, defalut:{0, 0}), paddings(height, width) of pooling "
"operator. "
"If global_pooling = true, paddings and will be ignored.")
.SetDefault({0, 0}); // TODO(Chengduo): Add checker. (Currently,
......@@ -250,10 +266,12 @@ REGISTER_OP(max_pool2d_with_index, ops::MaxPoolWithIndexOp,
REGISTER_OP_CPU_KERNEL(
max_pool2d_with_index,
ops::MaxPoolWithIndexKernel<paddle::platform::CPUPlace, float, int>,
ops::MaxPoolWithIndexKernel<paddle::platform::CPUPlace, double, int>);
REGISTER_OP_CPU_KERNEL(
max_pool2d_with_index_grad,
ops::MaxPoolWithIndexGradKernel<paddle::platform::CPUPlace, float, int>,
ops::MaxPoolWithIndexGradKernel<paddle::platform::CPUPlace, double, int>)
REGISTER_OP(max_pool3d_with_index, ops::MaxPoolWithIndexOp,
ops::MaxPool3dWithIndexOpMaker, max_pool3d_with_index_grad,
......@@ -261,7 +279,9 @@ REGISTER_OP(max_pool3d_with_index, ops::MaxPoolWithIndexOp,
REGISTER_OP_CPU_KERNEL(
max_pool3d_with_index,
ops::MaxPoolWithIndexKernel<paddle::platform::CPUPlace, float, int>,
ops::MaxPoolWithIndexKernel<paddle::platform::CPUPlace, double, int>);
REGISTER_OP_CPU_KERNEL(
max_pool3d_with_index_grad,
ops::MaxPoolWithIndexGradKernel<paddle::platform::CPUPlace, float, int>,
ops::MaxPoolWithIndexGradKernel<paddle::platform::CPUPlace, double, int>)
......@@ -18,14 +18,18 @@ namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(
max_pool2d_with_index,
ops::MaxPoolWithIndexKernel<paddle::platform::GPUPlace, float, int>,
ops::MaxPoolWithIndexKernel<paddle::platform::GPUPlace, double, int>);
REGISTER_OP_GPU_KERNEL(
max_pool2d_with_index_grad,
ops::MaxPoolWithIndexGradKernel<paddle::platform::GPUPlace, float, int>,
ops::MaxPoolWithIndexGradKernel<paddle::platform::GPUPlace, double, int>)
REGISTER_OP_GPU_KERNEL(
max_pool3d_with_index,
ops::MaxPoolWithIndexKernel<paddle::platform::GPUPlace, float, int>,
ops::MaxPoolWithIndexKernel<paddle::platform::GPUPlace, double, int>);
REGISTER_OP_GPU_KERNEL(
max_pool3d_with_index_grad,
ops::MaxPoolWithIndexGradKernel<paddle::platform::GPUPlace, float, int>,
ops::MaxPoolWithIndexGradKernel<paddle::platform::GPUPlace, double, int>)
......@@ -24,8 +24,8 @@ namespace operators {
using Tensor = framework::Tensor;
template <typename Place, typename T1, typename T2>
class MaxPoolWithIndexKernel : public framework::OpKernel<T1> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const Tensor* in_x = context.Input<Tensor>("X");
......@@ -44,13 +44,13 @@ class MaxPoolWithIndexKernel : public framework::OpKernel<T> {
switch (ksize.size()) {
case 2: {
paddle::operators::math::MaxPool2dWithIndexFunctor<Place, T1, T2>
pool2d_forward;
pool2d_forward(context.device_context(), *in_x, ksize, strides,
paddings, out, mask);
} break;
case 3: {
paddle::operators::math::MaxPool3dWithIndexFunctor<Place, T1, T2>
pool3d_forward;
pool3d_forward(context.device_context(), *in_x, ksize, strides,
paddings, out, mask);
......@@ -60,8 +60,8 @@ class MaxPoolWithIndexKernel : public framework::OpKernel<T> {
}
};
template <typename Place, typename T1, typename T2>
class MaxPoolWithIndexGradKernel : public framework::OpKernel<T1> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const Tensor* mask = context.Input<Tensor>("Mask");
......@@ -80,19 +80,19 @@ class MaxPoolWithIndexGradKernel : public framework::OpKernel<T> {
}
if (in_x_grad) {
in_x_grad->mutable_data<T1>(context.GetPlace());
auto& device_ctx = context.device_context();
math::set_constant(device_ctx, in_x_grad, 0);
switch (ksize.size()) {
case 2: {
paddle::operators::math::MaxPool2dWithIndexGradFunctor<Place, T1, T2>
pool2d_backward;
pool2d_backward(device_ctx, *out_grad, *mask, ksize, strides,
paddings, in_x_grad);
} break;
case 3: {
paddle::operators::math::MaxPool3dWithIndexGradFunctor<Place, T1, T2>
pool3d_backward;
pool3d_backward(device_ctx, *out_grad, *mask, ksize, strides,
paddings, in_x_grad);
......
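The T1/T2 template split above separates the pooled value type (float or double) from the mask's index type (int). A hypothetical single-channel NumPy reference, assuming no padding, shows what the mask stores and why the backward pass needs nothing but the mask:

```python
import numpy as np

def max_pool2d_with_index(x, ksize, stride):
    # Returns the pooled values (T1) and, for every output element,
    # the flat index of the winning input element (T2); the backward
    # pass scatters gradients straight to those indices.
    h, w = x.shape
    oh, ow = (h - ksize) // stride + 1, (w - ksize) // stride + 1
    out = np.empty((oh, ow), x.dtype)
    mask = np.empty((oh, ow), np.int64)
    for i in range(oh):
        for j in range(ow):
            win = x[i * stride:i * stride + ksize,
                    j * stride:j * stride + ksize]
            r, c = np.unravel_index(win.argmax(), win.shape)
            out[i, j] = win[r, c]
            mask[i, j] = (i * stride + r) * w + (j * stride + c)
    return out, mask
```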
......@@ -179,7 +179,9 @@ REGISTER_OP(sequence_conv, ops::SequenceConvOp, ops::SequenceConvOpMaker,
sequence_conv_grad, ops::SequenceConvGradOp);
REGISTER_OP_CPU_KERNEL(
sequence_conv, ops::SequenceConvKernel<paddle::platform::CPUPlace, float>,
ops::SequenceConvKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP_CPU_KERNEL(
sequence_conv_grad,
ops::SequenceConvGradKernel<paddle::platform::CPUPlace, float>,
ops::SequenceConvGradKernel<paddle::platform::CPUPlace, double>);
......@@ -16,7 +16,9 @@
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(
sequence_conv, ops::SequenceConvKernel<paddle::platform::GPUPlace, float>,
ops::SequenceConvKernel<paddle::platform::GPUPlace, double>);
REGISTER_OP_GPU_KERNEL(
sequence_conv_grad,
ops::SequenceConvGradKernel<paddle::platform::GPUPlace, float>,
ops::SequenceConvGradKernel<paddle::platform::GPUPlace, double>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/sequence_slice_op.h"
namespace paddle {
namespace operators {
class SequenceSliceOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of SequenceSliceOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Offset"),
"Input(Offset) of SequenceSliceOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Length"),
"Input(Length) of SequenceSliceOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of SequenceSliceOp should not be null.");
auto input_dims = ctx->GetInputDim("X");
auto offset_dim = ctx->GetInputDim("Offset");
auto length_dim = ctx->GetInputDim("Length");
PADDLE_ENFORCE_EQ(
offset_dim.size(), 2UL,
"Only support one level sequence now, The rank of offset must be 2.");
PADDLE_ENFORCE_EQ(
length_dim.size(), 2UL,
"Only support one level sequence now, The rank of Length must be 2.");
// Initialize the output's dims to the maximum here,
// and re-set them to the real dims from the values of Offset and Length in the kernel.
ctx->SetOutputDim("Out", input_dims);
}
protected:
framework::OpKernelType GetKernelType(
const framework::ExecutionContext& ctx) const override {
return framework::OpKernelType(
framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()),
ctx.device_context());
}
};
class SequenceSliceGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
"The gradient of Out should not be null.");
PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName("X")),
"The gradient of X should not be null.");
ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X"));
}
protected:
framework::OpKernelType GetKernelType(
const framework::ExecutionContext& ctx) const override {
return framework::OpKernelType(
framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()),
ctx.device_context());
}
};
class SequenceSliceOpMaker : public framework::OpProtoAndCheckerMaker {
public:
SequenceSliceOpMaker(framework::OpProto* proto,
framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X",
"(LoDTensor), "
"the input of SequenceSliceOp.");
AddInput("Offset",
"(Tensor), "
"a vector<int> to describe the offset of every input sequence for "
"sub sequence item.");
AddInput("Length",
"(Tensor), "
"a vector<int> to describe the length of every input sequence for "
"sub sequence item.");
AddOutput("Out",
"(LoDTensor), the output of SequenceSliceOp.");
AddComment(R"DOC(
Sequence slice operator
The operator crops a subsequence from a given sequence with a given start offset and subsequence length.
It only supports one-level sequences (LoD tensors whose LoD level is 1).
- Case:
X = [[a1, a2;
b1, b2;
c1, c2]
[d1, d2;
e1, e2]]
LoD(X) = {{0, 3, 5}}; Dims(X) = (5, 2)
Offset = [[0], [1]]; Length = [[2], [1]]
Out = [[a1, a2;
b1, b2]
[e1, e2]]
LoD(Out) = {{0, 2, 3}}; Dims(Out) = (3, 2)
NOTE: The first dimension sizes of the input, Offset, and Length should be equal. Offsets start from 0.
)DOC");
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(sequence_slice, ops::SequenceSliceOp, ops::SequenceSliceOpMaker,
sequence_slice_grad, ops::SequenceSliceGradOp);
REGISTER_OP_CPU_KERNEL(
sequence_slice,
ops::SequenceSliceOpKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
sequence_slice_grad,
ops::SequenceSliceGradOpKernel<paddle::platform::CPUPlace, float>);
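A NumPy sketch of the worked case in the DOC above (an illustration, not the Paddle kernel); it rebuilds the output LoD the same way SequenceSliceLoD in sequence_slice_op.h below does:

```python
import numpy as np

def sequence_slice(x, lod, offset, length):
    # Slice each level-1 sequence by its own offset and length and
    # accumulate the new LoD boundaries.
    pieces, out_lod = [], [0]
    for i in range(len(lod) - 1):
        start = lod[i] + offset[i]
        pieces.append(x[start:start + length[i]])
        out_lod.append(out_lod[-1] + length[i])
    return np.concatenate(pieces), out_lod

x = np.arange(1, 11, dtype=np.float32).reshape(5, 2)
out, out_lod = sequence_slice(x, [0, 3, 5], offset=[0, 1], length=[2, 1])
# out keeps rows 0-1 of the first sequence and row 4 of the second;
# out_lod == [0, 2, 3], matching the DOC example.
```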
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/sequence_slice_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(
sequence_slice,
ops::SequenceSliceOpKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
sequence_slice_grad,
ops::SequenceSliceGradOpKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/strided_memcpy.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using LoD = framework::LoD;
template <typename T>
inline LoD SequenceSliceLoD(const T& in, const int64_t* offset_data,
const int64_t* length_data) {
auto out_lod = in.lod();
size_t lod_offset = 0;
auto n = in.lod()[0].size() - 1;
out_lod[0][0] = 0;
for (size_t i = 0; i < n; ++i) {
lod_offset += length_data[i];
out_lod[0][i+1] = lod_offset;
}
return out_lod;
}
template <typename Place, typename T>
class SequenceSliceOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<LoDTensor>("X");
auto* offset = ctx.Input<Tensor>("Offset");
auto* length = ctx.Input<Tensor>("Length");
auto* out = ctx.Output<LoDTensor>("Out");
auto lod = in->lod();
auto n = lod[0].size() - 1;
PADDLE_ENFORCE_EQ(lod.size(), 1UL,
"Only support one level sequence now.");
PADDLE_ENFORCE_EQ(
n, static_cast<size_t>(length->dims()[0]),
"The size of input-sequence and length-array should be the same")
PADDLE_ENFORCE_EQ(
n, static_cast<size_t>(offset->dims()[0]),
"The size of input-sequence and offset-array should be the same")
const int64_t* offset_data = offset->data<int64_t>();
const int64_t* length_data = length->data<int64_t>();
framework::Tensor offset_cpu;
framework::Tensor length_cpu;
if (platform::is_gpu_place(ctx.GetPlace())) {
offset_cpu.mutable_data<int64_t>(offset->dims(), platform::CPUPlace());
offset_cpu.CopyFrom(*offset, platform::CPUPlace(), ctx.device_context());
offset_data = offset_cpu.data<int64_t>();
length_cpu.mutable_data<int64_t>(length->dims(), platform::CPUPlace());
length_cpu.CopyFrom(*length, platform::CPUPlace(), ctx.device_context());
length_data = length_cpu.data<int64_t>();
}
for (size_t i = 0; i < n; ++i) {
PADDLE_ENFORCE_LT(0, offset_data[i],
"The offset[%d] must be greater than zero.", i);
PADDLE_ENFORCE_LT(0, length_data[i],
"The length[%d] must be greater than zero.", i);
PADDLE_ENFORCE_LT(
lod[0][i] + offset_data[i] + length_data[i],
lod[0][i + 1],
"The target tensor's length overflows.");
}
out->mutable_data<T>(ctx.GetPlace());
auto out_lod = SequenceSliceLoD(*in, offset_data, length_data);
auto out_dims = in->dims();
out_dims[0] = out_lod[0][out_lod[0].size() - 1];
out->Resize(out_dims);
out->set_lod(out_lod);
auto in_stride = framework::stride(in->dims());
auto out_stride = framework::stride(out->dims());
size_t out_offset = 0;
for (size_t i = 0; i < n; ++i) {
Tensor in_t =
in->Slice(static_cast<int>(lod[0][i] + offset_data[i]),
static_cast<int>(lod[0][i] + offset_data[i] +
length_data[i]));
StridedMemcpy<T>(ctx.device_context(), in_t.data<T>(),
in_stride, in_t.dims(), out_stride,
out->data<T>() + out_offset);
out_offset += length_data[i] * in_stride[0];
}
}
};
template <typename Place, typename T>
class SequenceSliceGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<LoDTensor>("X");
auto* offset = ctx.Input<Tensor>("Offset");
auto* length = ctx.Input<Tensor>("Length");
auto* out_grad =
ctx.Input<framework::LoDTensor>(framework::GradVarName("Out"));
auto* x_grad =
ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
const int64_t* offset_data = offset->data<int64_t>();
const int64_t* length_data = length->data<int64_t>();
framework::Tensor offset_cpu;
framework::Tensor length_cpu;
if (platform::is_gpu_place(ctx.GetPlace())) {
offset_cpu.mutable_data<int64_t>(offset->dims(), platform::CPUPlace());
offset_cpu.CopyFrom(*offset, platform::CPUPlace(), ctx.device_context());
offset_data = offset_cpu.data<int64_t>();
length_cpu.mutable_data<int64_t>(length->dims(), platform::CPUPlace());
length_cpu.CopyFrom(*length, platform::CPUPlace(), ctx.device_context());
length_data = length_cpu.data<int64_t>();
}
auto lod = in->lod();
auto out_lod = out_grad->lod();
if (x_grad) {
x_grad->mutable_data<T>(ctx.GetPlace());
x_grad->set_lod(in->lod());
math::SetConstant<Place, T> set_zero;
set_zero(ctx.device_context(), x_grad, static_cast<T>(0));
for (size_t i = 0; i < out_lod[0].size() - 1; ++i) {
Tensor out_grad_t =
out_grad->Slice(static_cast<int>(out_lod[0][i]),
static_cast<int>(out_lod[0][i + 1]));
auto out_grad_stride = framework::stride(out_grad_t.dims());
auto x_grad_stride = framework::stride(x_grad->dims());
Tensor x_grad_t = x_grad->Slice(
static_cast<int>(lod[0][i] + offset_data[i]),
static_cast<int>(lod[0][i] + offset_data[i] + length_data[i]));
StridedMemcpy<T>(ctx.device_context(), out_grad_t.data<T>(),
out_grad_stride, out_grad_t.dims(), x_grad_stride,
x_grad_t.data<T>());
}
}
}
};
} // namespace operators
} // namespace paddle
......@@ -30,7 +30,7 @@ void sgdUpdateCpu(real learningRate,
const real* grad,
real* momentumVec) {
decayRate *= learningRate;
#ifdef PADDLE_USE_MKLML
#pragma omp parallel for
#endif
for (size_t i = 0; i < size; ++i) {
......
......@@ -63,9 +63,10 @@ inline const char* cudnnGetErrorString(cudnnStatus_t status) {
} \
} while (false)
enum class DataLayout {  // Not used
kNHWC,
kNCHW,
kNCDHW,
kNCHW_VECT_C,
};
......@@ -107,12 +108,15 @@ class CudnnDataType<double> {
}
};
inline cudnnTensorFormat_t GetCudnnTensorFormat(
const DataLayout& order) {  // Not used
switch (order) {
case DataLayout::kNHWC:
return CUDNN_TENSOR_NHWC;
case DataLayout::kNCHW:
return CUDNN_TENSOR_NCHW;
case DataLayout::kNCDHW:
return CUDNN_TENSOR_NCHW; // TODO(chengduoZH) : add CUDNN_TENSOR_NCDHW
default:
PADDLE_THROW("Unknown cudnn equivalent for order");
}
......@@ -139,7 +143,7 @@ class ScopedTensorDescriptor {
strides[i] = dims[i + 1] * strides[i + 1];
}
// Update tensor descriptor dims setting if groups > 1
// FIXME(typhoonzero): Assume using NCHW or NCDHW order
std::vector<int> dims_with_group(dims.begin(), dims.end()); // copy
if (groups > 1) {
dims_with_group[1] = dims_with_group[1] / groups;
......@@ -176,9 +180,10 @@ class ScopedFilterDescriptor {
const cudnnDataType_t type,
const std::vector<int>& kernel,
const int groups = 1) {
// filter layout: MCHW(MCDHW), where M is the number of
// output image channels, C is the number of input image channels,
// D is the depth of the filter, H is the height of the filter, and W is the
// width of the filter.
std::vector<int> kernel_with_group(kernel.begin(), kernel.end());
if (groups > 1) {
// M /= groups
......@@ -219,13 +224,15 @@ class ScopedConvolutionDescriptor {
PADDLE_ENFORCE_EQ(pads.size(), strides.size());
PADDLE_ENFORCE_EQ(pads.size(), dilations.size());
#if !CUDNN_VERSION_MIN(6, 0, 0)
// cudnn v5 does not support dilation conv, the argument is called upscale
// instead of dilations and it must be one.
for (size_t i = 0; i < dilations.size(); ++i) {
PADDLE_ENFORCE_EQ(
dilations[i], 1,
"Dilations conv is not supported in this cuDNN version");
"Dilations conv is not supported in this cuDNN version(%d.%d.%d).",
CUDNN_VERSION / 1000, CUDNN_VERSION % 1000 / 100,
CUDNN_VERSION % 100);
}
#endif
......
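The strides filled in by ScopedTensorDescriptor above are plain row-major strides (innermost 1, strides[i] = dims[i + 1] * strides[i + 1]). A short Python reference reproduces the exact values the descriptor tests below check for the 4-D and 5-D shapes:

```python
def dense_strides(dims):
    # Row-major strides, computed back to front as in the descriptor.
    strides = [1] * len(dims)
    for i in range(len(dims) - 2, -1, -1):
        strides[i] = dims[i + 1] * strides[i + 1]
    return strides

assert dense_strides([2, 4, 6, 6]) == [144, 36, 6, 1]          # NCHW
assert dense_strides([2, 4, 6, 6, 6]) == [864, 216, 36, 6, 1]  # NCDHW
```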
......@@ -38,6 +38,26 @@ TEST(CudnnHelper, ScopedTensorDescriptor) {
EXPECT_EQ(strides[2], 6);
EXPECT_EQ(strides[1], 36);
EXPECT_EQ(strides[0], 144);
// test tensor5d: ScopedTensorDescriptor
ScopedTensorDescriptor tensor5d_desc;
std::vector<int> shape_5d = {2, 4, 6, 6, 6};
auto desc_5d = tensor5d_desc.descriptor<float>(DataLayout::kNCDHW, shape_5d);
std::vector<int> dims_5d(5);
std::vector<int> strides_5d(5);
paddle::platform::dynload::cudnnGetTensorNdDescriptor(
desc_5d, 5, &type, &nd, dims_5d.data(), strides_5d.data());
EXPECT_EQ(nd, 5);
for (size_t i = 0; i < dims_5d.size(); ++i) {
EXPECT_EQ(dims_5d[i], shape_5d[i]);
}
EXPECT_EQ(strides_5d[4], 1);
EXPECT_EQ(strides_5d[3], 6);
EXPECT_EQ(strides_5d[2], 36);
EXPECT_EQ(strides_5d[1], 216);
EXPECT_EQ(strides_5d[0], 864);
}
TEST(CudnnHelper, ScopedFilterDescriptor) {
......@@ -60,6 +80,20 @@ TEST(CudnnHelper, ScopedFilterDescriptor) {
for (size_t i = 0; i < shape.size(); ++i) {
EXPECT_EQ(kernel[i], shape[i]);
}
ScopedFilterDescriptor filter_desc_4d;
std::vector<int> shape_4d = {2, 3, 3, 3};
auto desc_4d = filter_desc_4d.descriptor<float>(DataLayout::kNCDHW, shape_4d);
std::vector<int> kernel_4d(4);
paddle::platform::dynload::cudnnGetFilterNdDescriptor(
desc_4d, 4, &type, &format, &nd, kernel_4d.data());
EXPECT_EQ(GetCudnnTensorFormat(DataLayout::kNCHW), format);
EXPECT_EQ(nd, 4);
for (size_t i = 0; i < shape_4d.size(); ++i) {
EXPECT_EQ(kernel_4d[i], shape_4d[i]);
}
}
TEST(CudnnHelper, ScopedConvolutionDescriptor) {
......
......@@ -57,8 +57,7 @@ Users can specify the following Docker build arguments with either "ON" or "OFF"
| `WITH_GPU` | OFF | Generates NVIDIA CUDA GPU code and relies on CUDA libraries. |
| `WITH_AVX` | OFF | Set to "ON" to enable AVX support. |
| `WITH_TESTING` | ON | Build unit tests binaries. |
| `WITH_MKL` | ON | Build with [Intel® MKL](https://software.intel.com/en-us/mkl) and [Intel® MKL-DNN](https://github.com/01org/mkl-dnn) support. |
| `WITH_GOLANG` | ON | Build fault-tolerant parameter server written in go. |
| `WITH_SWIG_PY` | ON | Build with SWIG python API support. |
| `WITH_C_API` | OFF | Build capi libraries for inference. |
......
......@@ -34,8 +34,7 @@ function cmake_gen() {
${PYTHON_FLAGS}
-DWITH_DOC=OFF
-DWITH_GPU=${WITH_GPU:-OFF}
-DWITH_MKL=${WITH_MKL:-ON}
-DWITH_AVX=${WITH_AVX:-OFF}
-DWITH_GOLANG=${WITH_GOLANG:-ON}
-DWITH_SWIG_PY=ON
......@@ -56,8 +55,7 @@ EOF
${PYTHON_FLAGS} \
-DWITH_DOC=OFF \
-DWITH_GPU=${WITH_GPU:-OFF} \
-DWITH_MKL=${WITH_MKL:-ON} \
-DWITH_AVX=${WITH_AVX:-OFF} \
-DWITH_GOLANG=${WITH_GOLANG:-ON} \
-DWITH_SWIG_PY=${WITH_SWIG_PY:-ON} \
......@@ -146,7 +144,7 @@ function gen_dockerfile() {
DOCKERFILE_GPU_ENV=""
DOCKERFILE_CUDNN_DSO=""
if [[ ${WITH_GPU:-OFF} == 'ON' ]]; then
DOCKERFILE_GPU_ENV="ENV LD_LIBRARY_PATH /usr/lib/x86_64-linux-gnu:${LD_LIBRARY_PATH}"
DOCKERFILE_GPU_ENV="ENV LD_LIBRARY_PATH /usr/lib/x86_64-linux-gnu:\${LD_LIBRARY_PATH}"
DOCKERFILE_CUDNN_DSO="RUN ln -s /usr/lib/x86_64-linux-gnu/libcudnn.so.5 /usr/lib/x86_64-linux-gnu/libcudnn.so"
fi
......
......@@ -18,8 +18,8 @@ function version(){
echo "PaddlePaddle @PADDLE_VERSION@, compiled with"
echo " with_avx: @WITH_AVX@"
echo " with_gpu: @WITH_GPU@"
echo " with_mkl: @WITH_MKL@"
echo " with_mkldnn: @WITH_MKLDNN@"
echo " with_mklml: @WITH_MKLML@"
echo " with_double: @WITH_DOUBLE@"
echo " with_python: @WITH_PYTHON@"
echo " with_rdma: @WITH_RDMA@"
......@@ -45,8 +45,8 @@ function ver2num() {
function cpu_config() {
# auto set KMP_AFFINITY and OMP_DYNAMIC from Hyper Threading Status
# only when MKL enabled
if [ "@WITH_MKL@" == "OFF" ]; then
return 0
fi
ht=`lscpu |grep "per core"|awk -F':' '{print $2}'|xargs`
......@@ -70,8 +70,8 @@ function cpu_config() {
function threads_config() {
# auto set OMP_NUM_THREADS and MKL_NUM_THREADS
# according to trainer_count and total processors
# only when MKL enabled
if [ "@WITH_MKL@" == "OFF" ]; then
return 0
fi
processors=`grep "processor" /proc/cpuinfo|sort -u|wc -l`
......
......@@ -6,7 +6,7 @@ mkdir -p $TRAVIS_BUILD_DIR/build
cd $TRAVIS_BUILD_DIR/build
# Compile Documentation only.
cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON
make -j `nproc` gen_proto_py
make -j `nproc` paddle_docs paddle_docs_cn
......
......@@ -137,6 +137,10 @@ void Trainer::init(const std::shared_ptr<TrainerConfigHelper>& config,
}
}
if (FLAGS_use_mkldnn) {
CHECK_EQ(FLAGS_trainer_count, 1) << "MKLDNN only needs 1 trainer";
}
if (testing) {
LOG(INFO) << "trainer: in testing mode";
if (config_->getOptConfig().use_sparse_remote_updater() ||
......
......@@ -11,7 +11,6 @@ add_unittest_without_exec(test_Trainer
test_Trainer.cpp)
add_test(NAME test_Trainer
COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python/
${PYTHON_EXECUTABLE} ${PADDLE_SOURCE_DIR}/paddle/trainer/tests/gen_proto_data.py &&
${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python/
${CMAKE_CURRENT_BINARY_DIR}/test_Trainer
WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/)
......@@ -28,35 +27,7 @@ if(WITH_PYTHON)
${PADDLE_SOURCE_DIR}/paddle/.set_port.sh -p port ${CMAKE_CURRENT_BINARY_DIR}/test_TrainerOnePass
WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/)
endif()
################ test_CompareTwoNets ######################
add_unittest_without_exec(test_CompareTwoNets
test_CompareTwoNets.cpp)
add_test(NAME test_CompareTwoNets
COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python/
${CMAKE_CURRENT_BINARY_DIR}/test_CompareTwoNets
--config_file_a=trainer/tests/sample_trainer_config_qb_rnn.conf --config_file_b=trainer/tests/sample_trainer_config_rnn.conf
WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/)
############### test_CompareTwoOpts ###################
add_unittest_without_exec(test_CompareTwoOpts
test_CompareTwoOpts.cpp)
add_test(NAME test_CompareTwoOpts
COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python/
${CMAKE_CURRENT_BINARY_DIR}/test_CompareTwoOpts
--config_file_a=trainer/tests/sample_trainer_config_opt_a.conf --config_file_b=trainer/tests/sample_trainer_config_opt_b.conf
--num_passes=1 --need_high_accuracy=0
WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/)
################# test_CompareSparse ##################
add_unittest_without_exec(test_CompareSparse
test_CompareSparse.cpp)
if(NOT ON_TRAVIS)
add_test(NAME test_CompareSparse
COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python/
./.set_port.sh -p port -n 6
${CMAKE_CURRENT_BINARY_DIR}/test_CompareSparse
WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/)
endif()
################# test_recurrent_machine_generation ###############
add_unittest_without_exec(test_recurrent_machine_generation
test_recurrent_machine_generation.cpp)
......
#edit-mode: -*- python -*-
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Todo(luotao02) This config is only used for unittest. It is out of date now, and will be updated later.
TrainData(ProtoData(
files = 'trainer/tests/train_files.txt',
usage_ratio = 1.0,
))
TestData(ProtoData(
files = 'trainer/tests/test_files.txt'
))
default_initial_std(1)
default_decay_rate(4e-4)
default_device(0)
Inputs("features", "word", "pos", "chunk")
Outputs("crf")
Layer(
name = "features",
type = "data",
size = 4339,
)
Layer(
name = "word",
type = "data",
size = 478,
)
Layer(
name = "pos",
type = "data",
size = 45
)
Layer(
name = "chunk",
type = "data",
size = 23
)
Layer(
name = "output",
type = "mixed",
size = 23,
bias = False,
device = -1,
inputs = [
FullMatrixProjection("features", parameter_name="feature_weights"),
# TableProjection("word"),
# TableProjection("pos"),
],
)
Layer(
name = "crf",
type = "crf",
size = 23,
device = -1,
inputs = [
Input("output", parameter_name="crfw"),
"chunk"
]
)
Layer(
name = "crf_decoding",
type = "crf_decoding",
size = 23,
device = -1,
inputs = [
Input("output", parameter_name="crfw"),
"chunk"
]
)
Evaluator(
name = "error",
type = "sum",
inputs = "crf_decoding",
)
'''
# chunk evaluator cannot be used for GPU training
Evaluator(
name = "chunk_f1",
type = "chunk",
inputs = ["crf_decoding", "chunk"],
chunk_scheme = "IOB",
num_chunk_types = 11,
)
'''
Settings(
algorithm = 'sgd',
batch_size = 100,
average_window = 0.5,
max_average_window = 2500,
learning_rate = 1e-1,
learning_rate_decay_a = 5e-7,
learning_rate_decay_b = 0.75,
l1weight = 0,
l2weight = 1,
c1 = 0.0001,
backoff = 0.5,
owlqn_steps = 100,
max_backoff = 5,
)
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cStringIO import StringIO
import paddle.proto.DataFormat_pb2 as DataFormat
from google.protobuf.internal.encoder import _EncodeVarint
import logging
import pprint
logging.basicConfig(
format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s', )
logger = logging.getLogger('paddle')
logger.setLevel(logging.INFO)
OOV_POLICY_IGNORE = 0
OOV_POLICY_USE = 1
OOV_POLICY_ERROR = 2
num_original_columns = 3
# Feature combination patterns.
# [[-1,0], [0,0]] means previous token at column 0 and current token at
# column 0 are combined as one feature.
patterns = [
[[-2, 0]],
[[-1, 0]],
[[0, 0]],
[[1, 0]],
[[2, 0]],
[[-1, 0], [0, 0]],
[[0, 0], [1, 0]],
[[-2, 1]],
[[-1, 1]],
[[0, 1]],
[[1, 1]],
[[2, 1]],
[[-2, 1], [-1, 1]],
[[-1, 1], [0, 1]],
[[0, 1], [1, 1]],
[[1, 1], [2, 1]],
[[-2, 1], [-1, 1], [0, 1]],
[[-1, 1], [0, 1], [1, 1]],
[[0, 1], [1, 1], [2, 1]],
]
def make_features(sequence):
length = len(sequence)
num_features = len(sequence[0])
def get_features(pos):
if pos < 0:
return ['#B%s' % -pos] * num_features
if pos >= length:
return ['#E%s' % (pos - length + 1)] * num_features
return sequence[pos]
for i in xrange(length):
for pattern in patterns:
fname = '/'.join([get_features(i + pos)[f] for pos, f in pattern])
sequence[i].append(fname)
'''
Source file format:
Each line is for one timestep. The features are separated by space.
An empty line indicates end of a sequence.
cutoff: a list of numbers. If count of a feature is smaller than this,
it will be ignored.
if oov_policy[i] is OOV_POLICY_USE, id 0 is reserved for OOV features of
i-th column.
return a list of dict for each column
'''
def create_dictionaries(filename, cutoff, oov_policy):
def add_to_dict(sequence, dicts):
num_features = len(dicts)
for features in sequence:
l = len(features)
assert l == num_features, "Wrong number of features " + line
for i in xrange(l):
if features[i] in dicts[i]:
dicts[i][features[i]] += 1
else:
dicts[i][features[i]] = 1
num_features = len(cutoff)
dicts = []
for i in xrange(num_features):
dicts.append(dict())
f = open(filename, 'rb')
sequence = []
for line in f:
line = line.strip()
if not line:
make_features(sequence)
add_to_dict(sequence, dicts)
sequence = []
continue
features = line.split(' ')
sequence.append(features)
for i in xrange(num_features):
dct = dicts[i]
n = 1 if oov_policy[i] == OOV_POLICY_USE else 0
todo = []
for k, v in dct.iteritems():
if v < cutoff[i]:
todo.append(k)
else:
dct[k] = n
n += 1
if oov_policy[i] == OOV_POLICY_USE:
# placeholder so that len(dct) will be the number of features
# including OOV
dct['#OOV#'] = 0
logger.info('column %d dict size=%d, ignored %d' % (i, n, len(todo)))
for k in todo:
del dct[k]
f.close()
return dicts
def encode_varint(v):
out = StringIO()
_EncodeVarint(out.write, v)
return out.getvalue()
def write_proto(file, message):
s = message.SerializeToString()
packed_len = encode_varint(len(s))
file.write(packed_len + s)
'''
if oov_policy[i] == OOV_POLICY_USE, features in i-th column which are not
existed in dicts[i] will be assigned to id 0.
if oov_policy[i] == OOV_POLICY_ERROR, all features in i-th column MUST exist
in dicts[i].
'''
def gen_proto_file(input_file, dicts, oov_policy, output_file):
def write_sequence(out, sequence):
num_features = len(dicts)
is_beginning = True
for features in sequence:
assert len(features) == num_features, \
"Wrong number of features: " + line
sample = DataFormat.DataSample()
for i in xrange(num_original_columns):
id = dicts[i].get(features[i], -1)
if id != -1:
sample.id_slots.append(id)
elif oov_policy[i] == OOV_POLICY_IGNORE:
sample.id_slots.append(0xffffffff)
elif oov_policy[i] == OOV_POLICY_ERROR:
logger.fatal("Unknown token: %s" % features[i])
else:
sample.id_slots.append(0)
if patterns:
dim = 0
vec = sample.vector_slots.add()
for i in xrange(num_original_columns, num_features):
id = dicts[i].get(features[i], -1)
if id != -1:
vec.ids.append(dim + id)
elif oov_policy[i] == OOV_POLICY_IGNORE:
pass
elif oov_policy[i] == OOV_POLICY_ERROR:
logger.fatal("Unknown token: %s" % features[i])
else:
vec.ids.append(dim + 0)
dim += len(dicts[i])
sample.is_beginning = is_beginning
is_beginning = False
write_proto(out, sample)
num_features = len(dicts)
f = open(input_file, 'rb')
out = open(output_file, 'wb')
header = DataFormat.DataHeader()
if patterns:
slot_def = header.slot_defs.add()
slot_def.type = DataFormat.SlotDef.VECTOR_SPARSE_NON_VALUE
slot_def.dim = sum(
[len(dicts[i]) for i in xrange(num_original_columns, len(dicts))])
logger.info("feature_dim=%s" % slot_def.dim)
for i in xrange(num_original_columns):
slot_def = header.slot_defs.add()
slot_def.type = DataFormat.SlotDef.INDEX
slot_def.dim = len(dicts[i])
write_proto(out, header)
num_sequences = 0
sequence = []
for line in f:
line = line.strip()
if not line:
make_features(sequence)
write_sequence(out, sequence)
sequence = []
num_sequences += 1
continue
features = line.split(' ')
sequence.append(features)
f.close()
out.close()
logger.info("num_sequences=%s" % num_sequences)
dict2 = {
'B-ADJP': 0,
'I-ADJP': 1,
'B-ADVP': 2,
'I-ADVP': 3,
'B-CONJP': 4,
'I-CONJP': 5,
'B-INTJ': 6,
'I-INTJ': 7,
'B-LST': 8,
'I-LST': 9,
'B-NP': 10,
'I-NP': 11,
'B-PP': 12,
'I-PP': 13,
'B-PRT': 14,
'I-PRT': 15,
'B-SBAR': 16,
'I-SBAR': 17,
'B-UCP': 18,
'I-UCP': 19,
'B-VP': 20,
'I-VP': 21,
'O': 22
}
if __name__ == '__main__':
cutoff = [3, 1, 0]
cutoff += [3] * len(patterns)
oov_policy = [OOV_POLICY_IGNORE, OOV_POLICY_ERROR, OOV_POLICY_ERROR]
oov_policy += [OOV_POLICY_IGNORE] * len(patterns)
dicts = create_dictionaries('trainer/tests/train.txt', cutoff, oov_policy)
dicts[2] = dict2
gen_proto_file('trainer/tests/train.txt', dicts, oov_policy,
'trainer/tests/train_proto.bin')
gen_proto_file('trainer/tests/test.txt', dicts, oov_policy,
'trainer/tests/test_proto.bin')
./trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.proto_data
#edit-mode: -*- python -*-
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Todo(luotao02) This config is only used for unittest. It is out of date now, and will be updated later.
# Note: when making changes to this file, please make sure
# sample_trainer_config_rnn.conf is changed accordingly so that the unittest
# for comparing these two nets can pass (test_CompareTwoNets)
default_initial_std(0.1)
default_device(0)
word_dim = 999
l1 = 0
l2 = 0
model_type("nn")
sparse_update = get_config_arg("sparse_update", bool, False)
TrainData(ProtoData(
type = "proto_sequence",
files = ('trainer/tests/train_sparse.list'),
))
Settings(
algorithm='sgd',
batch_size=100,
learning_rate=0.0001,
learning_rate_decay_a=4e-08,
learning_rate_decay_b=0.0,
learning_rate_schedule='poly',
)
wordvec_dim = 32
layer2_dim = 16
layer3_dim = 16
hidden_dim = 32
slot_names = ["qb", "qw", "tb", "tw"]
def ltr_network(network_name,
word_dim=word_dim,
wordvec_dim=wordvec_dim,
layer2_dim=layer2_dim,
layer3_dim=layer3_dim,
hidden_dim=hidden_dim,
slot_names=slot_names,
l1=l1,
l2=l2):
slotnum = len(slot_names)
for i in xrange(slotnum):
Inputs(slot_names[i] + network_name)
for i in xrange(slotnum):
Layer(
name = slot_names[i] + network_name,
type = "data",
size = word_dim,
device = -1,
)
Layer(
name = slot_names[i] + "_embedding_" + network_name,
type = "mixed",
size = wordvec_dim,
bias = False,
device = -1,
inputs = TableProjection(slot_names[i] + network_name,
parameter_name = "embedding.w0",
decay_rate_l1=l1,
sparse_remote_update = True,
sparse_update = sparse_update,
),
)
Layer(
name = slot_names[i] + "_rnn1_" + network_name,
type = "recurrent",
active_type = "tanh",
bias = Bias(initial_std = 0,
parameter_name = "rnn1.bias"),
inputs = Input(slot_names[i] + "_embedding_" + network_name,
parameter_name = "rnn1.w0")
)
Layer(
name = slot_names[i] + "_rnnlast_" + network_name,
type = "seqlastins",
inputs = [
slot_names[i] + "_rnn1_" + network_name,
],
)
Layer(
name = "layer2_" + network_name,
type = "fc",
active_type = "tanh",
size = layer2_dim,
bias = Bias(parameter_name = "layer2.bias"),
inputs = [Input(slot_name + "_rnnlast_" + network_name,
parameter_name = "_layer2_" + slot_name + ".w",
decay_rate = l2,
initial_smart = True) for slot_name in slot_names]
)
Layer(
name = "layer3_" + network_name,
type = "fc",
active_type = "tanh",
size = layer3_dim,
bias = Bias(parameter_name = "layer3.bias"),
inputs = [
Input("layer2_" + network_name,
parameter_name = "_layer3.w",
decay_rate = l2,
initial_smart = True),
]
)
Layer(
name = "output_" + network_name,
type = "fc",
size = 1,
bias = False,
inputs = [
Input("layer3_" + network_name,
parameter_name = "_layerO.w"),
],
)
ltr_network("left")
ltr_network("right")
Inputs("label")
Layer(
name = "label",
type = "data",
size = 1,
)
Outputs("cost", "qb_rnnlast_left")
Layer(
name = "cost",
type = "rank-cost",
inputs = ["output_left", "output_right", "label"],
)
#edit-mode: -*- python -*-
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Todo(luotao02) This config is only used for unittest. It is out of date now, and will be updated later.
# Note: when making changes to this file, please make sure
# sample_trainer_config_rnn.conf is changed accordingly so that the unittest
# for comparing these two nets can pass (test_CompareTwoNets)
default_initial_std(0.1)
default_device(0)
word_dim = 1451594
l1 = 0
l2 = 0
model_type("nn")
sparse_update = get_config_arg("sparse_update", bool, False)
TrainData(ProtoData(
type = "proto_sequence",
files = ('trainer/tests/train.list'),
))
Settings(
algorithm='sgd',
batch_size=100,
learning_rate=0.0001,
learning_rate_decay_a=4e-08,
learning_rate_decay_b=0.0,
learning_rate_schedule='poly',
)
wordvec_dim = 128
layer2_dim = 96
layer3_dim = 96
hidden_dim = 128
slot_names = ["qb", "qw", "tb", "tw"]
def ltr_network(network_name,
word_dim=word_dim,
wordvec_dim=wordvec_dim,
layer2_dim=layer2_dim,
layer3_dim=layer3_dim,
hidden_dim=hidden_dim,
slot_names=slot_names,
l1=l1,
l2=l2):
slotnum = len(slot_names)
for i in xrange(slotnum):
Inputs(slot_names[i] + network_name)
for i in xrange(slotnum):
Layer(
name = slot_names[i] + network_name,
type = "data",
size = word_dim,
device = -1,
)
Layer(
name = slot_names[i] + "_embedding_" + network_name,
type = "mixed",
size = wordvec_dim,
bias = False,
device = -1,
inputs = TableProjection(slot_names[i] + network_name,
parameter_name = "embedding.w0",
decay_rate_l1=l1,
sparse_remote_update = True,
sparse_update = sparse_update,
),
)
Layer(
name = slot_names[i] + "_rnn1_" + network_name,
type = "recurrent",
active_type = "tanh",
bias = Bias(initial_std = 0,
parameter_name = "rnn1.bias"),
inputs = Input(slot_names[i] + "_embedding_" + network_name,
parameter_name = "rnn1.w0")
)
Layer(
name = slot_names[i] + "_rnnlast_" + network_name,
type = "seqlastins",
inputs = [
slot_names[i] + "_rnn1_" + network_name,
],
)
Layer(
name = "layer2_" + network_name,
type = "fc",
active_type = "tanh",
size = layer2_dim,
bias = Bias(parameter_name = "layer2.bias"),
inputs = [Input(slot_name + "_rnnlast_" + network_name,
parameter_name = "_layer2_" + slot_name + ".w",
decay_rate = l2,
initial_smart = True) for slot_name in slot_names]
)
Layer(
name = "layer3_" + network_name,
type = "fc",
active_type = "tanh",
size = layer3_dim,
bias = Bias(parameter_name = "layer3.bias"),
inputs = [
Input("layer2_" + network_name,
parameter_name = "_layer3.w",
decay_rate = l2,
initial_smart = True),
]
)
Layer(
name = "output_" + network_name,
type = "fc",
size = 1,
bias = False,
inputs = [
Input("layer3_" + network_name,
parameter_name = "_layerO.w"),
],
)
ltr_network("left")
ltr_network("right")
Inputs("label")
Layer(
name = "label",
type = "data",
size = 1,
)
Outputs("cost", "qb_rnnlast_left")
Layer(
name = "cost",
type = "rank-cost",
inputs = ["output_left", "output_right", "label"],
)
#edit-mode: -*- python -*-
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Todo(luotao02) This config is only used for unittest. It is out of date now, and will be updated later.
# Note: when making changes to this file, please make sure
# sample_trainer_config_qb_rnn.conf is changed accordingly so that the unittest
# for comparing these two nets can pass (test_CompareTwoNets)
default_initial_std(0.1)
default_device(0)
word_dim = 1451594
l1 = 0
l2 = 0
model_type("recurrent_nn")
sparse_update = get_config_arg("sparse_update", bool, False)
TrainData(ProtoData(
type = "proto_sequence",
files = ('trainer/tests/train.list'),
))
Settings(
algorithm='sgd',
batch_size=100,
learning_rate=0.0001,
learning_rate_decay_a=4e-08,
learning_rate_decay_b=0.0,
learning_rate_schedule='poly',
)
wordvec_dim = 128
layer2_dim = 96
layer3_dim = 96
hidden_dim = 128
slot_names = ["qb", "qw", "tb", "tw"]
def SimpleRecurrentLayer(name,
size,
active_type,
bias,
input_layer_name,
parameter_name,
seq_reversed = False):
RecurrentLayerGroupBegin(name + "_layer_group",
in_links=[input_layer_name],
out_links=[name],
seq_reversed=seq_reversed)
memory_name = Memory(name=name, size=size)
Layer(
name = name,
type = "mixed",
size = size,
active_type = active_type,
bias = bias,
inputs = [IdentityProjection(input_layer_name),
FullMatrixProjection(memory_name,
parameter_name = parameter_name,
),
]
)
RecurrentLayerGroupEnd(name + "_layer_group")
def ltr_network(network_name,
word_dim=word_dim,
wordvec_dim=wordvec_dim,
layer2_dim=layer2_dim,
layer3_dim=layer3_dim,
hidden_dim=hidden_dim,
slot_names=slot_names,
l1=l1,
l2=l2):
slotnum = len(slot_names)
for i in xrange(slotnum):
Inputs(slot_names[i] + network_name)
for i in xrange(slotnum):
Layer(
name = slot_names[i] + network_name,
type = "data",
size = word_dim,
device = -1,
)
Layer(
name = slot_names[i] + "_embedding_" + network_name,
type = "mixed",
size = wordvec_dim,
bias = False,
device = -1,
inputs = TableProjection(slot_names[i] + network_name,
parameter_name = "embedding.w0",
decay_rate_l1=l1,
sparse_remote_update = True,
sparse_update = sparse_update,
),
)
SimpleRecurrentLayer(
name = slot_names[i] + "_rnn1_" + network_name,
size = hidden_dim,
active_type = "tanh",
bias = Bias(initial_std = 0,
parameter_name = "rnn1.bias"),
input_layer_name = slot_names[i] + "_embedding_" + network_name,
parameter_name = "rnn1.w0",
)
Layer(
name = slot_names[i] + "_rnnlast_" + network_name,
type = "seqlastins",
inputs = [
slot_names[i] + "_rnn1_" + network_name,
],
)
Layer(
name = "layer2_" + network_name,
type = "fc",
active_type = "tanh",
size = layer2_dim,
bias = Bias(parameter_name = "layer2.bias"),
inputs = [Input(slot_name + "_rnnlast_" + network_name,
parameter_name = "_layer2_" + slot_name + ".w",
decay_rate = l2,
initial_smart = True) for slot_name in slot_names]
)
Layer(
name = "layer3_" + network_name,
type = "fc",
active_type = "tanh",
size = layer3_dim,
bias = Bias(parameter_name = "layer3.bias"),
inputs = [
Input("layer2_" + network_name,
parameter_name = "_layer3.w",
decay_rate = l2,
initial_smart = True),
]
)
Layer(
name = "output_" + network_name,
type = "fc",
size = 1,
bias = False,
inputs = [
Input("layer3_" + network_name,
parameter_name = "_layerO.w"),
],
)
ltr_network("left")
ltr_network("right")
Inputs("label")
Layer(
name = "label",
type = "data",
size = 1,
)
Outputs("cost", "qb_rnnlast_left")
Layer(
name = "cost",
type = "rank-cost",
inputs = ["output_left", "output_right", "label"],
)
......@@ -20,28 +20,6 @@ import random
import json
import string
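# Input format consumed by processNonSequenceData below (inferred from its
# parsing): five ';'-separated fields per line -- an integer index, a
# sparse-id list, a dense float list, a sparse id:value list and a string
# field; each field after the index starts with an element count, which the
# split()[1:] / split(' ', 1)[1] calls skip.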
@provider(slots=[
SparseNonValueSlot(10), DenseSlot(2), SparseValueSlot(10), StringSlot(1),
IndexSlot(3)
])
def processNonSequenceData(obj, filename):
with open(filename, "rb") as f:
for line in f:
slots_str = line.split(';')
index = int(slots_str[0])
non_values = map(int, slots_str[1].split()[1:])
dense = map(float, slots_str[2].split()[1:])
strs = slots_str[4].strip().split(' ', 1)[1]
def __values_mapper__(s):
s = s.split(":")
return int(s[0]), float(s[1])
values = map(__values_mapper__, slots_str[3].split()[1:])
yield [non_values, dense, values, strs, index]
SPARSE_ID_LIMIT = 1000
SPARSE_ID_COUNT = 100
SEQUENCE_LIMIT = 50
......@@ -146,8 +124,6 @@ def processSubSeqAndGenerateData(obj, name):
if __name__ == "__main__":
pvd = processNonSequenceData("test.txt")
print pvd.getNextBatch(100)
pvd = processSeqAndGenerateData("_")
print pvd.getNextBatch(100)
pvd = processSubSeqAndGenerateData("_")
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <paddle/utils/PythonUtil.h>
#include <algorithm>
#include <cstdlib>
#include "paddle/trainer/Trainer.h"
using namespace paddle; // NOLINT
using namespace std; // NOLINT
DECLARE_int32(gpu_id);
DECLARE_bool(local);
DECLARE_bool(use_gpu);
DECLARE_string(config);
DECLARE_string(nics);
DEFINE_string(config_file_a, "", "config of one network to compare");
DEFINE_string(config_file_b, "", "config of another network to compare");
DEFINE_bool(need_high_accuracy,
true,
"whether need to run in double accuracy (recommended)");
DEFINE_double(
max_diff_ratio,
0.0f,
"max diff ratio allowed for outputs and parameters (value/gradient)");
struct ComData {
vector<Argument> outArgs;
vector<ParameterPtr> parameters;
};
void calcGradient(ComData& data, const string configFile) {
FLAGS_config = configFile;
FLAGS_local = true;
FLAGS_use_gpu = false;
FLAGS_nics = "";
*ThreadLocalRand::getSeed() = 0;
srand(0);
Trainer trainer;
trainer.init(TrainerConfigHelper::createFromFlagConfig(), false);
data.parameters = trainer.getGradientMachine()->getParameters();
trainer.getDataProvider()->setSkipShuffle();
trainer.train();
}
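// checkBuffer below reports an element only when its relative difference
// |A[i] - B[i]| / max(|A[i]|, |B[i]|) exceeds FLAGS_max_diff_ratio; for
// example, with max_diff_ratio = 2e-4 the pair 1.0000 / 1.0001 still passes.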
void checkBuffer(real* A,
const char* desA,
real* B,
const char* desB,
size_t len,
size_t width = 1) {
int nNum = 0;
for (size_t i = 0; i < len; ++i) {
real diff = fabs(A[i] - B[i]);
if (diff > 0.0f &&
diff / std::max(fabs(A[i]), fabs(B[i])) > FLAGS_max_diff_ratio) {
nNum++;
LOG(INFO) << "Row: " << i / width << ", " << desA << " : " << A[i]
<< " " << desB << " : " << B[i];
}
}
EXPECT_EQ(0, nNum);
LOG(INFO) << "\n\n";
}
void compareGradient(ComData& comDataA, ComData& comDataB) {
vector<Argument> outArgsA = comDataA.outArgs;
vector<Argument> outArgsB = comDataB.outArgs;
for (size_t i = 0; i < outArgsA.size(); ++i) {
CpuMatrix matA(outArgsA[i].value->getHeight(),
outArgsA[i].value->getWidth());
CpuMatrix matB(outArgsB[i].value->getHeight(),
outArgsB[i].value->getWidth());
matA.copyFrom(*outArgsA[i].value);
matB.copyFrom(*outArgsB[i].value);
LOG(INFO) << "\n--------------------------------"
<< " Check Network Output_" << i << ":"
<< " -------------------------------------\n";
checkBuffer(matA.getData(),
"network A output",
matB.getData(),
"network B output",
matA.getElementCnt(),
matA.getWidth());
}
vector<ParameterPtr>& parametersA = comDataA.parameters;
vector<ParameterPtr>& parametersB = comDataB.parameters;
LOG(INFO) << "\n\n--------------------------------"
<< " Check Gradient Machine Parameters:"
<< " -------------------------------------\n";
for (size_t i = 0; i < parametersA.size(); ++i) {
ParameterPtr parameterA, parameterB;
parameterA = parametersA[i];
parameterB = parametersB[i];
CpuVector paraA(parameterA->getSize());
CpuVector paraB(parameterB->getSize());
paraA.copyFrom(*parameterA->getBuf(PARAMETER_VALUE));
paraB.copyFrom(*parameterB->getBuf(PARAMETER_VALUE));
LOG(INFO) << "\n\n----------- PARAMETER_VALUE: " << parameterA->getName()
<< " ; size : " << paraA.getSize() << " ------------";
checkBuffer(paraA.getData(),
"Network A",
paraB.getData(),
"Network B",
paraA.getSize());
CpuVector gradA(*parameterA->getBuf(PARAMETER_GRADIENT));
CpuVector gradB(*parameterB->getBuf(PARAMETER_GRADIENT));
LOG(INFO) << "\n\n----------- PARAMETER_GRADIENT: " << parameterA->getName()
<< " ; size : " << gradA.getSize() << " -----------";
checkBuffer(gradA.getData(),
"Network A",
gradB.getData(),
"Network B",
gradA.getSize());
}
}
TEST(Trainer, create) {
ComData dataA;
calcGradient(dataA, FLAGS_config_file_a);
LOG(INFO) << "\n\ntraining of Network A is finished\n\n";
ComData dataB;
calcGradient(dataB, FLAGS_config_file_b);
  LOG(INFO) << "\n\ntraining of Network B is finished\n\n";
compareGradient(dataA, dataB);
}
int main(int argc, char** argv) {
paddle::initMain(argc, argv);
testing::InitGoogleTest(&argc, argv);
initPython(argc, argv);
#ifndef PADDLE_TYPE_DOUBLE
if (FLAGS_need_high_accuracy) {
LOG(INFO) << "skip test due to it's need high accuracy";
return 0;
}
if (FLAGS_max_diff_ratio == 0.0f) {
FLAGS_max_diff_ratio = 2e-4;
LOG(INFO) << "auto set max_diff_ratio " << FLAGS_max_diff_ratio
<< " in low accuracy mode";
}
#else
if (FLAGS_max_diff_ratio == 0.0f) {
FLAGS_max_diff_ratio = 2e-7;
LOG(INFO) << "auto set max_diff_ratio " << FLAGS_max_diff_ratio
<< " in high accuracy mode";
}
#endif
int ret = RUN_ALL_TESTS();
return ret;
}
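As the flag definitions above indicate, a comparison run is driven entirely by gflags: the two trainer configurations are passed via --config_file_a and --config_file_b, and on single-precision builds --need_high_accuracy=false lets the test proceed with the auto-selected max_diff_ratio of 2e-4.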
......@@ -25,45 +25,9 @@ limitations under the License. */
#include <unordered_set>
#include "picojson.h"
void checkEqual(const paddle::Argument& expect, const paddle::Argument& actual);
void checkValue(std::vector<paddle::Argument>& arguments, picojson::array& arr);
const std::string kDir = "./trainer/tests/pydata_provider_wrapper_dir/";
TEST(PyDataProviderWrapper, NoSequenceData) {
paddle::DataConfig conf;
conf.set_type("py");
conf.set_load_data_module(std::string("testPyDataWrapper"));
conf.set_load_data_object(std::string("processNonSequenceData"));
conf.set_async_load_data(false);
conf.clear_files();
conf.set_files(kDir + "test_pydata_provider_wrapper.list");
paddle::DataProviderPtr provider(paddle::DataProvider::create(conf, false));
provider->setSkipShuffle();
provider->reset();
paddle::DataBatch batchFromPy;
provider->getNextBatch(100, &batchFromPy);
paddle::DataConfig conf2;
conf2.set_type("proto");
conf2.set_async_load_data(false);
conf2.clear_files();
conf2.set_files(kDir + "test_pydata_provider_wrapper.protolist");
provider.reset(paddle::DataProvider::create(conf2, false));
provider->setSkipShuffle();
provider->reset();
paddle::DataBatch batchFromProto;
provider->getNextBatch(100, &batchFromProto);
std::vector<paddle::Argument>& pyArguments = batchFromPy.getStreams();
std::vector<paddle::Argument>& protoArguments = batchFromProto.getStreams();
EXPECT_EQ(pyArguments.size(), protoArguments.size());
for (size_t i = 0; i < pyArguments.size(); ++i) {
checkEqual(protoArguments[i], pyArguments[i]);
}
}
TEST(PyDataProviderWrapper, SequenceData) {
paddle::DataConfig conf;
conf.set_type("py");
......@@ -148,66 +112,6 @@ int main(int argc, char** argv) {
return RUN_ALL_TESTS();
}
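// checkEqual compares a batch from the Python provider against one from the
// proto provider field by field. For the sparse case note the index layout:
// CSC keeps width + 1 column offsets plus one row id per non-zero, while CSR
// keeps height + 1 row offsets plus one column id per non-zero -- hence the
// format-dependent rowSize/colSize below.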
void checkEqual(const paddle::Argument& expect,
const paddle::Argument& actual) {
if (expect.value) {
EXPECT_TRUE(actual.value != nullptr);
paddle::Matrix* e = expect.value.get();
paddle::Matrix* a = actual.value.get();
EXPECT_EQ(e->getWidth(), a->getWidth());
EXPECT_EQ(e->getHeight(), a->getHeight());
if (dynamic_cast<paddle::CpuSparseMatrix*>(e)) {
paddle::CpuSparseMatrix* se = dynamic_cast<paddle::CpuSparseMatrix*>(e);
paddle::CpuSparseMatrix* sa = dynamic_cast<paddle::CpuSparseMatrix*>(a);
EXPECT_EQ(se->getFormat(), sa->getFormat());
EXPECT_EQ(se->getElementCnt(), sa->getElementCnt());
size_t rowSize = se->getFormat() == paddle::SPARSE_CSC
? se->getElementCnt()
: se->getHeight() + 1;
size_t colSize = se->getFormat() == paddle::SPARSE_CSC
? se->getWidth() + 1
: se->getElementCnt();
for (size_t i = 0; i < rowSize; ++i) {
EXPECT_EQ(se->getRows()[i], sa->getRows()[i]);
}
for (size_t i = 0; i < colSize; ++i) {
EXPECT_EQ(se->getCols()[i], sa->getCols()[i]);
}
if (se->getValueType() == paddle::FLOAT_VALUE) {
EXPECT_EQ(paddle::FLOAT_VALUE, sa->getValueType());
for (size_t i = 0; i < se->getElementCnt(); ++i) {
EXPECT_EQ(se->getValue()[i], sa->getValue()[i]);
}
}
} else if (dynamic_cast<paddle::CpuMatrix*>(e)) {
EXPECT_EQ(e->getElementCnt(), a->getElementCnt());
for (size_t i = 0; i < e->getElementCnt(); ++i) {
EXPECT_EQ(e->getData()[i], a->getData()[i]);
}
}
}
if (expect.ids) {
EXPECT_TRUE(actual.ids != nullptr);
paddle::VectorT<int>* e = expect.ids.get();
paddle::VectorT<int>* a = actual.ids.get();
EXPECT_EQ(e->getSize(), a->getSize());
for (size_t i = 0; i < e->getSize(); ++i) {
EXPECT_EQ(e->getData()[i], a->getData()[i]);
}
}
if (expect.strs) {
EXPECT_TRUE(actual.strs != nullptr);
std::vector<std::string>* e = expect.strs.get();
std::vector<std::string>* a = actual.strs.get();
EXPECT_EQ(e->size(), a->size());
for (size_t i = 0; i < e->size(); ++i) {
EXPECT_EQ((*e)[i], (*a)[i]);
}
}
}
void checkValue(std::vector<paddle::Argument>& arguments,
picojson::array& arr) {
// CHECK SLOT 0, Sparse Value.
......
......@@ -24,7 +24,6 @@ using namespace std; // NOLINT
static const string& configFile1 = "trainer/tests/sample_trainer_config.conf";
static const string& configFile2 =
"trainer/tests/sample_trainer_config_hsigmoid.conf";
static const string& configFile3 = "trainer/tests/chunking.conf";
static const string& configFile4 =
"trainer/tests/sample_trainer_config_parallel.conf";
......@@ -95,13 +94,6 @@ TEST(checkGradient, multi) {
TEST(checkGradient, hsigmoid) { checkGradientTest(configFile2, false, false); }
TEST(checkGradient, chunk) {
checkGradientTest(configFile3, false, false);
#ifdef PADDLE_WITH_CUDA
checkGradientTest(configFile3, true, true);
#endif
}
TEST(checkGradient, non_parallel) {
checkGradientTest(configFile4, false, false);
}
......
......@@ -15,12 +15,7 @@
from paddle.trainer_config_helpers import *
TrainData(ProtoData(
files = "dummy_list",
constant_slots = [1.0],
async_load_data = True))
TestData(SimpleData(
TrainData(SimpleData(
files = "trainer/tests/sample_filelist.txt",
feat_dim = 3,
context_len = 0,
......
Confidence NN B-NP
in IN B-PP
the DT B-NP
pound NN I-NP
is VBZ B-VP
widely RB I-VP
expected VBN I-VP
to TO I-VP
take VB I-VP
another DT B-NP
sharp JJ I-NP
dive NN I-NP
if IN B-SBAR
trade NN B-NP
figures NNS I-NP
for IN B-PP
September NNP B-NP
, , O
due JJ B-ADJP
for IN B-PP
release NN B-NP
tomorrow NN B-NP
, , O
fail VB B-VP
to TO I-VP
show VB I-VP
a DT B-NP
substantial JJ I-NP
improvement NN I-NP
from IN B-PP
July NNP B-NP
and CC I-NP
August NNP I-NP
's POS B-NP
near-record JJ I-NP
deficits NNS I-NP
. . O
Chancellor NNP O
of IN B-PP
the DT B-NP
Exchequer NNP I-NP
Nigel NNP B-NP
Lawson NNP I-NP
's POS B-NP
restated VBN I-NP
commitment NN I-NP
to TO B-PP
a DT B-NP
firm NN I-NP
monetary JJ I-NP
policy NN I-NP
has VBZ B-VP
helped VBN I-VP
to TO I-VP
prevent VB I-VP
a DT B-NP
freefall NN I-NP
in IN B-PP
sterling NN B-NP
over IN B-PP
the DT B-NP
past JJ I-NP
week NN I-NP
. . O
But CC O
analysts NNS B-NP
reckon VBP B-VP
underlying VBG B-NP
support NN I-NP
for IN B-PP
sterling NN B-NP
has VBZ B-VP
been VBN I-VP
eroded VBN I-VP
by IN B-PP
the DT B-NP
chancellor NN I-NP
's POS B-NP
failure NN I-NP
to TO B-VP
announce VB I-VP
any DT B-NP
new JJ I-NP
policy NN I-NP
measures NNS I-NP
in IN B-PP
his PRP$ B-NP
Mansion NNP I-NP
House NNP I-NP
speech NN I-NP
last JJ B-NP
Thursday NNP I-NP
. . O
This DT B-NP
has VBZ B-VP
increased VBN I-VP
the DT B-NP
risk NN I-NP
of IN B-PP
the DT B-NP
government NN I-NP
being VBG B-VP
forced VBN I-VP
to TO I-VP
increase VB I-VP
base NN B-NP
rates NNS I-NP
to TO B-PP
16 CD B-NP
% NN I-NP
from IN B-PP
their PRP$ B-NP
current JJ I-NP
15 CD I-NP
% NN I-NP
level NN I-NP
to TO B-VP
defend VB I-VP
the DT B-NP
pound NN I-NP
, , O
economists NNS B-NP
and CC O
foreign JJ B-NP
exchange NN I-NP
market NN I-NP
analysts NNS I-NP
say VBP B-VP
. . O
`` `` O
The DT B-NP
risks NNS I-NP
for IN B-PP
sterling NN B-NP
of IN B-PP
a DT B-NP
bad JJ I-NP
trade NN I-NP
figure NN I-NP
are VBP B-VP
very RB B-ADVP
heavily RB I-ADVP
on IN B-PP
the DT B-NP
down JJ I-NP
side NN I-NP
, , O
'' '' O
said VBD B-VP
Chris NNP B-NP
Dillow NNP I-NP
, , O
senior JJ B-NP
U.K. NNP I-NP
economist NN I-NP
at IN B-PP
Nomura NNP B-NP
Research NNP I-NP
Institute NNP I-NP
. . O
`` `` O
If IN B-SBAR
there EX B-NP
is VBZ B-VP
another DT B-NP
bad JJ I-NP
trade NN I-NP
number NN I-NP
, , O
there EX B-NP
could MD B-VP
be VB I-VP
an DT B-NP
awful JJ I-NP
lot NN I-NP
of IN B-PP
pressure NN B-NP
, , O
'' '' O
noted VBD B-VP
Simon NNP B-NP
Briscoe NNP I-NP
, , O
U.K. NNP B-NP
economist NN I-NP
for IN B-PP
Midland NNP B-NP
Montagu NNP I-NP
, , O
a DT B-NP
unit NN I-NP
of IN B-PP
Midland NNP B-NP
Bank NNP I-NP
PLC NNP I-NP
. . O
Forecasts NNS B-NP
for IN B-PP
the DT B-NP
trade NN I-NP
figures NNS I-NP
range VBP B-VP
widely RB B-ADVP
, , O
but CC O
few JJ B-NP
economists NNS I-NP
expect VBP B-VP
the DT B-NP
data NNS I-NP
to TO B-VP
show VB I-VP
a DT B-NP
very RB I-NP
marked VBN I-NP
improvement NN I-NP
from IN B-PP
the DT O
# # O
2 CD O
billion CD O
-LRB- ( O
$ $ B-ADJP
3.2 CD O
billion CD O
-RRB- ) O
deficit NN B-NP
in IN B-PP
the DT B-NP
current JJ I-NP
account NN I-NP
reported VBD B-VP
for IN B-PP
August NNP B-NP
. . O
The DT B-NP
August NNP I-NP
deficit NN I-NP
and CC O
the DT B-NP
# # I-NP
2.2 CD I-NP
billion CD I-NP
gap NN I-NP
registered VBN B-VP
in IN B-PP
July NNP B-NP
are VBP B-VP
topped VBN I-VP
only RB B-ADVP
by IN B-PP
the DT B-NP
# # I-NP
2.3 CD I-NP
billion CD I-NP
deficit NN I-NP
of IN B-PP
October NNP B-NP
1988 CD I-NP
. . O
Sanjay NNP B-NP
Joshi NNP I-NP
, , O
European JJ B-NP
economist NN I-NP
at IN B-PP
Baring NNP B-NP
Brothers NNPS I-NP
& CC I-NP
Co. NNP I-NP
, , O
said VBD B-VP
there EX B-NP
is VBZ B-VP
no DT B-NP
sign NN I-NP
that IN B-SBAR
Britain NNP B-NP
's POS B-NP
manufacturing NN I-NP
industry NN I-NP
is VBZ B-VP
transforming VBG I-VP
itself PRP B-NP
to TO B-VP
boost VB I-VP
exports NNS B-NP
. . O
At IN B-PP
the DT B-NP
same JJ I-NP
time NN I-NP
, , O
he PRP B-NP
remains VBZ B-VP
fairly RB B-ADJP
pessimistic JJ I-ADJP
about IN B-PP
the DT B-NP
outlook NN I-NP
for IN B-PP
imports NNS B-NP
, , O
given VBN B-PP
continued VBD B-NP
high JJ I-NP
consumer NN I-NP
and CC I-NP
capital NN I-NP
goods NNS I-NP
inflows NNS I-NP
. . O
He PRP B-NP
reckons VBZ B-VP
the DT B-NP
current JJ I-NP
account NN I-NP
deficit NN I-NP
will MD B-VP
narrow VB I-VP
to TO B-PP
only RB B-NP
# # I-NP
1.8 CD I-NP
billion CD I-NP
in IN B-PP
September NNP B-NP
. . O
However RB B-ADVP
, , O
Mr. NNP B-NP
Dillow NNP I-NP
said VBD B-VP
he PRP B-NP
believes VBZ B-VP
that IN B-SBAR
a DT B-NP
reduction NN I-NP
in IN B-PP
raw JJ B-NP
material NN I-NP
stockbuilding VBG I-NP
by IN B-PP
industry NN B-NP
could MD B-VP
lead VB I-VP
to TO B-PP
a DT B-NP
sharp JJ I-NP
drop NN I-NP
in IN B-PP
imports NNS B-NP
. . O
Combined VBN B-PP
with IN B-PP
at IN B-ADVP
least JJS I-ADVP
some DT B-NP
rebound NN I-NP
in IN B-PP
exports NNS B-NP
after IN B-PP
August NNP B-NP
's POS B-NP
unexpected JJ I-NP
decline NN I-NP
, , O
the DT B-NP
deficit NN I-NP
could MD B-VP
narrow VB I-VP
to TO B-PP
as RB B-NP
little JJ I-NP
as IN I-NP
# # I-NP
1.3 CD I-NP
billion CD I-NP
. . O
Mr. NNP B-NP
Briscoe NNP I-NP
, , O
who WP B-NP
also RB B-ADVP
forecasts VBZ B-VP
a DT B-NP
# # I-NP
1.3 CD I-NP
billion CD I-NP
current JJ I-NP
account NN I-NP
gap NN I-NP
, , O
warns VBZ B-VP
that IN B-SBAR
even RB B-SBAR
if IN I-SBAR
the DT B-NP
trade NN I-NP
figures NNS I-NP
are VBP B-VP
bullish JJ B-ADJP
for IN B-PP
sterling NN B-NP
, , O
the DT B-NP
currency NN I-NP
wo MD B-VP
n't RB I-VP
advance VB I-VP
much JJ B-NP
because IN B-SBAR
investors NNS B-NP
will MD B-VP
want VB I-VP
to TO I-VP
see VB I-VP
further JJ B-NP
evidence NN I-NP
of IN B-PP
the DT B-NP
turnaround NN I-NP
before IN B-PP
adjusting VBG B-VP
positions NNS B-NP
. . O
Nevertheless RB B-ADVP
, , O
he PRP B-NP
noted VBD B-VP
, , O
`` `` O
No DT B-NP
one PRP I-NP
will MD B-VP
want VB I-VP
to TO I-VP
go VB I-VP
into IN B-PP
the DT B-NP
trade NN I-NP
figures NNS I-NP
without IN B-PP
a DT B-NP
flat JJ I-NP
position NN I-NP
'' '' O
in IN B-PP
the DT B-NP
pound NN I-NP
. . O
Meanwhile RB B-ADVP
, , O
overall JJ B-NP
evidence NN I-NP
on IN B-PP
the DT B-NP
economy NN I-NP
remains VBZ B-VP
fairly RB B-ADJP
clouded VBN I-ADJP
. . O
In IN B-PP
his PRP$ B-NP
Mansion NNP I-NP
House NNP I-NP
speech NN I-NP
, , O
Mr. NNP B-NP
Lawson NNP I-NP
warned VBD B-VP
that IN B-SBAR
a DT B-NP
further JJ I-NP
slowdown NN I-NP
can MD B-VP
be VB I-VP
expected VBN I-VP
as IN B-SBAR
the DT B-NP
impact NN I-NP
of IN B-PP
the DT B-NP
last JJ I-NP
rise NN I-NP
in IN B-PP
interest NN B-NP
rates NNS I-NP
earlier RBR B-NP
this DT I-NP
month NN I-NP
takes VBZ B-VP
effect NN B-NP
. . O
U.K. JJ B-NP
base NN I-NP
rates NNS I-NP
are VBP B-VP
at IN B-PP
their PRP$ B-NP
highest JJS I-NP
level NN I-NP
in IN B-PP
eight CD B-NP
years NNS I-NP
. . O
But CC O
consumer NN B-NP
expenditure NN I-NP
data NNS I-NP
released VBD B-VP
Friday NNP B-NP
do VBP B-VP
n't RB I-VP
suggest VB I-VP
that IN B-SBAR
the DT B-NP
U.K. NNP I-NP
economy NN I-NP
is VBZ B-VP
slowing VBG I-VP
that DT B-ADVP
quickly RB I-ADVP
. . O
The DT B-NP
figures NNS I-NP
show VBP B-VP
that DT O
spending NN B-NP
rose VBD B-VP
0.1 CD B-NP
% NN I-NP
in IN B-PP
the DT B-NP
third JJ I-NP
quarter NN I-NP
from IN B-PP
the DT B-NP
second JJ I-NP
quarter NN I-NP
and CC O
was VBD B-VP
up IN B-ADVP
3.8 CD B-NP
% NN I-NP
from IN B-PP
a DT B-NP
year NN I-NP
ago RB B-ADVP
. . O
This DT B-NP
compares VBZ B-VP
with IN B-PP
a DT B-NP
1.6 CD I-NP
% NN I-NP
rise NN I-NP
in IN B-PP
the DT B-NP
second NN I-NP
from IN B-PP
the DT B-NP
first JJ I-NP
quarter NN I-NP
and CC O
a DT B-NP
5.4 CD I-NP
% NN I-NP
increase NN I-NP
from IN B-PP
the DT B-NP
second JJ I-NP
quarter NN I-NP
of IN B-PP
1988 CD B-NP
. . O
Mr. NNP B-NP
Dillow NNP I-NP
said VBD B-VP
the DT B-NP
data NNS I-NP
show VBP B-VP
the DT B-NP
economy NN I-NP
`` `` O
is VBZ B-VP
still RB B-ADVP
quite RB B-ADJP
strong JJ I-ADJP
, , O
'' '' O
but CC O
suggestions NNS B-NP
that IN B-SBAR
much NN B-NP
of IN B-PP
the DT B-NP
spending NN I-NP
went VBD B-VP
on IN B-PP
services NNS B-NP
rather RB B-PP
than IN I-PP
consumer NN B-NP
goods NNS I-NP
should MD B-VP
reduce VB I-VP
fears NNS B-NP
of IN B-PP
more JJR B-NP
import NN I-NP
rises NNS I-NP
. . O
Certainly RB B-ADVP
, , O
the DT B-NP
chancellor NN I-NP
has VBZ B-VP
made VBN I-VP
it PRP B-NP
clear JJ B-ADJP
that IN B-SBAR
he PRP B-NP
is VBZ B-VP
prepared VBN I-VP
to TO I-VP
increase VB I-VP
interest NN B-NP
rates NNS I-NP
again RB B-ADVP
if IN B-SBAR
necessary JJ B-ADJP
to TO B-VP
both DT I-VP
ensure VB I-VP
that IN B-SBAR
a DT B-NP
substantial JJ I-NP
slowdown NN I-NP
does VBZ B-VP
take VB I-VP
place NN B-NP
and CC O
that DT O
sterling NN B-NP
does VBZ B-VP
n't RB I-VP
decline VB I-VP
further JJ B-ADVP
. . O
Thursday NNP B-NP
, , O
he PRP B-NP
reminded VBD B-VP
his PRP$ B-NP
audience NN I-NP
that IN B-SBAR
the DT B-NP
government NN I-NP
`` `` O
can MD B-VP
not RB I-VP
allow VB I-VP
the DT B-NP
necessary JJ I-NP
rigor NN I-NP
of IN B-PP
monetary JJ B-NP
policy NN I-NP
to TO B-VP
be VB I-VP
undermined VBN I-VP
by IN B-PP
exchange NN B-NP
rate NN I-NP
weakness NN I-NP
. . O
'' '' O
Analysts NNS B-NP
agree VBP B-VP
there EX B-NP
is VBZ B-VP
little JJ B-NP
holding NN B-VP
sterling NN B-NP
firm NN B-ADJP
at IN B-PP
the DT B-NP
moment NN I-NP
other JJ B-ADJP
than IN B-PP
Mr. NNP B-NP
Lawson NNP I-NP
's POS B-NP
promise NN I-NP
that IN B-SBAR
rates NNS B-NP
will MD B-VP
be VB I-VP
pushed VBN I-VP
higher JJR B-ADJP
if IN B-SBAR
necessary JJ B-ADJP
. . O
And CC O
, , O
they PRP B-NP
warn VBP B-VP
, , O
any DT B-NP
further JJ I-NP
drop NN I-NP
in IN B-PP
the DT B-NP
government NN I-NP
's POS B-NP
popularity NN I-NP
could MD B-VP
swiftly RB I-VP
make VB I-VP
this DT B-NP
promise NN I-NP
sound NN B-VP
hollow JJ B-ADJP
. . O
Sterling NNP B-NP
was VBD B-VP
already RB I-VP
showing VBG I-VP
some DT B-NP
signs NNS I-NP
of IN B-PP
a DT B-NP
lack NN I-NP
of IN B-PP
confidence NN B-NP
in IN B-PP
Mr. NNP B-NP
Lawson NNP I-NP
's POS B-NP
promise NN I-NP
Friday NNP B-NP
. . O
In IN B-PP
European JJ B-NP
trading NN I-NP
it PRP B-NP
declined VBD B-VP
to TO B-PP
$ $ B-NP
1.5890 CD I-NP
and CC O
2.9495 CD B-NP
marks NNS I-NP
from IN B-PP
$ $ B-NP
1.5940 CD I-NP
and CC O
2.9429 CD B-NP
marks NNS I-NP
late JJ B-NP
Thursday NNP I-NP
. . O
Economists NNS B-NP
suggested VBD B-VP
that IN B-SBAR
if IN B-SBAR
the DT B-NP
pound NN I-NP
falls VBZ B-VP
much JJ B-NP
below IN B-PP
2.90 CD B-NP
marks NNS I-NP
, , O
the DT B-NP
government NN I-NP
will MD B-VP
be VB I-VP
forced VBN I-VP
to TO I-VP
increase VB I-VP
rates NNS B-NP
to TO B-PP
16 CD B-NP
% NN I-NP
, , O
both DT B-VP
to TO I-VP
halt VB B-VP
any DT B-NP
further JJ I-NP
decline NN I-NP
and CC O
ensure VB B-VP
that IN B-SBAR
the DT B-NP
balance NN I-NP
of IN B-PP
monetary JJ B-NP
policy NN I-NP
remains VBZ B-VP
unchanged JJ B-ADJP
. . O
Friday NNP B-NP
's POS B-NP
Market NNP I-NP
Activity NN I-NP
The DT B-NP
dollar NN I-NP
posted VBD B-VP
gains NNS B-NP
in IN B-PP
quiet JJ B-NP
trading NN I-NP
as IN B-SBAR
concerns NNS B-NP
about IN B-PP
equities NNS B-NP
abated VBN B-VP
. . O
Foreign JJ B-NP
exchange NN I-NP
dealers NNS I-NP
said VBD B-VP
that IN B-SBAR
the DT B-NP
currency NN I-NP
market NN I-NP
has VBZ B-VP
begun VBN I-VP
to TO I-VP
distance VB I-VP
itself PRP B-NP
from IN B-PP
the DT B-NP
volatile JJ I-NP
stock NN I-NP
exchange NN I-NP
, , O
which WDT B-NP
has VBZ B-VP
preoccupied VBN I-VP
the DT B-NP
market NN I-NP
since IN B-PP
Oct. NNP B-NP
13 CD I-NP
, , O
when WRB B-ADVP
the DT B-NP
Dow NNP I-NP
Jones NNP I-NP
Industrial NNP I-NP
Average NNP I-NP
plunged VBD B-VP
more JJR B-NP
than IN I-NP
190 CD I-NP
points NNS I-NP
. . O
Currency NN B-NP
analysts NNS I-NP
predict VBP B-VP
that IN B-SBAR
in IN B-PP
the DT B-NP
coming VBG I-NP
week NN I-NP
the DT B-NP
foreign JJ I-NP
exchange NN I-NP
market NN I-NP
will MD B-VP
shift VB I-VP
its PRP$ B-NP
focus NN I-NP
back RB B-ADVP
to TO B-PP
economic JJ B-NP
fundamentals NNS I-NP
, , O
keeping VBG B-VP
a DT B-NP
close NN I-NP
eye NN I-NP
out IN B-ADVP
for IN B-PP
any DT B-NP
signs NNS I-NP
of IN B-PP
monetary JJ B-NP
easing NN I-NP
by IN B-PP
U.S. NNP B-NP
Federal NNP I-NP
Reserve NNP I-NP
. . O
Late RB B-ADVP
in IN B-PP
the DT B-NP
New NNP I-NP
York NNP I-NP
trading NN I-NP
day NN I-NP
, , O
the DT B-NP
dollar NN I-NP
was VBD B-VP
quoted VBN I-VP
at IN B-PP
1.8578 CD B-NP
marks NNS I-NP
, , O
up IN B-ADVP
from IN B-PP
1.8470 CD B-NP
marks NNS I-NP
late JJ B-NP
Thursday NNP I-NP
in IN B-PP
New NNP B-NP
York NNP I-NP
. . O
The DT B-NP
U.S. NNP I-NP
currency NN I-NP
was VBD B-VP
also RB I-VP
changing VBG I-VP
hands NNS B-NP
at IN B-PP
142.43 CD B-NP
yen NN I-NP
, , O
up IN B-ADVP
from IN B-PP
141.70 CD B-NP
yen NN I-NP
in IN B-PP
New NNP B-NP
York NNP I-NP
late JJ B-NP
Thursday NNP I-NP
. . O
In IN B-PP
Tokyo NNP B-NP
on IN B-PP
Monday NNP B-NP
, , O
the DT B-NP
U.S. NNP I-NP
currency NN I-NP
opened VBD B-VP
for IN B-PP
trading NN B-NP
at IN B-PP
141.95 CD B-NP
yen NN I-NP
, , O
up IN B-ADVP
from IN B-PP
Friday NNP B-NP
's POS B-NP
Tokyo NNP I-NP
close NN I-NP
of IN B-PP
141.35 CD B-NP
yen NN I-NP
. . O
On IN B-PP
the DT B-NP
Commodity NNP I-NP
Exchange NNP I-NP
in IN B-PP
New NNP B-NP
York NNP I-NP
, , O
gold NN B-NP
for IN B-PP
current JJ B-NP
delivery NN I-NP
settled VBD B-VP
at IN B-PP
$ $ B-NP
367.30 CD I-NP
an DT B-NP
ounce NN I-NP
, , O
up IN B-ADVP
20 CD B-NP
cents NNS I-NP
. . O
Estimated VBN B-NP
volume NN I-NP
was VBD B-VP
a DT B-NP
light NN I-NP
2.4 CD I-NP
million CD I-NP
ounces NNS I-NP
. . O
In IN B-PP
early JJ B-NP
trading NN I-NP
in IN B-PP
Hong NNP B-NP
Kong NNP I-NP
Monday NNP B-NP
, , O
gold NN B-NP
was VBD B-VP
quoted VBN I-VP
at IN B-PP
$ $ B-NP
366.50 CD I-NP
an DT B-NP
ounce NN I-NP
. . O
East NNP B-NP
Rock NNP I-NP
Partners NNP I-NP
Limited NNP I-NP
Partnership NNP I-NP
said VBD B-VP
it PRP B-NP
proposed VBD B-VP
to TO I-VP
acquire VB I-VP
A.P. NNP B-NP
Green NNP I-NP
Industries NNP I-NP
Inc. NNP I-NP
for IN B-PP
$ $ B-NP
40 CD I-NP
a DT B-NP
share NN I-NP
. . O
In IN B-PP
an DT B-NP
Oct. NNP I-NP
19 CD I-NP
letter NN I-NP
to TO B-PP
A.P. NNP B-NP
Green NNP I-NP
's POS B-NP
board NN I-NP
, , O
East NNP B-NP
Rock NNP I-NP
said VBD B-VP
the DT B-NP
offer NN I-NP
is VBZ B-VP
subject NN B-ADJP
to TO B-PP
the DT B-NP
signing NN I-NP
of IN B-PP
a DT B-NP
merger NN I-NP
agreement NN I-NP
by IN B-PP
no DT B-ADVP
later RB I-ADVP
than IN B-PP
Oct. NNP B-NP
31 CD I-NP
. . O
The DT B-NP
letter NN I-NP
, , O
attached VBN B-VP
to TO B-PP
a DT B-NP
filing NN I-NP
with IN B-PP
the DT B-NP
Securities NNP I-NP
and CC I-NP
Exchange NNP I-NP
Commission NNP I-NP
, , O
said VBD B-VP
the DT B-NP
approval NN I-NP
is VBZ B-VP
also RB B-ADVP
contingent JJ B-ADJP
upon IN B-PP
obtaining VBG B-VP
satisfactory JJ B-NP
financing NN I-NP
. . O
An DT B-NP
A.P. NNP I-NP
Green NNP I-NP
official NN I-NP
declined VBD B-VP
to TO I-VP
comment VB I-VP
on IN B-PP
the DT B-NP
filing NN I-NP
. . O
The DT B-NP
$ $ I-NP
40-a-share JJ I-NP
proposal NN I-NP
values VBZ B-VP
the DT B-NP
company NN I-NP
at IN B-PP
about RB B-NP
$ $ I-NP
106.6 CD I-NP
million CD I-NP
. . O
A.P. NNP B-NP
Green NNP I-NP
currently RB B-ADVP
has VBZ B-VP
2,664,098 CD B-NP
shares NNS I-NP
outstanding JJ B-ADJP
. . O
Its PRP$ B-NP
stock NN I-NP
closed VBD B-VP
at IN B-PP
$ $ B-NP
38 CD I-NP
, , O
up IN B-ADVP
$ $ B-NP
1.875 CD I-NP
, , O
in IN B-PP
national JJ B-NP
over-the-counter JJ I-NP
trading NN I-NP
. . O
The DT B-NP
company NN I-NP
is VBZ B-VP
a DT B-NP
Mexico NNP I-NP
, , I-NP
Mo. NNP I-NP
, , I-NP
maker NN I-NP
of IN B-PP
refractory JJ B-NP
products NNS I-NP
. . O
East NNP B-NP
Rock NNP I-NP
also RB B-ADVP
said VBD B-VP
in IN B-PP
the DT B-NP
filing NN I-NP
that IN B-SBAR
it PRP B-NP
boosted VBD B-VP
its PRP$ B-NP
stake NN I-NP
in IN B-PP
A.P. NNP B-NP
Green NNP I-NP
to TO B-PP
8.7 CD B-NP
% NN I-NP
. . O
It PRP B-NP
now RB B-ADVP
holds VBZ B-VP
233,000 CD B-NP
A.P. NNP I-NP
Green NNP I-NP
common JJ I-NP
shares NNS I-NP
, , O
including VBG B-PP
30,000 CD B-NP
shares NNS I-NP
bought VBD B-VP
last JJ B-NP
Thursday NNP I-NP
for IN B-PP
$ $ B-NP
35.50 CD I-NP
to TO I-NP
$ $ I-NP
36.50 CD I-NP
a DT B-NP
share NN I-NP
. . O
New NNP B-NP
York-based JJ I-NP
John NNP I-NP
Kuhns NNP I-NP
and CC I-NP
Robert NNP I-NP
MacDonald NNP I-NP
control NN B-VP
East NNP B-NP
Rock NNP I-NP
Partners NNP I-NP
Inc. NNP I-NP
, , O
the DT B-NP
sole JJ I-NP
general JJ I-NP
partner NN I-NP
of IN B-PP
East NNP B-NP
Rock NNP I-NP
Partners NNP I-NP
L.P NNP I-NP
. . O
The DT B-NP
sole JJ I-NP
limited JJ I-NP
partner NN I-NP
of IN B-PP
the DT B-NP
partnership NN I-NP
is VBZ B-VP
Westwood NNP B-NP
Brick NNP I-NP
Lime NNP I-NP
Inc. NNP I-NP
, , O
an DT B-NP
indirect JJ I-NP
subsidiary NN I-NP
of IN B-PP
Westwood NNP B-NP
Group NNP I-NP
Inc NNP I-NP
. . O
Both DT B-NP
Westwood NNP B-NP
Brick NNP I-NP
and CC O
Westwood NNP B-NP
Group NNP I-NP
are VBP B-VP
based VBN I-VP
in IN B-PP
Boston NNP B-NP
. . O
Freight NN B-NP
rates NNS I-NP
, , O
declining VBG B-VP
for IN B-PP
most RBS B-NP
of IN B-PP
the DT B-NP
decade NN I-NP
because IN B-PP
of IN I-PP
competition NN B-NP
spurred VBN B-VP
by IN B-PP
deregulation NN B-NP
, , O
are VBP B-VP
bottoming VBG I-VP
out IN B-PRT
, , O
turning VBG B-VP
upward RB B-ADVP
and CC O
threatening VBG B-VP
to TO I-VP
fuel VB I-VP
inflation NN B-NP
. . O
Trucking NNP B-NP
, , I-NP
shipping VBG I-NP
and CC I-NP
air-freight NN I-NP
companies NNS I-NP
have VBP B-VP
announced VBN I-VP
rate NN B-NP
increases NNS I-NP
, , O
scheduled VBN B-VP
for IN B-PP
this DT B-NP
fall NN I-NP
or CC O
early JJ B-NP
next JJ I-NP
year NN I-NP
, , O
reflecting VBG B-VP
higher JJR B-NP
costs NNS I-NP
and CC O
tightened VBD B-NP
demand NN I-NP
for IN B-PP
freight NN B-NP
transport NN I-NP
. . O
Major JJ B-NP
shippers NNS I-NP
say VBP B-VP
they PRP B-NP
expect VBP B-VP
freight NN B-NP
rates NNS I-NP
to TO B-VP
rise VB I-VP
at IN B-ADVP
least JJS I-ADVP
as RB B-ADVP
fast RB I-ADVP
as IN B-PP
inflation NN B-NP
and CC B-ADVP
maybe RB I-ADVP
faster RBR B-ADVP
in IN B-PP
the DT B-NP
next JJ I-NP
few JJ I-NP
years NNS I-NP
. . O
That DT B-NP
's VBZ B-VP
a DT B-NP
big JJ I-NP
change NN I-NP
from IN B-PP
recent JJ B-NP
years NNS I-NP
when WRB B-ADVP
freight NN B-NP
haulage NN I-NP
was VBD B-VP
a DT B-NP
bright JJ I-NP
spot NN I-NP
for IN B-PP
U.S. NNP B-NP
productivity NN I-NP
, , O
helping VBG B-VP
to TO I-VP
restrain VB I-VP
inflation NN B-NP
and CC O
make VB B-VP
U.S. NNP B-NP
industry NN I-NP
more RBR B-ADJP
competitive JJ I-ADJP
abroad RB B-ADVP
. . O
`` `` O
Demand NN B-NP
has VBZ B-VP
caught VBN I-VP
up IN B-PRT
with IN B-PP
the DT B-NP
supply NN I-NP
of IN B-PP
certain JJ B-NP
types NNS I-NP
of IN B-PP
freight NN B-NP
transportation NN I-NP
, , O
and CC O
rates NNS B-NP
are VBP B-VP
starting VBG I-VP
to TO I-VP
move VB I-VP
up IN B-ADVP
'' '' O
at IN B-PP
a DT B-NP
rate NN I-NP
`` `` O
close RB B-ADJP
to TO B-PP
or CC O
slightly RB B-ADJP
more JJR I-ADJP
than IN B-PP
the DT B-NP
inflation NN I-NP
rate NN I-NP
, , O
'' '' O
said VBD B-VP
Clifford NNP B-NP
Sayre NNP I-NP
, , O
director NN B-NP
of IN B-PP
logistics NNS B-NP
at IN B-PP
Du NNP B-NP
Pont NNP I-NP
Co NNP I-NP
. . O
Shippers NNS B-NP
surveyed VBN B-VP
recently RB B-ADVP
by IN B-PP
Ohio NNP B-NP
State NNP I-NP
University NNP I-NP
said VBD B-VP
they PRP B-NP
expect VBP B-VP
their PRP$ B-NP
freight-transport JJ I-NP
, , I-NP
storage NN I-NP
and CC I-NP
distribution NN I-NP
costs NNS I-NP
to TO B-VP
rise VB I-VP
about IN B-NP
4 CD I-NP
% NN I-NP
this DT B-NP
year NN I-NP
. . O
Only RB B-NP
10 CD I-NP
% NN I-NP
of IN B-PP
the DT B-NP
250 CD I-NP
shippers NNS I-NP
polled VBN B-VP
expected VBN B-VP
their PRP$ B-NP
freight-transport JJ I-NP
costs NNS I-NP
to TO B-VP
decrease VB I-VP
, , O
compared VBN B-PP
with IN B-PP
30 CD B-NP
% NN I-NP
who WP B-NP
had VBD B-VP
looked VBN I-VP
to TO B-PP
freight VB B-NP
transport NN I-NP
to TO B-VP
reduce VB I-VP
costs NNS B-NP
in IN B-PP
past JJ B-NP
years NNS I-NP
. . O
`` `` O
This DT B-NP
is VBZ B-VP
the DT B-NP
first JJ I-NP
year NN I-NP
since IN B-PP
transportation NN B-NP
deregulation NN I-NP
in IN B-PP
1980 CD B-NP
that IN B-ADVP
we PRP B-NP
have VBP B-VP
had VBN I-VP
such JJ B-NP
a DT I-NP
dramatic JJ I-NP
and CC I-NP
broad-based JJ I-NP
upturn NN I-NP
in IN B-PP
perceived VBN B-NP
transportation NN I-NP
rates NNS I-NP
, , O
'' '' O
said VBD B-VP
Bernard NNP B-NP
LaLonde NNP I-NP
, , O
a DT B-NP
transportation NN I-NP
logistics NNS I-NP
professor NN I-NP
at IN B-PP
Ohio NNP B-NP
State NNP I-NP
in IN B-PP
Columbus NNP B-NP
. . O
The DT B-NP
deregulation NN I-NP
of IN B-PP
railroads NNS B-NP
and CC I-NP
trucking NN I-NP
companies NNS I-NP
that WDT B-NP
began VBD B-VP
in IN B-PP
1980 CD B-NP
enabled VBD B-VP
shippers NNS B-NP
to TO B-VP
bargain VB I-VP
for IN B-PP
transportation NN B-NP
. . O
Carriers NNP B-NP
could MD B-VP
use VB I-VP
their PRP$ B-NP
equipment NN I-NP
more RBR B-ADVP
efficiently RB I-ADVP
, , O
leading VBG B-VP
to TO B-PP
overcapacity NN B-NP
they PRP B-NP
were VBD B-VP
eager JJ B-ADJP
to TO B-VP
fill VB I-VP
. . O
Shippers NNS B-NP
cut VBP B-VP
about RB B-NP
$ $ I-NP
35 CD I-NP
billion CD I-NP
from IN B-PP
their PRP$ B-NP
annual JJ I-NP
, , I-NP
inter-city JJ I-NP
truck NN I-NP
and CC I-NP
rail NN I-NP
costs NNS I-NP
, , O
to TO B-PP
about RB B-NP
$ $ I-NP
150 CD I-NP
billion CD I-NP
, , O
or CC O
about IN B-NP
6.4 CD I-NP
% NN I-NP
of IN B-PP
gross JJ B-NP
national JJ I-NP
product NN I-NP
, , O
down RB B-ADVP
from IN B-PP
8 CD B-NP
% NN I-NP
of IN B-PP
GNP NNP B-NP
in IN B-PP
1981 CD B-NP
. . O
But CC O
with IN B-PP
much NN B-NP
of IN B-PP
the DT B-NP
inefficiency NN I-NP
squeezed VBN B-VP
out IN B-PP
of IN B-PP
the DT B-NP
freight-transport JJ I-NP
system NN I-NP
, , O
rising VBG B-NP
costs NNS I-NP
are VBP B-VP
likely JJ B-ADJP
to TO B-VP
be VB I-VP
reflected VBN I-VP
directly RB B-ADVP
in IN B-PP
higher JJR B-NP
freight NN I-NP
rates NNS I-NP
. . O
`` `` O
Shippers NNS B-NP
are VBP B-VP
saying VBG I-VP
` `` O
the DT B-NP
party NN I-NP
's POS B-VP
over IN B-ADJP
, , O
' '' O
'' '' O
said VBD B-VP
Mr. NNP B-NP
LaLonde NNP I-NP
. . O
`` `` O
Shippers NNS B-NP
wo MD B-VP
n't RB I-VP
be VB I-VP
able JJ B-ADJP
to TO B-VP
look VB I-VP
for IN B-PP
transportation-cost JJ B-NP
savings NNS I-NP
as IN B-SBAR
they PRP B-NP
have VBP B-VP
for IN B-PP
the DT B-NP
last JJ I-NP
eight CD I-NP
or CC I-NP
nine CD I-NP
years NNS I-NP
. . O
Transport NN B-NP
rates NNS I-NP
wo MD B-VP
n't RB I-VP
be VB I-VP
an DT B-NP
opportunity NN I-NP
for IN B-PP
offsetting VBG B-VP
cost NN B-NP
increases NNS I-NP
in IN B-PP
other JJ B-NP
segments NNS I-NP
of IN B-PP
the DT B-NP
economy NN I-NP
. . O
'' '' O
Robert NNP B-NP
Delaney NNP I-NP
, , O
a DT B-NP
consultant NN I-NP
at IN B-PP
Arthur NNP B-NP
D. NNP I-NP
Little NNP I-NP
Inc. NNP I-NP
, , O
Cambridge NNP B-NP
, , O
Mass. NNP B-NP
, , O
said VBD B-VP
`` `` O
We PRP B-NP
've VBP B-VP
gotten VBN I-VP
all PDT B-NP
the DT I-NP
benefits NNS I-NP
of IN B-PP
deregulation NN B-NP
in IN B-PP
freight-cost JJ B-NP
reductions NNS I-NP
. . O
Now RB B-ADVP
we PRP B-NP
are VBP B-VP
starting VBG I-VP
to TO I-VP
see VB I-VP
real JJ B-NP
freight-rate JJ I-NP
increases NNS I-NP
as IN B-SBAR
carriers NNS B-NP
replace VBP B-VP
equipment NN B-NP
, , O
pay VB B-VP
higher JJR B-NP
fuel NN I-NP
costs NNS I-NP
and CC O
pay VB B-VP
more JJR B-NP
for IN B-PP
labor NN B-NP
. . O
You PRP B-NP
'll MD B-VP
see VB I-VP
carriers NNS B-NP
try VB B-VP
to TO I-VP
recoup VB I-VP
some DT B-NP
of IN B-PP
the DT B-NP
price NN I-NP
cutting VBG I-NP
that WDT B-NP
occurred VBD B-VP
previously RB B-ADVP
. . O
'' '' O
Not RB B-NP
everyone NN I-NP
believes VBZ B-VP
that IN B-SBAR
the DT B-NP
good JJ I-NP
times NNS I-NP
are VBP B-VP
over IN B-ADJP
for IN B-PP
shippers NNS B-NP
. . O
`` `` O
There EX B-NP
's VBZ B-VP
still RB B-ADVP
a DT B-NP
lot NN I-NP
of IN B-PP
pressure NN B-NP
on IN B-PP
rates NNS B-NP
in IN B-PP
both DT B-NP
rail NN I-NP
and CC I-NP
truck NN I-NP
, , O
'' '' O
said VBD B-VP
Gerard NNP B-NP
McCullough NNP I-NP
, , O
lecturer NN B-NP
in IN B-PP
transportation NN B-NP
at IN B-PP
Massachusetts NNP B-NP
Institute NNP I-NP
of IN B-PP
Technology NNP B-NP
. . O
Less-than-truckload JJ B-NP
companies NNS I-NP
, , O
which WDT B-NP
carry VBP B-VP
the DT B-NP
freight NN I-NP
of IN B-PP
several JJ B-NP
shippers NNS I-NP
in IN B-PP
each DT B-NP
truck NN I-NP
trailer NN I-NP
, , O
discounted VBD B-VP
away RB B-ADVP
a DT B-NP
4.7 CD I-NP
% NN I-NP
rate NN I-NP
increase NN I-NP
implemented VBD B-VP
last JJ B-NP
April NNP I-NP
. . O
The DT B-NP
carriers NNS I-NP
were VBD B-VP
competing VBG I-VP
fiercely RB B-ADVP
for IN B-PP
market NN B-NP
share NN I-NP
. . O
Railroad-rate JJ B-NP
increases NNS I-NP
are VBP B-VP
likely JJ B-ADJP
to TO B-VP
be VB I-VP
restrained VBN I-VP
by IN B-PP
weakening VBG B-NP
rail-traffic JJ I-NP
levels NNS I-NP
and CC O
keen JJ B-NP
competition NN I-NP
for IN B-PP
freight NN B-NP
from IN B-PP
trucks NNS B-NP
. . O
An DT B-NP
official NN I-NP
at IN B-PP
Consolidated NNP B-NP
Freightways NNP I-NP
Inc. NNP I-NP
, , O
a DT B-NP
Menlo NNP I-NP
Park NNP I-NP
, , I-NP
Calif. NNP I-NP
, , I-NP
less-than-truckload JJ I-NP
carrier NN I-NP
, , O
said VBD B-VP
rate NN B-NP
discounting NN I-NP
in IN B-PP
that DT B-NP
industry NN I-NP
has VBZ B-VP
begun VBN I-VP
to TO I-VP
`` `` O
stabilize VB B-VP
. . O
'' '' O
Consolidated NNP B-NP
Freightways NNP I-NP
plans VBZ B-VP
to TO I-VP
raise VB I-VP
its PRP$ B-NP
rates NNS I-NP
5.3 CD B-NP
% NN I-NP
late JJ B-NP
this DT I-NP
year NN I-NP
or CC O
early JJ B-NP
next JJ I-NP
year NN I-NP
, , O
and CC O
at IN B-NP
least JJS I-NP
two CD I-NP
competitors NNS I-NP
have VBP B-VP
announced VBN I-VP
similar JJ B-NP
increases NNS I-NP
. . O
Truckers NNS B-NP
are VBP B-VP
`` `` O
trying VBG B-VP
to TO I-VP
send VB I-VP
signals NNS B-NP
that IN B-SBAR
they PRP B-NP
need VBP B-VP
to TO I-VP
stop VB I-VP
the DT B-NP
bloodletting NN I-NP
, , O
forget VB B-VP
about IN B-PP
market NN B-NP
share NN I-NP
and CC O
go VB B-VP
for IN B-PP
higher JJR B-NP
rates NNS I-NP
, , O
'' '' O
said VBD B-VP
Michael NNP B-NP
Lloyd NNP I-NP
, , O
an DT B-NP
analyst NN I-NP
at IN B-PP
Salomon NNP B-NP
Bros NNP I-NP
. . O
And CC O
`` `` O
shippers NNS B-NP
are VBP B-VP
getting VBG I-VP
the DT B-NP
feeling NN I-NP
that IN B-SBAR
they PRP B-NP
have VBP B-VP
played VBN I-VP
one CD B-NP
trucker NN I-NP
off IN B-ADVP
against IN B-PP
another DT B-NP
as RB B-NP
much JJ I-NP
as IN B-SBAR
they PRP B-NP
can MD B-VP
, , O
'' '' O
he PRP B-NP
said VBD B-VP
. . O
Air-freight NN B-NP
carriers NNS I-NP
raised VBD B-VP
their PRP$ B-NP
rates NNS I-NP
for IN B-PP
U.S. NNP B-NP
products NNS I-NP
going VBG B-VP
across IN B-PP
the DT B-NP
Pacific NNP I-NP
to TO B-PP
Asia NNP B-NP
by IN B-PP
about IN B-NP
20 CD I-NP
% NN I-NP
earlier RBR B-NP
this DT I-NP
month NN I-NP
. . O
And CC O
Japan NNP B-NP
Air NNP I-NP
Lines NNPS I-NP
said VBD B-VP
it PRP B-NP
plans VBZ B-VP
to TO I-VP
boost VB I-VP
its PRP$ B-NP
rates NNS I-NP
a DT B-NP
further JJ I-NP
25 CD I-NP
% NN I-NP
over IN B-PP
the DT B-NP
next JJ I-NP
two CD I-NP
years NNS I-NP
. . O
Such JJ B-NP
rate NN I-NP
increases NNS I-NP
`` `` O
will MD B-VP
increase VB I-VP
the DT B-NP
total JJ I-NP
cost NN I-NP
of IN B-PP
U.S. NNP B-NP
products NNS I-NP
and CC O
slow JJ B-VP
down RP B-PRT
the DT B-NP
rate NN I-NP
of IN B-PP
increase NN B-NP
of IN B-PP
U.S. NNP B-NP
exports NNS I-NP
, , O
'' '' O
said VBD B-VP
Richard NNP B-NP
Connors NNP I-NP
, , O
a DT B-NP
senior JJ I-NP
vice NN I-NP
president NN I-NP
of IN B-PP
Yusen NNP B-NP
Air NNP I-NP
& CC I-NP
Sea NNP I-NP
Service NNP I-NP
U.S.A. NNP I-NP
Inc. NNP I-NP
, , O
the DT B-NP
U.S. NNP I-NP
air-freight-forwarding JJ I-NP
subsidiary NN I-NP
of IN B-PP
Nippon NNP B-NP
Yusen NNP I-NP
Kaisha NNP I-NP
of IN B-PP
Japan NNP B-NP
. . O
Ship NN B-NP
companies NNS I-NP
carrying VBG B-VP
bulk NN B-NP
commodities NNS I-NP
, , O
such JJ B-PP
as IN I-PP
oil NN B-NP
, , O
grain NN B-NP
, , O
coal NN B-NP
and CC O
iron NN B-NP
ore NN I-NP
, , O
have VBP B-VP
been VBN I-VP
able JJ B-ADJP
to TO B-VP
increase VB I-VP
their PRP$ B-NP
rates NNS I-NP
in IN B-PP
the DT B-NP
last JJ I-NP
couple NN I-NP
of IN B-PP
years NNS B-NP
. . O
Some DT B-NP
bulk NN I-NP
shipping VBG I-NP
rates NNS I-NP
have VBP B-VP
increased VBN I-VP
`` `` O
3 CD B-NP
% NN I-NP
to TO I-NP
4 CD I-NP
% NN I-NP
in IN B-PP
the DT B-NP
past JJ I-NP
few JJ I-NP
months NNS I-NP
, , O
'' '' O
said VBD B-VP
Salomon NNP B-NP
's POS B-NP
Mr. NNP I-NP
Lloyd NNP I-NP
. . O
And CC O
ship NN B-NP
lines NNS I-NP
carrying VBG B-VP
containers NNS B-NP
are VBP B-VP
also RB I-VP
trying VBG I-VP
to TO I-VP
raise VB I-VP
their PRP$ B-NP
rates NNS I-NP
. . O
Carriers NNP B-NP
boosted VBD B-VP
rates NNS B-NP
more JJR B-NP
than IN I-NP
10 CD I-NP
% NN I-NP
in IN B-PP
the DT B-NP
North NNP I-NP
Atlantic NNP I-NP
between IN B-PP
the DT B-NP
U.S. NNP I-NP
and CC O
Europe NNP B-NP
last JJ B-NP
September NNP I-NP
, , O
hoping VBG B-VP
to TO I-VP
partly RB I-VP
restore VB I-VP
rates NNS B-NP
to TO B-PP
earlier JJR B-NP
levels NNS I-NP
. . O
Ship NN B-NP
lines NNS I-NP
operating VBG B-VP
in IN B-PP
the DT B-NP
Pacific NNP I-NP
plan NN B-VP
to TO I-VP
raise VB I-VP
rates NNS B-NP
on IN B-PP
containers NNS B-NP
carrying VBG B-VP
U.S. NNP B-NP
exports NNS I-NP
to TO B-PP
Asia NNP B-NP
about IN B-NP
10 CD I-NP
% NN I-NP
, , O
effective JJ B-ADJP
next JJ B-NP
April NNP I-NP
. . O
MGM NNP B-NP
Grand NNP I-NP
Inc. NNP I-NP
said VBD B-VP
it PRP B-NP
filed VBD B-VP
a DT B-NP
registration NN I-NP
statement NN I-NP
with IN B-PP
the DT B-NP
Securities NNP I-NP
and CC I-NP
Exchange NNP I-NP
Commission NNP I-NP
for IN B-PP
a DT B-NP
public JJ I-NP
offering NN I-NP
of IN B-PP
six CD B-NP
million CD I-NP
common JJ I-NP
shares NNS I-NP
. . O
The DT B-NP
Beverly NNP I-NP
Hills NNP I-NP
, , I-NP
Calif.-based JJ I-NP
company NN I-NP
said VBD B-VP
it PRP B-NP
would MD B-VP
have VB I-VP
26.9 CD B-NP
million CD I-NP
common JJ I-NP
shares NNS I-NP
outstanding JJ B-ADJP
after IN B-PP
the DT B-NP
offering NN I-NP
. . O
The DT B-NP
hotel NN I-NP
and CC I-NP
Gaming NNP I-NP
company NN I-NP
said VBD B-VP
Merrill NNP B-NP
Lynch NNP I-NP
Capital NNP I-NP
Markets NNPS I-NP
will MD B-VP
lead VB I-VP
the DT B-NP
underwriters NNS I-NP
. . O
Proceeds NNS B-NP
from IN B-PP
the DT B-NP
sale NN I-NP
will MD B-VP
be VB I-VP
used VBN I-VP
for IN B-PP
remodeling VBG B-NP
and CC I-NP
refurbishing VBG I-NP
projects NNS I-NP
, , B-PP
as RB I-PP
well RB I-PP
as IN I-PP
for IN B-PP
the DT B-NP
planned VBN I-NP
MGM NNP I-NP
Grand NNP I-NP
hotel\/casino NN I-NP
and CC I-NP
theme NN I-NP
park NN I-NP
. . O
Bob NNP B-NP
Stone NNP I-NP
stewed JJ B-VP
over IN B-PP
a DT B-NP
letter NN I-NP
from IN B-PP
his PRP$ B-NP
manager NN I-NP
putting VBG B-VP
him PRP B-NP
on IN B-PP
probation NN B-NP
for IN B-PP
insubordination NN B-NP
. . O
Mr. NNP B-NP
Stone NNP I-NP
thought VBD B-VP
the DT B-NP
discipline NN I-NP
was VBD B-VP
unfair JJ B-ADJP
; : O
he PRP B-NP
believed VBD B-VP
that IN B-SBAR
his PRP$ B-NP
manager NN I-NP
wanted VBD B-VP
to TO I-VP
get VB I-VP
rid JJ B-ADJP
of IN B-PP
him PRP B-NP
for IN B-PP
personal JJ B-NP
reasons NNS I-NP
. . O
Unable JJ B-ADJP
to TO B-VP
persuade VB I-VP
the DT B-NP
manager NN I-NP
to TO B-VP
change VB I-VP
his PRP$ B-NP
decision NN I-NP
, , O
he PRP B-NP
went VBD B-VP
to TO B-PP
a DT B-NP
`` `` I-NP
company NN I-NP
court NN I-NP
'' '' O
for IN B-PP
a DT B-NP
hearing NN I-NP
. . O
At IN B-PP
the DT B-NP
scheduled VBN I-NP
time NN I-NP
, , O
Mr. NNP B-NP
Stone NNP I-NP
entered VBD B-VP
a DT B-NP
conference NN I-NP
room NN I-NP
in IN B-PP
a DT B-NP
building NN I-NP
near IN B-PP
where WRB B-ADVP
he PRP B-NP
worked VBD B-VP
. . O
After IN B-SBAR
the DT B-NP
three CD I-NP
members NNS I-NP
of IN B-PP
the DT B-NP
court NN I-NP
introduced VBD B-VP
themselves PRP B-NP
, , O
the DT B-NP
chairman NN I-NP
of IN B-PP
the DT B-NP
panel NN I-NP
said VBD B-VP
: : O
`` `` O
Go VB B-VP
ahead RB B-ADVP
and CC O
tell VB B-VP
us PRP B-NP
what WP B-NP
happened VBD B-VP
. . O
We PRP B-NP
may MD B-VP
ask VB I-VP
questions NNS B-NP
as IN B-SBAR
you PRP B-NP
go VBP B-VP
along IN B-PRT
, , O
or CC O
we PRP B-NP
may MD B-VP
wait VB I-VP
until IN B-PP
the DT B-NP
end NN I-NP
. . O
'' '' O
No DT B-NP
lawyers NNS I-NP
or CC I-NP
tape NN I-NP
recorders NNS I-NP
were VBD B-VP
present JJ B-ADJP
. . O
The DT B-NP
only RB I-NP
extra JJ I-NP
people NNS I-NP
were VBD B-VP
a DT B-NP
couple NN I-NP
of IN B-PP
personnel NNS B-NP
specialists NNS I-NP
, , O
one CD B-NP
of IN B-PP
whom WP B-NP
knew VBD B-VP
Mr. NNP B-NP
Stone NNP I-NP
's POS B-NP
case NN I-NP
intimately RB B-ADVP
and CC O
would MD B-VP
help VB I-VP
fill VB I-VP
in IN B-PRT
any DT B-NP
facts NNS I-NP
needed VBN B-VP
to TO B-VP
give VB I-VP
the DT B-NP
court NN I-NP
the DT B-NP
full JJ I-NP
picture NN I-NP
. . O
Over IN B-PP
a DT B-NP
cup NN I-NP
of IN B-PP
coffee NN B-NP
, , O
Mr. NNP B-NP
Stone NNP I-NP
told VBD B-VP
his PRP$ B-NP
story NN I-NP
. . O
He PRP B-NP
talked VBD B-VP
about IN B-NP
20 CD I-NP
minutes NNS I-NP
. . O
When WRB B-ADVP
he PRP B-NP
was VBD B-VP
through IN B-ADJP
, , O
the DT B-NP
court NN I-NP
members NNS I-NP
asked VBD B-VP
many JJ B-NP
questions NNS I-NP
, , O
then RB B-ADVP
the DT B-NP
chairman NN I-NP
said VBD B-VP
they PRP B-NP
would MD B-VP
like VB I-VP
to TO I-VP
hear VB I-VP
his PRP$ B-NP
manager NN I-NP
's POS B-NP
side NN I-NP
and CC O
talk VB B-VP
to TO B-PP
witnesses NNS B-NP
. . O
The DT B-NP
chairman NN I-NP
promised VBD B-VP
Mr. NNP B-NP
Stone NNP I-NP
a DT B-NP
decision NN I-NP
within IN B-PP
two CD B-NP
weeks NNS I-NP
. . O
Bob NNP B-NP
Stone NNP I-NP
is VBZ B-VP
a DT B-NP
fictional JJ I-NP
name NN I-NP
, , O
but CC O
the DT B-NP
incident NN I-NP
described VBN B-VP
is VBZ B-VP
real JJ B-ADJP
. . O
It PRP B-NP
happened VBD B-VP
at IN B-PP
Northrop NNP B-NP
Corp. NNP I-NP
in IN B-PP
Los NNP B-NP
Angeles NNP I-NP
. . O
The DT B-NP
court NN I-NP
is VBZ B-VP
called VBN I-VP
the DT B-NP
Management NNP I-NP
Appeals NNP I-NP
Committee NNP I-NP
, , O
or CC O
just RB B-NP
`` `` I-NP
MAC NNP I-NP
, , O
'' '' O
and CC O
it PRP B-NP
is VBZ B-VP
likely JJ B-ADJP
to TO B-VP
hear VB I-VP
a DT B-NP
couple NN I-NP
of IN I-NP
dozen NN I-NP
cases VBZ I-NP
a DT B-NP
year NN I-NP
. . O
Alter VB B-VP
some DT B-NP
details NNS I-NP
of IN B-PP
this DT B-NP
example NN I-NP
and CC O
it PRP B-NP
could MD B-VP
be VB I-VP
taking VBG I-VP
place NN B-NP
today NN B-ADVP
at IN B-PP
Federal NNP B-NP
Express NNP I-NP
in IN B-PP
Memphis NNP B-NP
, , O
the DT B-NP
Defense NNP I-NP
and CC I-NP
Underseas NNP I-NP
Systems NNP I-NP
divisions NNS I-NP
of IN B-PP
Honeywell NNP B-NP
in IN B-PP
Minneapolis NNP B-NP
, , O
a DT B-NP
General NNP I-NP
Electric NNP I-NP
plant NN I-NP
in IN B-PP
Columbia NNP B-NP
, , O
Md. NNP B-NP
, , O
or CC O
a DT B-NP
number NN I-NP
of IN B-PP
other JJ B-NP
companies NNS I-NP
. . O
These DT B-NP
firms NNS I-NP
are VBP B-VP
pioneers NNS B-NP
in IN B-PP
a DT B-NP
significant JJ I-NP
new JJ I-NP
trend NN I-NP
in IN B-PP
the DT B-NP
corporate JJ I-NP
world NN I-NP
: : O
the DT B-NP
rise NN I-NP
of IN B-PP
what WP B-NP
I PRP B-NP
call VBP B-VP
corporate JJ B-NP
due JJ I-NP
process NN I-NP
. . O
Although IN B-SBAR
corporate JJ B-NP
due JJ I-NP
process NN I-NP
is VBZ B-VP
practiced VBN I-VP
today NN B-NP
in IN B-PP
few JJ B-NP
companies NNS I-NP
-- : O
perhaps RB B-ADVP
40 CD B-NP
to TO I-NP
60 CD I-NP
-- : O
it PRP B-NP
is VBZ B-VP
one CD B-NP
of IN B-PP
the DT B-NP
fastest JJS I-NP
developing VBG I-NP
trends NNS I-NP
in IN B-PP
industry NN B-NP
. . O
In IN B-PP
the DT B-NP
coming VBG I-NP
decade NN I-NP
a DT B-NP
majority NN I-NP
of IN B-PP
people-oriented JJ B-NP
companies NNS I-NP
are VBP B-VP
likely JJ B-ADJP
to TO B-VP
adopt VB I-VP
it PRP B-NP
. . O
Corporate JJ B-NP
due JJ I-NP
process NN I-NP
appeals NNS B-VP
to TO B-PP
management NN B-NP
for IN B-PP
a DT B-NP
variety NN I-NP
of IN B-PP
reasons NNS B-NP
. . O
It PRP B-NP
reduces VBZ B-VP
lawsuits NNS B-NP
from IN B-PP
disgruntled JJ B-NP
employees NNS I-NP
and CC I-NP
ex-employees NNS I-NP
, , O
with IN B-PP
all DT B-NP
that WDT B-NP
means VBZ B-VP
for IN B-PP
reduced VBN B-NP
legal JJ I-NP
costs NNS I-NP
and CC O
better RBR B-NP
public JJ I-NP
relations NNS I-NP
. . O
It PRP B-NP
helps VBZ B-VP
to TO I-VP
keep VB I-VP
out IN B-PRT
unions NNS B-NP
. . O
It PRP B-NP
increases VBZ B-VP
employee NN B-NP
commitment NN I-NP
to TO B-PP
the DT B-NP
company NN I-NP
, , O
with IN B-PP
all DT B-NP
that WDT B-NP
means VBZ B-VP
for IN B-PP
efficiency NN B-NP
and CC O
quality NN B-NP
control NN I-NP
. . O
What WP B-NP
must MD O
your PRP$ B-NP
management NN I-NP
team NN I-NP
do VBP B-VP
to TO B-VP
establish VB I-VP
corporate JJ B-NP
due JJ I-NP
process NN I-NP
? . O
Here RB B-ADVP
are VBP B-VP
four CD B-NP
key JJ I-NP
steps NNS I-NP
: : O
1 CD B-LST
. . O
Make VB B-VP
sure JJ B-ADJP
you PRP B-NP
have VBP B-VP
a DT B-NP
strong JJ I-NP
personnel NNS I-NP
department NN I-NP
. . O
It PRP B-NP
must MD B-VP
be VB I-VP
able JJ B-ADJP
to TO B-VP
handle VB I-VP
most RBS B-NP
of IN B-PP
the DT B-NP
complaints NNS I-NP
that WDT B-NP
can MD B-VP
not RB I-VP
be VB I-VP
solved VBN I-VP
in IN B-PP
the DT B-NP
trenches NNS I-NP
by IN B-PP
managers NNS B-NP
and CC O
their PRP$ B-NP
subordinates NNS I-NP
, , O
else RB B-ADVP
the DT B-NP
company NN I-NP
court NN I-NP
or CC I-NP
adjudicators NNS I-NP
will MD B-VP
be VB B-VP
inundated VBN I-VP
with IN B-PP
cases NNS B-NP
. . O
At IN B-PP
Polaroid NNP B-NP
, , O
the DT B-NP
Personnel NNP I-NP
Policy NNP I-NP
Planning NNP I-NP
Committee NNP I-NP
may MD B-VP
hear VB I-VP
only RB B-NP
about IN I-NP
20 CD I-NP
cases VBZ I-NP
a DT B-NP
year NN I-NP
; : O
the DT B-NP
rest NN I-NP
of IN B-PP
the DT B-NP
many JJ I-NP
hundreds NNS I-NP
of IN B-PP
complaints NNS B-NP
are VBP B-VP
resolved VBN I-VP
at IN B-PP
earlier JJR B-NP
stages NNS I-NP
. . O
At IN B-PP
TWA NNP B-NP
, , O
the DT B-NP
System NNP I-NP
Board NNP I-NP
of IN B-PP
Adjustment NNP B-NP
hears VBZ B-VP
50 CD B-NP
to TO I-NP
75 CD I-NP
cases VBZ I-NP
a DT B-NP
year NN I-NP
, , O
only RB B-NP
a DT I-NP
fraction NN I-NP
of IN B-PP
the DT B-NP
complaints NNS I-NP
brought VBN B-VP
to TO B-PP
personnel NNS B-NP
specialists NNS I-NP
. . O
At IN B-PP
Citicorp NNP B-NP
, , O
the DT B-NP
Problem NNP I-NP
Review NNP I-NP
Board NNP I-NP
may MD B-VP
hear VB I-VP
only RB B-NP
12 CD I-NP
or CC I-NP
so RB I-NP
cases VBZ I-NP
because IN B-PP
of IN I-PP
personnel NNS B-NP
's POS B-NP
skill NN I-NP
in IN B-PP
complaint-resolution NN B-NP
. . O
In IN B-PP
a DT B-NP
typical JJ I-NP
year NN I-NP
, , O
up IN B-NP
to TO I-NP
20 CD I-NP
% NN I-NP
of IN B-PP
the DT B-NP
work NN I-NP
force NN I-NP
goes VBZ B-VP
to TO B-PP
personnel NNS B-NP
specialists NNS I-NP
with IN B-PP
complaints NNS B-NP
of IN B-PP
unfair JJ B-NP
treatment NN I-NP
. . O
In IN B-PP
a DT B-NP
large JJ I-NP
company NN I-NP
that WDT B-NP
means VBZ B-VP
many JJ B-NP
hundreds NNS I-NP
of IN B-PP
complaints NNS B-NP
for IN B-PP
personnel NNS B-NP
to TO B-VP
handle VB I-VP
. . O
2 CD B-LST
. . O
Formally RB B-ADVP
or CC I-ADVP
informally RB I-ADVP
, , O
train NN B-VP
all DT B-NP
your PRP$ I-NP
managers NNS I-NP
and CC I-NP
supervisors NNS I-NP
in IN B-PP
the DT B-NP
company NN I-NP
's POS B-NP
due-process NN I-NP
approach NN I-NP
. . O
See VB B-VP
that IN B-SBAR
they PRP B-NP
know VBP B-VP
company NN B-NP
personnel NNS I-NP
policy NN I-NP
backwards RB B-ADVP
and CC I-ADVP
forwards RB I-ADVP
, , O
for IN O
it PRP B-NP
is VBZ B-VP
the DT B-NP
`` `` I-NP
law NN I-NP
'' '' O
governing VBG B-VP
company NN B-NP
courts NNS I-NP
and CC I-NP
adjudicators NNS I-NP
. . O
Coach NNP B-VP
them PRP B-NP
in IN B-PP
handling NN B-VP
complaints NNS B-NP
so RB B-SBAR
that IN I-SBAR
they PRP B-NP
can MD B-VP
resolve VB I-VP
problems NNS B-NP
immediately RB B-ADVP
. . O
In IN B-SBAR
case NN O
managers NNS B-NP
and CC O
personnel NNS B-NP
specialists NNS I-NP
are VBP B-VP
unsuccessful JJ B-ADJP
and CC O
subordinates NNS B-NP
take VBP B-VP
their PRP$ B-NP
complaints NNS I-NP
to TO B-PP
a DT B-NP
company NN I-NP
court NN I-NP
or CC I-NP
adjudicator NN I-NP
, , O
teach VB B-VP
managers NNS B-NP
to TO B-VP
accept VB I-VP
reversals NNS B-NP
as IN B-PP
a DT B-NP
fact NN I-NP
of IN B-PP
business NN B-NP
life NN I-NP
, , O
for IN O
in IN B-PP
a DT B-NP
good JJ I-NP
due-process NN I-NP
system NN I-NP
they PRP B-NP
are VBP B-VP
bound VBN I-VP
to TO I-VP
happen VB I-VP
. . O
In IN B-PP
the DT B-NP
15 CD I-NP
companies NNS I-NP
I PRP B-NP
studied VBD B-VP
, , O
reversal NN B-NP
rates NNS I-NP
range VBP B-VP
on IN B-PP
the DT B-NP
average NN I-NP
from IN B-PP
20 CD B-NP
% NN I-NP
to TO B-PP
40 CD B-NP
% NN I-NP
. . O
3 CD B-LST
. . O
Decide VB B-VP
whether IN O
you PRP B-NP
want VBP B-VP
a DT B-NP
panel NN I-NP
system NN I-NP
or CC O
a DT B-NP
single JJ I-NP
adjudicator NN I-NP
. . O
A DT B-NP
panel NN I-NP
system NN I-NP
like IN B-PP
that DT B-NP
in NN B-PP
the DT B-NP
Bob NNP I-NP
Stone NNP I-NP
example NN I-NP
enjoys VBZ B-VP
such JJ B-NP
advantages NNS I-NP
as IN B-PP
high JJ B-NP
credibility NN I-NP
and CC O
, , O
for IN B-PP
the DT B-NP
panelists NNS I-NP
, , O
mutual JJ B-NP
support NN I-NP
. . O
An DT B-NP
adjudicator NN I-NP
system NN I-NP
-- : O
that DT B-INTJ
is VBZ I-INTJ
, , O
an DT B-NP
investigator NN I-NP
who WP B-NP
acts VBZ B-VP
first JJ B-ADVP
as IN B-PP
a DT B-NP
fact-finder NN I-NP
and CC O
then RB O
switches VBZ B-VP
hats NNS B-NP
and CC O
arbitrates VBZ B-VP
the DT B-NP
facts NNS I-NP
-- : O
has VBZ B-VP
such JJ B-NP
advantages NNS I-NP
as IN B-PP
speed NN B-NP
, , O
flexibility NN B-NP
and CC O
maximum JJ B-NP
privacy NN I-NP
. . O
International NNP B-NP
Business NNP I-NP
Machines NNPS I-NP
and CC O
Bank NNP B-NP
of IN B-PP
America NNP B-NP
are VBP B-VP
among IN B-PP
the DT B-NP
companies NNS I-NP
using VBG B-VP
the DT B-NP
single-adjudicator JJ I-NP
approach NN I-NP
. . O
4 CD B-LST
. . O
Make VB B-VP
your PRP$ B-NP
due-process NN I-NP
system NN I-NP
visible JJ B-ADJP
. . O
It PRP B-NP
wo MD B-VP
n't RB I-VP
do VB I-VP
any DT B-NP
good NN I-NP
for IN B-PP
anybody NN B-NP
unless IN B-SBAR
employees NNS B-NP
know VBP B-VP
about IN B-PP
it PRP B-NP
. . O
Most JJS B-NP
managements NNS I-NP
hesitate VBP B-VP
to TO I-VP
go VB I-VP
all DT B-ADVP
out NN I-ADVP
in IN B-PP
advertising VBG B-VP
their PRP$ B-NP
due-process NN I-NP
systems NNS I-NP
for IN B-PP
fear NN B-NP
of IN B-PP
encouraging VBG B-VP
cranks NNS B-NP
and CC O
chronic JJ B-NP
soreheads NNS I-NP
to TO B-VP
file VB I-VP
complaints NNS B-NP
. . O
On IN B-PP
the DT B-NP
other JJ I-NP
hand NN I-NP
, , O
they PRP B-NP
make VBP B-VP
sure JJ B-ADJP
at IN B-PP
a DT B-NP
minimum NN I-NP
that IN B-SBAR
their PRP$ B-NP
systems NNS I-NP
are VBP B-VP
described VBN I-VP
in IN B-PP
their PRP$ B-NP
employee NN I-NP
handbooks NNS I-NP
and CC O
talked VBD B-VP
up IN B-PRT
by IN B-PP
personnel NNS B-NP
specialists NNS I-NP
. . O
Smith-Kline NNP B-NP
Beecham NNP I-NP
goes VBZ B-VP
further JJ B-ADVP
and CC O
sometimes RB B-VP
features VBZ I-VP
its PRP$ B-NP
grievance NN I-NP
procedure NN I-NP
in IN B-PP
closed-circuit JJ B-NP
TV NN I-NP
programs NNS I-NP
. . O
Naturally RB B-ADVP
, , O
one CD B-NP
of IN B-PP
the DT B-NP
best JJS I-NP
ways NNS I-NP
to TO B-VP
guarantee VB I-VP
visibility NN B-NP
for IN B-PP
your PRP$ B-NP
due-process NN I-NP
system NN I-NP
is VBZ B-VP
for IN B-SBAR
top JJ B-NP
management NN I-NP
to TO B-VP
support VB I-VP
it PRP B-NP
. . O
At IN B-PP
IBM NNP B-NP
, , O
the DT B-NP
company NN I-NP
's POS B-NP
Open NNP I-NP
Door NNP I-NP
system NN I-NP
is VBZ B-VP
sometimes RB B-ADVP
the DT B-NP
subject NN I-NP
of IN B-PP
memorandums NNS B-NP
from IN B-PP
the DT B-NP
chief JJ I-NP
executive NN I-NP
. . O
Federal NNP B-NP
Express NNP I-NP
goes VBZ B-VP
further JJ B-ADVP
in IN B-PP
this DT B-NP
respect NN I-NP
than IN B-PP
any DT B-NP
company NN I-NP
I PRP B-NP
know VBP B-VP
of IN B-PP
with IN B-PP
both DT B-NP
Frederick NNP B-NP
Smith NNP I-NP
and CC O
James NNP B-NP
Barksdale NNP I-NP
, , O
chief JJ B-NP
executive NN I-NP
and CC O
chief JJ B-NP
operating VBG I-NP
officer NN I-NP
, , O
respectively RB B-ADVP
, , O
sitting VBG B-VP
in IN B-PRT
on IN B-PP
the DT B-NP
Appeals NNP I-NP
Board NNP I-NP
almost RB B-NP
every DT I-NP
Tuesday NNP I-NP
to TO B-VP
decide VB I-VP
cases NNS B-NP
. . O
Mr. NNP B-NP
Ewing NNP I-NP
is VBZ B-VP
a DT B-NP
consultant NN I-NP
based VBN B-VP
in IN B-PP
Winchester NNP B-NP
, , O
Mass. NNP B-NP
, , O
and CC O
author NN B-NP
of IN B-PP
`` `` O
Justice NNP B-NP
on IN B-PP
the DT B-NP
Job NNP I-NP
: : O
Resolving NNP B-VP
Grievances NNP B-NP
in IN B-PP
the DT B-NP
Nonunion NNP I-NP
Workplace NN I-NP
'' '' O
-LRB- ( O
Harvard NNP B-NP
Business NNP I-NP
School NNP I-NP
Press NNP I-NP
, , O
1989 CD B-NP
-RRB- ) O
. . O
Tokyo NNP B-NP
stocks NNS I-NP
closed VBD B-VP
higher JJR B-ADVP
in IN B-PP
active JJ B-NP
trading NN I-NP
Friday NNP B-NP
, , O
marking VBG B-VP
the DT B-NP
fourth JJ I-NP
consecutive JJ I-NP
daily JJ I-NP
gain NN I-NP
since IN B-PP
Monday NNP B-NP
's POS B-NP
sharp JJ I-NP
fall NN I-NP
. . O
London JJ B-NP
shares NNS I-NP
closed VBD B-VP
moderately RB B-ADVP
lower JJR I-ADVP
in IN B-PP
thin JJ B-NP
trading NN I-NP
. . O
At IN B-PP
Tokyo NNP B-NP
, , O
the DT B-NP
Nikkei NNP I-NP
index NN I-NP
of IN B-PP
225 CD B-NP
selected VBN I-NP
issues NNS I-NP
was VBD B-VP
up IN B-ADVP
112.16 CD B-NP
points NNS I-NP
to TO B-PP
35486.38 CD B-NP
. . O
The DT B-NP
index NN I-NP
advanced VBD B-VP
266.66 CD B-NP
points NNS I-NP
Thursday NNP B-NP
. . O
In IN B-PP
early JJ B-NP
trading NN I-NP
in IN B-PP
Tokyo NNP B-NP
Monday NNP B-NP
, , O
the DT B-NP
Nikkei NNP I-NP
index NN I-NP
rose VBD B-VP
101.98 CD B-NP
points NNS I-NP
to TO B-PP
35588.36 CD B-NP
. . O
Friday NNP B-NP
's POS B-NP
volume NN I-NP
on IN B-PP
the DT B-NP
First NNP I-NP
Section NN I-NP
was VBD B-VP
estimated VBN I-VP
at IN B-PP
one CD B-NP
billion CD I-NP
shares NNS I-NP
, , O
up IN B-ADVP
from IN B-PP
862 CD B-NP
million CD I-NP
Thursday NNP B-NP
. . O
Winners NNS B-NP
outpaced VBD B-VP
losers NNS B-NP
, , O
572 CD B-ADVP
to TO I-ADVP
368 CD I-ADVP
, , O
while IN B-SBAR
181 CD B-NP
issues NNS I-NP
remained VBD B-VP
unchanged JJ B-ADJP
. . O
With IN B-SBAR
investors NNS B-NP
relieved VBN B-ADJP
at IN B-PP
the DT B-NP
overnight JJ I-NP
gain NN I-NP
in IN B-PP
New NNP B-NP
York NNP I-NP
stocks NNS I-NP
, , O
small-lot JJ B-NP
buying NN I-NP
orders NNS I-NP
streamed VBD B-VP
into IN B-PP
the DT B-NP
market NN I-NP
from IN B-PP
early JJ B-NP
morning NN I-NP
, , O
making VBG B-VP
traders NNS B-NP
believe VBP B-VP
the DT B-NP
market NN I-NP
was VBD B-VP
back RB B-ADVP
to TO B-PP
normal JJ B-NP
. . O
The DT B-NP
Nikkei NNP I-NP
, , O
which WDT B-NP
reached VBD B-VP
as RB B-ADJP
high JJ I-ADJP
as IN B-PP
35611.38 CD B-NP
right NN B-ADVP
after IN B-PP
the DT B-NP
opening NN I-NP
, , O
surrendered VBD B-VP
part NN B-NP
of IN B-PP
its PRP$ B-NP
early JJ I-NP
advance NN I-NP
toward IN B-PP
the DT B-NP
end NN I-NP
of IN B-PP
the DT B-NP
day NN I-NP
because IN B-PP
of IN I-PP
profit-taking NN B-NP
. . O
`` `` O
Investors NNS B-NP
, , B-NP
especially RB I-NP
dealers NNS B-NP
, , O
do VBP B-VP
n't RB I-VP
want VB I-VP
to TO I-VP
hold VB I-VP
a DT B-NP
position NN I-NP
over IN B-PP
the DT B-NP
weekend NN I-NP
, , O
'' '' O
a DT B-NP
trader NN I-NP
at IN B-PP
Dai-ichi NNP B-NP
Securities NNP I-NP
said VBD B-VP
, , O
adding VBG B-VP
, , O
though RB B-ADVP
, , O
that IN B-SBAR
the DT B-NP
trading NN I-NP
mood NN I-NP
remained VBD B-VP
positive JJ B-ADJP
through IN B-PP
the DT B-NP
afternoon NN I-NP
session NN I-NP
. . O
The DT B-NP
Tokyo NNP I-NP
Stock NNP I-NP
Price NNP I-NP
Index NNP I-NP
-LRB- ( O
Topix NNP B-NP
-RRB- ) O
of IN B-PP
all DT B-NP
issues NNS I-NP
listed VBN B-VP
in IN B-PP
the DT B-NP
First NNP I-NP
Section NN I-NP
, , O
which WDT B-NP
gained VBD B-VP
22.78 CD B-NP
points NNS I-NP
Thursday NNP B-NP
, , O
was VBD B-VP
up IN B-ADVP
14.06 CD B-NP
points NNS I-NP
, , O
or CC O
0.53 CD B-NP
% NN I-NP
, , O
at IN B-PP
2679.72 CD B-NP
. . O
The DT B-NP
Second JJ I-NP
Section NN I-NP
index NN I-NP
, , O
which WDT B-NP
rose VBD B-VP
15.72 CD B-NP
points NNS I-NP
Thursday NNP B-NP
, , O
was VBD B-VP
up IN B-ADVP
11.88 CD B-NP
points NNS I-NP
, , O
or CC O
0.32 CD B-NP
% NN I-NP
, , O
to TO B-VP
close VB I-VP
at IN B-PP
3717.46 CD B-NP
. . O
Volume NN B-NP
in IN B-PP
the DT B-NP
second JJ I-NP
section NN I-NP
was VBD B-VP
estimated VBN I-VP
at IN B-PP
30 CD B-NP
million CD I-NP
shares NNS I-NP
, , O
up IN B-ADVP
from IN B-PP
28 CD B-NP
million CD I-NP
Thursday NNP B-NP
. . O
In IN B-PP
turmoil NN B-NP
caused VBN B-VP
by IN B-PP
the DT O
previous JJ B-NP
Friday NNP I-NP
's POS B-NP
plunge NN I-NP
in IN B-PP
New NNP B-NP
York NNP I-NP
stocks NNS I-NP
, , O
the DT B-NP
Nikkei NNP I-NP
marked VBD B-VP
a DT B-NP
sharp JJ I-NP
647.33-point JJ I-NP
fall NN I-NP
Monday NNP B-NP
. . O
But CC O
the DT B-NP
Nikkei NNP I-NP
fell VBD B-VP
an DT B-NP
overall JJ I-NP
1.8 CD I-NP
% NN I-NP
in IN B-PP
value NN B-NP
that DT B-NP
day NN I-NP
compared VBN B-PP
with IN B-PP
Wall NNP B-NP
Street NNP I-NP
's POS I-NP
far RB B-ADJP
sharper JJR I-ADJP
6.9 CD B-ADJP
% NN I-ADJP
drop NN B-NP
on IN B-PP
Oct. NNP B-NP
13 CD I-NP
. . O
The DT B-NP
Tokyo NNP I-NP
market NN I-NP
's POS B-NP
resiliency NN I-NP
helped VBD B-VP
participants NNS B-NP
to TO B-VP
regain VB I-VP
confidence NN B-NP
gradually RB B-ADVP
as IN B-SBAR
they PRP B-NP
spent VBD B-VP
more JJR B-NP
time NN I-NP
on IN B-PP
analyzing VBG B-VP
factors NNS B-NP
that WDT B-NP
caused VBD B-VP
the DT B-NP
Friday NNP I-NP
plunge NN I-NP
and CC O
realized VBD B-VP
these DT B-NP
problems NNS I-NP
were VBD B-VP
unique JJ B-ADJP
to TO B-PP
New NNP B-NP
York NNP I-NP
stocks NNS I-NP
and CC B-ADJP
not RB I-ADJP
directly RB B-ADJP
related VBN I-ADJP
to TO B-PP
Tokyo NNP B-NP
. . O
The DT B-NP
Nikkei NNP I-NP
continued VBD B-VP
to TO I-VP
gain VB I-VP
for IN B-PP
the DT B-NP
rest NN I-NP
of IN B-PP
the DT B-NP
week NN I-NP
, , O
adding VBG B-VP
1017.69 CD B-NP
points NNS I-NP
in IN B-PP
four CD B-NP
days NNS I-NP
-- : O
more JJR B-VP
than IN I-VP
erasing VBG I-VP
Monday NNP B-NP
's POS B-NP
losses NNS I-NP
. . O
But CC O
further JJ B-NP
major JJ I-NP
advances NNS I-NP
on IN B-PP
the DT B-NP
Nikkei NNP I-NP
are VBP B-VP
n't RB I-VP
foreseen VBN I-VP
this DT B-NP
week NN I-NP
by IN B-PP
market NN B-NP
observers NNS I-NP
. . O
Investors NNS B-NP
are VBP B-VP
still RB I-VP
waiting VBG I-VP
to TO I-VP
see VB I-VP
how WRB B-ADVP
the DT B-NP
U.S. NNP I-NP
government NN I-NP
will MD B-VP
decide VB I-VP
on IN B-PP
interest NN B-NP
rates NNS I-NP
and CC O
how WRB B-ADVP
the DT B-NP
dollar NN I-NP
will MD B-VP
be VB I-VP
stabilized VBN I-VP
. . O
Some DT B-NP
high-priced JJ I-NP
issues NNS I-NP
made VBD B-VP
a DT B-NP
comeback NN I-NP
Friday NNP B-NP
. . O
Pioneer NNP B-NP
surged VBD B-VP
450 CD B-NP
yen NN I-NP
-LRB- ( O
$ $ B-NP
3.16 CD I-NP
-RRB- ) O
to TO B-PP
6,050 CD B-NP
yen NN I-NP
-LRB- ( O
$ $ B-NP
42.60 CD I-NP
-RRB- ) O
. . O
Kyocera NNP B-NP
advanced VBD B-VP
80 CD B-NP
yen NN I-NP
to TO B-PP
5,440 CD B-NP
. . O
Fanuc NNP B-NP
gained VBD B-VP
100 CD B-NP
to TO B-PP
7,580 CD B-NP
. . O
Breweries NNP B-NP
attracted VBD B-VP
investors NNS B-NP
because IN B-PP
of IN I-PP
their PRP$ B-NP
land NN I-NP
property NN I-NP
holdings NNS I-NP
that WDT B-NP
could MD B-VP
figure VB I-VP
in IN B-PP
development NN B-NP
or CC O
other JJ B-NP
plans NNS I-NP
, , O
traders NNS B-NP
said VBD B-VP
. . O
Sapporo NNP B-NP
gained VBD B-VP
80 CD B-NP
to TO B-PP
1,920 CD B-NP
and CC O
Kirin NNP B-NP
added VBD B-VP
60 CD B-NP
to TO B-PP
2,070 CD B-NP
. . O
Housings NNS B-NP
, , I-NP
constructions NNS I-NP
and CC I-NP
pharmaceuticals NNS I-NP
continued VBD B-VP
to TO I-VP
be VB I-VP
bought VBN I-VP
following VBG B-PP
Thursday NNP B-NP
's POS B-NP
gains NNS I-NP
because IN B-PP
of IN I-PP
strong JJ B-NP
earnings NNS I-NP
outlooks NNS I-NP
. . O
Daiwa NNP B-NP
House NNP I-NP
gained VBD B-VP
50 CD B-NP
to TO B-PP
2,660 CD B-NP
. . O
Misawa NNP B-NP
Homes NNP I-NP
was VBD B-VP
up IN B-ADVP
20 CD B-NP
at IN B-PP
2,960 CD B-NP
. . O
Kajima NNP B-NP
advanced VBD B-VP
40 CD B-NP
to TO B-PP
2,120 CD B-NP
and CC O
Ohbayashi NNP B-NP
added VBD B-VP
50 CD B-NP
to TO B-PP
1,730 CD B-NP
. . O
Fujisawa NNP B-NP
added VBD B-VP
80 CD B-NP
to TO B-PP
2,010 CD B-NP
and CC O
Mochida NNP B-NP
advanced VBD B-VP
230 CD B-NP
to TO B-PP
4,400 CD B-NP
. . O
London JJ B-NP
share NN I-NP
prices NNS I-NP
were VBD B-VP
influenced VBN I-VP
largely RB B-ADVP
by IN B-PP
declines NNS B-NP
on IN B-PP
Wall NNP B-NP
Street NNP I-NP
and CC O
weakness NN B-NP
in IN B-PP
the DT B-NP
British JJ I-NP
pound NN I-NP
. . O
The DT B-NP
key JJ I-NP
Financial NNP I-NP
Times-Stock NNP I-NP
Exchange NNP I-NP
100-share JJ I-NP
index NN I-NP
ended VBD B-VP
10.2 CD B-NP
points NNS I-NP
lower JJR B-ADVP
at IN B-PP
2179.1 CD B-NP
, , O
above IN B-ADVP
its PRP$ B-NP
intraday JJ I-NP
low NN I-NP
of IN B-PP
2176.9 CD B-NP
, , B-ADVP
but CC I-ADVP
off IN B-ADVP
the DT B-NP
day NN I-NP
's POS I-NP
high NN B-NP
of IN B-PP
2189 CD B-NP
. . O
The DT B-NP
index NN I-NP
finished VBD B-VP
2.4 CD B-NP
% NN I-NP
under IN B-PP
its PRP$ B-NP
close NN I-NP
of IN B-PP
2233.9 CD B-NP
the DT B-NP
previous JJ I-NP
Friday NNP I-NP
, , O
although IN B-SBAR
it PRP B-NP
recouped VBD B-VP
some DT B-NP
of IN B-PP
the DT B-NP
sharp JJ I-NP
losses NNS I-NP
staged VBD B-VP
early JJ B-NP
last JJ I-NP
week NN I-NP
on IN B-PP
the DT B-NP
back RB I-NP
of IN B-PP
Wall NNP B-NP
Street NNP I-NP
's POS B-NP
fall NN I-NP
. . O
London NNP B-NP
was VBD B-VP
weak JJ B-ADJP
throughout IN B-PP
Friday NNP B-NP
's POS B-NP
trading NN I-NP
, , O
however RB B-ADVP
, , O
on IN B-PP
what WP B-NP
dealers NNS B-NP
attributed VBD B-VP
to TO B-PP
generally RB B-NP
thin JJ I-NP
interest NN I-NP
ahead RB B-ADVP
of IN B-PP
the DT B-NP
weekend NN I-NP
and CC O
this DT B-NP
week NN I-NP
's POS I-NP
potentially RB B-ADJP
important JJ I-ADJP
U.K. NNP B-NP
trade NN I-NP
figures NNS I-NP
for IN B-PP
September NNP B-NP
. . O
The DT B-NP
FT-SE NNP I-NP
100 CD I-NP
largely RB B-ADVP
remained VBD B-VP
within IN B-PP
an DT B-NP
11-point JJ I-NP
range NN I-NP
establshed VBN B-VP
within IN B-PP
the DT B-NP
first JJ I-NP
hour NN I-NP
of IN B-PP
trading NN B-NP
before IN B-PP
it PRP B-NP
eased VBD B-VP
to TO B-PP
an DT B-NP
intraday JJ I-NP
low JJ I-NP
late RB B-ADVP
in IN B-PP
the DT B-NP
session NN I-NP
when WRB B-ADVP
a DT B-NP
flurry NN I-NP
of IN B-PP
program NN B-NP
selling VBG I-NP
pushed VBN B-VP
Wall NNP B-NP
Street NNP I-NP
lower JJR B-ADVP
. . O
The DT B-NP
FT NNP I-NP
30-share JJ I-NP
index NN I-NP
closed VBD B-VP
11.0 CD B-NP
points NNS I-NP
lower JJR B-ADVP
at IN B-PP
1761.0 CD B-NP
. . O
Volume NN B-NP
was VBD B-VP
extremely RB B-ADJP
thin JJ I-ADJP
at IN B-PP
351.3 CD B-NP
million CD I-NP
shares NNS I-NP
, , O
the DT B-NP
lightest JJS I-NP
volume NN I-NP
of IN B-PP
the DT B-NP
week NN I-NP
and CC O
modestly RB B-ADVP
under IN B-PP
Thursday NNP B-NP
's POS B-NP
387.4 CD I-NP
million CD I-NP
shares NNS I-NP
. . O
Dealers NNS B-NP
said VBD B-VP
the DT B-NP
day NN I-NP
's POS B-NP
action NN I-NP
was VBD B-VP
featureless JJ B-ADJP
outside IN B-PP
some DT B-NP
response NN I-NP
to TO B-PP
sterling NN B-NP
's POS B-NP
early JJ I-NP
weakness NN I-NP
against IN B-PP
the DT B-NP
mark NN I-NP
, , O
and CC O
fears NNS B-NP
that IN B-SBAR
Wall NNP B-NP
Street NNP I-NP
might MD B-VP
open RB I-VP
lower JJR B-ADVP
after IN B-PP
its PRP$ B-NP
strong JJ I-NP
leap NN I-NP
forward RB B-ADVP
Thursday NNP B-NP
. . O
They PRP B-NP
added VBD B-VP
that IN B-SBAR
market-makers NNS B-NP
were VBD B-VP
largely RB I-VP
sidelined VBN I-VP
after IN B-PP
aggressively RB B-VP
supporting VBG I-VP
the DT B-NP
market NN I-NP
Thursday NNP B-NP
in IN B-PP
their PRP$ B-NP
quest NN I-NP
to TO B-VP
cover VB I-VP
internal JJ B-NP
shortages NNS I-NP
of IN B-PP
FT-SE NNP B-NP
100 CD I-NP
shares NNS I-NP
. . O
Interest NN B-NP
may MD B-VP
remain VB I-VP
limited JJ B-ADJP
into IN B-PP
tomorrow NN B-NP
's POS B-NP
U.K. NNP I-NP
trade NN I-NP
figures NNS I-NP
, , O
which WDT B-NP
the DT B-NP
market NN I-NP
will MD B-VP
be VB I-VP
watching VBG I-VP
closely RB B-ADVP
to TO B-VP
see VB I-VP
if IN B-SBAR
there EX B-NP
is VBZ B-VP
any DT B-NP
improvement NN I-NP
after IN B-PP
disappointing JJ B-NP
numbers NNS I-NP
in IN B-PP
the DT B-NP
previous JJ I-NP
two CD I-NP
months NNS I-NP
. . O
The DT B-NP
key JJ I-NP
corporate JJ I-NP
news NN I-NP
of IN B-PP
the DT B-NP
day NN I-NP
was VBD B-VP
that IN B-SBAR
British JJ B-NP
Airways NNPS I-NP
decided VBD B-VP
to TO I-VP
withdraw VB I-VP
from IN B-PP
a DT B-NP
management-led JJ I-NP
bid NN I-NP
for IN B-PP
UAL NNP B-NP
Corp. NNP I-NP
, , O
the DT B-NP
parent NN I-NP
of IN B-PP
United NNP B-NP
Airlines NNPS I-NP
. . O
British JJ B-NP
Airways NNPS I-NP
rose VBD B-VP
initially RB B-ADVP
after IN B-PP
announcing VBG B-VP
its PRP$ B-NP
withdrawal NN I-NP
from IN B-PP
the DT B-NP
UAL NNP I-NP
deal NN I-NP
. . O
Dealers NNS B-NP
said VBD B-VP
they PRP B-NP
viewed VBD B-VP
the DT O
initial JJ O
# # O
390-million CD O
-LRB- ( O
$ $ B-ADJP
622 CD O
million CD O
-RRB- ) O
outlay NN B-NP
for IN B-PP
a DT B-NP
15 CD I-NP
% NN I-NP
stake NN I-NP
in IN B-PP
the DT B-NP
airline NN I-NP
as IN B-PP
a DT B-NP
bit NN I-NP
much JJ I-NP
. . O
Its PRP$ B-NP
shares NNS I-NP
slid VBD B-VP
in IN B-PP
late JJ B-NP
dealings NNS I-NP
to TO B-VP
close VB I-VP
a DT B-NP
penny NN I-NP
per IN B-PP
share NN B-NP
lower JJR B-ADVP
at IN B-PP
197 CD B-NP
pence NN I-NP
. . O
The DT B-NP
airline NN I-NP
was VBD B-VP
the DT B-NP
most RBS I-NP
active JJ I-NP
FT-SE NNP I-NP
100 CD I-NP
at IN B-PP
8.2 CD B-NP
million CD I-NP
shares NNS I-NP
traded VBN B-VP
. . O
The DT B-NP
next JJ I-NP
most RBS I-NP
active JJ I-NP
top-tier JJ I-NP
stock NN I-NP
was VBD B-VP
B.A.T NNP B-NP
Industries NNPS I-NP
, , O
the DT B-NP
target NN I-NP
of IN B-PP
Sir NNP B-NP
James NNP I-NP
Goldsmith NNP I-NP
's POS B-NP
# # B-ADJP
13.4 CD O
billion CD O
bid NN B-NP
. . O
The DT B-NP
company NN I-NP
gained VBD B-VP
shareholder NN B-NP
approval NN I-NP
Thursday NNP B-NP
to TO B-VP
restructure VB I-VP
in IN B-PP
a DT B-NP
bid NN I-NP
to TO B-VP
fend VB I-VP
off IN B-PRT
the DT B-NP
hostile JJ I-NP
takeover NN I-NP
. . O
Sir NNP B-NP
James NNP I-NP
said VBD B-VP
Thursday NNP B-NP
night NN I-NP
that IN B-SBAR
his PRP$ B-NP
plans NNS I-NP
for IN B-PP
the DT B-NP
takeover NN I-NP
had VBD B-VP
n't RB I-VP
changed VBN I-VP
. . O
B.A.T NNP B-NP
ended VBD B-VP
the DT B-NP
day NN I-NP
at IN B-PP
778 CD B-NP
, , O
down JJ B-ADVP
5 NN B-NP
, , O
on IN B-PP
turnover NN B-NP
of IN B-PP
7.5 CD B-NP
million CD I-NP
shares NNS I-NP
. . O
Dealers NNS B-NP
said VBD B-VP
it PRP B-NP
was VBD B-VP
hit VBN I-VP
by IN B-PP
some DT B-NP
profit-taking NN I-NP
after IN B-PP
gains NNS B-NP
since IN B-PP
mid-week NN B-NP
. . O
In IN B-PP
other JJ B-NP
active JJ I-NP
shares NNS I-NP
, , O
Trusthouse NNP B-NP
Forte NNP I-NP
shed VB B-VP
10 CD B-NP
to TO B-PP
294 CD B-NP
on IN B-PP
volume NN B-NP
of IN B-PP
6.4 CD B-NP
million CD I-NP
shares NNS I-NP
after IN B-PP
a DT B-NP
Barclays NNP I-NP
De NNP I-NP
Zoete NNP I-NP
Wedd NNP I-NP
downgrading NN I-NP
, , O
while IN B-SBAR
Hillsdown NNP B-NP
Holdings NNP I-NP
, , O
a DT B-NP
food NN I-NP
products NNS I-NP
concern VBP I-NP
, , O
was VBD B-VP
boosted VBN I-VP
2 CD B-NP
to TO B-PP
271 CD B-NP
after IN O
it PRP B-NP
disclosed VBD B-VP
it PRP B-NP
would MD B-VP
seek VB I-VP
shareholder NN B-NP
approval NN I-NP
to TO B-VP
begin VB I-VP
share NN B-NP
repurchases NNS I-NP
. . O
Elsewhere RB B-ADVP
in IN B-PP
Europe NNP B-NP
, , O
share NN B-NP
prices NNS I-NP
closed VBD B-VP
higher JJR B-ADVP
in IN B-PP
Stockholm NNP B-NP
, , I-NP
Brussels NNP I-NP
and CC I-NP
Milan NNP I-NP
. . O
Prices NNS B-NP
were VBD B-VP
lower JJR B-ADJP
in IN B-PP
Frankfurt NNP B-NP
, , I-NP
Zurich NNP I-NP
, , I-NP
Paris NNP I-NP
and CC I-NP
Amsterdam NNP I-NP
. . O
South JJ B-NP
African JJ I-NP
gold NN I-NP
stocks NNS I-NP
closed VBD B-VP
moderately RB B-ADVP
lower JJR I-ADVP
. . O
Share NN B-NP
prices NNS I-NP
closed VBD B-VP
higher JJR B-ADVP
in IN B-PP
Sydney NNP B-NP
, , O
Taipei NNP B-NP
, , O
Wellington NNP B-NP
, , O
Manila NNP B-NP
, , O
Hong NNP B-NP
Kong NNP I-NP
and CC O
Singapore NNP B-NP
and CC O
were VBD B-VP
lower JJR B-ADJP
in IN B-PP
Seoul NNP B-NP
. . O
Here RB B-ADVP
are VBP B-VP
price NN B-NP
trends NNS I-NP
on IN B-PP
the DT B-NP
world NN I-NP
's POS B-NP
major JJ I-NP
stock NN I-NP
markets NNS I-NP
, , O
as IN B-SBAR
calculated VBN B-VP
by IN B-PP
Morgan NNP B-NP
Stanley NNP I-NP
Capital NNP I-NP
International NNP I-NP
Perspective NNP I-NP
, , O
Geneva NNP B-NP
. . O
To TO B-VP
make VB I-VP
them PRP B-NP
directly RB B-ADJP
comparable JJ I-ADJP
, , O
each DT B-NP
index NN I-NP
is VBZ B-VP
based VBN I-VP
on IN B-PP
the DT B-NP
close NN I-NP
of IN B-PP
1969 CD B-NP
equaling VBG B-VP
100 CD B-NP
. . O
The DT B-NP
percentage NN I-NP
change NN I-NP
is VBZ B-VP
since IN B-PP
year-end NN B-NP
. . O
The DT B-NP
U.S. NNP I-NP
is VBZ B-VP
required VBN I-VP
to TO I-VP
notify VB I-VP
foreign JJ B-NP
dictators NNS I-NP
if IN B-SBAR
it PRP B-NP
knows VBZ B-VP
of IN B-PP
coup NN B-NP
plans NNS I-NP
likely JJ B-ADJP
to TO B-VP
endanger VB I-VP
their PRP$ B-NP
lives NNS I-NP
, , O
government NN B-NP
officials NNS I-NP
said VBD B-VP
. . O
The DT B-NP
notification NN I-NP
policy NN I-NP
was VBD B-VP
part NN B-NP
of IN B-PP
a DT B-NP
set NN I-NP
of IN B-PP
guidelines NNS B-NP
on IN B-PP
handling NN B-VP
coups NNS B-NP
outlined VBN B-VP
in IN B-PP
a DT B-NP
secret JJ I-NP
1988 CD I-NP
exchange NN I-NP
of IN B-PP
letters NNS B-NP
between IN B-PP
the DT B-NP
Reagan NNP I-NP
administration NN I-NP
and CC O
the DT B-NP
Senate NNP I-NP
Intelligence NNP I-NP
Committee NNP I-NP
. . O
The DT B-NP
existence NN I-NP
of IN B-PP
the DT B-NP
guidelines NNS I-NP
has VBZ B-VP
become VBN I-VP
known VBN I-VP
since IN B-SBAR
President NNP B-NP
Bush NNP I-NP
disclosed VBD B-VP
them PRP B-NP
privately RB B-ADVP
to TO B-PP
seven CD B-NP
Republican NNP I-NP
senators NNS I-NP
at IN B-PP
a DT B-NP
White NNP I-NP
House NNP I-NP
meeting NN I-NP
last JJ B-NP
Monday NNP I-NP
. . O
Officials NNS B-NP
familiar JJ B-ADJP
with IN B-PP
the DT B-NP
meeting NN I-NP
said VBD B-VP
Mr. NNP B-NP
Bush NNP I-NP
cited VBD B-VP
the DT B-NP
policy NN I-NP
as IN B-PP
an DT B-NP
example NN I-NP
of IN B-PP
the DT B-NP
sort NN I-NP
of IN B-PP
congressional JJ B-NP
requirements NNS I-NP
the DT B-NP
administration NN I-NP
contends VBZ B-VP
contribute VB B-VP
to TO B-PP
the DT B-NP
failure NN I-NP
of IN B-PP
such JJ B-NP
covert JJ I-NP
actions NNS I-NP
as IN B-PP
this DT B-NP
month NN I-NP
's POS B-NP
futile JJ I-NP
effort NN I-NP
to TO B-VP
oust VB I-VP
Panamanian JJ B-NP
dictator NN I-NP
Manuel NNP I-NP
Noriega NNP I-NP
. . O
According VBG B-PP
to TO B-PP
the DT B-NP
officials NNS I-NP
, , O
Mr. NNP B-NP
Bush NNP I-NP
even RB B-ADVP
read VB B-VP
to TO B-PP
the DT B-NP
senators NNS I-NP
selections NNS B-NP
from IN B-PP
a DT B-NP
highly RB I-NP
classified VBN I-NP
letter NN I-NP
from IN B-PP
the DT B-NP
committee NN I-NP
to TO B-PP
the DT B-NP
White NNP I-NP
House NNP I-NP
discussing VBG B-VP
the DT B-NP
guidelines NNS I-NP
. . O
They PRP B-NP
said VBD B-VP
the DT B-NP
president NN I-NP
conceded VBD B-VP
the DT B-NP
notification NN I-NP
requirement NN I-NP
did VBD B-VP
n't RB I-VP
affect VB I-VP
his PRP$ B-NP
decision NN I-NP
to TO B-VP
lend VB I-VP
only RB B-NP
minor JJ I-NP
support NN I-NP
to TO B-PP
this DT B-NP
month NN I-NP
's POS B-NP
Panama NNP I-NP
coup NN I-NP
effort NN I-NP
. . O
No DT B-NP
notification NN I-NP
was VBD B-VP
ever RB I-VP
considered VBN I-VP
, , O
officials NNS B-NP
said VBD B-VP
, , O
apparently RB B-ADVP
because IN B-SBAR
the DT B-NP
U.S. NNP I-NP
did VBD B-VP
n't RB I-VP
think VB I-VP
the DT B-NP
coup NN I-NP
plotters NNS I-NP
intended VBN B-VP
to TO I-VP
kill VB I-VP
Mr. NNP B-NP
Noriega NNP I-NP
, , O
but CC O
merely RB B-VP
sought VBD I-VP
to TO I-VP
imprison VB I-VP
him PRP B-NP
. . O
What WP B-NP
's VBZ B-VP
more JJR B-NP
, , O
both DT B-NP
administration NN B-NP
and CC O
congressional JJ B-NP
officials NNS I-NP
hint VBP B-VP
that IN B-SBAR
the DT B-NP
notification NN I-NP
requirement NN I-NP
is VBZ B-VP
likely JJ B-ADJP
to TO B-VP
be VB I-VP
dropped VBN I-VP
from IN B-PP
the DT B-NP
guidelines NNS I-NP
on IN B-PP
coup NN B-NP
attempts NNS I-NP
that WDT B-NP
are VBP B-VP
being VBG I-VP
rewritten VBN I-VP
by IN B-PP
the DT B-NP
panel NN I-NP
and CC O
the DT B-NP
White NNP I-NP
House NNP I-NP
. . O
The DT B-NP
rewriting VBG I-NP
was VBD B-VP
launched VBN I-VP
at IN B-PP
a DT B-NP
meeting NN I-NP
between IN B-PP
Mr. NNP B-NP
Bush NNP I-NP
and CC O
intelligence NN B-NP
committee NN I-NP
leaders NNS I-NP
Oct. NNP B-NP
12 CD I-NP
, , O
a DT B-NP
few JJ I-NP
days NNS I-NP
before IN B-PP
the DT B-NP
meeting NN I-NP
at IN B-PP
which WDT B-NP
the DT B-NP
president NN I-NP
complained VBD B-VP
about IN B-PP
the DT B-NP
rules NNS I-NP
. . O
However RB B-ADVP
, , O
the DT B-NP
disclosure NN I-NP
of IN B-PP
......@@ -1116,35 +1116,6 @@ def PyData(files=None,
return data_config
@config_func
def ProtoData(files=None,
type=None,
file_group_queue_capacity=None,
load_file_count=None,
constant_slots=None,
load_thread_num=None,
**xargs):
data_config = create_data_config_proto(**xargs)
if type is None:
data_config.type = 'proto'
else:
data_config.type = type
data_config.files = files
# When type="proto_group", one data provider contains at most
# load_file_count files, and there are at most
# (queue_capacity + load_thread_num + 1) data providers in memory
if file_group_queue_capacity is not None:
data_config.file_group_conf.queue_capacity = file_group_queue_capacity
if load_file_count is not None:
data_config.file_group_conf.load_file_count = load_file_count
if load_thread_num is not None:
data_config.file_group_conf.load_thread_num = load_thread_num
if constant_slots:
data_config.constant_slots.extend(constant_slots)
return data_config
#real data for training is actually provided by "sub_data" data providers.
@config_func
def MultiData(sub_data=[]):
......@@ -1826,7 +1797,7 @@ class FCLayer(LayerBase):
self.layer_type = 'mkldnn_fc'
config_assert(
len(inputs) == 1,
"MkldnnFCLayer support one and only one input!")
"MKLDNNFCLayer support one and only one input!")
super(FCLayer, self).__init__(
name, self.layer_type, size, inputs=inputs, **xargs)
for input_index in xrange(len(self.inputs)):
......@@ -1837,7 +1808,7 @@ class FCLayer(LayerBase):
sparse = format == "csr" or format == "csc"
if use_mkldnn:
config_assert(not sparse,
"MkldnnFCLayer do not support sparse format yet")
"MKLDNNFCLayer do not support sparse format yet")
if use_mkldnn_wgt:
dims = [self.config.size, input_layer.size]
if sparse:
......@@ -1853,7 +1824,7 @@ class FCLayer(LayerBase):
@config_layer('mkldnn_fc')
class MkldnnFcLayer(FCLayer):
class MKLDNNFcLayer(FCLayer):
layer_type = 'mkldnn_fc'
......@@ -2066,13 +2037,20 @@ class ParameterReluLayer(LayerBase):
def __init__(self, name, inputs, partial_sum=1, **args):
super(ParameterReluLayer, self).__init__(
name, self.layer_type, 0, inputs=inputs, **args)
input_layer = self.get_input_layer(0)
config_assert(len(self.inputs) == 1, "prelu layer has only one input.")
config_assert(input_layer.size % partial_sum == 0,
"a wrong setting for partial_sum")
dims = [1, input_layer.size / partial_sum]
self.set_layer_size(input_layer.size)
self.config.partial_sum = partial_sum
self.create_input_parameter(0, input_layer.size / partial_sum)
self.create_input_parameter(0, input_layer.size / partial_sum, dims)
self.set_layer_height_width(self.get_input_layer(0).height, \
self.get_input_layer(0).width)
self.set_layer_depth(self.get_input_layer(0).depth)
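For illustration, the partial_sum bookkeeping above reduces to simple integer arithmetic. A minimal sketch with made-up sizes (not part of the config parser):

# Sketch of the PReLU parameter-shape arithmetic, with hypothetical sizes.
input_size = 192              # e.g. num_channels * height * width = 3 * 8 * 8
partial_sum = 64              # number of output elements sharing one slope
assert input_size % partial_sum == 0, "a wrong setting for partial_sum"
num_weights = input_size // partial_sum   # 3 slopes, here one per channel
dims = [1, num_weights]                   # mirrors dims = [1, size / partial_sum]
print(num_weights, dims)                  # 3 [1, 3]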
@config_layer('conv')
......@@ -2718,7 +2696,7 @@ Usage:
max_sort_size = -1, inputs = ["output", "score"])
Input data: Samples of the same query should be loaded as a sequence,
by ProtoDataProvider or PyDataProvider etc.. User should provide
by PyDataProvider etc. The user should provide
scores for each sample. The score slot should be the 2nd
input of lambdaRank layer.
......@@ -3213,6 +3191,18 @@ class SubNestedSequenceLayer(LayerBase):
self.set_layer_size(size)
@config_layer('dot_prod')
class DotProdLayer(LayerBase):
def __init__(self, name, inputs, device=None):
super(DotProdLayer, self).__init__(
name, 'dot_prod', 0, inputs, device=device)
config_assert(len(inputs) == 2, 'DotProdLayer must have 2 inputs.')
config_assert(
self.get_input_layer(0).size == self.get_input_layer(1).size,
"Two inputs should have the same size.")
self.set_layer_size(1)
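As a sanity check on the constraints enforced above (two equal-sized inputs, output size 1), a NumPy sketch of the per-sample computation; this is an illustration, not the layer's actual kernel:

import numpy as np

batch, dim = 4, 8
a = np.random.rand(batch, dim)
b = np.random.rand(batch, dim)
# One scalar per sample: the dot product of the two input rows.
out = np.sum(a * b, axis=1, keepdims=True)   # shape (4, 1)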
@config_layer('out_prod')
class OuterProdLayer(LayerBase):
def __init__(self, name, inputs, device=None):
......@@ -3334,6 +3324,20 @@ class RowL2NormLayer(LayerBase):
self.set_layer_size(input_layer.size)
@config_layer('cos')
class CosSimLayer(LayerBase):
def __init__(self, name, inputs, cos_scale=1, device=None):
super(CosSimLayer, self).__init__(
name, 'cos', 1, inputs=inputs, device=device)
config_assert(
len(self.inputs) == 2,
'The CosSimLayer expects two and only two inputs.')
config_assert(
self.get_input_layer(0).size == self.get_input_layer(1).size,
'The two inputs of CosSimLayer must have the same dimensionality.')
self.config.cos_scale = cos_scale
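For reference, a NumPy sketch of scaled cosine similarity as the checks above imply it (per sample, illustrative only):

import numpy as np

a = np.random.rand(3, 5)      # two inputs with the same dimensionality
b = np.random.rand(3, 5)
cos_scale = 1.0
cos = cos_scale * np.sum(a * b, axis=1) / (
    np.linalg.norm(a, axis=1) * np.linalg.norm(b, axis=1))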
@config_layer('cos_vm')
class CosSimVecMatLayer(LayerBase):
def __init__(self, name, size, inputs, cos_scale=1.0, device=None):
......@@ -3341,10 +3345,24 @@ class CosSimVecMatLayer(LayerBase):
name, 'cos_vm', size, inputs=inputs, device=device)
self.config.cos_scale = cos_scale
config_assert(
len(self.inputs) == 2, 'CosSimVecMatLayer must have 2 inputs')
len(self.inputs) == 2, 'The CosSimVecMatLayer must have 2 inputs.')
config_assert(
size * self.get_input_layer(0).size == self.get_input_layer(1).size,
'Wrong input size for CosSimVecMatLayer')
'Wrong input size for CosSimVecMatLayer.')
@config_layer('l2_distance')
class L2DistanceLayer(LayerBase):
def __init__(self, name, inputs, device=None):
super(L2DistanceLayer, self).__init__(
name, 'l2_distance', 1, inputs=inputs, device=device)
config_assert(
len(self.inputs) == 2, ('The L2DistanceLayer must have '
'two and only two inputs.'))
config_assert(
self.get_input_layer(0).size == self.get_input_layer(1).size,
('Two inputs of the L2DistanceLayer must have '
'the same dimensionality.'))
@config_layer('sampling_id')
......@@ -3388,18 +3406,6 @@ class AverageLayer(LayerBase):
self.create_bias_parameter(bias, self.config.size)
@config_layer('cos')
class CosSimLayer(LayerBase):
def __init__(self, name, inputs, cos_scale=1, device=None):
super(CosSimLayer, self).__init__(
name, 'cos', 1, inputs=inputs, device=device)
config_assert(len(self.inputs) == 2, 'CosSimLayer must have 2 inputs')
config_assert(
self.get_input_layer(0).size == self.get_input_layer(1).size,
'inputs of CosSimLayer must have same dim')
self.config.cos_scale = cos_scale
@config_layer('tensor')
class TensorLayer(LayerBase):
def __init__(self, name, size, inputs, bias=True, **xargs):
......@@ -3510,11 +3516,17 @@ def ExpressionLayer(name, inputs, **xargs):
@config_layer('concat')
class ConcatenateLayer(LayerBase):
layer_type = 'concat'
def __init__(self, name, inputs, bias=False, **xargs):
config_assert(inputs, 'inputs cannot be empty')
config_assert(not bias, 'ConcatenateLayer does not support bias.')
use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
if self.layer_type == "mkldnn_concat":
config_assert(use_mkldnn, "mkldnn_concat only support MKLDNN")
self.layer_type = 'mkldnn_concat' if use_mkldnn else 'concat'
super(ConcatenateLayer, self).__init__(
name, 'concat', 0, inputs=inputs, **xargs)
name, self.layer_type, 0, inputs=inputs, **xargs)
size = 0
for input_index in xrange(len(self.inputs)):
assert self.get_input_layer(0).height == self.get_input_layer(
......@@ -3534,6 +3546,11 @@ class ConcatenateLayer(LayerBase):
self.set_layer_size(size)
@config_layer('mkldnn_concat')
class MKLDNNConcatLayer(ConcatenateLayer):
layer_type = 'mkldnn_concat'
# like concat layer, but each input layer was processed by a Projection.
@config_layer('concat2')
class ConcatenateLayer2(LayerBase):
......
......@@ -17,7 +17,8 @@ __all__ = [
"IdentityActivation", "LinearActivation", 'SequenceSoftmaxActivation',
'ExpActivation', "ReluActivation", "BReluActivation", "SoftReluActivation",
"STanhActivation", "AbsActivation", "SquareActivation", "BaseActivation",
"LogActivation", "SqrtActivation", "ReciprocalActivation"
"LogActivation", "SqrtActivation", "ReciprocalActivation",
"SoftSignActivation"
]
......@@ -243,8 +244,20 @@ class ReciprocalActivation(BaseActivation):
Reciprocal Activation.
.. math::
f(z) = 1/z
f(z)=\\frac{1}{z}
"""
def __init__(self):
BaseActivation.__init__(self, 'reciprocal', False)
class SoftSignActivation(BaseActivation):
"""
SoftSign Activation.
.. math::
f(z)=\\frac{z}{1 + |z|}
"""
def __init__(self):
BaseActivation.__init__(self, 'softsign', False)
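A quick numeric sketch of the softsign curve defined above (NumPy, illustration only):

import numpy as np

z = np.array([-10.0, -1.0, 0.0, 1.0, 10.0])
softsign = z / (1.0 + np.abs(z))   # f(z) = z / (1 + |z|), bounded in (-1, 1)
print(softsign)                    # approx. [-0.91 -0.5  0.  0.5  0.91]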
......@@ -297,7 +297,7 @@ def auc_evaluator(
def pnpair_evaluator(
input,
label,
info,
query_id,
weight=None,
name=None, ):
"""
......@@ -308,16 +308,20 @@ def pnpair_evaluator(
.. code-block:: python
eval = pnpair_evaluator(input, label, info)
eval = pnpair_evaluator(input, label, query_id)
:param input: Input Layer name. The output prediction of network.
:type input: LayerOutput
:param label: Label layer name.
:type label: LayerOutput
:param info: Info layer name. (TODO, explaination)
:type info: LayerOutput
:param query_id: Query_id layer name. Query_id indicates which query
each sample belongs to. Its shape should be
the same as the output of the Label layer.
:type query_id: LayerOutput
:param weight: Weight Layer name. It should be a matrix with size
[sample_num, 1]. (TODO, explaination)
[sample_num, 1] which indicates the weight of each sample.
The default weight of a sample is 1 if the weight layer is None,
and the pair weight is the mean of the two samples' weights.
:type weight: LayerOutput
:param name: Evaluator name.
:type name: None|basestring
......@@ -326,8 +330,8 @@ def pnpair_evaluator(
input = [input]
if label:
input.append(label)
if info:
input.append(info)
if query_id:
input.append(query_id)
evaluator_base(
input=input,
type="pnpair",
......
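To make the evaluator's semantics concrete, here is a small NumPy sketch of counting positive-negative score pairs per query. This is one reading of the metric for illustration, not the evaluator's actual implementation:

import numpy as np

score = np.array([0.9, 0.2, 0.6, 0.4])   # network predictions
label = np.array([1, 0, 1, 0])           # ground truth
qid = np.array([7, 7, 8, 8])             # which query each sample belongs to
pos = neg = 0
for i in range(len(score)):
    for j in range(len(score)):
        # Only compare a positive with a negative inside the same query.
        if qid[i] == qid[j] and label[i] > label[j]:
            if score[i] > score[j]:
                pos += 1                 # correctly ordered pair
            elif score[i] < score[j]:
                neg += 1                 # wrongly ordered pair
print(pos, neg)                          # 2 0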
......@@ -51,6 +51,7 @@ __all__ = [
'last_seq',
'first_seq',
'cos_sim',
'l2_distance_layer',
'hsigmoid',
'conv_projection',
'square_error_cost',
......@@ -115,6 +116,7 @@ __all__ = [
'huber_classification_cost',
'block_expand_layer',
'maxout_layer',
'dot_prod_layer',
'out_prod_layer',
'printer_layer',
'print_layer',
......@@ -167,6 +169,7 @@ class LayerType(object):
COST = 'cost'
COSINE_SIM_VEC = 'cos_vm'
COSINE_SIM = 'cos'
L2_DISTANCE = 'l2_distance'
HSIGMOID = 'hsigmoid'
CONV_LAYER = 'conv'
CONVTRANS_LAYER = 'convt'
......@@ -197,6 +200,7 @@ class LayerType(object):
SCALING_LAYER = 'scaling'
TRANS_LAYER = 'trans'
ROTATE_LAYER = 'rotate'
DOT_PROD_LAYER = 'dot_prod'
OUT_PROD_LAYER = 'out_prod'
FEATURE_MAP_EXPAND_LAYER = 'featmap_expand'
......@@ -2332,6 +2336,51 @@ def cos_sim(a, b, scale=1, size=1, name=None, layer_attr=None):
return LayerOutput(name, LayerType.COSINE_SIM, parents=[a, b], size=size)
@wrap_name_default()
@layer_support()
def l2_distance_layer(x, y, name=None, layer_attr=None):
"""
This layer calculates and returns the Euclidean distance between two input
vectors x and y. The equation is as follows:
.. math::
l2_distance(\\mathbf{x}, \\mathbf{y}) = \\sqrt{\\sum_{i=1}^D(x_i - y_i)^2}
The output size of this layer is fixed to be 1. Note that the above
computation is for one sample. Multiple samples are processed in one batch.
The example usage is:
.. code-block:: python
l2_dist = l2_distance_layer(x=layer1, y=layer2)
:param name: The name of this layer. It is optional.
:type name: basestring
:param x: The first input x for this layer, whose output is a matrix with
dimensionality N x D. N is the sample number in a mini-batch.
D is the dimensionality of x's output.
:type x: LayerOutput
:param y: The second input y for this layer, whose output is a matrix with
dimensionality N x D. N is the sample number in a mini-batch.
D is the dimensionality of y's output.
:type y: LayerOutput
:param layer_attr: The extra layer attributes, for example, drop rate.
See ExtraLayerAttribute for more details.
:type layer_attr: ExtraLayerAttribute
:return: The returned LayerOutput object.
:rtype: LayerOutput
"""
assert isinstance(x, LayerOutput) and isinstance(y, LayerOutput)
Layer(
name=name,
type=LayerType.L2_DISTANCE,
inputs=[x.name, y.name],
**ExtraLayerAttribute.to_kwargs(layer_attr))
return LayerOutput(name, LayerType.L2_DISTANCE, parents=[x, y], size=1)
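With the squared term restored in the docstring formula above, a NumPy sketch of the per-sample computation (illustration, not the layer's implementation):

import numpy as np

batch, dim = 4, 8
x = np.random.rand(batch, dim)
y = np.random.rand(batch, dim)
# Euclidean distance per sample; output shape (batch, 1), matching size=1.
dist = np.sqrt(np.sum((x - y) ** 2, axis=1, keepdims=True))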
@wrap_name_default()
@wrap_bias_attr_default(has_bias=True)
@wrap_param_attr_default()
......@@ -2458,12 +2507,12 @@ def img_conv_layer(input,
input is raw pixels of image(mono or RGB), or it may be the previous layer's
num_filters * num_group.
There are several group of filter in PaddlePaddle implementation.
Each group will process some channel of the inputs. For example, if an input
There are several groups of filters in PaddlePaddle implementation.
Each group will process some channels of the input. For example, if
num_channel = 256, group = 4, num_filter=32, the PaddlePaddle will create
32*4 = 128 filters to process inputs. The channels will be split into 4
pieces. First 256/4 = 64 channels will process by first 32 filters. The
rest channels will be processed by rest group of filters.
32*4 = 128 filters to process the input. The channels will be split into 4
pieces. The first 256/4 = 64 channels will be processed by the first 32 filters.
The remaining channels will be processed by the remaining groups of filters.
The example usage is:
......@@ -2479,53 +2528,68 @@ def img_conv_layer(input,
:type name: basestring
:param input: The input of this layer.
:type input: LayerOutput
:param filter_size: The x dimension of a filter kernel. Or input a tuple for
two image dimension.
:param filter_size: The dimensions of the filter kernel. If the parameter is
set to one integer, the two dimensions on the x and y axes
will be the same when filter_size_y is not set. If it is set
to a list, the first element indicates the dimension on
the x axis, and the second is used to specify the dimension
on the y axis when filter_size_y is not provided.
:type filter_size: int | tuple | list
:param filter_size_y: The y dimension of a filter kernel. Since PaddlePaddle
currently supports rectangular filters, the filter's
shape will be (filter_size, filter_size_y).
:type filter_size_y: int | None
:param filter_size_y: The dimension of the filter kernel on the y axis. If the parameter
is not set, it will be set automatically according to filter_size.
:type filter_size_y: int
:param num_filters: The number of filters in each group.
:param act: Activation type. ReluActivation is the default activation.
:type act: BaseActivation
:param groups: Group size of filters.
:param groups: The group number. 1 is the default group number.
:type groups: int
:param stride: The x dimension of the stride. Or input a tuple for two image
dimension.
:param stride: The strides. If the parameter is set to one integer, the strides
on the x and y axes will be the same when stride_y is not set. If it is
set to a list, the first element indicates the stride on the x axis,
and the second is used to specify the stride on the y axis when
stride_y is not provided. 1 is the default value.
:type stride: int | tuple | list
:param stride_y: The y dimension of the stride.
:param stride_y: The stride on the y axis.
:type stride_y: int
:param padding: The x dimension of the padding. Or input a tuple for two
image dimension
:param padding: The padding sizes. If the parameter is set to one integer, the padding
sizes on the x and y axes will be the same when padding_y is not set. If it
is set to a list, the first element indicates the padding size on the
x axis, and the second is used to specify the padding size on the y axis
when padding_y is not provided. 0 is the default padding size.
:type padding: int | tuple | list
:param padding_y: The y dimension of the padding.
:param padding_y: The padding size on the y axis.
:type padding_y: int
:param dilation: The x dimension of the dilation. Or input a tuple for two
image dimension
:param dilation: The dimensions of the dilation. If the parameter is set to one integer,
the two dimensions on the x and y axes will be the same when dilation_y is not
set. If it is set to a list, the first element indicates the dimension
on the x axis, and the second is used to specify the dimension on the y
axis when dilation_y is not provided. 1 is the default dimension.
:type dilation: int | tuple | list
:param dilation_y: The y dimension of the dilation.
:param dilation_y: The dimension of the dilation on the y axis.
:type dilation_y: int
:param bias_attr: The bias attribute. If the parameter is set to False or an object
whose type is not ParameterAttribute, no bias is defined. If the
parameter is set to True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute | None | bool | Any
:param num_channels: number of input channels. If None will be set
automatically from previous output.
:param num_channels: The number of input channels. If the parameter is not set or
set to None, its actual value will be automatically set to
the channel number of the input.
:type num_channels: int
:param param_attr: Convolution param attribute. None means default attribute
:param param_attr: The parameter attribute. See ParameterAttribute for
details.
:type param_attr: ParameterAttribute
:param shared_biases: Is biases will be shared between filters or not.
:param shared_biases: Whether biases will be shared between filters or not.
:type shared_biases: bool
:param layer_attr: Layer Extra Attribute.
:param layer_attr: The extra layer attributes. See ExtraLayerAttribute for
details.
:type layer_attr: ExtraLayerAttribute
:param trans: true if it is a convTransLayer, false if it is a convLayer
:param trans: True if it is a convTransLayer, False if it is a convLayer
:type trans: bool
:param layer_type: specify the layer_type, default is None. If trans=True,
layer_type has to be "exconvt" or "cudnn_convt",
otherwise layer_type has to be either "exconv" or
"cudnn_conv"
:type layer_type: String
:param layer_type: Specify the layer type. If the dilation's dimension on one axis is
larger than 1, layer_type has to be "cudnn_conv" or "cudnn_convt".
If trans=True, layer_type has to be "exconvt" or "cudnn_convt",
otherwise layer_type has to be either "exconv" or "cudnn_conv".
:type layer_type: basestring
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -2630,7 +2694,7 @@ def img_pool_layer(input,
"""
Image pooling Layer.
The details of pooling layer, please refer ufldl's pooling_ .
For the details of the pooling layer, please refer to ufldl's pooling_ .
.. _pooling: http://ufldl.stanford.edu/tutorial/supervised/Pooling/
......@@ -2662,32 +2726,37 @@ def img_pool_layer(input,
padding_y=2,
pool_type=MaxPooling())
:param padding: pooling padding width.
:param padding: The padding size on the x axis. 0 is the default padding size.
:type padding: int
:param padding_y: pooling padding height. It's equal to padding by default.
:type padding_y: int | None
:param name: name of pooling layer
:type name: basestring.
:param padding_y: The padding size on the y axis. If the parameter is not set
or set to None, it will be set to 'padding' automatically.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input of this layer.
:type input: LayerOutput
:param pool_size: pooling window width
:param pool_size: The pooling window length on the x axis.
:type pool_size: int
:param pool_size_y: pooling window height. It's eaqual to pool_size by default.
:type pool_size_y: int | None
:param num_channels: number of input channel.
:param pool_size_y: The pooling window length on the y axis. If the parameter is
not set or set to None, its actual value will be automatically
set to pool_size.
:type pool_size_y: int
:param num_channels: The number of input channels. If the parameter is not set or
set to None, its actual value will be automatically set to
the channels number of the input.
:type num_channels: int
:param pool_type: pooling type. MaxPooling or AvgPooling. Default is
MaxPooling.
:param pool_type: Pooling type. MaxPooling is the default pooling.
:type pool_type: BasePoolingType
:param stride: stride width of pooling.
:param stride: The stride on the x axis. 1 is the default value.
:type stride: int
:param stride_y: stride height of pooling. It is equal to stride by default.
:type stride_y: int | None
:param layer_attr: Extra Layer attribute.
:param stride_y: The stride on the y axis. If the parameter is not set or set to
None, its actual value will be automatically set to 'stride'.
:type stride_y: int
:param layer_attr: The extra layer attribute. See ExtraLayerAttribute for
details.
:type layer_attr: ExtraLayerAttribute
:param ceil_mode: Wether to use ceil mode to calculate output height and with.
Defalut is True. If set false, Otherwise use floor.
:param ceil_mode: Whether to use the ceil function to calculate output height and width.
True is the default. If it is set to False, the floor function will
be used.
:type ceil_mode: bool
:return: LayerOutput object.
:rtype: LayerOutput
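A sketch of how ceil_mode changes the output size, assuming the usual pooling arithmetic (rounding is the only thing the flag changes):

import math

def pooled_size(in_size, pool_size, stride, padding, ceil_mode=True):
    # Assumed formula: (in_size + 2*padding - pool_size) / stride, rounded, plus 1.
    frac = (in_size + 2 * padding - pool_size) / float(stride)
    rounded = math.ceil(frac) if ceil_mode else math.floor(frac)
    return int(rounded) + 1

print(pooled_size(8, 3, 2, 0, ceil_mode=True))    # 4
print(pooled_size(8, 3, 2, 0, ceil_mode=False))   # 3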
......@@ -2793,24 +2862,32 @@ def img_pool3d_layer(input,
:param padding: pooling padding width.
:type padding: int | tuple | list
:param name: name of pooling layer
:param name: The name of this layer. It is optional.
:type name: basestring.
:param input: The input of this layer.
:type input: LayerOutput
:param pool_size: pooling window width
:param pool_size: The pooling window lengths along three axes. If the parameter
is set to one integer, the three lengths will be the same.
:type pool_size: int | tuple | list
:param num_channels: number of input channel.
:param num_channels: The number of input channels. If the parameter is not set or
set to None, its actual value will be automatically set to
the channels number of the input.
:type num_channels: int
:param pool_type: pooling type. MaxPooling or AvgPooling. Default is
MaxPooling.
:param pool_type: Pooling type. MaxPooling is the default pooling.
:type pool_type: BasePoolingType
:param stride: stride width of pooling.
:param stride: The strides of the pooling along three axes. If the parameter
is set to one integer, the three strides will be the same. 1 is the
default value.
:type stride: int | tuple | list
:param layer_attr: Extra Layer attribute.
:param padding: The padding sizes along three axes. If the parameter is set to
one integer, they will be the same. 0 is the default padding size.
:type padding: int | tuple | list
:param layer_attr: The extra layer attribute. See ExtraLayerAttribute for
details.
:type layer_attr: ExtraLayerAttribute
:param ceil_mode: Wether to use ceil mode to calculate output height and with.
Defalut is True. If set false, Otherwise use floor.
:param ceil_mode: Whether to use the ceil function to calculate output height and width.
True is the default. If it is set to False, the floor function will
be used.
:type ceil_mode: bool
:return: LayerOutput object.
:rtype: LayerOutput
......@@ -2889,9 +2966,11 @@ def spp_layer(input,
pyramid_height=None,
layer_attr=None):
"""
Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition.
The details please refer to
`Kaiming He's paper <https://arxiv.org/abs/1406.4729>`_.
A layer that performs spatial pyramid pooling.
Reference:
Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition
https://arxiv.org/abs/1406.4729
The example usage is:
......@@ -2906,13 +2985,16 @@ def spp_layer(input,
:type name: basestring
:param input: The input of this layer.
:type input: LayerOutput
:param num_channels: number of input channel.
:param num_channels: The number of input channels. If the parameter is not set or
set to None, its actual value will be automatically set to
the channels number of the input.
:type num_channels: int
:param pool_type: Pooling type. MaxPooling or AveragePooling. Default is MaxPooling.
:param pool_type: Pooling type. MaxPooling is the default pooling.
:type pool_type: BasePoolingType
:param pyramid_height: pyramid height.
:param pyramid_height: The pyramid height of this pooling.
:type pyramid_height: int
:param layer_attr: Extra Layer Attribute.
:param layer_attr: The extra layer attribute. See ExtraLayerAttribute for
details.
:type layer_attr: ExtraLayerAttribute
:return: LayerOutput object.
:rtype: LayerOutput
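Assuming each pyramid level i pools the feature map into a 2^i x 2^i grid (the standard SPP construction; not verified against this implementation), the output size follows directly:

num_channels, pyramid_height = 64, 3
bins = sum((2 ** i) ** 2 for i in range(pyramid_height))   # 1 + 4 + 16 = 21
output_size = num_channels * bins                          # 64 * 21 = 1344
print(bins, output_size)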
......@@ -3876,7 +3958,7 @@ def recurrent_layer(input,
:type input: LayerOutput
:param act: Activation type. TanhActivation is the default activation.
:type act: BaseActivation
:param bias_attr: The parameter attribute for bias. If this parameter is set to
:param bias_attr: The parameter attribute for bias. If this parameter is set to
False or an object whose type is not ParameterAttribute,
no bias is defined. If the parameter is set to True,
the bias is initialized to zero.
......@@ -4145,6 +4227,45 @@ def maxid_layer(input, name=None, layer_attr=None):
size=l.config.size)
@wrap_name_default()
def dot_prod_layer(input1, input2, name=None, layer_attr=None):
"""
A layer for computing the dot product of two vectors.
The example usage is:
.. code-block:: python
dot_prod = dot_prod_layer(input1=vec1, input2=vec2)
:param name: The name of this layer. It is optional.
:type name: basestring
:param input1: The first input layer.
:type input1: LayerOutput
:param input2: The second input layer.
:type input2: LayerOutput
:param layer_attr: The extra layer attribute. See ExtraLayerAttribute for
details.
:type layer_attr: ExtraLayerAttribute.
:return: LayerOutput object.
:rtype: LayerOutput
"""
assert isinstance(input1, LayerOutput)
assert isinstance(input2, LayerOutput)
assert input1.size == input2.size, ("Two inputs should have the same size.")
l = Layer(
name=name,
type=LayerType.DOT_PROD_LAYER,
inputs=[input1.name, input2.name],
**ExtraLayerAttribute.to_kwargs(layer_attr))
return LayerOutput(
name=name,
layer_type=LayerType.DOT_PROD_LAYER,
parents=[input1, input2],
size=l.config.size)
@wrap_name_default()
def out_prod_layer(input1, input2, name=None, layer_attr=None):
"""
......@@ -4611,7 +4732,7 @@ def conv_projection(input,
will be same when filter_size_y is not set. If it is set
to a list, the first element indicates the dimension on
the x axis, and the second is used to specify the dimension
on the y axis when filter_size is not provided.
on the y axis when filter_size_y is not provided.
:type filter_size: int | tuple | list
:param filter_size_y: The dimension of the filter kernel on the y axis. If the parameter
is not set, it will be set automatically according to filter_size.
......@@ -6488,10 +6609,11 @@ def row_conv_layer(input,
@layer_support()
@wrap_name_default()
@wrap_param_attr_default()
def prelu_layer(input,
name=None,
partial_sum=1,
channel_shared=None,
num_channels=None,
param_attr=None,
layer_attr=None):
"""
......@@ -6522,6 +6644,14 @@ def prelu_layer(input,
- partial_sum = number of outputs, indicates all elements share the same weight.
:type partial_sum: int
:param channel_shared: Whether the parameters are shared across channels.
- channel_shared = True, we set the partial_sum to the number of outputs.
- channel_shared = False, we set the partial_sum to the number of elements in one channel.
:type channel_shared: bool
:param num_channels: The number of input channels.
:type num_channels: int
:param param_attr: The parameter attribute. See ParameterAttribute for details.
:type param_attr: ParameterAttribute
:param layer_attr: The extra layer attribute. See ExtraLayerAttribute for
......@@ -6532,7 +6662,25 @@ def prelu_layer(input,
"""
assert isinstance(input, LayerOutput), 'prelu_layer accepts only one input.'
assert isinstance(param_attr, ParameterAttribute)
if not param_attr:
param_attr = ParamAttr(initial_mean=0.25, initial_std=0.0)
else:
assert isinstance(param_attr, ParameterAttribute)
if num_channels is None:
assert input.num_filters is not None, \
'the number of input channels cannot be detected, please specify the num_channels parameter'
num_channels = input.num_filters
if channel_shared is not None:
assert isinstance(channel_shared, bool)
assert (input.height != 0 and input.width != 0), \
'input height and width must be set'
if channel_shared:
partial_sum = input.height * input.width * num_channels
else:
partial_sum = input.height * input.width
l = Layer(
name=name,
......@@ -6544,6 +6692,7 @@ def prelu_layer(input,
name=name,
layer_type=LayerType.PRELU,
parents=input,
num_filters=num_channels,
size=l.config.size)
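A quick sketch of how channel_shared maps onto partial_sum, assuming a 10 x 10 input with 3 channels (the same shape used by the prelu test config later in this change):
data = data_layer(name='input', size=300, height=10, width=10)
# channel_shared=True : one weight for all elements, partial_sum = 10 * 10 * 3 = 300
shared = prelu_layer(input=data, channel_shared=True, num_channels=3)
# channel_shared=False: one weight per channel, partial_sum = 10 * 10 = 100
per_channel = prelu_layer(input=data, channel_shared=False, num_channels=3)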
......@@ -6993,7 +7142,7 @@ def img_conv3d_layer(input,
:type layer_attr: ExtraLayerAttribute
:param trans: True if it is a convTransLayer, False if it is a convLayer
:type trans: bool
:param layer_type: Specify the layer_type. If the parameter is set, it must be "deconv3d"
:param layer_type: Specify the layer type. If the parameter is set, it must be "deconv3d"
when trans=True. If not set, it will be automatically set to "deconv3d"
when trans=True and "conv3d" when trans=False.
:type layer_type: basestring
......
......@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from activations import LinearActivation, ReluActivation, SoftmaxActivation, \
IdentityActivation, TanhActivation, SequenceSoftmaxActivation
......@@ -26,9 +26,9 @@ __all__ = [
'sequence_conv_pool', 'simple_lstm', "simple_img_conv_pool",
"img_conv_bn_pool", 'lstmemory_group', 'lstmemory_unit', 'small_vgg',
'img_conv_group', 'vgg_16_network', 'gru_unit', 'gru_group', 'simple_gru',
'simple_attention', 'dot_product_attention', 'simple_gru2',
'bidirectional_gru', 'text_conv_pool', 'bidirectional_lstm', 'inputs',
'outputs'
'simple_attention', 'dot_product_attention', 'multi_head_attention',
'simple_gru2', 'bidirectional_gru', 'text_conv_pool', 'bidirectional_lstm',
'inputs', 'outputs'
]
######################################################
......@@ -1476,10 +1476,8 @@ def dot_product_attention(encoded_sequence,
expand_as=encoded_sequence,
name='%s_expand' % name)
m = linear_comb_layer(
weights=expanded,
vectors=encoded_sequence,
name='%s_dot-product' % name)
m = dot_prod_layer(
input1=expanded, input2=encoded_sequence, name='%s_dot-product' % name)
attention_weight = fc_layer(
input=m,
......@@ -1498,6 +1496,134 @@ def dot_product_attention(encoded_sequence,
input=scaled, pooling_type=SumPooling(), name="%s_pooling" % name)
@wrap_name_default()
def multi_head_attention(query,
key,
value,
key_proj_size,
value_proj_size,
head_num,
attention_type,
softmax_param_attr=None,
name=None):
"""
Calculate and return a context vector with a multi-head attention mechanism.
The dimension of the context vector equals value_proj_size * head_num.
Please refer to **Attention Is All You Need** for more details. The link is
as follows:
https://arxiv.org/abs/1706.03762.
The example usage is:
.. code-block:: python
context = multi_head_attention(query=decoder_state,
key=enc_seq,
value=enc_seq,
key_proj_size=64,
value_proj_size=64,
head_num=8,
attention_type='dot-product attention')
:param name: A prefix attached to the name of each layer that is defined inside
the multi_head_attention.
:type name: basestring
:param softmax_param_attr: The parameter attribute of sequence softmax
that is used to produce attention weight.
:type softmax_param_attr: ParameterAttribute
:param query: query is used to calculate attention weights over values at the current step.
:type query: LayerOutput
:param key: key is used to calculate the attention weight of the corresponding value.
:type key: LayerOutput
:param value: value is the sequence to be attended.
:type value: LayerOutput
:param key_proj_size: The dimension of the linear projection performed on key and query.
:type key_proj_size: int
:param value_proj_size: The dimension of the linear projection performed on value.
:type value_proj_size: int
:param head_num: The number of attention heads.
:type head_num: int
:param attention_type: The type of the attention mechanism used in each attention
head. Currently, only scaled dot-product attention and
additive attention are supported.
:type attention_type: basestring
:return: The context vector.
:rtype: LayerOutput
"""
assert attention_type in ['dot-product attention', 'additive attention']
with mixed_layer(
size=key_proj_size * head_num,
name='%s_query_proj' % name) as query_proj:
query_proj += full_matrix_projection(query)
query_proj = expand_layer(input=query_proj, expand_as=key)
with mixed_layer(
size=key_proj_size * head_num,
name='%s_key_proj' % name) as key_proj:
key_proj += full_matrix_projection(key)
with mixed_layer(
size=value_proj_size * head_num,
name='%s_value_proj' % name) as value_proj:
value_proj += full_matrix_projection(value)
head_list = []
for i in range(head_num):
with mixed_layer(size=key_proj_size) as sub_query_proj:
sub_query_proj += identity_projection(
query_proj, offset=key_proj_size * i, size=key_proj_size)
with mixed_layer(size=key_proj_size) as sub_key_proj:
sub_key_proj += identity_projection(
key_proj, offset=key_proj_size * i, size=key_proj_size)
with mixed_layer(size=value_proj_size) as sub_value_proj:
sub_value_proj += identity_projection(
value_proj, offset=value_proj_size * i, size=value_proj_size)
if attention_type == 'dot-product attention':
m = dot_prod_layer(
input1=sub_query_proj,
input2=sub_key_proj,
name='%s_dot-product_%d' % (name, i))
m = slope_intercept_layer(
input=m,
slope=math.sqrt(1.0 / key_proj_size),
name='%s_dot-product_scaling_%d' % (name, i))
else:
with mixed_layer(
size=key_proj_size,
act=TanhActivation(),
name='%s_combine_%d' % (name, i)) as m:
m += identity_projection(sub_query_proj)
m += identity_projection(sub_key_proj)
attention_weight = fc_layer(
input=m,
size=1,
act=SequenceSoftmaxActivation(),
param_attr=softmax_param_attr,
name="%s_softmax_%d" % (name, i),
bias_attr=False)
scaled = scaling_layer(
weight=attention_weight,
input=sub_value_proj,
name='%s_scaling_%d' % (name, i))
head = pooling_layer(
input=scaled,
pooling_type=SumPooling(),
name="%s_pooling_%d" % (name, i))
head_list.append(head)
attended = concat_layer(head_list)
return attended
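For completeness, a hypothetical call using the additive variant (the dot-product form is shown in the docstring above; decoder_state and enc_seq are placeholder LayerOutput names):
context = multi_head_attention(query=decoder_state,
                               key=enc_seq,
                               value=enc_seq,
                               key_proj_size=64,
                               value_proj_size=64,
                               head_num=8,
                               attention_type='additive attention')
# The returned context vector has size value_proj_size * head_num = 512.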
def inputs(layers, *args):
"""
Declare the inputs of the network. The order of the inputs should be the same as
......
......@@ -10,6 +10,7 @@ test_prelu_layer test_row_conv test_detection_output_layer test_multibox_loss_la
test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer
test_kmax_seq_socre_layer test_sub_nested_seq_select_layer test_scale_shift_layer
test_seq_slice_layer test_cross_entropy_over_beam test_roi_pool_layer test_pooling3D_layer
test_conv3d_layer test_deconv3d_layer test_BatchNorm3D test_resize_layer test_scale_sub_region_layer)
test_conv3d_layer test_deconv3d_layer test_BatchNorm3D test_resize_layer
test_scale_sub_region_layer test_dot_prod_layer test_l2_distance_layer)
export whole_configs=(test_split_datasource)
type: "nn"
layers {
name: "vector1"
type: "data"
size: 10
active_type: ""
}
layers {
name: "vector2"
type: "data"
size: 10
active_type: ""
}
layers {
name: "__dot_prod_layer_0__"
type: "dot_prod"
size: 1
active_type: ""
inputs {
input_layer_name: "vector1"
}
inputs {
input_layer_name: "vector2"
}
}
input_layer_names: "vector1"
input_layer_names: "vector2"
output_layer_names: "__dot_prod_layer_0__"
sub_models {
name: "root"
layer_names: "vector1"
layer_names: "vector2"
layer_names: "__dot_prod_layer_0__"
input_layer_names: "vector1"
input_layer_names: "vector2"
output_layer_names: "__dot_prod_layer_0__"
is_recurrent_layer_group: false
}
type: "nn"
layers {
name: "x"
type: "data"
size: 128
active_type: ""
}
layers {
name: "y"
type: "data"
size: 128
active_type: ""
}
layers {
name: "__l2_distance_layer_0__"
type: "l2_distance"
size: 1
active_type: ""
inputs {
input_layer_name: "x"
}
inputs {
input_layer_name: "y"
}
}
input_layer_names: "x"
input_layer_names: "y"
output_layer_names: "__l2_distance_layer_0__"
sub_models {
name: "root"
layer_names: "x"
layer_names: "y"
layer_names: "__l2_distance_layer_0__"
input_layer_names: "x"
input_layer_names: "y"
output_layer_names: "__l2_distance_layer_0__"
is_recurrent_layer_group: false
}
......@@ -4,6 +4,8 @@ layers {
type: "data"
size: 300
active_type: ""
height: 10
width: 10
}
layers {
name: "__prelu_layer_0__"
......@@ -15,6 +17,9 @@ layers {
input_parameter_name: "___prelu_layer_0__.w0"
}
partial_sum: 1
height: 10
width: 10
depth: 1
}
layers {
name: "__prelu_layer_1__"
......@@ -26,6 +31,9 @@ layers {
input_parameter_name: "___prelu_layer_1__.w0"
}
partial_sum: 1
height: 10
width: 10
depth: 1
}
layers {
name: "__prelu_layer_2__"
......@@ -37,41 +45,100 @@ layers {
input_parameter_name: "___prelu_layer_2__.w0"
}
partial_sum: 5
height: 10
width: 10
depth: 1
}
layers {
name: "__prelu_layer_3__"
type: "prelu"
size: 300
active_type: ""
inputs {
input_layer_name: "input"
input_parameter_name: "___prelu_layer_3__.w0"
}
partial_sum: 300
height: 10
width: 10
depth: 1
}
layers {
name: "__prelu_layer_4__"
type: "prelu"
size: 300
active_type: ""
inputs {
input_layer_name: "input"
input_parameter_name: "___prelu_layer_4__.w0"
}
partial_sum: 100
height: 10
width: 10
depth: 1
}
parameters {
name: "___prelu_layer_0__.w0"
size: 300
initial_mean: 0.0
initial_std: 0.057735026919
initial_mean: 0.25
initial_std: 0.0
dims: 1
dims: 300
initial_strategy: 0
initial_smart: true
initial_smart: false
}
parameters {
name: "___prelu_layer_1__.w0"
size: 300
initial_mean: 0.0
initial_std: 0.057735026919
initial_mean: 0.25
initial_std: 0.0
dims: 1
dims: 300
initial_strategy: 0
initial_smart: true
initial_smart: false
}
parameters {
name: "___prelu_layer_2__.w0"
size: 60
initial_mean: 0.0
initial_std: 0.129099444874
initial_mean: 0.25
initial_std: 0.0
dims: 1
dims: 60
initial_strategy: 0
initial_smart: false
}
parameters {
name: "___prelu_layer_3__.w0"
size: 1
initial_mean: 0.25
initial_std: 0.0
dims: 1
dims: 1
initial_strategy: 0
initial_smart: false
}
parameters {
name: "___prelu_layer_4__.w0"
size: 3
initial_mean: 0.25
initial_std: 0.0
dims: 1
dims: 3
initial_strategy: 0
initial_smart: true
initial_smart: false
}
input_layer_names: "input"
output_layer_names: "__prelu_layer_2__"
output_layer_names: "__prelu_layer_4__"
sub_models {
name: "root"
layer_names: "input"
layer_names: "__prelu_layer_0__"
layer_names: "__prelu_layer_1__"
layer_names: "__prelu_layer_2__"
layer_names: "__prelu_layer_3__"
layer_names: "__prelu_layer_4__"
input_layer_names: "input"
output_layer_names: "__prelu_layer_2__"
output_layer_names: "__prelu_layer_4__"
is_recurrent_layer_group: false
}
from paddle.trainer_config_helpers import *
vec1 = data_layer(name='vector1', size=10)
vec2 = data_layer(name='vector2', size=10)
dot_product = dot_prod_layer(input1=vec1, input2=vec2)
outputs(dot_product)
from paddle.trainer_config_helpers import *
outputs(
l2_distance_layer(
x=data_layer(
name='x', size=128), y=data_layer(
name='y', size=128)))
from paddle.trainer_config_helpers import *
data = data_layer(name='input', size=300)
prelu = prelu_layer(input=data)
prelu = prelu_layer(input=data, partial_sum=1)
prelu = prelu_layer(input=data, partial_sum=5)
data = data_layer(name='input', size=300, height=10, width=10)
prelu = prelu_layer(input=data, num_channels=3)
prelu = prelu_layer(input=data, partial_sum=1, num_channels=3)
prelu = prelu_layer(input=data, partial_sum=5, num_channels=3)
prelu = prelu_layer(input=data, channel_shared=True, num_channels=3)
prelu = prelu_layer(input=data, channel_shared=False, num_channels=3)
outputs(prelu)
......@@ -4,7 +4,10 @@ import collections
import numpy as np
import copy
__all__ = ['Block', 'Variable', 'Program', 'Operator', 'default_startup_program', 'default_main_program']
__all__ = [
'Block', 'Variable', 'Program', 'Operator', 'default_startup_program',
'default_main_program'
]
def unique_name(prefix):
......@@ -12,6 +15,37 @@ def unique_name(prefix):
return "_".join([prefix, str(uid)])
def convert_np_dtype_to_dtype_(np_dtype):
dtype = np.dtype(np_dtype)
if dtype == np.float32:
return core.DataType.FP32
elif dtype == np.float64:
return core.DataType.FP64
elif dtype == np.float16:
return core.DataType.FP16
elif dtype == np.int32:
return core.DataType.INT32
elif dtype == np.int16:
return core.DataType.INT16
elif dtype == np.int64:
return core.DataType.INT64
elif dtype == np.bool:
return core.DataType.BOOL
else:
raise ValueError("Not supported numpy dtype " + str(dtype))
def dtype_is_floating(dtype):
if not isinstance(dtype, core.DataType):
dtype = convert_np_dtype_to_dtype_(dtype)
if (dtype == core.DataType.FP16 or dtype == core.DataType.FP32 or
dtype == core.DataType.FP64):
return True
else:
return False
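A few illustrative sanity checks of the two helpers above (a sketch, not part of this change; numpy and core are already imported in this module):
assert convert_np_dtype_to_dtype_('float32') == core.DataType.FP32
assert convert_np_dtype_to_dtype_(np.int64) == core.DataType.INT64
assert dtype_is_floating('float64') is True
assert dtype_is_floating(np.int32) is False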
def _debug_string_(proto, throw_on_error=True):
error_fields = list()
if not proto.IsInitialized(error_fields) and throw_on_error:
......@@ -63,7 +97,7 @@ class Variable(object):
"matched.".format(self.name, old_shape, shape))
if dtype is not None:
if not isinstance(dtype, core.DataType):
dtype = Variable._convert_np_dtype_to_dtype_(dtype)
dtype = convert_np_dtype_to_dtype_(dtype)
if is_new_var:
self.desc.set_data_type(dtype)
else:
......@@ -145,26 +179,6 @@ class Variable(object):
uid = core.unique_integer(prefix) # unique during whole process.
return "_".join([prefix, str(uid)])
@staticmethod
def _convert_np_dtype_to_dtype_(np_dtype):
dtype = np.dtype(np_dtype)
if dtype == np.float32:
return core.DataType.FP32
elif dtype == np.float64:
return core.DataType.FP64
elif dtype == np.float16:
return core.DataType.FP16
elif dtype == np.int32:
return core.DataType.INT32
elif dtype == np.int16:
return core.DataType.INT16
elif dtype == np.int64:
return core.DataType.INT64
elif dtype == np.bool:
return core.DataType.BOOL
else:
raise ValueError("Not supported numpy dtype " + str(dtype))
def get_all_op_protos():
"""
......@@ -232,17 +246,17 @@ class Operator(object):
in_proto.name)
if found:
in_argus = inputs[in_proto.name]
if not isinstance(in_argus, list):
in_argus = [in_argus]
if not in_proto.duplicable and len(in_argus) > 1:
in_args = inputs[in_proto.name]
if not isinstance(in_args, list):
in_args = [in_args]
if not in_proto.duplicable and len(in_args) > 1:
raise ValueError(
"Input %s expects only one input, but %d are given."
% (in_proto.name, len(in_argus)))
in_argu_names = []
for argu in in_argus:
in_argu_names.append(argu.name)
self.desc.set_input(in_proto.name, in_argu_names)
% (in_proto.name, len(in_args)))
in_arg_names = []
for arg in in_args:
in_arg_names.append(arg.name)
self.desc.set_input(in_proto.name, in_arg_names)
else:
self.desc.set_input(in_proto.name, [])
......@@ -260,18 +274,18 @@ class Operator(object):
str(e) for e in given)))
for out_proto in proto.outputs:
out_argus = outputs[out_proto.name]
if not isinstance(out_argus, list):
out_argus = [out_argus]
if not out_proto.duplicable and len(out_argus) > 1:
out_args = outputs[out_proto.name]
if not isinstance(out_args, list):
out_args = [out_args]
if not out_proto.duplicable and len(out_args) > 1:
raise ValueError(
"Output %s expects only one output, but %d are given." %
(out_proto.name, len(out_argus)))
out_argu_names = []
for argu in out_argus:
out_argu_names.append(argu.name)
argu.op = self
self.desc.set_output(out_proto.name, out_argu_names)
(out_proto.name, len(out_args)))
out_arg_names = []
for arg in out_args:
out_arg_names.append(arg.name)
arg.op = self
self.desc.set_output(out_proto.name, out_arg_names)
if attrs is not None:
if not isinstance(attrs, dict):
......@@ -582,8 +596,10 @@ class Parameter(Variable):
g_main_program = Program()
g_startup_program = Program()
def default_startup_program():
return g_startup_program
def default_main_program():
return g_main_program
......@@ -2,7 +2,7 @@ import copy
import itertools
from paddle.v2.fluid.framework import Variable, g_main_program, \
g_startup_program, unique_name, Program
g_startup_program, unique_name, Program, dtype_is_floating
from paddle.v2.fluid.initializer import ConstantInitializer, \
UniformInitializer, XavierInitializer
......@@ -61,7 +61,7 @@ class LayerHelper(object):
@property
def param_attr(self):
default = {'name': None, 'initializer': XavierInitializer()}
default = {'name': None}
actual = self.kwargs.get('param_attr', None)
if actual is None:
actual = default
......@@ -72,7 +72,7 @@ class LayerHelper(object):
@property
def bias_attr(self):
default = {'name': None, 'initializer': ConstantInitializer()}
default = {'name': None}
bias_attr = self.kwargs.get('bias_attr', None)
if bias_attr is None:
bias_attr = default
......@@ -119,6 +119,8 @@ class LayerHelper(object):
attr_copy = copy.deepcopy(attr)
if initializer is not None:
attr_copy['initializer'] = initializer
else:
attr_copy['initializer'] = self._get_default_initializer(dtype)
if attr_copy['name'] is None:
attr_copy['name'] = unique_name(".".join([self.name, suffix]))
self.startup_program.global_block().create_parameter(
......@@ -149,13 +151,19 @@ class LayerHelper(object):
persistable=True,
initializer=initializer)
def append_bias_op(self, input_var, dim_start=1, dim_end=None):
def append_bias_op(self,
input_var,
bias_initializer,
dim_start=1,
dim_end=None):
"""
Append bias operator and return its output. If the user does not set
bias_attr, append_bias_op will return input_var
:param input_var: the input variable. The len(input_var.shape) is larger
or equal than 2.
:param input_var: the input variable. len(input_var.shape) is
greater than or equal to 2.
:param bias_initializer: an instance of a subclass of Initializer used to
initialize the bias
:param dim_start:
:param dim_end: the shape of the bias will be
input_var.shape[dim_start:dim_end]. The bias is broadcasted to other
......@@ -167,7 +175,11 @@ class LayerHelper(object):
return input_var
b = self.create_parameter(
attr=bias_attr, shape=size, dtype=input_var.data_type, suffix='b')
attr=bias_attr,
shape=size,
dtype=input_var.data_type,
suffix='b',
initializer=bias_initializer)
tmp = self.create_tmp_variable(dtype=input_var.data_type)
self.append_op(
type='elementwise_add',
......@@ -191,3 +203,10 @@ class LayerHelper(object):
outputs={"Y": [tmp]},
attrs=act)
return tmp
def _get_default_initializer(self, dtype):
if dtype is None or dtype_is_floating(dtype) is True:
return XavierInitializer()
else:
# For integer and boolean types, initialize with all zeros
return ConstantInitializer()
......@@ -3,7 +3,7 @@ import paddle.v2.fluid.proto.framework_pb2 as framework_pb2
from paddle.v2.fluid.framework import OpProtoHolder, Variable, Program, \
Operator
from paddle.v2.fluid.initializer import ConstantInitializer, \
NormalInitializer
NormalInitializer, XavierInitializer
from paddle.v2.fluid.layer_helper import LayerHelper, unique_name
import re
import cStringIO
......@@ -17,11 +17,13 @@ __all__ = [
def fc(input,
size,
num_flatten_dims=1,
param_attr=None,
param_initializer=None,
bias_attr=None,
name=None,
bias_initializer=None,
act=None,
num_flatten_dims=1,
name=None,
main_program=None,
startup_program=None):
"""
......@@ -30,11 +32,15 @@ def fc(input,
Args:
input: The input tensor to the function
size: The size of the layer
num_flatten_dims: Number of columns in input
param_attr: The parameters/weights to the FC Layer
param_initializer: Initializer used for the weight/parameter.
If None, XavierInitializer() is used
bias_attr: The bias parameter for the FC layer
name: Name/alias of the function
bias_initializer: Initializer used for the bias.
If None, then ConstantInitializer() is used
act: Activation to be applied to the output of FC layer
num_flatten_dims: The number of leading input dimensions kept as rows; the
remaining dimensions are flattened into the feature (column) dimension
name: Name/alias of the function
main_program: Name of the main program that calls this
startup_program: Name of the startup program
......@@ -50,10 +56,23 @@ def fc(input,
to the LayerHelper constructor.
"""
def _get_default_param_initializer():
return XavierInitializer()
def _get_default_bias_initializer():
return ConstantInitializer()
helper = LayerHelper('fc', **locals())
dtype = helper.input_dtype()
if param_initializer is None:
param_initializer = _get_default_param_initializer()
if bias_initializer is None:
bias_initializer = _get_default_bias_initializer()
mul_results = []
for input_var, param_attr in helper.iter_inputs_and_params():
input_shape = input_var.shape
......@@ -61,7 +80,10 @@ def fc(input,
reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1)
] + [size]
w = helper.create_parameter(
attr=param_attr, shape=param_shape, dtype=dtype)
attr=param_attr,
initializer=param_initializer,
shape=param_shape,
dtype=dtype)
tmp = helper.create_tmp_variable(dtype)
helper.append_op(
type="mul",
......@@ -82,16 +104,16 @@ def fc(input,
helper.append_op(
type="sum", inputs={"X": mul_results}, outputs={"Out": pre_bias})
# add bias
pre_activation = helper.append_bias_op(pre_bias)
pre_activation = helper.append_bias_op(pre_bias, bias_initializer)
# add activation
return helper.append_activation(pre_activation)
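A hypothetical call showing the new initializer hooks (images is a placeholder variable; leaving both initializers unset falls back to the Xavier/Constant defaults derived above):
hidden = fc(input=images,
            size=128,
            act='relu',
            param_initializer=NormalInitializer(0.0, 0.02, 0),
            bias_initializer=ConstantInitializer())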
def embedding(input,
size,
data_type='float32',
is_sparse=False,
param_attr=None,
data_type='float32',
main_program=None,
startup_program=None):
"""
......@@ -100,9 +122,9 @@ def embedding(input,
Args:
input: The input to the function
size: The size of the layer
data_type: The type of data: float32, float16, int, etc.
is_sparse: A flag that declares whether the input is sparse
param_attr: Parameters for this layer
data_type: The type of data: float32, float16, int, etc.
main_program: Name of the main program that calls this
startup_program: Name of the startup program
......@@ -130,7 +152,6 @@ def embedding(input,
# TODO(qijun): expose H0 and C0
def dynamic_lstm(input,
size,
data_type='float32',
param_attr=None,
bias_attr=None,
use_peepholes=True,
......@@ -138,6 +159,7 @@ def dynamic_lstm(input,
gate_activation='sigmoid',
cell_activation='tanh',
candidate_activation='tanh',
data_type='float32',
main_program=None,
startup_program=None):
helper = LayerHelper('lstm', **locals())
......@@ -178,9 +200,9 @@ def dynamic_lstm(input,
def data(name,
shape,
append_batch_size=True,
data_type='float32',
type=core.VarDesc.VarType.LOD_TENSOR,
append_batch_size=True,
main_program=None,
startup_program=None,
stop_gradient=True):
......@@ -190,9 +212,9 @@ def data(name,
Args:
name: The name/alias of the function
shape: Tuple declaring the shape.
append_batch_size: Whether or not to append the data as a batch.
data_type: The type of data: float32, float16, int, etc.
type: The output type. By default it is LOD_TENSOR.
append_batch_size: Whether or not to append the data as a batch.
main_program: Name of the main program that calls this
startup_program: Name of the startup program
stop_gradient: A boolean indicating whether gradients should flow.
......@@ -226,7 +248,7 @@ def data(name,
stop_gradient=stop_gradient)
def create_tensor(dtype, name=None, main_program=None):
def create_tensor(dtype, name=None, main_program=None, startup_program=None):
helper = LayerHelper("create_tensor", **locals())
return helper.create_variable(name=helper.name, dtype=dtype)
......@@ -390,30 +412,12 @@ _create_op_func_('mul')
_create_op_func_('elementwise_add')
_create_op_func_('dropout')
_create_op_func_('reshape')
_create_op_func_('elementwise_add')
_create_op_func_('sigmoid')
_create_op_func_('scale')
_create_op_func_('reshape')
_create_op_func_('transpose')
def fill_constant(data_type, shape, value=None, program=None):
"""
This function creates a tensor with the given shape and the specified
data_type, and fills it with the constant value passed in.
"""
helper = LayerHelper('fill_constant', **locals())
out = helper.create_tmp_variable(dtype=data_type)
helper.append_op(
type='fill_constant',
outputs={'Out': [out]},
attrs={'data_type': data_type,
'shape': shape,
'value': value})
return out
def cast(x, data_type, main_program=None):
"""
This function takes in the input with input_data_type
......@@ -456,7 +460,7 @@ def sums(input, main_program=None, startup_program=None):
return out
def assign(input, output, main_program=None):
def assign(input, output, main_program=None, startup_program=None):
helper = LayerHelper('assign', **locals())
helper.append_op(
type='scale',
......@@ -468,7 +472,7 @@ def assign(input, output, main_program=None):
def split_lod_tensor(input,
mask,
level,
level=0,
main_program=None,
startup_program=None):
helper = LayerHelper('split_lod_tensor', **locals())
......@@ -490,11 +494,11 @@ def merge_lod_tensor(in_true,
in_false,
x,
mask,
level,
level=0,
main_program=None,
startup_program=None):
helper = LayerHelper('merge_lod_tensor', **locals())
out = helper.create_tmp_variable(dtype=x.data_type)
out = helper.create_tmp_variable(dtype=in_true.data_type)
helper.append_op(
type='merge_lod_tensor',
inputs={'X': x,
......@@ -596,10 +600,12 @@ def sequence_conv(input,
num_filters,
filter_size=3,
filter_stride=1,
act=None,
padding=None,
bias_attr=None,
bias_initializer=None,
param_attr=None,
param_initializer=None,
act=None,
main_program=None,
startup_program=None):
"""
......@@ -607,6 +613,13 @@ def sequence_conv(input,
other convolutional configurations for the filters and stride as given
in the input parameters to the function.
"""
def _get_default_bias_initializer():
return ConstantInitializer()
def _get_default_param_initializer():
return XavierInitializer()
# FIXME(dzh) : want to unify the argument of python layer
# function. So we ignore some unnecessary attributes,
# such as padding_trainable and context_start.
......@@ -614,9 +627,17 @@ def sequence_conv(input,
helper = LayerHelper('sequence_conv', **locals())
dtype = helper.input_dtype()
if param_initializer is None:
param_initializer = _get_default_param_initializer()
if bias_initializer is None:
bias_initializer = _get_default_bias_initializer()
filter_shape = [filter_size * input.shape[1], num_filters]
filter = helper.create_parameter(
attr=helper.param_attr, shape=filter_shape, dtype=dtype)
attr=helper.param_attr,
shape=filter_shape,
dtype=dtype,
initializer=param_initializer)
pre_bias = helper.create_tmp_variable(dtype)
helper.append_op(
......@@ -631,20 +652,22 @@ def sequence_conv(input,
'contextStart': -int(filter_size / 2),
'contextLength': filter_size
})
pre_act = helper.append_bias_op(pre_bias)
pre_act = helper.append_bias_op(pre_bias, bias_initializer)
return helper.append_activation(pre_act)
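A usage sketch under the same convention (emb is a placeholder sequence of embeddings; with no initializers given, the Xavier/Constant defaults above apply):
conv = sequence_conv(input=emb,
                     num_filters=64,
                     filter_size=3,
                     act='relu')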
def conv2d(input,
num_filters,
name=None,
filter_size=[1, 1],
act=None,
groups=None,
filter_size,
stride=[1, 1],
padding=None,
bias_attr=None,
groups=None,
param_attr=None,
param_initializer=None,
bias_attr=None,
bias_initializer=None,
act=None,
name=None,
main_program=None,
startup_program=None):
"""
......@@ -654,6 +677,14 @@ def conv2d(input,
This function can also append an activation on top of the
conv-2d output, if mentioned in the input parameters.
"""
def _get_default_bias_initializer():
return ConstantInitializer()
def _get_default_param_initializer(filter_size, num_channels):
std = (2.0 / (filter_size[0]**2 * num_channels))**0.5
return NormalInitializer(0.0, std, 0)
helper = LayerHelper('conv2d', **locals())
dtype = helper.input_dtype()
......@@ -661,7 +692,7 @@ def conv2d(input,
if groups is None:
num_filter_channels = num_channels
else:
if num_channels % groups is not 0:
if num_channels % groups != 0:
raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = num_channels / groups
......@@ -675,12 +706,17 @@ def conv2d(input,
input_shape = input.shape
filter_shape = [num_filters, num_filter_channels] + filter_size
std = (2.0 / (filter_size[0]**2 * num_channels))**0.5
if param_initializer is None:
param_initializer = _get_default_param_initializer(filter_size,
num_channels)
if bias_initializer is None:
bias_initializer = _get_default_bias_initializer()
filter = helper.create_parameter(
attr=helper.param_attr,
shape=filter_shape,
dtype=dtype,
initializer=NormalInitializer(0.0, std, 0))
initializer=param_initializer)
pre_bias = helper.create_tmp_variable(dtype)
helper.append_op(
......@@ -694,7 +730,8 @@ def conv2d(input,
'paddings': padding,
'groups': groups})
pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
pre_act = helper.append_bias_op(
pre_bias, bias_initializer, dim_start=1, dim_end=2)
return helper.append_activation(pre_act)
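And a matching sketch for conv2d (images is again a placeholder; when param_initializer is omitted, the filter is drawn from the normal distribution computed above from the filter size and channel count):
conv = conv2d(input=images,
              num_filters=32,
              filter_size=[3, 3],
              stride=[1, 1],
              act='relu')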
......@@ -1311,7 +1348,7 @@ def array_to_lod_tensor(x, table, main_program=None):
return tmp
def fill_constant(shape, dtype, value, main_program=None):
def fill_constant(shape, dtype, value, main_program=None, startup_program=None):
"""
This function creates a tensor , with shape as mentioned in the input and
specified data_type and fills this up with a constant value that
......@@ -1332,6 +1369,31 @@ def fill_constant(shape, dtype, value, main_program=None):
return out
def fill_constant_batch_size_like(input,
shape,
dtype,
value,
input_dim_idx=0,
output_dim_idx=0,
main_program=None,
startup_program=None):
helper = LayerHelper("fill_constant_batch_size_like", **locals())
out = helper.create_tmp_variable(dtype=dtype)
helper.append_op(
type='fill_constant_batch_size_like',
inputs={'Input': input},
outputs={'Out': [out]},
attrs={
'shape': shape,
'data_type': out.data_type,
'value': float(value),
'input_dim_idx': input_dim_idx,
'output_dim_idx': output_dim_idx
})
out.stop_gradient = True
return out
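For illustration, a sketch of the intended use (some_var is a placeholder; dimension output_dim_idx of the result is overwritten with dimension input_dim_idx of the input, so the output tracks the runtime batch size):
out = fill_constant_batch_size_like(input=some_var,
                                    shape=[1, 10],
                                    dtype='float32',
                                    value=0.0)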
def ones(shape, dtype, main_program=None):
"""
This function performs the same function as fill_constant() declared above
......@@ -1394,7 +1456,7 @@ def create_array(dtype, main_program=None):
dtype=dtype)
def less_than(x, y, cond=None, main_program=None):
def less_than(x, y, cond=None, main_program=None, **ignored):
helper = LayerHelper("less_than", **locals())
if cond is None:
cond = helper.create_tmp_variable(dtype='bool')
......@@ -1472,13 +1534,20 @@ class ConditionalBlockGuard(BlockGuard):
class ConditionalBlock(object):
def __init__(self, inputs, name=None, main_program=None):
def __init__(self,
inputs,
name=None,
main_program=None,
startup_program=None):
for each_input in inputs:
if not isinstance(each_input, Variable):
raise TypeError("Each input should be variable")
self.inputs = inputs
self.helper = LayerHelper(
'conditional_block', name=name, main_program=main_program)
'conditional_block',
name=name,
main_program=main_program,
startup_program=startup_program)
def block(self):
return ConditionalBlockGuard(self)
......@@ -1523,3 +1592,148 @@ class ConditionalBlock(object):
outputs={'Out': out_list,
'Scope': [step_scope]},
attrs={'block': inside_block})
class IfElseBlockGuard(object):
def __init__(self, is_true, ifelse):
if not isinstance(ifelse, IfElse):
raise TypeError("ifelse must be an instance of IfElse class")
if ifelse.status != IfElse.OUT_IF_ELSE_BLOCKS:
raise ValueError("You cannot invoke IfElse.block() inside a block")
self.is_true = is_true
self.ie = ifelse
if is_true:
self.cond_block = ifelse.conditional_true_block
else:
self.cond_block = ifelse.conditional_false_block
if not isinstance(self.cond_block, ConditionalBlock):
raise TypeError("Unexpected situation")
self.cond_block = self.cond_block.block()
def __enter__(self):
self.ie.status = IfElse.IN_IF_ELSE_TRUE_BLOCKS if self.is_true else IfElse.IN_IF_ELSE_FALSE_BLOCKS
self.cond_block.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.cond_block.__exit__(exc_type, exc_val, exc_tb):
# re-raise inside exception
return False
if len(self.ie.output_table[1 if self.is_true else 0]) == 0:
raise ValueError("Must set output inside block")
self.ie.status = IfElse.OUT_IF_ELSE_BLOCKS
class IfElse(object):
OUT_IF_ELSE_BLOCKS = 0
IN_IF_ELSE_TRUE_BLOCKS = 1
IN_IF_ELSE_FALSE_BLOCKS = 2
def __init__(self, cond, name=None, main_program=None,
startup_program=None):
if not isinstance(cond, Variable):
raise TypeError("cond must be a Variable")
self.helper = LayerHelper(
'ifelse',
name=name,
main_program=main_program,
startup_program=startup_program)
self.cond = cond
self.input_table = {}
self.status = IfElse.OUT_IF_ELSE_BLOCKS
self.conditional_true_block = ConditionalBlock(inputs=[self.cond])
self.conditional_false_block = ConditionalBlock(inputs=[self.cond])
self.output_table = ([], []) # (true_outs, false_outs)
def input(self, x):
if self.status == IfElse.OUT_IF_ELSE_BLOCKS:
raise ValueError("input must in true/false blocks")
if id(x) not in self.input_table:
parent_block = self.parent_block()
out_true = parent_block.create_var(
name=unique_name('ifelse_input' + self.helper.name),
dtype=x.data_type)
out_false = parent_block.create_var(
name=unique_name('ifelse_input' + self.helper.name),
dtype=x.data_type)
parent_block.append_op(
type='split_lod_tensor',
inputs={
'X': x,
'Mask': self.cond,
},
outputs={'OutTrue': out_true,
'OutFalse': out_false},
attrs={'level': 0})
self.input_table[id(x)] = (out_true, out_false)
else:
out_true, out_false = self.input_table[id(x)]
if self.status == IfElse.IN_IF_ELSE_TRUE_BLOCKS:
return out_true
else:
return out_false
def parent_block(self):
current_block = self.helper.main_program.current_block()
return self.helper.main_program.block(current_block.parent_idx)
def true_block(self):
return IfElseBlockGuard(True, self)
def false_block(self):
return IfElseBlockGuard(False, self)
def output(self, *outs):
if self.status == self.OUT_IF_ELSE_BLOCKS:
raise ValueError("output can only be invoked in the sub-block")
out_table = self.output_table[1 if self.status ==
self.IN_IF_ELSE_TRUE_BLOCKS else 0]
parent_block = self.parent_block()
for each_out in outs:
if not isinstance(each_out, Variable):
raise TypeError("Each output should be a variable")
# create outside tensor
outside_out = parent_block.create_var(
name=unique_name("_".join([self.helper.name, 'output'])),
dtype=each_out.data_type)
out_table.append(outside_out)
# assign local var to outside
assign(
input=each_out,
output=outside_out,
main_program=self.helper.main_program,
startup_program=self.helper.startup_program)
def __call__(self):
if self.status != self.OUT_IF_ELSE_BLOCKS:
raise ValueError("IfElse::__call__ must be out of sub-block")
false_len, true_len = map(len, self.output_table)
if false_len == 0 and true_len == 0:
raise ValueError("Must invoke true_block/false_block before "
"__call__")
elif false_len != true_len and false_len != 0 and true_len != 0:
raise ValueError("The output side must be same")
elif false_len == 0 or true_len == 0:
return self.output_table[0 if false_len != 0 else 1]
# else none of false_len/true_len is zero
# merge together
rlist = []
for false_var, true_var in zip(*self.output_table):
rlist.append(
merge_lod_tensor(
in_true=true_var,
in_false=false_var,
mask=self.cond,
x=self.cond,
level=0,
main_program=self.helper.main_program,
startup_program=self.helper.startup_program))
return rlist
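A minimal usage sketch of the IfElse construct above (x and y are placeholder variables of the same shape; each branch must read its inputs through ie.input and set at least one output, and __call__ merges the branches with merge_lod_tensor):
cond = less_than(x=x, y=y)
ie = IfElse(cond)
with ie.true_block():
    ie.output(ie.input(x))
with ie.false_block():
    ie.output(ie.input(y))
merged = ie()  # a list holding the merged LoD tensor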
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.core as core
import paddle.v2.fluid.optimizer as optimizer
import paddle.v2.fluid.framework as framework
from paddle.v2.fluid.io import save_persistables, load_persistables
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.io import save_persistables, load_persistables
from paddle.v2.fluid.optimizer import SGDOptimizer
import numpy as np
x = layers.data(
name='x',
shape=[13],
data_type='float32')
x = layers.data(name='x', shape=[13], data_type='float32')
y_predict = layers.fc(input=x,
size=1,
act=None)
y_predict = layers.fc(input=x, size=1, act=None)
y = layers.data(
name='y',
shape=[1],
data_type='float32')
y = layers.data(name='y', shape=[1], data_type='float32')
cost = layers.square_error_cost(
input=y_predict,
label=y)
cost = layers.square_error_cost(input=y_predict, label=y)
avg_cost = layers.mean(x=cost)
sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
sgd_optimizer = SGDOptimizer(learning_rate=0.001)
opts = sgd_optimizer.minimize(avg_cost)
BATCH_SIZE = 20
......
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid.core as core
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.nets as nets
import paddle.v2.fluid.optimizer as optimizer
import paddle.v2.fluid.evaluator as evaluator
from paddle.v2.fluid.executor import Executor
import paddle.v2.fluid.framework as framework
from paddle.v2.fluid.initializer import XavierInitializer
from paddle.v2.fluid.optimizer import AdamOptimizer
def resnet_cifar10(input, depth=32):
def conv_bn_layer(input,
ch_out,
filter_size,
stride,
padding,
act='relu'):
def conv_bn_layer(input, ch_out, filter_size, stride, padding, act='relu'):
tmp = layers.conv2d(
input=input,
filter_size=filter_size,
......@@ -24,9 +20,7 @@ def resnet_cifar10(input, depth=32):
padding=padding,
act=None,
bias_attr=False)
return layers.batch_norm(
input=tmp,
act=act)
return layers.batch_norm(input=tmp, act=act)
def shortcut(input, ch_in, ch_out, stride, program, init_program):
if ch_in != ch_out:
......@@ -35,28 +29,11 @@ def resnet_cifar10(input, depth=32):
else:
return input
def basicblock(input,
ch_in,
ch_out,
stride):
tmp = conv_bn_layer(
input,
ch_out,
3,
stride,
1)
tmp = conv_bn_layer(
tmp,
ch_out,
3,
1,
1,
act=None)
def basicblock(input, ch_in, ch_out, stride):
tmp = conv_bn_layer(input, ch_out, 3, stride, 1)
tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None)
short = shortcut(input, ch_in, ch_out, stride)
return layers.elementwise_add(
x=tmp,
y=short,
act='relu')
return layers.elementwise_add(x=tmp, y=short, act='relu')
def layer_warp(block_func, input, ch_in, ch_out, count, stride):
tmp = block_func(input, ch_in, ch_out, stride)
......@@ -67,45 +44,17 @@ def resnet_cifar10(input, depth=32):
assert (depth - 2) % 6 == 0
n = (depth - 2) / 6
conv1 = conv_bn_layer(
input=input,
ch_out=16,
filter_size=3,
stride=1,
padding=1)
res1 = layer_warp(
basicblock,
conv1,
16,
16,
n,
1)
res2 = layer_warp(
basicblock,
res1,
16,
32,
n,
2)
res3 = layer_warp(
basicblock,
res2,
32,
64,
n,
2)
input=input, ch_out=16, filter_size=3, stride=1, padding=1)
res1 = layer_warp(basicblock, conv1, 16, 16, n, 1)
res2 = layer_warp(basicblock, res1, 16, 32, n, 2)
res3 = layer_warp(basicblock, res2, 32, 64, n, 2)
pool = layers.pool2d(
input=res3,
pool_size=8,
pool_type='avg',
pool_stride=1)
input=res3, pool_size=8, pool_type='avg', pool_stride=1)
return pool
def vgg16_bn_drop(input):
def conv_block(input,
num_filter,
groups,
dropouts):
def conv_block(input, num_filter, groups, dropouts):
return nets.img_conv_group(
input=input,
pool_size=2,
......@@ -123,22 +72,14 @@ def vgg16_bn_drop(input):
conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])
drop = layers.dropout(
x=conv5,
dropout_prob=0.5)
drop = layers.dropout(x=conv5, dropout_prob=0.5)
fc1 = layers.fc(input=drop,
size=512,
act=None,
param_attr={"initializer": XavierInitializer()})
reshape1 = layers.reshape(
x=fc1,
shape=list(fc1.shape + (1, 1)))
bn = layers.batch_norm(
input=reshape1,
act='relu')
drop2 = layers.dropout(
x=bn,
dropout_prob=0.5)
reshape1 = layers.reshape(x=fc1, shape=list(fc1.shape + (1, 1)))
bn = layers.batch_norm(input=reshape1, act='relu')
drop2 = layers.dropout(x=bn, dropout_prob=0.5)
fc2 = layers.fc(input=drop2,
size=512,
act=None,
......@@ -163,12 +104,13 @@ net = vgg16_bn_drop(images)
predict = layers.fc(input=net, size=classdim, act='softmax')
cost = layers.cross_entropy(input=predict, label=label)
avg_cost = layers.mean(x=cost)
accuracy = layers.accuracy(input=predict, label=label)
# optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
optimizer = optimizer.AdamOptimizer(learning_rate=0.001)
# optimizer = SGDOptimizer(learning_rate=0.001)
optimizer = AdamOptimizer(learning_rate=0.001)
opts = optimizer.minimize(avg_cost)
accuracy, acc_out = evaluator.accuracy(input=predict, label=label)
BATCH_SIZE = 128
PASS_NUM = 1
......@@ -184,6 +126,7 @@ exe.run(framework.default_startup_program())
for pass_id in range(PASS_NUM):
batch_id = 0
accuracy.reset(exe)
for data in train_reader():
img_data = np.array(map(lambda x: x[0].reshape(data_shape),
data)).astype("float32")
......@@ -201,12 +144,14 @@ for pass_id in range(PASS_NUM):
outs = exe.run(framework.default_main_program(),
feed={"pixel": tensor_img,
"label": tensor_y},
fetch_list=[avg_cost, accuracy])
fetch_list=[avg_cost, acc_out])
loss = np.array(outs[0])
acc = np.array(outs[1])
pass_acc = accuracy.eval(exe)
print("pass_id:" + str(pass_id) + " batch_id:" + str(batch_id) +
" loss:" + str(loss) + " acc:" + str(acc))
" loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str(
pass_acc))
batch_id = batch_id + 1
if batch_id > 1:
......
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.nets as nets
import paddle.v2.fluid.core as core
import paddle.v2.fluid.optimizer as optimizer
import paddle.v2.fluid.evaluator as evaluator
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.nets as nets
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.optimizer import AdamOptimizer
import numpy as np
images = layers.data(
name='pixel',
shape=[1, 28, 28],
data_type='float32')
label = layers.data(
name='label',
shape=[1],
data_type='int64')
images = layers.data(name='pixel', shape=[1, 28, 28], data_type='float32')
label = layers.data(name='label', shape=[1], data_type='int64')
conv_pool_1 = nets.simple_img_conv_pool(
input=images,
filter_size=5,
......@@ -32,17 +25,13 @@ conv_pool_2 = nets.simple_img_conv_pool(
pool_stride=2,
act="relu")
predict = layers.fc(input=conv_pool_2,
size=10,
act="softmax")
predict = layers.fc(input=conv_pool_2, size=10, act="softmax")
cost = layers.cross_entropy(input=predict, label=label)
avg_cost = layers.mean(x=cost)
optimizer = optimizer.AdamOptimizer(learning_rate=0.01, beta1=0.9, beta2=0.999)
optimizer = AdamOptimizer(learning_rate=0.01, beta1=0.9, beta2=0.999)
opts = optimizer.minimize(avg_cost)
accuracy, acc_out = evaluator.accuracy(
input=predict,
label=label)
accuracy, acc_out = evaluator.accuracy(input=predict, label=label)
BATCH_SIZE = 50
PASS_NUM = 3
......@@ -57,7 +46,6 @@ exe = Executor(place)
exe.run(framework.default_startup_program())
for pass_id in range(PASS_NUM):
count = 0
accuracy.reset(exe)
for data in train_reader():
img_data = np.array(map(lambda x: x[0].reshape([1, 28, 28]),
......@@ -77,13 +65,14 @@ for pass_id in range(PASS_NUM):
loss = np.array(outs[0])
acc = np.array(outs[1])
pass_acc = accuracy.eval(exe)
print "pass id : ", pass_id, pass_acc
print("pass_id=" + str(pass_id) + " acc=" + str(acc) + " pass_acc=" +
str(pass_acc))
# print loss, acc
if loss < 10.0 and acc > 0.9:
if loss < 10.0 and pass_acc > 0.9:
# if avg cost less than 10.0 and accuracy is larger than 0.9, we think our code is good.
exit(0)
pass_acc = accuracy.eval(exe)
print "pass id : ", pass_id, pass_acc
print("pass_id=" + str(pass_id) + " pass_acc=" + str(pass_acc))
exit(1)
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.core as core
import paddle.v2.fluid.optimizer as optimizer
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.evaluator as evaluator
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.regularizer import L2DecayRegularizer
from paddle.v2.fluid.initializer import UniformInitializer
import numpy as np
from paddle.v2.fluid.optimizer import MomentumOptimizer
from paddle.v2.fluid.regularizer import L2DecayRegularizer
BATCH_SIZE = 128
image = layers.data(
name='x',
shape=[784],
data_type='float32')
image = layers.data(name='x', shape=[784], data_type='float32')
param_attr = {
'name': None,
......@@ -22,34 +19,24 @@ param_attr = {
'regularization': L2DecayRegularizer(0.0005 * BATCH_SIZE)
}
hidden1 = layers.fc(input=image,
size=128,
act='relu',
param_attr=param_attr)
hidden2 = layers.fc(input=hidden1,
size=64,
act='relu',
param_attr=param_attr)
hidden1 = layers.fc(input=image, size=128, act='relu', param_attr=param_attr)
hidden2 = layers.fc(input=hidden1, size=64, act='relu', param_attr=param_attr)
predict = layers.fc(input=hidden2,
size=10,
act='softmax',
param_attr=param_attr)
label = layers.data(
name='y',
shape=[1],
data_type='int64')
label = layers.data(name='y', shape=[1], data_type='int64')
cost = layers.cross_entropy(input=predict, label=label)
avg_cost = layers.mean(x=cost)
accuracy = layers.accuracy(
input=predict,
label=label)
optimizer = optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
opts = optimizer.minimize(avg_cost)
accuracy, acc_out = evaluator.accuracy(input=predict, label=label)
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=8192),
......@@ -62,6 +49,7 @@ exe.run(framework.default_startup_program())
PASS_NUM = 100
for pass_id in range(PASS_NUM):
accuracy.reset(exe)
for data in train_reader():
x_data = np.array(map(lambda x: x[0], data)).astype("float32")
y_data = np.array(map(lambda x: x[1], data)).astype("int64")
......@@ -76,9 +64,13 @@ for pass_id in range(PASS_NUM):
outs = exe.run(framework.default_main_program(),
feed={'x': tensor_x,
'y': tensor_y},
fetch_list=[avg_cost, accuracy])
fetch_list=[avg_cost, acc_out])
out = np.array(outs[0])
acc = np.array(outs[1])
if out[0] < 5.0:
exit(0) # if avg cost less than 5.0, we think our code is good.
pass_acc = accuracy.eval(exe)
if pass_acc > 0.7:
exit(0)
# print("pass_id=" + str(pass_id) + " auc=" +
# str(acc) + " pass_acc=" + str(pass_acc))
exit(1)
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.nets as nets
import paddle.v2.fluid.core as core
import paddle.v2.fluid.optimizer as optimizer
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.nets as nets
from paddle.v2.fluid.executor import Executor
import numpy as np
from paddle.v2.fluid.optimizer import SGDOptimizer
IS_SPARSE = True
USE_GPU = False
......@@ -19,10 +18,7 @@ def get_usr_combined_features():
USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1
uid = layers.data(
name='user_id',
shape=[1],
data_type='int64')
uid = layers.data(name='user_id', shape=[1], data_type='int64')
usr_emb = layers.embedding(
input=uid,
......@@ -31,15 +27,11 @@ def get_usr_combined_features():
param_attr={'name': 'user_table'},
is_sparse=IS_SPARSE)
usr_fc = layers.fc(input=usr_emb,
size=32)
usr_fc = layers.fc(input=usr_emb, size=32)
USR_GENDER_DICT_SIZE = 2
usr_gender_id = layers.data(
name='gender_id',
shape=[1],
data_type='int64')
usr_gender_id = layers.data(name='gender_id', shape=[1], data_type='int64')
usr_gender_emb = layers.embedding(
input=usr_gender_id,
......@@ -47,14 +39,10 @@ def get_usr_combined_features():
param_attr={'name': 'gender_table'},
is_sparse=IS_SPARSE)
usr_gender_fc = layers.fc(input=usr_gender_emb,
size=16)
usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)
USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
usr_age_id = layers.data(
name='age_id',
shape=[1],
data_type="int64")
usr_age_id = layers.data(name='age_id', shape=[1], data_type="int64")
usr_age_emb = layers.embedding(
input=usr_age_id,
......@@ -62,14 +50,10 @@ def get_usr_combined_features():
is_sparse=IS_SPARSE,
param_attr={'name': 'age_table'})
usr_age_fc = layers.fc(input=usr_age_emb,
size=16)
usr_age_fc = layers.fc(input=usr_age_emb, size=16)
USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
usr_job_id = layers.data(
name='job_id',
shape=[1],
data_type="int64")
usr_job_id = layers.data(name='job_id', shape=[1], data_type="int64")
usr_job_emb = layers.embedding(
input=usr_job_id,
......@@ -77,16 +61,12 @@ def get_usr_combined_features():
param_attr={'name': 'job_table'},
is_sparse=IS_SPARSE)
usr_job_fc = layers.fc(input=usr_job_emb,
size=16)
usr_job_fc = layers.fc(input=usr_job_emb, size=16)
concat_embed = layers.concat(
input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc],
axis=1)
input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)
usr_combined_features = layers.fc(input=concat_embed,
size=200,
act="tanh")
usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")
return usr_combined_features
......@@ -95,10 +75,7 @@ def get_mov_combined_features():
MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1
mov_id = layers.data(
name='movie_id',
shape=[1],
data_type='int64')
mov_id = layers.data(name='movie_id', shape=[1], data_type='int64')
mov_emb = layers.embedding(
input=mov_id,
......@@ -107,36 +84,24 @@ def get_mov_combined_features():
param_attr={'name': 'movie_table'},
is_sparse=IS_SPARSE)
mov_fc = layers.fc(input=mov_emb,
size=32)
mov_fc = layers.fc(input=mov_emb, size=32)
CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())
category_id = layers.data(
name='category_id',
shape=[1],
data_type='int64')
category_id = layers.data(name='category_id', shape=[1], data_type='int64')
mov_categories_emb = layers.embedding(
input=category_id,
size=[CATEGORY_DICT_SIZE, 32],
is_sparse=IS_SPARSE)
input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE)
mov_categories_hidden = layers.sequence_pool(
input=mov_categories_emb,
pool_type="sum")
input=mov_categories_emb, pool_type="sum")
MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())
mov_title_id = layers.data(
name='movie_title',
shape=[1],
data_type='int64')
mov_title_id = layers.data(name='movie_title', shape=[1], data_type='int64')
mov_title_emb = layers.embedding(
input=mov_title_id,
size=[MOV_TITLE_DICT_SIZE, 32],
is_sparse=IS_SPARSE)
input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE)
mov_title_conv = nets.sequence_conv_pool(
input=mov_title_emb,
......@@ -146,13 +111,10 @@ def get_mov_combined_features():
pool_type="sum")
concat_embed = layers.concat(
input=[mov_fc, mov_categories_hidden, mov_title_conv],
axis=1)
input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1)
# FIXME(dzh) : need tanh operator
mov_combined_features = layers.fc(input=concat_embed,
size=200,
act="tanh")
mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")
return mov_combined_features
......@@ -162,18 +124,11 @@ def model():
mov_combined_features = get_mov_combined_features()
# need cos sim
inference = layers.cos_sim(
X=usr_combined_features,
Y=mov_combined_features)
inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features)
label = layers.data(
name='score',
shape=[1],
data_type='float32')
label = layers.data(name='score', shape=[1], data_type='float32')
square_cost = layers.square_error_cost(
input=inference,
label=label)
square_cost = layers.square_error_cost(input=inference, label=label)
avg_cost = layers.mean(x=square_cost)
......@@ -182,7 +137,7 @@ def model():
def main():
cost = model()
sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.2)
sgd_optimizer = SGDOptimizer(learning_rate=0.2)
opts = sgd_optimizer.minimize(cost)
if USE_GPU:
......
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.nets as nets
import paddle.v2.fluid.core as core
import paddle.v2.fluid.optimizer as optimizer
import paddle.v2.fluid.evaluator as evaluator
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.nets as nets
from paddle.v2.fluid.executor import Executor
import numpy as np
from paddle.v2.fluid.optimizer import AdamOptimizer
def convolution_net(input_dim, class_dim=2, emb_dim=32, hid_dim=32):
......@@ -31,10 +31,10 @@ def convolution_net(input_dim, class_dim=2, emb_dim=32, hid_dim=32):
act="softmax")
cost = layers.cross_entropy(input=prediction, label=label)
avg_cost = layers.mean(x=cost)
adam_optimizer = optimizer.AdamOptimizer(learning_rate=0.002)
adam_optimizer = AdamOptimizer(learning_rate=0.002)
opts = adam_optimizer.minimize(avg_cost)
acc = layers.accuracy(input=prediction, label=label)
return avg_cost, acc
accuracy, acc_out = evaluator.accuracy(input=prediction, label=label)
return avg_cost, accuracy, acc_out
def to_lodtensor(data, place):
......@@ -60,7 +60,8 @@ def main():
dict_dim = len(word_dict)
class_dim = 2
cost, acc = convolution_net(input_dim=dict_dim, class_dim=class_dim)
cost, accuracy, acc_out = convolution_net(
input_dim=dict_dim, class_dim=class_dim)
train_data = paddle.batch(
paddle.reader.shuffle(
......@@ -72,6 +73,7 @@ def main():
exe.run(framework.default_startup_program())
for pass_id in xrange(PASS_NUM):
accuracy.reset(exe)
for data in train_data():
tensor_words = to_lodtensor(map(lambda x: x[0], data), place)
......@@ -84,12 +86,13 @@ def main():
outs = exe.run(framework.default_main_program(),
feed={"words": tensor_words,
"label": tensor_label},
fetch_list=[cost, acc])
fetch_list=[cost, acc_out])
cost_val = np.array(outs[0])
acc_val = np.array(outs[1])
print("cost=" + str(cost_val) + " acc=" + str(acc_val))
if cost_val < 1.0 and acc_val > 0.7:
pass_acc = accuracy.eval(exe)
print("cost=" + str(cost_val) + " acc=" + str(acc_val) +
" pass_acc=" + str(pass_acc))
if cost_val < 1.0 and pass_acc > 0.8:
exit(0)
exit(1)
......
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.nets as nets
import paddle.v2.fluid.core as core
import paddle.v2.fluid.optimizer as optimizer
import paddle.v2.fluid.evaluator as evaluator
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.executor import Executor
import numpy as np
from paddle.v2.fluid.optimizer import AdamOptimizer
def stacked_lstm_net(input_dim,
......@@ -41,10 +40,10 @@ def stacked_lstm_net(input_dim,
act='softmax')
cost = layers.cross_entropy(input=prediction, label=label)
avg_cost = layers.mean(x=cost)
adam_optimizer = optimizer.AdamOptimizer(learning_rate=0.002)
adam_optimizer = AdamOptimizer(learning_rate=0.002)
opts = adam_optimizer.minimize(avg_cost)
acc = layers.accuracy(input=prediction, label=label)
return avg_cost, acc
accuracy, acc_out = evaluator.accuracy(input=prediction, label=label)
return avg_cost, accuracy, acc_out
def to_lodtensor(data, place):
......@@ -71,7 +70,8 @@ def main():
dict_dim = len(word_dict)
class_dim = 2
cost, acc = stacked_lstm_net(input_dim=dict_dim, class_dim=class_dim)
cost, accuracy, acc_out = stacked_lstm_net(
input_dim=dict_dim, class_dim=class_dim)
train_data = paddle.batch(
paddle.reader.shuffle(
......@@ -83,6 +83,7 @@ def main():
exe.run(framework.default_startup_program())
for pass_id in xrange(PASS_NUM):
accuracy.reset(exe)
for data in train_data():
tensor_words = to_lodtensor(map(lambda x: x[0], data), place)
......@@ -95,12 +96,13 @@ def main():
outs = exe.run(framework.default_main_program(),
feed={"words": tensor_words,
"label": tensor_label},
fetch_list=[cost, acc])
fetch_list=[cost, acc_out])
cost_val = np.array(outs[0])
acc_val = np.array(outs[1])
print("cost=" + str(cost_val) + " acc=" + str(acc_val))
if cost_val < 1.0 and acc_val > 0.7:
pass_acc = accuracy.eval(exe)
print("cost=" + str(cost_val) + " acc=" + str(acc_val) +
" pass_acc=" + str(pass_acc))
if cost_val < 1.0 and acc_val > 0.8:
exit(0)
exit(1)
......
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.core as core
import paddle.v2.fluid.optimizer as optimizer
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.executor import Executor
import numpy as np
from paddle.v2.fluid.optimizer import AdamOptimizer
def lstm_net(dict_dim, class_dim=2, emb_dim=32, seq_len=80, batch_size=50):
......@@ -33,7 +32,7 @@ def lstm_net(dict_dim, class_dim=2, emb_dim=32, seq_len=80, batch_size=50):
cost = layers.cross_entropy(input=prediction, label=label)
avg_cost = layers.mean(x=cost)
adam_optimizer = optimizer.AdamOptimizer(learning_rate=0.002)
adam_optimizer = AdamOptimizer(learning_rate=0.002)
opts = adam_optimizer.minimize(avg_cost)
acc = layers.accuracy(input=prediction, label=label)
......@@ -55,17 +54,17 @@ def to_lodtensor(data, place):
return res
def chop_data(data, chop_len=80, batch_len=50):
def chop_data(data, chop_len=80, batch_size=50):
data = [(x[0][:chop_len], x[1]) for x in data if len(x[0]) >= chop_len]
return data[:batch_len]
return data[:batch_size]
def prepare_feed_data(data, place):
tensor_words = to_lodtensor(map(lambda x: x[0], data), place)
label = np.array(map(lambda x: x[1], data)).astype("int64")
label = label.reshape([50, 1])
label = label.reshape([len(label), 1])
tensor_label = core.LoDTensor()
tensor_label.set(label, place)
......@@ -73,33 +72,41 @@ def prepare_feed_data(data, place):
def main():
word_dict = paddle.dataset.imdb.word_dict()
cost, acc = lstm_net(dict_dim=len(word_dict), class_dim=2)
BATCH_SIZE = 100
PASS_NUM = 5
batch_size = 100
train_data = paddle.batch(
paddle.reader.buffered(
paddle.dataset.imdb.train(word_dict), size=batch_size * 10),
batch_size=batch_size)
word_dict = paddle.dataset.imdb.word_dict()
print "load word dict successfully"
dict_dim = len(word_dict)
class_dim = 2
data = chop_data(next(train_data()))
cost, acc = lstm_net(dict_dim=dict_dim, class_dim=class_dim)
train_data = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.imdb.train(word_dict), buf_size=BATCH_SIZE * 10),
batch_size=BATCH_SIZE)
place = core.CPUPlace()
tensor_words, tensor_label = prepare_feed_data(data, place)
exe = Executor(place)
exe.run(framework.default_startup_program())
while True:
outs = exe.run(framework.default_main_program(),
feed={"words": tensor_words,
"label": tensor_label},
fetch_list=[cost, acc])
cost_val = np.array(outs[0])
acc_val = np.array(outs[1])
print("cost=" + str(cost_val) + " acc=" + str(acc_val))
if acc_val > 0.9:
break
for pass_id in xrange(PASS_NUM):
for data in train_data():
chopped_data = chop_data(data)
tensor_words, tensor_label = prepare_feed_data(chopped_data, place)
outs = exe.run(framework.default_main_program(),
feed={"words": tensor_words,
"label": tensor_label},
fetch_list=[cost, acc])
cost_val = np.array(outs[0])
acc_val = np.array(outs[1])
print("cost=" + str(cost_val) + " acc=" + str(acc_val))
if acc_val > 0.7:
exit(0)
exit(1)
if __name__ == '__main__':
......
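Both to_lodtensor and the fixed label.reshape([len(label), 1]) above hinge on level-0 LoD being cumulative sequence offsets into the flattened batch. A self-contained sketch of that layout, with illustrative data:

import numpy as np

def build_lod_input(sequences):
    # sequences: list of token-id lists, e.g. [[1, 2], [3, 4, 5, 6], [7, 8, 9]]
    lengths = [len(seq) for seq in sequences]
    offsets = np.cumsum([0] + lengths).tolist()            # [0, 2, 6, 9]
    flat = np.array(sum(sequences, []), dtype='int64').reshape([-1, 1])
    return flat, [offsets]                                 # data + level-0 LoD

flat, lod = build_lod_input([[1, 2], [3, 4, 5, 6], [7, 8, 9]])
assert lod == [[0, 2, 6, 9]] and flat.shape == (9, 1)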
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.core as core
import paddle.v2.fluid.optimizer as optimizer
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.executor import Executor
import numpy as np
from paddle.v2.fluid.optimizer import SGDOptimizer
PASS_NUM = 100
EMBED_SIZE = 32
......@@ -17,26 +16,11 @@ IS_SPARSE = True
word_dict = paddle.dataset.imikolov.build_dict()
dict_size = len(word_dict)
first_word = layers.data(
name='firstw',
shape=[1],
data_type='int64')
second_word = layers.data(
name='secondw',
shape=[1],
data_type='int64')
third_word = layers.data(
name='thirdw',
shape=[1],
data_type='int64')
forth_word = layers.data(
name='forthw',
shape=[1],
data_type='int64')
next_word = layers.data(
name='nextw',
shape=[1],
data_type='int64')
first_word = layers.data(name='firstw', shape=[1], data_type='int64')
second_word = layers.data(name='secondw', shape=[1], data_type='int64')
third_word = layers.data(name='thirdw', shape=[1], data_type='int64')
forth_word = layers.data(name='forthw', shape=[1], data_type='int64')
next_word = layers.data(name='nextw', shape=[1], data_type='int64')
embed_first = layers.embedding(
input=first_word,
......@@ -64,19 +48,12 @@ embed_forth = layers.embedding(
param_attr={'name': 'shared_w'})
concat_embed = layers.concat(
input=[embed_first, embed_second, embed_third, embed_forth],
axis=1)
hidden1 = layers.fc(input=concat_embed,
size=HIDDEN_SIZE,
act='sigmoid')
predict_word = layers.fc(input=hidden1,
size=dict_size,
act='softmax')
cost = layers.cross_entropy(
input=predict_word,
label=next_word)
input=[embed_first, embed_second, embed_third, embed_forth], axis=1)
hidden1 = layers.fc(input=concat_embed, size=HIDDEN_SIZE, act='sigmoid')
predict_word = layers.fc(input=hidden1, size=dict_size, act='softmax')
cost = layers.cross_entropy(input=predict_word, label=next_word)
avg_cost = layers.mean(x=cost)
sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
sgd_optimizer = SGDOptimizer(learning_rate=0.001)
opts = sgd_optimizer.minimize(avg_cost)
train_reader = paddle.batch(
......
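The word2vec snippet above predicts the fifth word from four context words through a single embedding table shared via param_attr={'name': 'shared_w'}. A shape-only numpy sketch of that forward pass (dict_size and HIDDEN_SIZE=256 are illustrative assumptions; only EMBED_SIZE=32 comes from the script):

import numpy as np

batch, dict_size, embed_size, hidden_size = 8, 2000, 32, 256
shared_w = np.random.randn(dict_size, embed_size)     # one table, four lookups
words = np.random.randint(0, dict_size, size=(batch, 4))

concat = np.concatenate([shared_w[words[:, i]] for i in range(4)], axis=1)
w1 = np.random.randn(4 * embed_size, hidden_size)
hidden = 1.0 / (1.0 + np.exp(-concat.dot(w1)))        # fc + sigmoid
logits = hidden.dot(np.random.randn(hidden_size, dict_size))
logits -= logits.max(axis=1, keepdims=True)           # stabilize softmax
probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
assert probs.shape == (batch, dict_size)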
......@@ -110,13 +110,30 @@ class TestConv2dOp(OpTest):
self.op_type = "conv2d"
class TestWithPad(TestConv2dOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] / self.groups
self.filter_size = [6, f_c, 3, 3]
class TestWithStride(TestConv2dOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 6, 6] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] / self.groups
self.filter_size = [6, f_c, 3, 3]
class TestWithGroup(TestConv2dOp):
def init_group(self):
self.groups = 3
def init_op_type(self):
self.op_type = "conv2d"
class TestWith1x1(TestConv2dOp):
def init_test_case(self):
......@@ -127,15 +144,9 @@ class TestWith1x1(TestConv2dOp):
f_c = self.input_size[1] / self.groups
self.filter_size = [6, f_c, 1, 1]
def init_dilation(self):
self.dilations = [1, 1]
def init_group(self):
self.groups = 3
def init_op_type(self):
self.op_type = "conv2d"
class TestWithDilation(TestConv2dOp):
def init_test_case(self):
......@@ -152,14 +163,19 @@ class TestWithDilation(TestConv2dOp):
def init_group(self):
self.groups = 3
#----------------Conv2dCudnn----------------
class TestCudnn(TestConv2dOp):
def init_op_type(self):
self.op_type = "conv2d"
self.op_type = "conv_cudnn"
#----------------Conv2dCudnn----------------
class TestCudnnWithPad(TestWithPad):
def init_op_type(self):
self.op_type = "conv_cudnn"
class TestCudnn(TestConv2dOp):
class TestCudnnWithStride(TestWithStride):
def init_op_type(self):
self.op_type = "conv_cudnn"
......
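The TestWithPad/TestWithStride cases added above follow the usual convolution output-size rule. A quick check of the shapes these tests imply (the standard formula, not code from the op itself):

def conv_out_size(in_size, k, pad, stride, dilation=1):
    effective_k = dilation * (k - 1) + 1      # dilated kernel extent
    return (in_size + 2 * pad - effective_k) // stride + 1

assert conv_out_size(5, 3, pad=1, stride=1) == 5   # TestWithPad: 5x5 -> 5x5
assert conv_out_size(6, 3, pad=1, stride=2) == 3   # TestWithStride: 6x6 -> 3x3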
......@@ -4,9 +4,7 @@ from op_test import OpTest
def conv2dtranspose_forward_naive(input_, filter_, conv2dtranspose_param):
# [2, 3, 5, 5]
in_n, in_c, in_h, in_w = input_.shape
# [3, 6, 3, 3]
f_c, out_c, f_h, f_w = filter_.shape
assert in_c == f_c
......@@ -29,6 +27,7 @@ def conv2dtranspose_forward_naive(input_, filter_, conv2dtranspose_param):
j1, j2 = j * stride[0], j * stride[0] + f_w
out[n, k, i1:i2, j1:j2] += tmp_out
out = out[:, :, pad[0]:out_h - pad[0], pad[1]:out_w - pad[1]]
return out
......@@ -36,8 +35,6 @@ class TestConv2dTransposeOp(OpTest):
def setUp(self):
# init as conv transpose
self.init_op_type()
# [2, 3, 5, 5] -> kernel [3, 6, 3, 3] -> output [2, 6, 7, 7]
self.init_test_case()
conv2dtranspose_param = {'stride': self.stride, 'pad': self.pad}
......@@ -55,7 +52,6 @@ class TestConv2dTransposeOp(OpTest):
self.outputs = {'Output': output}
def test_check_output(self):
print 'check output here for', self.op_type
self.check_output()
def test_check_grad_no_input(self):
......@@ -88,6 +84,26 @@ class TestConv2dTransposeOp(OpTest):
self.op_type = "conv2d_transpose"
class TestWithPad(TestConv2dTransposeOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [1, 1]
self.dilations = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 3, 3]
class TestWithStride(TestConv2dTransposeOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [2, 2]
self.dilations = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 3, 3]
# ------------ test_cudnn ------------
class TestCudnn(TestConv2dTransposeOp):
def init_op_type(self):
......
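conv2dtranspose_forward_naive above first builds the full (in - 1) * stride + k map and then crops pad cells from each border, so the expected spatial size is:

def conv_transpose_out_size(in_size, k, pad, stride):
    return (in_size - 1) * stride + k - 2 * pad

assert conv_transpose_out_size(5, 3, pad=0, stride=1) == 7   # base case: 5 -> 7
assert conv_transpose_out_size(5, 3, pad=1, stride=1) == 5   # TestWithPad
assert conv_transpose_out_size(5, 3, pad=1, stride=2) == 9   # TestWithStride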
......@@ -4,9 +4,7 @@ from op_test import OpTest
def conv3dtranspose_forward_naive(input_, filter_, conv3dtranspose_param):
# [2, 3, 5, 5, 5]
in_n, in_c, in_d, in_h, in_w = input_.shape
# [3, 6, 3, 3, 3]
f_c, out_c, f_d, f_h, f_w = filter_.shape
assert in_c == f_c
......@@ -14,7 +12,6 @@ def conv3dtranspose_forward_naive(input_, filter_, conv3dtranspose_param):
out_d = (in_d - 1) * stride[0] + f_d
out_h = (in_h - 1) * stride[1] + f_h
out_w = (in_w - 1) * stride[2] + f_w
out = np.zeros((in_n, out_c, out_d, out_h, out_w))
for n in range(in_n):
......@@ -33,6 +30,8 @@ def conv3dtranspose_forward_naive(input_, filter_, conv3dtranspose_param):
j1, j2 = j * stride[2], j * stride[2] + f_w
out[n, k, d1:d2, i1:i2, j1:j2] += tmp_out
out = out[:, :, pad[0]:out_d - pad[0], pad[1]:out_h - pad[1], pad[2]:out_w -
pad[2]]
return out
......@@ -40,8 +39,6 @@ class TestConv3dTransposeOp(OpTest):
def setUp(self):
# init as conv transpose
self.init_op_type()
# [2, 3, 5, 5, 5] -> kernel [3, 6, 3, 3, 3] -> output [2, 6, 7, 7, 7]
self.init_test_case()
conv3dtranspose_param = {'stride': self.stride, 'pad': self.pad}
......@@ -49,7 +46,6 @@ class TestConv3dTransposeOp(OpTest):
filter_ = np.random.random(self.filter_size).astype("float32")
output = conv3dtranspose_forward_naive(
input_, filter_, conv3dtranspose_param).astype("float32")
# print 'deconv output py', output, output.shape
self.inputs = {'Input': input_, 'Filter': filter_}
self.attrs = {
......@@ -60,7 +56,6 @@ class TestConv3dTransposeOp(OpTest):
self.outputs = {'Output': output}
def test_check_output(self):
print 'check output here'
self.check_output()
def test_check_grad(self):
......@@ -85,7 +80,7 @@ class TestConv3dTransposeOp(OpTest):
self.pad = [0, 0, 0]
self.stride = [1, 1, 1]
self.dilations = [1, 1, 1]
self.input_size = [2, 3, 5, 5, 5] # NCHW
self.input_size = [2, 3, 5, 5, 5] # NCDHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 3, 3, 3]
......@@ -93,5 +88,31 @@ class TestConv3dTransposeOp(OpTest):
self.op_type = "conv3d_transpose"
class TestWithPad(TestConv3dTransposeOp):
def init_test_case(self):
self.pad = [1, 1, 1]
self.stride = [1, 1, 1]
self.dilations = [1, 1, 1]
self.input_size = [2, 3, 5, 5, 5] # NCDHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 3, 3, 3]
class TestWithStride(TestConv3dTransposeOp):
def init_test_case(self):
self.pad = [1, 1, 1]
self.stride = [2, 2, 2]
self.dilations = [1, 1, 1]
self.input_size = [2, 3, 5, 5, 5] # NCDHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 3, 3, 3]
# ------------ test_cudnn ------------
class TestCudnn(TestConv3dTransposeOp):
def init_op_type(self):
self.op_type = "conv3d_transpose_cudnn"
if __name__ == '__main__':
unittest.main()
......@@ -6,7 +6,8 @@ from test_lstm_op import identity, sigmoid, tanh, relu
class TestGRUOp(OpTest):
batch_size = 9
lod = [[0, 2, 6, 9]]
batch_size = lod[0][-1]
frame_size = 5
activate = {
'identity': identity,
......@@ -35,7 +36,7 @@ class TestGRUOp(OpTest):
seq_starts[sorted_seqs[i]] + batch_idx)
idx_in_seq.append(idx)
idx_in_seq_list.append(idx_in_seq)
return idx_in_seq_list
return idx_in_seq_list, sorted_seqs
def gru_step(self, x, h_p, w, b):
batch_size = x.shape[0]
......@@ -66,8 +67,8 @@ class TestGRUOp(OpTest):
batch_hidden = self.outputs['BatchHidden']
hidden = self.outputs['Hidden']
idx_in_seq_list = self.idx_in_seq_list
h_p = self.inputs['H0'] if self.inputs.has_key('H0') else np.zeros(
(len(idx_in_seq_list[0]), self.frame_size))
h_p = self.inputs['H0'][self.sorted_seqs] if self.inputs.has_key(
'H0') else np.zeros((len(idx_in_seq_list[0]), self.frame_size))
num_batch = len(idx_in_seq_list)
end_idx = 0
for batch_idx in range(num_batch):
......@@ -84,8 +85,9 @@ class TestGRUOp(OpTest):
return batch_gate, batch_reset_hidden_prev, hidden
def set_data(self):
lod = [[0, 2, 6, self.batch_size]]
self.idx_in_seq_list = self.seq_to_batch(lod, self.is_reverse)
lod = self.lod
self.idx_in_seq_list, self.sorted_seqs = self.seq_to_batch(
lod, self.is_reverse)
batch_size = self.batch_size
frame_size = self.frame_size
input = np.random.rand(batch_size, frame_size * 3).astype('float64')
......@@ -146,7 +148,7 @@ class TestGRUOpReverse(TestGRUOp):
def set_confs(self):
self.is_reverse = True
self.attrs = {
'activation': 'identity',
'activation': 'tanh',
'gate_activation': 'sigmoid',
'is_reverse': self.is_reverse
}
......
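For reference, one GRU step in the formulation the test's gru_step follows, as a standalone numpy sketch; the gate layout inside the projected input and the h = u*h_prev + (1-u)*c blend are assumptions about the convention, and the op under test may order its gates differently:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gru_step(x, h_prev, w_u, w_r, w_c):
    # x: (batch, 3*d) input projections for update/reset/candidate gates
    d = h_prev.shape[1]
    u = sigmoid(x[:, :d] + h_prev.dot(w_u))              # update gate
    r = sigmoid(x[:, d:2 * d] + h_prev.dot(w_r))         # reset gate
    c = np.tanh(x[:, 2 * d:] + (r * h_prev).dot(w_c))    # candidate state
    return u * h_prev + (1.0 - u) * c

h = gru_step(np.random.rand(9, 15), np.zeros((9, 5)),
             *[np.random.rand(5, 5) for _ in range(3)])
assert h.shape == (9, 5)   # batch_size 9, frame_size 5, as in the test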
import unittest
import numpy as np
from paddle.v2.framework.op import Operator
import paddle.v2.framework.core as core
from paddle.v2.fluid.op import Operator
import paddle.v2.fluid.core as core
def create_tensor(scope, name, np_data):
......
import op_test
import unittest
import numpy as np
def create_test_class(op_type, callback, binary_op=True):
class Cls(op_test.OpTest):
def setUp(self):
a = np.random.choice(a=[True, False], size=(10, 7)).astype(bool)
if binary_op:
b = np.random.choice(a=[True, False], size=(10, 7)).astype(bool)
c = callback(a, b)
else:
c = callback(a)
self.outputs = {'Out': c}
self.op_type = op_type
if binary_op:
self.inputs = {'X': a, 'Y': b}
else:
self.inputs = {'X': a}
def test_output(self):
self.check_output()
Cls.__name__ = op_type
globals()[op_type] = Cls
create_test_class('logical_and', lambda _a, _b: np.logical_and(_a, _b))
create_test_class('logical_or', lambda _a, _b: np.logical_or(_a, _b))
create_test_class('logical_not', lambda _a: np.logical_not(_a), False)
create_test_class('logical_xor', lambda _a, _b: np.logical_xor(_a, _b))
if __name__ == '__main__':
unittest.main()
import unittest
import numpy as np
from op_test import OpTest
def maxout_forward_naive(input, groups):
s0, s1, s2, s3 = input.shape
return np.ndarray([s0, s1 / groups, groups, s2, s3],
                  buffer=input, dtype=input.dtype).max(axis=2)
class TestMaxOutOp(OpTest):
def setUp(self):
self.op_type = "maxout"
self.init_test_case()
input = np.random.random(self.shape).astype("float32")
output = self.MaxOut_forward_naive(input, self.groups).astype("float32")
self.inputs = {'X': input}
self.attrs = {'groups': self.groups}
self.outputs = {'Out': output.astype('float32')}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
def init_test_case(self):
self.MaxOut_forward_naive = maxout_forward_naive
self.shape = [100, 6, 2, 2]
self.groups = 2
if __name__ == '__main__':
unittest.main()
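maxout_forward_naive above views the input as (N, C/groups, groups, H, W) through the np.ndarray(buffer=...) trick; an equivalent reshape-based sketch makes the reduction easier to read:

import numpy as np

def maxout(x, groups):
    n, c, h, w = x.shape
    assert c % groups == 0
    # split channels into c // groups blocks of `groups` and take the max
    return x.reshape(n, c // groups, groups, h, w).max(axis=2)

x = np.random.random((100, 6, 2, 2)).astype('float32')
assert maxout(x, groups=2).shape == (100, 3, 2, 2)   # matches the test shape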
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.framework import Program
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.optimizer import MomentumOptimizer
import paddle.v2.fluid.core as core
import paddle.v2 as paddle
import unittest
import numpy as np
class TestMNISTIfElseOp(unittest.TestCase):
def test_raw_api(self):
kwargs = {'startup_program': Program(), 'main_program': Program()}
image = layers.data(
name='x', shape=[784], data_type='float32', **kwargs)
label = layers.data(name='y', shape=[1], data_type='int64', **kwargs)
limit = layers.fill_constant_batch_size_like(
input=label, dtype='int64', shape=[1], value=5.0, **kwargs)
cond = layers.less_than(x=label, y=limit, **kwargs)
true_image, false_image = layers.split_lod_tensor(
input=image, mask=cond, **kwargs)
true_out = layers.create_tensor(dtype='float32', **kwargs)
true_cond = layers.ConditionalBlock([true_image], **kwargs)
with true_cond.block():
hidden = layers.fc(input=true_image, size=100, act='tanh', **kwargs)
prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs)
layers.assign(input=prob, output=true_out, **kwargs)
false_out = layers.create_tensor(dtype='float32', **kwargs)
false_cond = layers.ConditionalBlock([false_image], **kwargs)
with false_cond.block():
hidden = layers.fc(input=false_image,
size=200,
act='tanh',
**kwargs)
prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs)
layers.assign(input=prob, output=false_out, **kwargs)
prob = layers.merge_lod_tensor(
in_true=true_out, in_false=false_out, mask=cond, x=image, **kwargs)
loss = layers.cross_entropy(input=prob, label=label, **kwargs)
avg_loss = layers.mean(x=loss, **kwargs)
optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
optimizer.minimize(avg_loss, kwargs['startup_program'])
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=8192),
batch_size=200)
place = core.CPUPlace()
exe = Executor(place)
exe.run(kwargs['startup_program'])
PASS_NUM = 100
for pass_id in range(PASS_NUM):
for data in train_reader():
x_data = np.array(map(lambda x: x[0], data)).astype("float32")
y_data = np.array(map(lambda x: x[1], data)).astype("int64")
y_data = np.expand_dims(y_data, axis=1)
tensor_x = core.LoDTensor()
tensor_x.set(x_data, place)
tensor_y = core.LoDTensor()
tensor_y.set(y_data, place)
outs = map(np.array,
exe.run(kwargs['main_program'],
feed={'x': tensor_x,
'y': tensor_y},
fetch_list=[avg_loss]))
print outs[0]
if outs[0] < 1.0:
return
self.assertFalse(True)
def test_ifelse(self):
kwargs = {'startup_program': Program(), 'main_program': Program()}
image = layers.data(
name='x', shape=[784], data_type='float32', **kwargs)
label = layers.data(name='y', shape=[1], data_type='int64', **kwargs)
limit = layers.fill_constant_batch_size_like(
input=label, dtype='int64', shape=[1], value=5.0, **kwargs)
cond = layers.less_than(x=label, y=limit, **kwargs)
ie = layers.IfElse(cond, **kwargs)
with ie.true_block():
true_image = ie.input(image)
hidden = layers.fc(input=true_image, size=100, act='tanh', **kwargs)
prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs)
ie.output(prob)
with ie.false_block():
false_image = ie.input(image)
hidden = layers.fc(input=false_image,
size=200,
act='tanh',
**kwargs)
prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs)
ie.output(prob)
prob = ie()
loss = layers.cross_entropy(input=prob[0], label=label, **kwargs)
avg_loss = layers.mean(x=loss, **kwargs)
optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
optimizer.minimize(avg_loss, kwargs['startup_program'])
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=8192),
batch_size=200)
place = core.CPUPlace()
exe = Executor(place)
exe.run(kwargs['startup_program'])
PASS_NUM = 100
for pass_id in range(PASS_NUM):
for data in train_reader():
x_data = np.array(map(lambda x: x[0], data)).astype("float32")
y_data = np.array(map(lambda x: x[1], data)).astype("int64")
y_data = np.expand_dims(y_data, axis=1)
tensor_x = core.LoDTensor()
tensor_x.set(x_data, place)
tensor_y = core.LoDTensor()
tensor_y.set(y_data, place)
outs = map(np.array,
exe.run(kwargs['main_program'],
feed={'x': tensor_x,
'y': tensor_y},
fetch_list=[avg_loss]))
print outs[0]
if outs[0] < 1.0:
return
self.assertFalse(True)
if __name__ == '__main__':
unittest.main()
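split_lod_tensor/merge_lod_tensor (and the IfElse sugar over them) route each row to one branch by the boolean mask and stitch the branch outputs back in the original row order. A plain numpy sketch of that routing, ignoring LoD:

import numpy as np

label = np.array([1, 7, 3, 9])
mask = label < 5                           # cond = less_than(label, limit=5)
x = np.random.rand(4, 784).astype('float32')

true_in, false_in = x[mask], x[~mask]      # split_lod_tensor
true_out = np.ones_like(true_in)           # stand-ins for the two fc branches
false_out = np.zeros_like(false_in)

merged = np.empty_like(x)                  # merge_lod_tensor
merged[mask], merged[~mask] = true_out, false_out
assert merged[mask].min() == 1.0 and merged[~mask].max() == 0.0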
......@@ -16,14 +16,18 @@ class TestOptimizer(unittest.TestCase):
dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
mul_out = block.create_var(
dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mul",
inputs={"X": mul_x,
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01)
opts = sgd_optimizer.minimize(mul_out, init_program)
opts = sgd_optimizer.minimize(mean_out, init_program)
self.assertEqual(len(opts), 1)
sgd_op = opts[0]
self.assertEqual(sgd_op.type, "sgd")
......@@ -44,12 +48,16 @@ class TestOptimizer(unittest.TestCase):
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
global_step = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="step")
learning_rate = 0.01
sgd_optimizer = optimizer.SGDOptimizer(
learning_rate=learning_rate, global_step=global_step)
opts = sgd_optimizer.minimize(mul_out, init_program)
opts = sgd_optimizer.minimize(mean_out, init_program)
self.assertEqual(len(opts), 2)
sgd_op = opts[0]
self.assertEqual(sgd_op.type, "sgd")
......@@ -90,7 +98,11 @@ class TestMomentumOptimizer(unittest.TestCase):
learning_rate = 0.01
momentum_optimizer = self.MockMomentum(
learning_rate=learning_rate, momentum=0.2)
params_grads = append_backward_ops(mul_out)
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
params_grads = append_backward_ops(mean_out)
self.assertEqual(len(params_grads), 1)
self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
opts = momentum_optimizer.create_optimization_pass(
......@@ -132,10 +144,14 @@ class TestMomentumOptimizer(unittest.TestCase):
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
learning_rate = 0.01
momentum_optimizer = self.MockMomentum(
learning_rate=learning_rate, momentum=0.2, use_nesterov=True)
params_grads = append_backward_ops(mul_out)
params_grads = append_backward_ops(mean_out)
self.assertEqual(len(params_grads), 1)
self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
opts = momentum_optimizer.create_optimization_pass(
......@@ -186,10 +202,14 @@ class TestAdagradOptimizer(unittest.TestCase):
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
learning_rate = 0.01
adagrad_optimizer = self.MockAdagrad(
learning_rate=learning_rate, epsilon=1.0e-6)
params_grads = append_backward_ops(mul_out)
params_grads = append_backward_ops(mean_out)
self.assertEqual(len(params_grads), 1)
self.assertEqual(len(adagrad_optimizer.get_accumulators()), 0)
opts = adagrad_optimizer.create_optimization_pass(params_grads, mul_out,
......@@ -242,10 +262,14 @@ class TestAdamOptimizer(unittest.TestCase):
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
learning_rate = 0.01
adam_optimizer = self.MockAdam(
learning_rate=learning_rate, beta1=0.9, beta2=0.999)
params_grads = append_backward_ops(mul_out)
params_grads = append_backward_ops(mean_out)
self.assertEqual(len(params_grads), 1)
self.assertEqual(len(adam_optimizer.get_accumulators()), 0)
opts = adam_optimizer.create_optimization_pass(params_grads, mul_out,
......@@ -300,10 +324,14 @@ class TestAdamaxOptimizer(unittest.TestCase):
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
learning_rate = 0.01
adamax_optimizer = self.MockAdamax(
learning_rate=learning_rate, beta1=0.9, beta2=0.999)
params_grads = append_backward_ops(mul_out)
params_grads = append_backward_ops(mean_out)
self.assertEqual(len(params_grads), 1)
self.assertEqual(len(adamax_optimizer.get_accumulators()), 0)
opts = adamax_optimizer.create_optimization_pass(params_grads, mul_out,
......@@ -355,10 +383,14 @@ class TestDecayedAdagradOptimizer(unittest.TestCase):
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
learning_rate = 0.01
decayed_adagrad_optimizer = self.MockDecayedAdagrad(
learning_rate=learning_rate, decay=0.95, epsilon=1.0e-6)
params_grads = append_backward_ops(mul_out)
params_grads = append_backward_ops(mean_out)
self.assertEqual(len(params_grads), 1)
self.assertEqual(len(decayed_adagrad_optimizer.get_accumulators()), 0)
opts = decayed_adagrad_optimizer.create_optimization_pass(
......
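Every case above now appends a mean op before minimize/append_backward_ops: backward needs a scalar loss to seed the gradient chain, and mul.out (shape [5, 8]) is not one. The update the appended sgd op then performs is just, in numpy terms:

import numpy as np

w = np.random.rand(5, 8)                  # parameter
grad = np.random.rand(5, 8)               # d(mean.out)/d(w) from backward
learning_rate = 0.01
w_new = w - learning_rate * grad          # one sgd step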
......@@ -3,8 +3,7 @@ import numpy as np
from op_test import OpTest
def max_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0):
def max_pool2D_forward_naive(x, ksize, strides, paddings, global_pool=0):
N, C, H, W = x.shape
if global_pool == 1:
ksize = [H, W]
......@@ -23,8 +22,7 @@ def max_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0):
return out
def avg_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0):
def avg_pool2D_forward_naive(x, ksize, strides, paddings, global_pool=0):
N, C, H, W = x.shape
if global_pool == 1:
ksize = [H, W]
......@@ -47,6 +45,7 @@ def avg_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0):
class TestPool2d_Op(OpTest):
def setUp(self):
self.init_test_case()
self.init_global_pool()
self.init_op_type()
self.init_pool_type()
if self.global_pool:
......@@ -75,8 +74,6 @@ class TestPool2d_Op(OpTest):
self.check_grad(set(['X']), 'Out', max_relative_error=0.07)
def init_test_case(self):
self.global_pool = True
self.pool2D_forward_naive = avg_pool2D_forward_naive
self.shape = [2, 3, 5, 5]
self.ksize = [3, 3]
self.strides = [1, 1]
......@@ -87,12 +84,14 @@ class TestPool2d_Op(OpTest):
def init_pool_type(self):
self.pool_type = "avg"
self.pool2D_forward_naive = avg_pool2D_forward_naive
def init_global_pool(self):
self.global_pool = True
class TestCase1(TestPool2d_Op):
def init_test_case(self):
self.global_pool = False
self.pool2D_forward_naive = avg_pool2D_forward_naive
self.shape = [2, 3, 7, 7]
self.ksize = [3, 3]
self.strides = [1, 1]
......@@ -103,12 +102,14 @@ class TestCase1(TestPool2d_Op):
def init_pool_type(self):
self.pool_type = "avg"
self.pool2D_forward_naive = avg_pool2D_forward_naive
def init_global_pool(self):
self.global_pool = False
class TestCase2(TestPool2d_Op):
def init_test_case(self):
self.global_pool = False
self.pool2D_forward_naive = avg_pool2D_forward_naive
self.shape = [2, 3, 7, 7]
self.ksize = [3, 3]
self.strides = [1, 1]
......@@ -119,152 +120,69 @@ class TestCase2(TestPool2d_Op):
def init_pool_type(self):
self.pool_type = "avg"
self.pool2D_forward_naive = avg_pool2D_forward_naive
def init_global_pool(self):
self.global_pool = False
class TestCase3(TestPool2d_Op):
def init_test_case(self):
self.global_pool = True
self.pool2D_forward_naive = max_pool2D_forward_naive
self.shape = [2, 3, 5, 5]
self.ksize = [3, 3]
self.strides = [1, 1]
self.paddings = [0, 0]
class TestCase3(TestPool2d_Op):
def init_op_type(self):
self.op_type = "pool2d"
def init_pool_type(self):
self.pool_type = "max"
class TestCase4(TestPool2d_Op):
def init_test_case(self):
self.global_pool = False
self.pool2D_forward_naive = max_pool2D_forward_naive
self.shape = [2, 3, 7, 7]
self.ksize = [3, 3]
self.strides = [1, 1]
self.paddings = [0, 0]
class TestCase4(TestCase1):
def init_op_type(self):
self.op_type = "pool2d"
def init_pool_type(self):
self.pool_type = "max"
class TestCase5(TestPool2d_Op):
def init_test_case(self):
self.global_pool = False
self.pool2D_forward_naive = max_pool2D_forward_naive
self.shape = [2, 3, 7, 7]
self.ksize = [3, 3]
self.strides = [1, 1]
self.paddings = [1, 1]
class TestCase5(TestCase2):
def init_op_type(self):
self.op_type = "pool2d"
def init_pool_type(self):
self.pool_type = "max"
self.pool2D_forward_naive = max_pool2D_forward_naive
#--------------------test pool2d_cudnn--------------------
class TestCaseCudnn1(TestPool2d_Op):
def init_test_case(self):
self.global_pool = True
self.pool2D_forward_naive = avg_pool2D_forward_naive
self.shape = [2, 3, 5, 5]
self.ksize = [3, 3]
self.strides = [1, 1]
self.paddings = [0, 0]
class TestCudnnCase1(TestPool2d_Op):
def init_op_type(self):
self.op_type = "pool2d_cudnn"
def init_pool_type(self):
self.pool_type = "avg"
class TestCaseCudnn2(TestPool2d_Op):
def init_test_case(self):
self.global_pool = False
self.pool2D_forward_naive = avg_pool2D_forward_naive
self.shape = [2, 3, 7, 7]
self.ksize = [3, 3]
self.strides = [1, 1]
self.paddings = [0, 0]
class TestCudnnCase2(TestCase1):
def init_op_type(self):
self.op_type = "pool2d_cudnn"
def init_pool_type(self):
self.pool_type = "avg"
class TestCaseCudnn3(TestPool2d_Op):
def init_test_case(self):
self.global_pool = False
self.pool2D_forward_naive = avg_pool2D_forward_naive
self.shape = [2, 3, 7, 7]
self.ksize = [3, 3]
self.strides = [1, 1]
self.paddings = [1, 1]
class TestCudnnCase3(TestCase2):
def init_op_type(self):
self.op_type = "pool2d_cudnn"
def init_pool_type(self):
self.pool_type = "avg"
class TestCaseCudnn4(TestPool2d_Op):
def init_test_case(self):
self.global_pool = True
self.pool2D_forward_naive = max_pool2D_forward_naive
self.shape = [2, 3, 5, 5]
self.ksize = [3, 3]
self.strides = [1, 1]
self.paddings = [0, 0]
class TestCudnnCase4(TestCase3):
def init_op_type(self):
self.op_type = "pool2d_cudnn"
def init_pool_type(self):
self.pool_type = "max"
class TestCaseCudnn5(TestPool2d_Op):
def init_test_case(self):
self.global_pool = False
self.pool2D_forward_naive = max_pool2D_forward_naive
self.shape = [2, 3, 7, 7]
self.ksize = [3, 3]
self.strides = [1, 1]
self.paddings = [0, 0]
class TestCudnnCase5(TestCase4):
def init_op_type(self):
self.op_type = "pool2d_cudnn"
def init_pool_type(self):
self.pool_type = "max"
class TestCaseCudnn6(TestPool2d_Op):
def init_test_case(self):
self.global_pool = False
self.pool2D_forward_naive = max_pool2D_forward_naive
self.shape = [2, 3, 7, 7]
self.ksize = [3, 3]
self.strides = [1, 1]
self.paddings = [1, 1]
class TestCudnnCase6(TestCase5):
def init_op_type(self):
self.op_type = "pool2d_cudnn"
def init_pool_type(self):
self.pool_type = "max"
if __name__ == '__main__':
unittest.main()
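After the refactor above, the shapes still follow the naive functions' rule, with global pooling forcing ksize to the full map and paddings to zero. A quick check against the test configurations:

def pool_out_size(in_size, k, pad, stride):
    return (in_size - k + 2 * pad) // stride + 1

assert pool_out_size(5, 5, pad=0, stride=1) == 1   # TestPool2d_Op, global pool
assert pool_out_size(7, 3, pad=0, stride=1) == 5   # TestCase1
assert pool_out_size(7, 3, pad=1, stride=1) == 7   # TestCase2 (pad 1)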
......@@ -3,8 +3,7 @@ import numpy as np
from op_test import OpTest
def max_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0):
def max_pool3D_forward_naive(x, ksize, strides, paddings, global_pool=0):
N, C, D, H, W = x.shape
if global_pool == 1:
ksize = [D, H, W]
......@@ -27,8 +26,7 @@ def max_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0):
return out
def avg_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0):
def avg_pool3D_forward_naive(x, ksize, strides, paddings, global_pool=0):
N, C, D, H, W = x.shape
if global_pool == 1:
ksize = [D, H, W]
......@@ -55,6 +53,10 @@ def avg_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0):
class TestPool3d_Op(OpTest):
def setUp(self):
self.init_test_case()
self.init_global_pool()
self.init_op_type()
self.init_pool_type()
if self.global_pool:
self.paddings = [0 for _ in range(len(self.paddings))]
input = np.random.random(self.shape).astype("float32")
......@@ -81,74 +83,115 @@ class TestPool3d_Op(OpTest):
self.check_grad(set(['X']), 'Out', max_relative_error=0.07)
def init_test_case(self):
self.global_pool = True
self.op_type = "pool3d"
self.pool_type = "avg"
self.pool3D_forward_naive = avg_pool3D_forward_naive
self.shape = [2, 3, 5, 5, 5]
self.ksize = [3, 3, 3]
self.strides = [1, 1, 1]
self.paddings = [0, 0, 0]
def init_op_type(self):
self.op_type = "pool3d"
def init_pool_type(self):
self.pool_type = "avg"
self.pool3D_forward_naive = avg_pool3D_forward_naive
def init_global_pool(self):
self.global_pool = True
class TestCase1(TestPool3d_Op):
def init_test_case(self):
self.global_pool = False
self.op_type = "pool3d"
self.pool_type = "avg"
self.pool3D_forward_naive = avg_pool3D_forward_naive
self.shape = [2, 3, 7, 7, 7]
self.ksize = [3, 3, 3]
self.strides = [1, 1, 1]
self.paddings = [0, 0, 0]
class TestCase2(TestPool3d_Op):
def init_test_case(self):
self.global_pool = False
def init_op_type(self):
self.op_type = "pool3d"
def init_pool_type(self):
self.pool_type = "avg"
self.pool3D_forward_naive = avg_pool3D_forward_naive
def init_global_pool(self):
self.global_pool = False
class TestCase2(TestPool3d_Op):
def init_test_case(self):
self.shape = [2, 3, 7, 7, 7]
self.ksize = [3, 3, 3]
self.strides = [1, 1, 1]
self.paddings = [1, 1, 1]
def init_op_type(self):
self.op_type = "pool3d"
def init_pool_type(self):
self.pool_type = "avg"
self.pool3D_forward_naive = avg_pool3D_forward_naive
def init_global_pool(self):
self.global_pool = False
class TestCase3(TestPool3d_Op):
def init_test_case(self):
self.global_pool = True
def init_op_type(self):
self.op_type = "pool3d"
def init_pool_type(self):
self.pool_type = "max"
self.pool3D_forward_naive = max_pool3D_forward_naive
self.shape = [2, 3, 5, 5, 5]
self.ksize = [3, 3, 3]
self.strides = [1, 1, 1]
self.paddings = [0, 0, 0]
class TestCase4(TestPool3d_Op):
def init_test_case(self):
self.global_pool = False
class TestCase4(TestCase1):
def init_op_type(self):
self.op_type = "pool3d"
def init_pool_type(self):
self.pool_type = "max"
self.pool3D_forward_naive = max_pool3D_forward_naive
self.shape = [2, 3, 7, 7, 7]
self.ksize = [3, 3, 3]
self.strides = [1, 1, 1]
self.paddings = [0, 0, 0]
class TestCase5(TestPool3d_Op):
def init_test_case(self):
self.global_pool = False
class TestCase5(TestCase2):
def init_op_type(self):
self.op_type = "pool3d"
def init_pool_type(self):
self.pool_type = "max"
self.pool3D_forward_naive = max_pool3D_forward_naive
self.shape = [2, 3, 7, 7, 7]
self.ksize = [3, 3, 3]
self.strides = [1, 1, 1]
self.paddings = [1, 1, 1]
#--------------------test pool3d_cudnn--------------------
class TestCudnnCase1(TestPool3d_Op):
def init_op_type(self):
self.op_type = "pool3d_cudnn"
class TestCudnnCase2(TestCase1):
def init_op_type(self):
self.op_type = "pool3d_cudnn"
class TestCudnnCase3(TestCase2):
def init_op_type(self):
self.op_type = "pool3d_cudnn"
class TestCudnnCase4(TestCase3):
def init_op_type(self):
self.op_type = "pool3d_cudnn"
class TestCudnnCase5(TestCase4):
def init_op_type(self):
self.op_type = "pool3d_cudnn"
class TestCudnnCase6(TestCase5):
def init_op_type(self):
self.op_type = "pool3d_cudnn"
if __name__ == '__main__':
......
......@@ -3,11 +3,13 @@ import numpy as np
from op_test import OpTest
def max_pool3D_forward_naive(x, ksize, strides, paddings, global_pool=0):
def max_pool3D_forward_naive(x, ksize, strides, paddings, global_pool=False):
N, C, D, H, W = x.shape
if global_pool == 1:
if global_pool:
ksize = [D, H, W]
paddings = [0, 0, 0]
D_out = (D - ksize[0] + 2 * paddings[0]) / strides[0] + 1
H_out = (H - ksize[1] + 2 * paddings[1]) / strides[1] + 1
W_out = (W - ksize[2] + 2 * paddings[2]) / strides[2] + 1
......@@ -40,11 +42,13 @@ def max_pool3D_forward_naive(x, ksize, strides, paddings, global_pool=0):
return out, mask
def max_pool2D_forward_naive(x, ksize, strides, paddings, global_pool=0):
def max_pool2D_forward_naive(x, ksize, strides, paddings, global_pool=False):
N, C, H, W = x.shape
if global_pool == 1:
if global_pool:
ksize = [H, W]
paddings = [0, 0]
H_out = (H - ksize[0] + 2 * paddings[0]) / strides[0] + 1
W_out = (W - ksize[1] + 2 * paddings[1]) / strides[1] + 1
out = np.zeros((N, C, H_out, W_out))
......@@ -74,13 +78,13 @@ def max_pool2D_forward_naive(x, ksize, strides, paddings, global_pool=0):
class TestMaxPoolWithIndex_Op(OpTest):
def setUp(self):
self.init_test_case()
if self.global_pool:
self.paddings = [0 for _ in range(len(self.paddings))]
self.init_global()
input = np.random.random(self.shape).astype("float32")
output, mask = self.pool_forward_naive(input, self.ksize, self.strides,
self.paddings, self.global_pool)
output = output.astype("float32")
mask = mask.astype("float32")
mask = mask.astype("int32")
self.attrs = {
'strides': self.strides,
......@@ -99,41 +103,24 @@ class TestMaxPoolWithIndex_Op(OpTest):
# self.check_grad(set(['X']), ['Out'], max_relative_error=0.07)
def init_test_case(self):
self.global_pool = True
self.index = "max_pool3d_with_index"
self.op_type = "%s" % self.index
self.op_type = "max_pool3d_with_index"
self.pool_forward_naive = max_pool3D_forward_naive
self.shape = [2, 3, 5, 5, 5]
self.ksize = [3, 3, 3]
self.strides = [1, 1, 1]
self.paddings = [1, 1, 1]
def init_global(self):
self.global_pool = False
class TestCase1(TestMaxPoolWithIndex_Op):
def init_test_case(self):
def init_global(self):
self.global_pool = True
self.op_type = "max_pool3d_with_index"
self.pool_forward_naive = max_pool3D_forward_naive
self.shape = [2, 3, 5, 5, 5]
self.ksize = [3, 3, 3]
self.strides = [1, 1, 1]
self.paddings = [1, 1, 1]
class TestCase2(TestMaxPoolWithIndex_Op):
def init_test_case(self):
self.global_pool = False
self.op_type = "max_pool3d_with_index"
self.pool_forward_naive = max_pool3D_forward_naive
self.shape = [2, 3, 7, 7, 7]
self.ksize = [3, 3, 3]
self.strides = [1, 1, 1]
self.paddings = [1, 1, 1]
class TestCase3(TestMaxPoolWithIndex_Op):
def init_test_case(self):
self.global_pool = False
self.op_type = "max_pool3d_with_index"
self.pool_forward_naive = max_pool3D_forward_naive
self.shape = [2, 3, 7, 7, 7]
......@@ -141,32 +128,18 @@ class TestCase3(TestMaxPoolWithIndex_Op):
self.strides = [2, 2, 2]
self.paddings = [0, 0, 0]
class TestCase4(TestMaxPoolWithIndex_Op):
def init_test_case(self):
def init_global(self):
self.global_pool = True
self.op_type = "max_pool3d_with_index"
self.pool_forward_naive = max_pool3D_forward_naive
self.shape = [2, 3, 5, 5, 5]
self.ksize = [3, 3, 3]
self.strides = [1, 1, 1]
self.paddings = [1, 1, 1]
class TestCase5(TestMaxPoolWithIndex_Op):
def init_test_case(self):
self.global_pool = True
self.op_type = "max_pool3d_with_index"
self.pool_forward_naive = max_pool3D_forward_naive
self.shape = [2, 3, 5, 5, 5]
self.ksize = [3, 3, 3]
self.strides = [2, 2, 2]
self.paddings = [0, 0, 0]
class TestCase3(TestCase2):
def init_global(self):
self.global_pool = False
class TestCase6(TestMaxPoolWithIndex_Op):
#----------------max_pool2d_with_index----------------
class TestCase4(TestMaxPoolWithIndex_Op):
def init_test_case(self):
self.global_pool = False
self.op_type = "max_pool2d_with_index"
self.pool_forward_naive = max_pool2D_forward_naive
self.shape = [2, 3, 7, 7]
......@@ -174,10 +147,17 @@ class TestCase6(TestMaxPoolWithIndex_Op):
self.strides = [1, 1]
self.paddings = [1, 1]
def init_global(self):
self.global_pool = True
class TestCase7(TestMaxPoolWithIndex_Op):
def init_test_case(self):
class TestCase5(TestCase4):
def init_global(self):
self.global_pool = False
class TestCase6(TestMaxPoolWithIndex_Op):
def init_test_case(self):
self.op_type = "max_pool2d_with_index"
self.pool_forward_naive = max_pool2D_forward_naive
self.shape = [2, 3, 7, 7]
......@@ -185,27 +165,13 @@ class TestCase7(TestMaxPoolWithIndex_Op):
self.strides = [2, 2]
self.paddings = [0, 0]
class TestCase8(TestMaxPoolWithIndex_Op):
def init_test_case(self):
def init_global(self):
self.global_pool = True
self.op_type = "max_pool2d_with_index"
self.pool_forward_naive = max_pool2D_forward_naive
self.shape = [2, 3, 5, 5]
self.ksize = [3, 3]
self.strides = [1, 1]
self.paddings = [1, 1]
class TestCase9(TestMaxPoolWithIndex_Op):
def init_test_case(self):
self.global_pool = True
self.op_type = "max_pool2d_with_index"
self.pool_forward_naive = max_pool2D_forward_naive
self.shape = [2, 3, 5, 5]
self.ksize = [3, 3]
self.strides = [2, 2]
self.paddings = [0, 0]
class TestCase7(TestCase6):
def init_global(self):
self.global_pool = False
if __name__ == '__main__':
......
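The mask these tests now check as int32 is the argmax of each pooling window, stored as a flat index into the H*W map (the convention this sketch assumes). A single-window numpy illustration:

import numpy as np

x = np.arange(25, dtype='float32').reshape(5, 5)
r0, c0, k = 1, 2, 3                        # window origin and kernel size
window = x[r0:r0 + k, c0:c0 + k]
local = int(np.argmax(window))             # argmax inside the window
r, c = r0 + local // k, c0 + local % k     # back to map coordinates
out, mask = window.max(), np.int32(r * x.shape[1] + c)
assert x.flat[mask] == out                 # mask recovers the max's position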
import unittest
import paddle.v2.fluid.core as core
from paddle.v2.fluid.framework import Program
from paddle.v2.fluid.framework import g_main_program
......@@ -98,21 +97,26 @@ class TestProgram(unittest.TestCase):
"Y": add_y},
outputs={"Out": add_out},
attrs={"x_num_col_dims": 1})
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": add_out}, outputs={"Out": mean_out})
self.assertEqual(mul_op.idx, 0)
self.assertEqual(add_op.idx, 1)
param_to_grad = prog.append_backward(add_out, set())
param_to_grad = prog.append_backward(mean_out, set())
def grad_name(name):
return name + "@GRAD"
for var_name in ("mul.x", "mul.y", "mul.out", "add.y", "add.out"):
for var_name in ("mul.x", "mul.y", "mul.out", "add.y", "add.out",
"mean.out"):
self.assertEqual(param_to_grad[var_name][0], grad_name(var_name))
self.assertEqual(param_to_grad[var_name][1], 0)
expect_ops = [
"mul", "elementwise_add", "fill_constant", "elementwise_add_grad",
"mul_grad"
"mul", "elementwise_add", "mean", "fill_constant", "mean_grad",
"elementwise_add_grad", "mul_grad"
]
actual_ops = []
for op in block.ops:
......
......@@ -29,7 +29,11 @@ class TestL2DecayRegularizer(unittest.TestCase):
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
params_grads = append_backward_ops(mul_out)
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
params_grads = append_backward_ops(mean_out)
self.assertEqual(len(params_grads), 1)
count_ops = len(block.ops)
params_grads = optimizer.append_regularization_ops(params_grads)
......@@ -62,7 +66,11 @@ class TestL1DecayRegularizer(unittest.TestCase):
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
params_grads = append_backward_ops(mul_out)
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
params_grads = append_backward_ops(mean_out)
self.assertEqual(len(params_grads), 1)
count_ops = len(block.ops)
params_grads = optimizer.append_regularization_ops(params_grads)
......
import unittest
import numpy as np
import sys
from op_test import OpTest
class TestSequenceSliceOp(OpTest):
def set_data(self):
self.init_test_case()
# only support one level of LoD
x = np.random.random(self.x_dim).astype('float32')
lod = self.x_lod
offset = np.array(self.offset).astype("int64")
length = np.array(self.length).astype("int64")
self.inputs = {'X': (x, lod), 'Offset': offset, 'Length': length}
outs = [] #np.zeros((100, 3, 2)).astype('float32')
out_lod = [[0]]
out_lod_offset = 0
for i in range(len(offset)):
sub_x = x[lod[0][i] + offset[i, 0]:lod[0][i] + offset[i, 0] +
length[i, 0], :]
out_lod_offset = out_lod_offset + len(sub_x)
outs.append(sub_x)
out_lod[0].append(out_lod_offset)
outs = np.concatenate(outs, axis=0)
self.outputs = {'Out': (outs, out_lod)}
def init_test_case(self):
self.x_dim = (100, 3, 2)
self.x_lod = [[0, 20, 40, 60, 80, 100]]
self.offset = [[1], [2], [3], [4], [5]]
self.length = [[10], [8], [6], [4], [2]]
def setUp(self):
self.op_type = "sequence_slice"
self.set_data()
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
if __name__ == '__main__':
unittest.main()
import unittest
from paddle.v2.fluid.framework import Variable, g_main_program, Program
from paddle.v2.fluid.framework import g_main_program, Program, convert_np_dtype_to_dtype_
import paddle.v2.fluid.core as core
import numpy as np
......@@ -7,7 +7,7 @@ import numpy as np
class TestVariable(unittest.TestCase):
def test_np_dtype_convert(self):
DT = core.DataType
convert = Variable._convert_np_dtype_to_dtype_
convert = convert_np_dtype_to_dtype_
self.assertEqual(DT.FP32, convert(np.float32))
self.assertEqual(DT.FP16, convert("float16"))
self.assertEqual(DT.FP64, convert("float64"))
......