diff --git a/.gitignore b/.gitignore
index 1512c1438e9e0b0b7b6e0c273a24b273cb652b04..020d3f0c303f7d850f4ec9c0efe58ab2d57dce2e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,11 +21,10 @@ third_party/
cmake-build-*
# generated while compiling
-python/paddle/v2/framework/core.so
+python/paddle/v2/fluid/core.so
paddle/pybind/pybind.h
CMakeFiles
cmake_install.cmake
paddle/.timestamp
python/paddlepaddle.egg-info/
paddle/pybind/pybind.h
-python/paddle/v2/framework/tests/tmp/*
diff --git a/benchmark/paddle/image/resnet.py b/benchmark/paddle/image/resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ae1857642e8df4b3859eec68a3a5227d1c4fcb3
--- /dev/null
+++ b/benchmark/paddle/image/resnet.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+from paddle.trainer_config_helpers import *
+
+height = 224
+width = 224
+num_class = 1000
+batch_size = get_config_arg('batch_size', int, 64)
+layer_num = get_config_arg("layer_num", int, 50)
+is_test = get_config_arg("is_test", bool, False)
+
+args = {'height': height, 'width': width, 'color': True, 'num_class': num_class}
+define_py_data_sources2(
+ "train.list", None, module="provider", obj="process", args=args)
+
+settings(
+ batch_size=batch_size,
+ learning_rate=0.01 / batch_size,
+ learning_method=MomentumOptimizer(0.9),
+ regularization=L2Regularization(0.0005 * batch_size))
+
+
+#################### Network Configuration ####################
+def conv_bn_layer(name,
+ input,
+ filter_size,
+ num_filters,
+ stride,
+ padding,
+ channels=None,
+ active_type=ReluActivation()):
+ """
+ A wrapper for a conv layer followed by a batch normalization layer.
+ Note:
+ the conv layer itself has no activation; the activation is applied after batch norm.
+ """
+
+ tmp = img_conv_layer(
+ name=name + "_conv",
+ input=input,
+ filter_size=filter_size,
+ num_channels=channels,
+ num_filters=num_filters,
+ stride=stride,
+ padding=padding,
+ act=LinearActivation(),
+ bias_attr=False)
+ return batch_norm_layer(
+ name=name + "_bn", input=tmp, act=active_type, use_global_stats=is_test)
+
+
+def bottleneck_block(name, input, num_filters1, num_filters2):
+ """
+ A wrapper for the bottleneck building block in ResNet.
+ The last conv_bn_layer has no activation.
+ The addto layer uses a ReLU activation.
+ """
+ last_name = conv_bn_layer(
+ name=name + '_branch2a',
+ input=input,
+ filter_size=1,
+ num_filters=num_filters1,
+ stride=1,
+ padding=0)
+ last_name = conv_bn_layer(
+ name=name + '_branch2b',
+ input=last_name,
+ filter_size=3,
+ num_filters=num_filters1,
+ stride=1,
+ padding=1)
+ last_name = conv_bn_layer(
+ name=name + '_branch2c',
+ input=last_name,
+ filter_size=1,
+ num_filters=num_filters2,
+ stride=1,
+ padding=0,
+ active_type=LinearActivation())
+
+ return addto_layer(
+ name=name + "_addto", input=[input, last_name], act=ReluActivation())
+
+
+def mid_projection(name, input, num_filters1, num_filters2, stride=2):
+ """
+ A wrapper for the middle projection in ResNet.
+ Projection shortcuts are used for increasing dimensions,
+ while the other shortcuts are identity mappings.
+ branch1: a projection shortcut used for increasing
+ dimensions; it has no activation.
+ branch2x: the bottleneck building block, whose shortcuts are identity.
+ """
+ # stride = 2
+ branch1 = conv_bn_layer(
+ name=name + '_branch1',
+ input=input,
+ filter_size=1,
+ num_filters=num_filters2,
+ stride=stride,
+ padding=0,
+ active_type=LinearActivation())
+
+ last_name = conv_bn_layer(
+ name=name + '_branch2a',
+ input=input,
+ filter_size=1,
+ num_filters=num_filters1,
+ stride=stride,
+ padding=0)
+ last_name = conv_bn_layer(
+ name=name + '_branch2b',
+ input=last_name,
+ filter_size=3,
+ num_filters=num_filters1,
+ stride=1,
+ padding=1)
+
+ last_name = conv_bn_layer(
+ name=name + '_branch2c',
+ input=last_name,
+ filter_size=1,
+ num_filters=num_filters2,
+ stride=1,
+ padding=0,
+ active_type=LinearActivation())
+
+ return addto_layer(
+ name=name + "_addto", input=[branch1, last_name], act=ReluActivation())
+
+
+img = data_layer(name='image', size=height * width * 3)
+
+
+def deep_res_net(res2_num=3, res3_num=4, res4_num=6, res5_num=3):
+ """
+ A wrapper for the 50-, 101- and 152-layer ResNets.
+ res2_num: number of blocks stacked in conv2_x
+ res3_num: number of blocks stacked in conv3_x
+ res4_num: number of blocks stacked in conv4_x
+ res5_num: number of blocks stacked in conv5_x
+ """
+ # For ImageNet
+ # conv1: 112x112
+ tmp = conv_bn_layer(
+ "conv1",
+ input=img,
+ filter_size=7,
+ channels=3,
+ num_filters=64,
+ stride=2,
+ padding=3)
+ tmp = img_pool_layer(name="pool1", input=tmp, pool_size=3, stride=2)
+
+ # conv2_x: 56x56
+ tmp = mid_projection(
+ name="res2_1", input=tmp, num_filters1=64, num_filters2=256, stride=1)
+ for i in xrange(2, res2_num + 1, 1):
+ tmp = bottleneck_block(
+ name="res2_" + str(i), input=tmp, num_filters1=64, num_filters2=256)
+
+ # conv3_x: 28x28
+ tmp = mid_projection(
+ name="res3_1", input=tmp, num_filters1=128, num_filters2=512)
+ for i in xrange(2, res3_num + 1, 1):
+ tmp = bottleneck_block(
+ name="res3_" + str(i),
+ input=tmp,
+ num_filters1=128,
+ num_filters2=512)
+
+ # conv4_x: 14x14
+ tmp = mid_projection(
+ name="res4_1", input=tmp, num_filters1=256, num_filters2=1024)
+ for i in xrange(2, res4_num + 1, 1):
+ tmp = bottleneck_block(
+ name="res4_" + str(i),
+ input=tmp,
+ num_filters1=256,
+ num_filters2=1024)
+
+ # conv5_x: 7x7
+ tmp = mid_projection(
+ name="res5_1", input=tmp, num_filters1=512, num_filters2=2048)
+ for i in xrange(2, res5_num + 1, 1):
+ tmp = bottleneck_block(
+ name="res5_" + str(i),
+ input=tmp,
+ num_filters1=512,
+ num_filters2=2048)
+
+ tmp = img_pool_layer(
+ name='avgpool',
+ input=tmp,
+ pool_size=7,
+ stride=1,
+ pool_type=AvgPooling())
+
+ return fc_layer(input=tmp, size=num_class, act=SoftmaxActivation())
+
+
+if layer_num == 50:
+ resnet = deep_res_net(3, 4, 6, 3)
+elif layer_num == 101:
+ resnet = deep_res_net(3, 4, 23, 3)
+elif layer_num == 152:
+ resnet = deep_res_net(3, 8, 36, 3)
+else:
+ print("Wrong layer number.")
+
+lbl = data_layer(name="label", size=num_class)
+loss = cross_entropy(name='loss', input=resnet, label=lbl)
+inputs(img, lbl)
+outputs(loss)
diff --git a/benchmark/paddle/image/run_mkldnn.sh b/benchmark/paddle/image/run_mkldnn.sh
index e31fec1cd850157d90ddcab2d559d52381ecd317..a4527e04968cf8c8c3c31d16f50bc3e28381f6d8 100755
--- a/benchmark/paddle/image/run_mkldnn.sh
+++ b/benchmark/paddle/image/run_mkldnn.sh
@@ -5,22 +5,23 @@ function train() {
export OMP_DYNAMIC="FALSE"
export KMP_AFFINITY="granularity=fine,compact,0,0"
topology=$1
- bs=$2
- use_mkldnn=$3
- if [ $3 == "True" ]; then
+ layer_num=$2
+ bs=$3
+ use_mkldnn=$4
+ if [ $4 == "True" ]; then
thread=1
- log="logs/${topology}-mkldnn-${bs}.log"
- elif [ $3 == "False" ]; then
+ log="logs/${topology}-${layer_num}-mkldnn-${bs}.log"
+ elif [ $4 == "False" ]; then
thread=`nproc`
# each trainer_count use only 1 core to avoid conflict
export OMP_NUM_THREADS=1
export MKL_NUM_THREADS=1
- log="logs/${topology}-${thread}mklml-${bs}.log"
+ log="logs/${topology}-${layer_num}-${thread}mklml-${bs}.log"
else
echo "Wrong input $3, use True or False."
exit 0
fi
- args="batch_size=${bs}"
+ args="batch_size=${bs},layer_num=${layer_num}"
config="${topology}.py"
paddle train --job=time \
--config=$config \
@@ -40,12 +41,9 @@ if [ ! -d "logs" ]; then
mkdir logs
fi
-#========== mkldnn ==========#
-train vgg 64 True
-train vgg 128 True
-train vgg 256 True
-
-#========== mklml ===========#
-train vgg 64 False
-train vgg 128 False
-train vgg 256 False
+for use_mkldnn in True False; do
+ for batchsize in 64 128 256; do
+ train vgg 19 $batchsize $use_mkldnn
+ train resnet 50 $batchsize $use_mkldnn
+ done
+done
diff --git a/benchmark/paddle/image/vgg.py b/benchmark/paddle/image/vgg.py
index b8429975f5c83df6996e71478fe276b246e8b77b..420884ed8e1ae36a3f1772bfbe8323f3d0ea71e6 100644
--- a/benchmark/paddle/image/vgg.py
+++ b/benchmark/paddle/image/vgg.py
@@ -13,7 +13,7 @@ define_py_data_sources2(
settings(
batch_size=batch_size,
- learning_rate=0.01 / batch_size,
+ learning_rate=0.001 / batch_size,
learning_method=MomentumOptimizer(0.9),
regularization=L2Regularization(0.0005 * batch_size))
diff --git a/cmake/cblas.cmake b/cmake/cblas.cmake
index 8fdc382f0c1c453a01dba884a3dad216e1c3092c..b21fc43904d9aafe9f7d019dfbe5b1c0d3f9e2d6 100644
--- a/cmake/cblas.cmake
+++ b/cmake/cblas.cmake
@@ -1,17 +1,12 @@
# Find the CBlas and lapack libraries
#
-# It will search MKL, atlas, OpenBlas, reference-cblas in order.
+# It will search MKLML, atlas, OpenBlas, reference-cblas in order.
#
# If any cblas implementation found, the following variable will be set.
-# CBLAS_PROVIDER # one of MKL, ATLAS, OPENBLAS, REFERENCE
+# CBLAS_PROVIDER # one of MKLML, ATLAS, OPENBLAS, REFERENCE
# CBLAS_INC_DIR # the include directory for cblas.
# CBLAS_LIBS # a list of libraries should be linked by paddle.
# # Each library should be full path to object file.
-#
-# User should set one of MKL_ROOT, ATLAS_ROOT, OPENBLAS_ROOT, REFERENCE_CBLAS_ROOT
-# during cmake. If none of them set, it will try to find cblas implementation in
-# system paths.
-#
set(CBLAS_FOUND OFF)
@@ -30,44 +25,6 @@ if(WITH_MKLML AND MKLML_INC_DIR AND MKLML_LIB)
return()
endif()
-## Then find MKL.
-set(INTEL_MKL_ROOT "/opt/intel/mkl" CACHE PATH "Folder contains intel mkl libs")
-set(MKL_ROOT $ENV{MKL_ROOT} CACHE PATH "Folder contains env MKL")
-
-set(MKL_INCLUDE_SEARCH_PATHS
- ${MKL_ROOT}/include
- ${INTEL_MKL_ROOT}/include)
-set(MKL_LIB_SEARCH_PATHS
- ${MKL_ROOT}/lib
- ${MKL_ROOT}/lib/intel64
- ${INTEL_MKL_ROOT}/lib
- ${INTEL_MKL_ROOT}/lib/intel64)
-
-find_path(MKL_INC_DIR mkl.h PATHS
- ${MKL_INCLUDE_SEARCH_PATHS})
-find_path(MKL_LAPACK_INC_DIR mkl_lapacke.h PATHS
- ${MKL_INCLUDE_SEARCH_PATHS})
-find_library(MKL_CORE_LIB NAMES mkl_core PATHS
- ${MKL_LIB_SEARCH_PATHS})
-find_library(MKL_SEQUENTIAL_LIB NAMES mkl_sequential PATHS
- ${MKL_LIB_SEARCH_PATHS})
-find_library(MKL_INTEL_LP64 NAMES mkl_intel_lp64 PATHS
- ${MKL_LIB_SEARCH_PATHS})
-
-if(MKL_LAPACK_INC_DIR AND MKL_INC_DIR AND MKL_CORE_LIB AND MKL_SEQUENTIAL_LIB AND MKL_INTEL_LP64)
- set(CBLAS_FOUND ON)
- set(CBLAS_PROVIDER MKL)
- set(CBLAS_INC_DIR ${MKL_INC_DIR} ${MKL_LAPACK_INC_DIR})
- set(CBLAS_LIBRARIES ${MKL_INTEL_LP64} ${MKL_SEQUENTIAL_LIB} ${MKL_CORE_LIB})
-
- add_definitions(-DPADDLE_USE_MKL)
- add_definitions(-DLAPACK_FOUND)
-
- message(STATUS "Found MKL (include: ${MKL_INC_DIR}, library: ${CBLAS_LIBRARIES})")
- message(STATUS "Found lapack in MKL (include: ${MKL_LAPACK_INC_DIR})")
- return()
-endif()
-
## Then find atlas.
set(ATLAS_ROOT $ENV{ATLAS_ROOT} CACHE PATH "Folder contains Atlas")
set(ATLAS_INCLUDE_SEARCH_PATHS
diff --git a/cmake/external/mkldnn.cmake b/cmake/external/mkldnn.cmake
index 9686df00219001769d074ee815d9cc8db0258496..5a06825beb73e85d8a55b7b578b187bee2c4340c 100644
--- a/cmake/external/mkldnn.cmake
+++ b/cmake/external/mkldnn.cmake
@@ -46,16 +46,20 @@ IF(${CBLAS_PROVIDER} STREQUAL "MKLML")
MESSAGE(STATUS "Build MKLDNN with ${MKLDNN_MKLROOT}")
ENDIF()
+SET(MKLDNN_CFLAG "${CMAKE_C_FLAGS} -Wno-error=strict-overflow")
+SET(MKLDNN_CXXFLAG "${CMAKE_CXX_FLAGS} -Wno-error=strict-overflow")
ExternalProject_Add(
${MKLDNN_PROJECT}
${EXTERNAL_PROJECT_LOG_ARGS}
DEPENDS ${MKLDNN_DEPENDS}
GIT_REPOSITORY "https://github.com/01org/mkl-dnn.git"
- GIT_TAG "v0.10"
+ GIT_TAG "v0.11"
PREFIX ${MKLDNN_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${MKLDNN_INSTALL_DIR}
CMAKE_ARGS -DMKLROOT=${MKLDNN_MKLROOT}
+ CMAKE_ARGS -DCMAKE_C_FLAGS=${MKLDNN_CFLAG}
+ CMAKE_ARGS -DCMAKE_CXX_FLAGS=${MKLDNN_CXXFLAG}
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${MKLDNN_INSTALL_DIR}
-DMKLROOT:PATH=${MKLDNN_MKLROOT}
)
diff --git a/cmake/external/mklml.cmake b/cmake/external/mklml.cmake
index 74f3279831357c21038df133df0f5a432a6dfd20..20dbc32a738d982df2d3f035206279c82c8de264 100644
--- a/cmake/external/mklml.cmake
+++ b/cmake/external/mklml.cmake
@@ -27,8 +27,8 @@ ENDIF()
INCLUDE(ExternalProject)
SET(MKLML_PROJECT "extern_mklml")
-SET(MKLML_VER "mklml_lnx_2018.0.20170720")
-SET(MKLML_URL "https://github.com/01org/mkl-dnn/releases/download/v0.10/${MKLML_VER}.tgz")
+SET(MKLML_VER "mklml_lnx_2018.0.1.20171007")
+SET(MKLML_URL "https://github.com/01org/mkl-dnn/releases/download/v0.11/${MKLML_VER}.tgz")
SET(MKLML_SOURCE_DIR "${THIRD_PARTY_PATH}/mklml")
SET(MKLML_DOWNLOAD_DIR "${MKLML_SOURCE_DIR}/src/${MKLML_PROJECT}")
SET(MKLML_DST_DIR "mklml")
diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake
index 3f86e456cfbe55fe47e5b18e755e34829ebe9930..05d83ad58ef8485d36829e7aeede79f625cfdc43 100644
--- a/cmake/external/openblas.cmake
+++ b/cmake/external/openblas.cmake
@@ -86,7 +86,7 @@ IF(NOT ${CBLAS_FOUND})
UPDATE_COMMAND ""
CONFIGURE_COMMAND ""
)
-
+ SET(CBLAS_PROVIDER openblas)
IF(WITH_C_API)
INSTALL(DIRECTORY ${CBLAS_INC_DIR} DESTINATION third_party/openblas)
# Because libopenblas.a is a symbolic link of another library, thus need to
@@ -115,7 +115,7 @@ INCLUDE_DIRECTORIES(${CBLAS_INC_DIR})
# linear algebra libraries for cc_library(xxx SRCS xxx.c DEPS cblas)
SET(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/cblas_dummy.c)
FILE(WRITE ${dummyfile} "const char * dummy = \"${dummyfile}\";")
-IF(${CBLAS_PROVIDER} MATCHES MKL)
+IF("${CBLAS_PROVIDER}" STREQUAL "MKLML")
ADD_LIBRARY(cblas SHARED ${dummyfile})
ELSE()
ADD_LIBRARY(cblas STATIC ${dummyfile})
diff --git a/cmake/generic.cmake b/cmake/generic.cmake
index c311783aa3187678c31c27ddbbd074790ca444f3..b9c1dde97bc444d793d67ff622fd6b13c6435a9a 100644
--- a/cmake/generic.cmake
+++ b/cmake/generic.cmake
@@ -93,7 +93,7 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR})
if(NOT APPLE AND NOT ANDROID)
find_package(Threads REQUIRED)
link_libraries(${CMAKE_THREAD_LIBS_INIT})
- set(CMAKE_CXX_LINK_EXECUTABLE "${CMAKE_CXX_LINK_EXECUTABLE} -ldl -lrt")
+ set(CMAKE_CXX_LINK_EXECUTABLE "${CMAKE_CXX_LINK_EXECUTABLE} -pthread -ldl -lrt")
endif(NOT APPLE AND NOT ANDROID)
function(merge_static_libs TARGET_NAME)
diff --git a/doc/api/v2/config/layer.rst b/doc/api/v2/config/layer.rst
index d4e9d53e5c0955912a594fe8cd9cd41a4080a2d2..203506d7ab84e5a5be2232b077eac2d433a99766 100644
--- a/doc/api/v2/config/layer.rst
+++ b/doc/api/v2/config/layer.rst
@@ -82,6 +82,11 @@ maxout
.. autoclass:: paddle.v2.layer.maxout
:noindex:
+roi_pool
+--------
+.. autoclass:: paddle.v2.layer.roi_pool
+ :noindex:
+
Norm Layer
==========
diff --git a/doc/api/v2/data.rst b/doc/api/v2/data.rst
index fef87c4fbdb452771ecdb361c6eeae5b32bcee14..b56c7332cc284649c7e04328e51a7faa78593a39 100644
--- a/doc/api/v2/data.rst
+++ b/doc/api/v2/data.rst
@@ -2,112 +2,9 @@
Data Reader Interface and DataSets
==================================
+.. toctree::
+ :maxdepth: 1
-DataTypes
-=========
-
-.. automodule:: paddle.v2.data_type
- :members:
- :noindex:
-
-DataFeeder
-==========
-
-.. automodule:: paddle.v2.data_feeder
- :members:
- :noindex:
-
-Reader
-======
-
-.. automodule:: paddle.v2.reader
- :members:
- :noindex:
-
-.. automodule:: paddle.v2.reader.creator
- :members:
- :noindex:
-
-minibatch
-=========
-
-.. automodule:: paddle.v2.minibatch
- :members:
- :noindex:
-
-Dataset
-=======
-
-.. automodule:: paddle.v2.dataset
- :members:
- :noindex:
-
-mnist
-+++++
-
-.. automodule:: paddle.v2.dataset.mnist
- :members:
- :noindex:
-
-cifar
-+++++
-
-.. automodule:: paddle.v2.dataset.cifar
- :members:
- :noindex:
-
-conll05
-+++++++
-
-.. automodule:: paddle.v2.dataset.conll05
- :members: get_dict,get_embedding,test
- :noindex:
-
-imdb
-++++
-
-.. automodule:: paddle.v2.dataset.imdb
- :members:
- :noindex:
-
-imikolov
-++++++++
-
-.. automodule:: paddle.v2.dataset.imikolov
- :members:
- :noindex:
-
-movielens
-+++++++++
-
-.. automodule:: paddle.v2.dataset.movielens
- :members:
- :noindex:
-
-.. autoclass:: paddle.v2.dataset.movielens.MovieInfo
- :noindex:
-
-.. autoclass:: paddle.v2.dataset.movielens.UserInfo
- :noindex:
-
-sentiment
-+++++++++
-
-.. automodule:: paddle.v2.dataset.sentiment
- :members:
- :noindex:
-
-uci_housing
-+++++++++++
-
-.. automodule:: paddle.v2.dataset.uci_housing
- :members:
- :noindex:
-
-wmt14
-+++++
-
-.. automodule:: paddle.v2.dataset.wmt14
- :members:
- :noindex:
-
+ data/data_reader.rst
+ data/image.rst
+ data/dataset.rst
diff --git a/doc/api/v2/data/data_reader.rst b/doc/api/v2/data/data_reader.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2ccfec9c284877a7576e9751526b169a4ac78d8e
--- /dev/null
+++ b/doc/api/v2/data/data_reader.rst
@@ -0,0 +1,36 @@
+=====================
+Data Reader Interface
+=====================
+
+
+DataTypes
+=========
+
+.. automodule:: paddle.v2.data_type
+ :members:
+ :noindex:
+
+DataFeeder
+==========
+
+.. automodule:: paddle.v2.data_feeder
+ :members:
+ :noindex:
+
+Reader
+======
+
+.. automodule:: paddle.v2.reader
+ :members:
+ :noindex:
+
+.. automodule:: paddle.v2.reader.creator
+ :members:
+ :noindex:
+
+minibatch
+=========
+
+.. automodule:: paddle.v2.minibatch
+ :members:
+ :noindex:
diff --git a/doc/api/v2/data/dataset.rst b/doc/api/v2/data/dataset.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6a8ecc5bb1d855e0ded3719943ab3adb810de365
--- /dev/null
+++ b/doc/api/v2/data/dataset.rst
@@ -0,0 +1,75 @@
+Dataset
+=======
+
+.. automodule:: paddle.v2.dataset
+ :members:
+ :noindex:
+
+mnist
++++++
+
+.. automodule:: paddle.v2.dataset.mnist
+ :members:
+ :noindex:
+
+cifar
++++++
+
+.. automodule:: paddle.v2.dataset.cifar
+ :members:
+ :noindex:
+
+conll05
++++++++
+
+.. automodule:: paddle.v2.dataset.conll05
+ :members: get_dict,get_embedding,test
+ :noindex:
+
+imdb
+++++
+
+.. automodule:: paddle.v2.dataset.imdb
+ :members:
+ :noindex:
+
+imikolov
+++++++++
+
+.. automodule:: paddle.v2.dataset.imikolov
+ :members:
+ :noindex:
+
+movielens
++++++++++
+
+.. automodule:: paddle.v2.dataset.movielens
+ :members:
+ :noindex:
+
+.. autoclass:: paddle.v2.dataset.movielens.MovieInfo
+ :noindex:
+
+.. autoclass:: paddle.v2.dataset.movielens.UserInfo
+ :noindex:
+
+sentiment
++++++++++
+
+.. automodule:: paddle.v2.dataset.sentiment
+ :members:
+ :noindex:
+
+uci_housing
++++++++++++
+
+.. automodule:: paddle.v2.dataset.uci_housing
+ :members:
+ :noindex:
+
+wmt14
++++++
+
+.. automodule:: paddle.v2.dataset.wmt14
+ :members:
+ :noindex:
diff --git a/doc/api/v2/data/image.rst b/doc/api/v2/data/image.rst
new file mode 100644
index 0000000000000000000000000000000000000000..97651ffa6be56cf3ecaca2caca38a353fa5c1f49
--- /dev/null
+++ b/doc/api/v2/data/image.rst
@@ -0,0 +1,5 @@
+Image Interface
+===============
+
+.. automodule:: paddle.v2.image
+ :members:
diff --git a/doc/design/evaluator.md b/doc/design/evaluator.md
new file mode 100644
index 0000000000000000000000000000000000000000..a62d75ffef14962aec8c7587e172d78dfe0cb4be
--- /dev/null
+++ b/doc/design/evaluator.md
@@ -0,0 +1,58 @@
+## Evaluator Design
+
+### The Problem
+
+During training or serving, we provide an evaluation function to measure the model performance, e.g., accuracy or precision. In the operator-based framework design, data go through the network pipeline batch by batch, so inside an operator we can only calculate the metrics of one mini-batch. We need to provide a mechanism to aggregate the metrics over the N passes/batches the user wants.
+
+### Evaluator Design
+Currently, every operation is expressed in the graph. We divide the evaluator process into three steps.
+
+1. Initialize the metric state and add it into the block.
+
+2. Calculate the statistics of the metric state in every mini-batch. A single operator is only responsible for calculating the necessary statistics of one mini-batch. For example, the accuracy operator only processes one mini-batch each time it runs.
+
+
+3. Merge the mini-batch statistics to form the evaluation result over multiple mini-batches. For distributed or multi-GPU training, also aggregate the values from the different devices.
+
+### Implementation
+This design is illustrated with the Python API below.
+Each metric operator needs to calculate the per-batch metric statistics and return the batch-aware states; the Python side is responsible for accumulating the states over each pass.
+
+
+```python
+class Evaluator(object):
+ """
+ Evaluator Base class.
+ """
+ def __init__(self, name, **kwargs):
+ """
+ Different evaluators may have different metric states. E.g., Accuracy needs two variables, the total and correct sample counts.
+ AUC needs four variables: `true_positives`,
+ `true_negatives`, `false_positives` and `false_negatives`. So every evaluator should create the variables it needs and append them to main_program.
+
+ The initialization of an Evaluator is responsible for:
+ creating the metric states and appending them to the main_program.
+ """
+ pass
+
+ def _update_ops(self, input, label, **kwargs):
+ """
+ Add the mini-batch metric calculation operators to the main_program.
+ Add increment operators to accumulate the metric states.
+ """
+
+
+ def reset(self, executor, reset_program=None):
+ """
+ Reset the metric states at the beginning of each pass or at a user-specified batch interval.
+ Execute the reset_program to reset the states.
+ """
+
+
+ def eval(self, executor, eval_program=None):
+ """
+ Merge the mini-batch statistics to form the evaluation result for multiple mini-batches.
+ Execute the eval_program and return the result.
+ """
+ return eval_result
+```
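+
+For illustration, a concrete evaluator could look like the sketch below. The
+`_create_state` helper and the state names are hypothetical; only the
+three-step contract (create states, update them per mini-batch, merge them in
+`eval`) comes from the design above.
+
+```python
+class AccuracyEvaluator(Evaluator):
+    """
+    A hypothetical accuracy evaluator following the three-step contract.
+    """
+    def __init__(self, input, label, **kwargs):
+        super(AccuracyEvaluator, self).__init__("accuracy", **kwargs)
+        # Step 1: create metric states and append them to main_program.
+        self.total = self._create_state("total")      # samples seen so far
+        self.correct = self._create_state("correct")  # correctly predicted
+        # Step 2: add the per-mini-batch update operators.
+        self._update_ops(input, label)
+
+    def eval(self, executor, eval_program=None):
+        # Step 3: merge mini-batch statistics: accuracy = correct / total.
+        # A division op would be appended to eval_program and run by the
+        # executor; the details are left open by the design.
+        pass
+```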
diff --git a/doc/design/mkldnn/README.MD b/doc/design/mkldnn/README.MD
index fe8da907d9d45a2164031430ac5b7a3d5523967a..16236763a73770f3fe5eadf67645765d0456f875 100644
--- a/doc/design/mkldnn/README.MD
+++ b/doc/design/mkldnn/README.MD
@@ -15,6 +15,7 @@
- [CMake](#cmake)
- [Layers](#layers)
- [Activations](#activations)
+ - [Weights](#weights)
- [Unit Tests](#unit-tests)
- [Protobuf Messages](#protobuf-messages)
- [Python API](#python-api)
@@ -45,17 +46,23 @@ Figure 1. PaddlePaddle on IA.
### Layers
All MKL-DNN related C++ layers will be stored under
-`paddle/gserver/layers` following PaddlePaddle's directory structure, and every file name will start with *Mkldnn*.
+`paddle/gserver/layers` following PaddlePaddle's directory structure, and every file name will start with *MKLDNN*.
-All MKL-DNN layers will inherit from a parent class called `MkldnnLayer`, which itself inherits from PaddlePaddle's base class `Layer`.
+All MKL-DNN layers will inherit from a parent class called `MKLDNNLayer`, which itself inherits from PaddlePaddle's base class `Layer`.
+
+`MKLDNNLayer` will provide the necessary interfaces and functions and implement the basic logic of `forward` and `backward`. Some functions are declared pure virtual, so subclasses only need to implement them.
### Activations
-Since activation functions in PaddlePaddle are independent of the layer concept, a `MkldnnActivation.h` file will be added under `paddle/gserver/activations` to define the MKL-DNN interfaces, while the implementations stay in `ActivationFunction.cpp`.
+Since activation functions in PaddlePaddle are independent of the layer concept, `MKLDNNActivation.h` and `MKLDNNActivation.cpp` will be added under `paddle/gserver/activations` to define and use the MKL-DNN interfaces.
-### Unit Tests
-`test_Mkldnn.cpp` and `MkldnnTester.*` will be added under `paddle/gserver/test` for the MKL-DNN tests.
+### Weights
+Since some layers contain parameters, we will try to let the MKL-DNN parameters share memory with PaddlePaddle's `parameter`.
+Meanwhile, since the parameter layout used by MKL-DNN during training may differ from PaddlePaddle's default `nchw`, we will convert the layout at the beginning and the end of network training, so that the finally saved parameter format stays consistent with PaddlePaddle.
-For activation tests, we plan to add new test types directly to PaddlePaddle's existing test files.
+### Unit Tests
+`test_MKLDNN.cpp` and `MKLDNNTester.*` will be added under `paddle/gserver/test` for the MKL-DNN tests.
+Tests are divided into unit tests for each layer (or activation) and overall tests of simple networks.
+Each test compares the results computed on CPU by PaddlePaddle with the MKL-DNN results; a test passes if the difference is below a small threshold.
### Protobuf Messages
Depending on the needs of specific layers, necessary options may be added to `proto/ModelConfig.proto`.
@@ -82,7 +89,7 @@ if use_mkldnn
A `mkldnn` folder will be added under the `v1_api_demo` directory, containing demo scripts for MKL-DNN testing.
### Benchmarking
-We will consider adding some logic to `benchmark/paddle/image/run.sh` to add tests that use MKL-DNN.
+`benchmark/paddle/image/run_mkldnn.sh` will be added to benchmark the performance with MKL-DNN.
### Others
1. When MKL-DNN is used, CPU buffers will be aligned to 64 bytes.
@@ -94,14 +101,16 @@ if use_mkldnn
We summarize some points that need special attention:
-1. Use **deviceId_**. To add as few variables or functions as possible to the parent class Layer, we decided to reuse the existing `deviceId_` variable to distinguish layer attributes, defining `-2` as the device ID specific to `MkldnnLayer`.
+1. Use **deviceId_**. To add as few variables or functions as possible to the parent class Layer, we decided to reuse the existing `deviceId_` variable to distinguish layer attributes, defining `-2` as the device ID specific to `MKLDNNLayer`.
2. Override the parent class Layer's **init** function and set `deviceId_` to `-2`, indicating that the layer runs in the MKL-DNN environment.
-3. Create `MkldnnMatrix` to manage the memory functions, interfaces and format information that MKL-DNN will use.
-4. Create `MkldnnBase` to define classes and functions beyond those related to layer and memory, including `MkldnnStream` and `CpuEngine` used by MKL-DNN, and possibly `FPGAEngine` in the future.
-5. Add two `MkldnnMatrixPtr`s to **Argument**, named `mkldnnValue` and `mkldnnGrad`, to hold the memory buffers used by `MkldnnLayer`, plus a cvt function (to be renamed to something more suitable) that handles the memory conversion between the "CPU device" and the "MKL-DNN device".
-6. Add some logic to the `getOutput` function of the parent class `Layer` to check `deviceId` and, when the devices differ between MKL-DNN and CPU, do an early conversion, i.e. call `Argument`'s cvt function to unify the output onto the required device.
-7. Add a `use_mkldnn` flag to the existing `FLAGS` to select whether to use the MKL-DNN related functionality.
-8. About saving MKLDNN parameters. Since the MKLDNN parameter format may differ from PaddlePaddle's original format, the format information needs to be saved alongside the parameters. We plan to extend the `int32_t version` field in [Header](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/parameter/Parameter.h#L247). Its value has always been 0 in both v1 and v2, so we can exploit it and define an enum that covers all MKLDNN parameter formats, so that `MKLDNNLayer` can obtain the needed format information from the input parameters.
+3. Create `MKLDNNMatrix`, inheriting from both `CpuMatrix` and `mkldnn::memory`, to manage the memory functions, interfaces and format information that MKL-DNN will use.
+4. Create `MKLDNNBase` to define classes and functions beyond those related to layer and memory, including `MKLDNNStream` and `CPUEngine` used by MKL-DNN, and possibly `FPGAEngine` in the future.
+5. Each `MKLDNNLayer` has `inVal_`, `inGrad_`, `outVal_` and `outGrad_`, representing the input value, input gradient, output value and output gradient respectively. They hold the internal memory used by MKL-DNN. In addition, `MKLDNNMatrix` members prefixed with *ext* (for external memory) are defined, mainly to convert memory when the format does not match PaddlePaddle's default `nchw`. The necessary conversion functions are predefined in `MKLDNNLayer`, so each subclass only needs to call the predefined reset-buffer functions.
+6. Each `MKLDNNLayer`'s reset-buffer functions (covering the value and grad of both input and output) reset the internal and external memory according to their input arguments; the two may also be identical, meaning no conversion is needed. The guiding principle is that every `MKLDNNLayer` subclass only needs to work with the internal memory; all external conversion work is prepared in advance by the parent class's reset functions.
+7. In general, the external memory shares memory with PaddlePaddle's `value` and `grad` whenever possible. Moreover, the external output value and gradient of each `MKLDNNLayer` (i.e. `extOutVal_` and `extOutGrad_`) must share memory with `output_.value` and `output_.grad` respectively, because PaddlePaddle's activations use `output_.value` and `output_.grad` directly. If no external buffer is needed for conversion, the internal buffers share memory with them as well.
+8. If an MKL-DNN layer is followed by a CPU device, `output_.value` shares memory with `extOutVal_` and the data format is `nchw`, so the next CPU device receives correct data. Whenever a CPU device is involved, the external memory format is always `nchw` or `nc`.
+9. Since MKL-DNN output operations overwrite the data instead of accumulating onto it, the gradients of different layers need to be merged during `backward` when the network has branches. `MKLDNNLayer` implements the merge method: the input gradient of each branch is first stored temporarily in an `MKLDNNMatrix`, and the layer at the branching point sums them and puts the result into its own `output_.grad`. Overall, no subclass needs to care about branching; this is also handled in the parent class.
+10. Add a `use_mkldnn` flag to the existing `FLAGS` to select whether to use the MKL-DNN related functionality.
## References
diff --git a/doc/design/ops/images/LOD-and-shape-changes-during-decoding.jpg b/doc/design/ops/images/LOD-and-shape-changes-during-decoding.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8b0d90f7b9d8184b314b0ee4e521f53eb5f1b455
Binary files /dev/null and b/doc/design/ops/images/LOD-and-shape-changes-during-decoding.jpg differ
diff --git a/doc/design/ops/sequence_decoder.md b/doc/design/ops/sequence_decoder.md
new file mode 100644
index 0000000000000000000000000000000000000000..9007aae7a8355ed06c6720a921351f81b859c1fe
--- /dev/null
+++ b/doc/design/ops/sequence_decoder.md
@@ -0,0 +1,245 @@
+# Design: Sequence Decoder Generating LoDTensors
+In tasks such as machine translation and image to text,
+a [sequence decoder](https://github.com/PaddlePaddle/book/blob/develop/08.machine_translation/README.md) is necessary to generate sequences.
+
+This documentation describes how to implement the sequence decoder as an operator.
+
+## Beam Search based Decoder
+The [beam search algorithm](https://en.wikipedia.org/wiki/Beam_search) is necessary when generating sequences. It is a heuristic search algorithm
+that explores the paths by expanding the most promising nodes in a limited set.
+
+In the old version of PaddlePaddle, a C++ class `RecurrentGradientMachine` implements the general sequence decoder based on beam search.
+Due to its complexity, the implementation relies on a lot of special data structures,
+which makes it quite opaque and hard for users to customize.
+
+There are a lot of heuristic tricks in the sequence generation tasks,
+so the flexibility of the sequence decoder is very important to users.
+
+During PaddlePaddle's refactoring work,
+some new concepts were proposed, such as [LoDTensor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/lod_tensor.md) and [TensorArray](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/tensor_array.md), that can better support sequence usage,
+and they help make the implementation of the beam-search-based sequence decoder **more transparent and modular**.
+
+For example, the RNN states, candidate IDs and probabilities of beam search can be represented as `LoDTensors`;
+the selected candidates' IDs in each time step can be stored in a `TensorArray` and `Pack`ed into the translated sentences.
+
+## Changing LoD's absolute offset to relative offsets
+The current `LoDTensor` is designed to store levels of variable-length sequences.
+It stores several arrays of integers, each of which represents a level.
+
+The integers in each level represent the begin and end (not inclusive) offsets of a sequence **in the underlying tensor**;
+let's call this format the **absolute-offset LoD** for clarity.
+
+The absolute-offset LoD can quickly retrieve any sequence, but it fails to represent empty sequences; for example, consider the following two-level LoD:
+```python
+[[0, 3, 9]
+ [0, 2, 3, 3, 3, 9]]
+```
+The first level tells us that there are two sequences:
+- the first's offset is `[0, 3)`
+- the second's offset is `[3, 9)`
+
+while on the second level, there are several empty sequences that both begin and end at `3`.
+It is impossible to tell how many empty second-level sequences exist in the first-level sequences.
+
+There are many scenarios that rely on representing empty sequences,
+such as machine translation or image-to-text, where one instance has no translation or a prefix has an empty candidate set.
+
+So let's introduce another format of LoD;
+it stores **the offsets of the lower-level sequences** and is called the **relative-offset** LoD.
+
+For example, to represent the same sequences as in the above data
+
+```python
+[[0, 3, 5]
+ [0, 2, 3, 3, 3, 9]]
+```
+
+the first level represents that there are two sequences;
+their offsets in the second-level LoD are `[0, 3)` and `[3, 5)`.
+
+The second level is the same as in the absolute-offset example because the lowest level is a tensor.
+It is now easy to tell which first-level sequence each empty second-level sequence belongs to.
+
+The following demos are based on relative-offset LoD.
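+
+As a quick illustration, the plain-Python sketch below (not a PaddlePaddle API;
+`split_level` is a helper invented for this demo) shows how the relative-offset
+levels above nest, and how empty sequences survive:
+
+```python
+def split_level(offsets):
+    # turn an offset array into (begin, end) pairs, one pair per sequence
+    return [(offsets[i], offsets[i + 1]) for i in range(len(offsets) - 1)]
+
+lower = split_level([0, 2, 3, 3, 3, 9])  # five sequences over the tensor
+upper = split_level([0, 3, 5])           # two sequences over `lower`
+
+for begin, end in upper:
+    print(lower[begin:end])
+# [(0, 2), (2, 3), (3, 3)]
+# [(3, 3), (3, 9)]   <- empty sequences such as (3, 3) are preserved
+```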
+
+## Usage in a simple machine translation model
+Let's start from a simple machine translation model that is simplified from [machine translation chapter](https://github.com/PaddlePaddle/book/tree/develop/08.machine_translation) to draw a simple blueprint of what a sequence decoder can do and how to use it.
+
+The model has an encoder that learns the semantic vector from a sequence,
+and a decoder which uses the sequence decoder to generate new sentences.
+
+**Encoder**
+```python
+import paddle as pd
+
+dict_size = 8000
+source_dict_size = dict_size
+target_dict_size = dict_size
+word_vector_dim = 128
+encoder_dim = 128
+decoder_dim = 128
+beam_size = 5
+max_length = 120
+
+# encoder
+src_word_id = pd.data(
+ name='source_language_word',
+ type=pd.data.integer_value_sequence(source_dict_size))
+src_embedding = pd.embedding(size=source_dict_size, dim=word_vector_dim)
+
+src_word_vec = pd.lookup(src_embedding, src_word_id)
+
+encoder_out_seq = pd.gru(input=src_word_vec, size=encoder_dim)
+
+encoder_ctx = pd.last_seq(encoder_out_seq)
+# encoder_ctx_proj is the learned semantic vector
+encoder_ctx_proj = pd.fc(
+ encoder_ctx, size=decoder_dim, act=pd.activation.Tanh(), bias=None)
+```
+
+**Decoder**
+
+```python
+def generate():
+ decoder = pd.while_loop()
+ with decoder.step():
+ decoder_mem = decoder.memory(init=encoder_ctx) # mark the memory
+ generated_ids = decoder.memory() # TODO init to batch_size <s>s
+ generated_scores = decoder.memory() # TODO init to batch_size 1s or 0s
+
+ target_word = pd.lookup(trg_embedding, generated_ids)
+ # expand encoder_ctx's batch to fit target_word's lod
+ # for example
+ # decoder_mem.lod is
+ # [[0 1 3],
+ # [0 1 3 6]]
+ # its tensor content is [a1 a2 a3 a4 a5]
+ # which means there are 2 sentences to translate
+ # - the first sentence has 1 translation prefixes, the offsets are [0, 1)
+ # - the second sentence has 2 translation prefixes, the offsets are [1, 3) and [3, 6)
+ # the target_word.lod is
+ # [[0, 1, 6]
+ # [0, 2, 4, 7, 9, 12]]
+ # which means 2 sentences to translate, with 1 and 5 prefixes respectively
+ # the first prefix has 2 candidates
+ # the following has 2, 3, 2, 3 candidates
+ # the encoder_ctx_expanded's content will be
+ # [a1 a1 a2 a2 a3 a3 a3 a4 a4 a5 a5 a5]
+ encoder_ctx_expanded = pd.lod_expand(encoder_ctx, target_word)
+ decoder_input = pd.fc(
+ act=pd.activation.Linear(),
+ input=[target_word, encoder_ctx_expanded],
+ size=3 * decoder_dim)
+ gru_out, cur_mem = pd.gru_step(
+ decoder_input, mem=decoder_mem, size=decoder_dim)
+ scores = pd.fc(
+ gru_out,
+ size=trg_dic_size,
+ bias=None,
+ act=pd.activation.Softmax())
+ # K is a configurable beam-size parameter
+ topk_scores, topk_ids = pd.top_k(scores, K)
+ topk_generated_scores = pd.add_scalar(topk_scores, generated_scores)
+
+ selected_ids, selected_generation_scores = decoder.beam_search(
+ topk_ids, topk_generated_scores)
+
+ # update the states
+ decoder_mem.update(cur_mem) # tells how to update state
+ generated_ids.update(selected_ids)
+ generated_scores.update(selected_generation_scores)
+
+ decoder.output(selected_ids)
+ decoder.output(selected_generation_scores)
+
+translation_ids, translation_scores = decoder()
+```
+`decoder.beam_search` is an operator that, given the candidates and the scores of the translations including those candidates,
+returns the result of the beam search algorithm.
+
+In this way, users can customize anything on the inputs or outputs of beam search; for example, here are some ways to prune translation prefixes:
+
+1. make the corresponding elements in `topk_generated_scores` zero or some small values, so beam_search will discard those candidates.
+2. remove some specific candidates in `selected_ids`.
+3. get the final `translation_ids` and remove unwanted translation sequences from it.
+
+The implementation of the sequence decoder can reuse the C++ class [RNNAlgorithm](https://github.com/Superjom/Paddle/blob/68cac3c0f8451fe62a4cdf156747d6dc0ee000b3/paddle/operators/dynamic_recurrent_op.h#L30),
+so the python syntax is quite similar to an [RNN](https://github.com/Superjom/Paddle/blob/68cac3c0f8451fe62a4cdf156747d6dc0ee000b3/doc/design/block.md#blocks-with-for-and-rnnop).
+
+Both the inputs and outputs of the sequence decoder are two-level `LoDTensors`:
+
+- the first level represents `batch_size` of (source) sentences;
+- the second level represents the candidate ID sets for translation prefix.
+
+For example, with 3 source sentences to translate, they may have 2, 3 and 1 candidates respectively.
+
+Unlike in an RNN, in the sequence decoder the previous state and the current state have different LoDs and shapes,
+so a `lod_expand` operator is used to expand the LoD of the previous state to fit the current state (a small sketch follows the example below).
+
+For example, the previous state
+
+* LoD is `[0, 1, 3][0, 2, 5, 6]`
+* content of tensor is `a1 a2 b1 b2 b3 c1`
+
+the current state stored in `encoder_ctx_expanded`
+
+* LoD is `[0, 2, 7][0 3 5 8 9 11 11]`
+* the content is
+ - a1 a1 a1 (a1 has 3 candidates, so the state should be copied 3 times, once for each candidate)
+ - a2 a2
+ - b1 b1 b1
+ - b2
+ - b3 b3
+ - None (c1 has 0 candidates, so c1 is dropped)
+
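+A plain-Python sketch of what `lod_expand` does in this example (illustrative
+only; the real operator works on LoDTensors, not Python lists):
+
+```python
+def lod_expand_demo(states, counts):
+    # repeat each previous state once per candidate; states with a zero
+    # count (like c1 above) are dropped
+    out = []
+    for state, n in zip(states, counts):
+        out.extend([state] * n)
+    return out
+
+print(lod_expand_demo(["a1", "a2", "b1", "b2", "b3", "c1"],
+                      [3, 2, 3, 1, 2, 0]))
+# ['a1', 'a1', 'a1', 'a2', 'a2', 'b1', 'b1', 'b1', 'b2', 'b3', 'b3']
+```
+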
+Benefiting from the relative-offset LoD, empty candidate sets can be represented naturally.
+
+The states in each time step can be stored in a `TensorArray` and `Pack`ed into a final LoDTensor; the corresponding syntax is
+
+```python
+decoder.output(selected_ids)
+decoder.output(selected_generation_scores)
+```
+
+`selected_ids` holds the candidate ids for the prefixes;
+it will be `Pack`ed by `TensorArray` into a two-level `LoDTensor`,
+where the first level represents the source sequences and
+the second level represents the generated sequences.
+
+Packing the `selected_scores` yields a `LoDTensor` that stores the scores of each translation candidate.
+
+Packing the `selected_generation_scores` yields a `LoDTensor`, and each tail is the probability of the corresponding translation.
+
+## LoD and shape changes during decoding
+
+<p align="center">
+  <img src="./images/LOD-and-shape-changes-during-decoding.jpg"/>
+</p>
+According to the image above, the only phase that changes the LoD is beam search.
+
+## Beam search design
+The beam search algorithm will be implemented as a method of the sequence decoder; it has 3 inputs:
+
+1. `topk_ids`, top K candidate ids for each prefix.
+2. `topk_scores`, the corresponding scores for `topk_ids`.
+3. `generated_scores`, the scores of the prefixes.
+
+All of them are LoDTensors, so that the sequence affiliation is clear.
+Beam search will keep a beam for each prefix and select a smaller candidate set for each prefix.
+
+It will return three variables (a hypothetical call shape is sketched after the list):
+
+1. `selected_ids`, the final candidates that the beam search function selected for the next step.
+2. `selected_scores`, the scores for the candidates.
+3. `generated_scores`, the updated scores for each prefix (with the new candidates appended).
+
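+Put together, a hypothetical call shape implied by the two lists above (the
+argument and result names follow this design; the earlier demo used a
+simplified two-argument form, and the exact signature is not final):
+
+```python
+selected_ids, selected_scores, generated_scores = decoder.beam_search(
+    topk_ids,          # LoDTensor: top-K candidate ids per prefix
+    topk_scores,       # LoDTensor: scores corresponding to topk_ids
+    generated_scores)  # LoDTensor: accumulated score of each prefix
+```
+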
+## Introducing the LoD-based `Pack` and `Unpack` methods in `TensorArray`
+The `selected_ids`, `selected_scores` and `generated_scores` are LoDTensors,
+and they exist in each time step,
+so it is natural to store them in arrays.
+
+Currently, PaddlePaddle has a module called `TensorArray` which can store an array of tensors.
+The results of beam search are best stored in a `TensorArray`.
+
+The `Pack` and `UnPack` in `TensorArray` are used to pack the tensors in the array into one `LoDTensor` and to split a `LoDTensor` into an array of tensors, respectively.
+They need some extensions to support packing or unpacking an array of `LoDTensors`.
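+
+A rough sketch of the intended LoD-based `Pack` (the names and the `pack` call
+are illustrative only, not a final API): the per-step outputs collected during
+decoding become one two-level `LoDTensor`.
+
+```python
+# one LoDTensor of selected ids per decoding step, held by the TensorArray
+steps = [step_0_ids, step_1_ids, step_2_ids]
+# packed result: the first level indexes source sentences, the second level
+# the generated sequences; empty candidate sets remain representable
+translation_ids = tensor_array.pack(steps)
+```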
diff --git a/doc/faq/local/index_cn.rst b/doc/faq/local/index_cn.rst
index 0e939a2671ace8682c90cdc1c1bb2da1dda0d568..b331d9d36e6a279881c3b1a5586835e7186957fb 100644
--- a/doc/faq/local/index_cn.rst
+++ b/doc/faq/local/index_cn.rst
@@ -99,7 +99,7 @@ PaddlePaddle支持Sparse的训练,sparse训练需要训练特征是 :code:`spa
利用更多的计算资源
++++++++++++++++++
-利用更多的计算资源可以分为一下几个方式来进行\:
+利用更多的计算资源可以分为以下几个方式来进行\:
* 单机CPU训练
diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md
index c823d7e9fcd63dd7719ac1403952b03c2d2f03c0..6cfc9536f20e88571a9845a50be0341fe4d9f78b 100644
--- a/doc/howto/dev/new_op_cn.md
+++ b/doc/howto/dev/new_op_cn.md
@@ -214,7 +214,7 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs,
```cpp
 // if the Eigen unsupported module is used, define this macro before including the header files
- #define EIGEN_USE_GPU
+ // #define EIGEN_USE_GPU
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel);
diff --git a/paddle/capi/Matrix.cpp b/paddle/capi/Matrix.cpp
index 4547afaf1dc9af8bc7909a684db766fdd7b159c0..d5b55e1c95f248f551e6a0a3b39123169dd7784f 100644
--- a/paddle/capi/Matrix.cpp
+++ b/paddle/capi/Matrix.cpp
@@ -54,6 +54,46 @@ paddle_error paddle_matrix_set_row(paddle_matrix mat,
return kPD_NO_ERROR;
}
+PD_API paddle_error paddle_matrix_set_value(paddle_matrix mat,
+ paddle_real* value) {
+ if (mat == nullptr || value == nullptr) return kPD_NULLPTR;
+ auto ptr = cast(mat);
+ if (ptr->mat == nullptr) return kPD_NULLPTR;
+ paddle::real* buf = ptr->mat->getRowBuf(0);
+ size_t width = ptr->mat->getWidth();
+ size_t height = ptr->mat->getHeight();
+ if (ptr->mat->useGpu()) {
+#ifdef PADDLE_WITH_CUDA
+ hl_memcpy(buf, value, sizeof(paddle::real) * width * height);
+#else
+ return kPD_NOT_SUPPORTED;
+#endif
+ } else {
+ std::copy(value, value + width * height, buf);
+ }
+ return kPD_NO_ERROR;
+}
+
+PD_API paddle_error paddle_matrix_get_value(paddle_matrix mat,
+ paddle_real* result) {
+ if (mat == nullptr || result == nullptr) return kPD_NULLPTR;
+ auto ptr = cast(mat);
+ if (ptr->mat == nullptr) return kPD_NULLPTR;
+ paddle::real* buf = ptr->mat->getRowBuf(0);
+ size_t width = ptr->mat->getWidth();
+ size_t height = ptr->mat->getHeight();
+ if (ptr->mat->useGpu()) {
+#ifdef PADDLE_WITH_CUDA
+ hl_memcpy(result, buf, width * height * sizeof(paddle::real));
+#else
+ return kPD_NOT_SUPPORTED;
+#endif
+ } else {
+ std::copy(buf, buf + width * height, result);
+ }
+ return kPD_NO_ERROR;
+}
+
paddle_error paddle_matrix_get_row(paddle_matrix mat,
uint64_t rowID,
paddle_real** rawRowBuffer) {
@@ -81,6 +121,7 @@ paddle_error paddle_matrix_get_shape(paddle_matrix mat,
paddle_matrix paddle_matrix_create_sparse(
uint64_t height, uint64_t width, uint64_t nnz, bool isBinary, bool useGpu) {
+#ifndef PADDLE_MOBILE_INFERENCE
auto ptr = new paddle::capi::CMatrix();
ptr->mat = paddle::Matrix::createSparseMatrix(
height,
@@ -91,6 +132,9 @@ paddle_matrix paddle_matrix_create_sparse(
false,
useGpu);
return ptr;
+#else
+ return nullptr;
+#endif
}
paddle_error paddle_matrix_sparse_copy_from(paddle_matrix mat,
@@ -100,6 +144,7 @@ paddle_error paddle_matrix_sparse_copy_from(paddle_matrix mat,
uint64_t colSize,
float* valueArray,
uint64_t valueSize) {
+#ifndef PADDLE_MOBILE_INFERENCE
if (mat == nullptr) return kPD_NULLPTR;
auto ptr = cast(mat);
if (rowArray == nullptr || colArray == nullptr ||
@@ -120,4 +165,7 @@ paddle_error paddle_matrix_sparse_copy_from(paddle_matrix mat,
} else {
return kPD_NOT_SUPPORTED;
}
+#else
+ return kPD_NOT_SUPPORTED;
+#endif
}
diff --git a/paddle/capi/examples/model_inference/dense/main.c b/paddle/capi/examples/model_inference/dense/main.c
index 3e6bd5285058a297c4574631e2a5c033b83936e8..876af2aa7615c098d225b56ce2ea0b1529a6e3c6 100644
--- a/paddle/capi/examples/model_inference/dense/main.c
+++ b/paddle/capi/examples/model_inference/dense/main.c
@@ -27,18 +27,20 @@ int main() {
CHECK(paddle_arguments_resize(in_args, 1));
// Create input matrix.
- paddle_matrix mat = paddle_matrix_create(/* sample_num */ 1,
+ paddle_matrix mat = paddle_matrix_create(/* sample_num */ 10,
/* size */ 784,
/* useGPU */ false);
srand(time(0));
- paddle_real* array;
- // Get First row.
- CHECK(paddle_matrix_get_row(mat, 0, &array));
+  /* The example is plain C, so use a malloc'ed buffer instead of std::vector. */
+  paddle_real* input = (paddle_real*)malloc(784 * 10 * sizeof(paddle_real));
-  for (int i = 0; i < 784; ++i) {
-    array[i] = rand() / ((float)RAND_MAX);
+  for (int i = 0; i < 784 * 10; ++i) {
+    input[i] = rand() / ((float)RAND_MAX);
  }
+
+  // Set value for the input matrix; the data is copied, so the buffer
+  // can be freed right after this call.
+  CHECK(paddle_matrix_set_value(mat, input));
+  free(input);
CHECK(paddle_arguments_set_value(in_args, 0, mat));
@@ -51,11 +53,17 @@ int main() {
CHECK(paddle_arguments_get_value(out_args, 0, prob));
- CHECK(paddle_matrix_get_row(prob, 0, &array));
+  uint64_t height;
+  uint64_t width;
+
+  CHECK(paddle_matrix_get_shape(prob, &height, &width));
+  paddle_real* result =
+      (paddle_real*)malloc(height * width * sizeof(paddle_real));
+  CHECK(paddle_matrix_get_value(prob, result));
printf("Prob: ");
- for (int i = 0; i < 10; ++i) {
- printf("%.2f ", array[i]);
+  for (uint64_t i = 0; i < height * width; ++i) {
+    printf("%.2f ", result[i]);
  }
  printf("\n");
+  free(result);
diff --git a/paddle/capi/matrix.h b/paddle/capi/matrix.h
index f15f7f3bbbd1457617111f827d2182ae6b7d9fdb..01b8bad2ee9f528f8622346f43b9ff82225a7e73 100644
--- a/paddle/capi/matrix.h
+++ b/paddle/capi/matrix.h
@@ -48,6 +48,7 @@ PD_API paddle_matrix paddle_matrix_create(uint64_t height,
* @param isBinary is binary (either 1 or 0 in matrix) or not.
* @param useGpu is using GPU or not.
* @return paddle_matrix.
+ * @note Mobile inference does not support this interface.
*/
PD_API paddle_matrix paddle_matrix_create_sparse(
uint64_t height, uint64_t width, uint64_t nnz, bool isBinary, bool useGpu);
@@ -70,6 +71,16 @@ PD_API paddle_error paddle_matrix_set_row(paddle_matrix mat,
uint64_t rowID,
paddle_real* rowArray);
+/**
+ * @brief paddle_matrix_set_value Set the values of a matrix.
+ * @param mat Target Matrix
+ * @param value Data buffer, laid out row by row.
+ * @return paddle_error
+ * @note value should contain height * width elements to initialize the whole matrix
+ */
+PD_API paddle_error paddle_matrix_set_value(paddle_matrix mat,
+ paddle_real* value);
+
/**
* @brief PDMatGetRow Get raw row buffer from matrix
* @param [in] mat Target matrix
@@ -81,6 +92,15 @@ PD_API paddle_error paddle_matrix_get_row(paddle_matrix mat,
uint64_t rowID,
paddle_real** rawRowBuffer);
+/**
+ * @brief Copy data out of the matrix.
+ * @param [in] mat Target matrix
+ * @param [out] result pointer to the buffer that receives the matrix data
+ * @return paddle_error
+ * @note the space for result should be allocated before invoking this API
+ */
+PD_API paddle_error paddle_matrix_get_value(paddle_matrix mat,
+ paddle_real* result);
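+
+/*
+ * Example usage (a sketch; error handling and free() omitted). It relies only
+ * on the APIs declared in this header:
+ *
+ *   uint64_t h, w;
+ *   paddle_matrix_get_shape(mat, &h, &w);
+ *   paddle_real* buf = (paddle_real*)malloc(h * w * sizeof(paddle_real));
+ *   paddle_matrix_get_value(mat, buf);
+ */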
/**
* @brief PDMatCreateNone Create None Matrix
* @return
@@ -110,6 +130,7 @@ PD_API paddle_error paddle_matrix_get_shape(paddle_matrix mat,
* NULL if the matrix is binary.
* @param [in] valueSize length of value array. Zero if the matrix is binary.
* @return paddle_error
+ * @note Mobile inference does not support this interface.
*/
PD_API paddle_error paddle_matrix_sparse_copy_from(paddle_matrix mat,
int* rowArray,
diff --git a/paddle/capi/tests/test_Matrix.cpp b/paddle/capi/tests/test_Matrix.cpp
index 4bf9a9d6a9f9161561e9e5612edd2c93cab7ac5b..6940c28448a897cecd78b718fe720441086a5a99 100644
--- a/paddle/capi/tests/test_Matrix.cpp
+++ b/paddle/capi/tests/test_Matrix.cpp
@@ -45,3 +45,49 @@ TEST(CAPIMatrix, createNone) {
paddle_matrix mat = paddle_matrix_create_none();
ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_destroy(mat));
}
+
+TEST(CAPIMatrix, cpu_get_set_value) {
+ paddle_matrix mat = paddle_matrix_create(128, 32, false);
+  std::vector<paddle_real> sample;
+  std::vector<paddle_real> result;
+ sample.resize(128 * 32);
+ result.resize(128 * 32);
+ for (size_t i = 0; i < sample.size(); ++i) {
+ sample[i] = 1.0 / (i + 1.0);
+ }
+ ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_set_value(mat, sample.data()));
+ ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_value(mat, result.data()));
+ for (size_t i = 0; i < sample.size(); ++i) {
+ ASSERT_NEAR(sample[i], result[i], 1e-5);
+ }
+
+ uint64_t height, width;
+ ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_shape(mat, &height, &width));
+ ASSERT_EQ(128UL, height);
+ ASSERT_EQ(32UL, width);
+ ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_destroy(mat));
+}
+
+#ifdef PADDLE_WITH_CUDA
+TEST(CAPIMatrix, gpu_get_set_value) {
+ paddle_matrix mat = paddle_matrix_create(128, 32, true);
+  std::vector<paddle_real> sample;
+  std::vector<paddle_real> result;
+ sample.resize(128 * 32);
+ result.resize(128 * 32);
+ for (size_t i = 0; i < sample.size(); ++i) {
+ sample[i] = 1.0 / (i + 1.0);
+ }
+ ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_set_value(mat, sample.data()));
+ ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_value(mat, result.data()));
+ for (size_t i = 0; i < sample.size(); ++i) {
+ ASSERT_NEAR(sample[i], result[i], 1e-5);
+ }
+
+ uint64_t height, width;
+ ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_shape(mat, &height, &width));
+ ASSERT_EQ(128UL, height);
+ ASSERT_EQ(32UL, width);
+ ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_destroy(mat));
+}
+#endif
diff --git a/paddle/cuda/CMakeLists.txt b/paddle/cuda/CMakeLists.txt
index 0865b02c4f275f3d5069109917b05dff1393fc1e..efd1b7a73e1655f95eb83a5e2f59e82cbf7eba16 100755
--- a/paddle/cuda/CMakeLists.txt
+++ b/paddle/cuda/CMakeLists.txt
@@ -27,7 +27,9 @@ if(WITH_GPU)
set_source_files_properties(${CUDA_CXX_SOURCES}
PROPERTIES COMPILE_FLAGS "-D__NVCC__")
else()
+ if (NOT MOBILE_INFERENCE)
set(CUDA_CXX_SOURCES src/hl_warpctc_wrap.cc)
+ endif()
endif()
set(CUDA_CU_SOURCES
diff --git a/paddle/cuda/include/hl_cnn.h b/paddle/cuda/include/hl_cnn.h
index 6b56d9ec8d3daae96aaaa04ed79cb637331e2281..89c1f48edacbe0a4432957fe066481412db7e6e1 100644
--- a/paddle/cuda/include/hl_cnn.h
+++ b/paddle/cuda/include/hl_cnn.h
@@ -18,7 +18,7 @@ limitations under the License. */
#include "hl_base.h"
/**
- * @brief Maximum pool forward.
+ * @brief Maximum pool forward with Mask output.
*
* @param[in] frameCnt batch size of input image.
* @param[in] inputData input data.
@@ -35,7 +35,7 @@ limitations under the License. */
* @param[in] paddingW padding width.
* @param[out] tgtData output data.
* @param[in] tgtStride stride between output data samples.
- *
+ * @param[out] maskData the location indices of the selected max data (may be NULL).
*/
extern void hl_maxpool_forward(const int frameCnt,
const real* inputData,
@@ -51,7 +51,8 @@ extern void hl_maxpool_forward(const int frameCnt,
const int paddingH,
const int paddingW,
real* tgtData,
- const int tgtStride);
+ const int tgtStride,
+ real* maskData = NULL);
/**
* @brief Maximum pool backward.
diff --git a/paddle/cuda/include/stub/hl_cnn_stub.h b/paddle/cuda/include/stub/hl_cnn_stub.h
index a76dbf0b6578de0606702ad1af227fbf6e1cd62e..968ed4840ffb0623b57bd6e6d839973e109394de 100644
--- a/paddle/cuda/include/stub/hl_cnn_stub.h
+++ b/paddle/cuda/include/stub/hl_cnn_stub.h
@@ -31,7 +31,8 @@ inline void hl_maxpool_forward(const int frameCnt,
const int paddingH,
const int paddingW,
real* tgtData,
- const int tgtStride) {}
+ const int tgtStride,
+                               real* maskData) {}
inline void hl_maxpool_backward(const int frameCnt,
const real* inputData,
diff --git a/paddle/cuda/src/hl_cuda_cnn.cu b/paddle/cuda/src/hl_cuda_cnn.cu
index 58674febdc4a094c95ff03701e4586c32729847d..3699b1e8ae9d8f813439eaeaa760c4a9f6e100a0 100644
--- a/paddle/cuda/src/hl_cuda_cnn.cu
+++ b/paddle/cuda/src/hl_cuda_cnn.cu
@@ -31,7 +31,8 @@ __global__ void KeMaxPoolForward(const int nthreads,
const int offsetH,
const int offsetW,
real* tgtData,
- const int tgtStride) {
+ const int tgtStride,
+ real* maskData) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
int pw = index % pooledW;
@@ -45,16 +46,22 @@ __global__ void KeMaxPoolForward(const int nthreads,
hstart = max(hstart, 0);
wstart = max(wstart, 0);
real maxval = -FLT_MAX;
+ int max_index = -1;
inputData += (frameNum * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
- if (maxval < inputData[h * width + w])
- maxval = inputData[h * width + w];
+ if (maxval < inputData[h * width + w]) {
+ max_index = h * width + w;
+ maxval = inputData[max_index];
+ }
}
}
int tgtIndex =
index % (pooledW * pooledH * channels) + frameNum * tgtStride;
tgtData[tgtIndex] = maxval;
+ if (maskData != NULL) {
+ maskData[tgtIndex] = max_index;
+ }
}
}
@@ -72,7 +79,8 @@ void hl_maxpool_forward(const int frameCnt,
const int paddingH,
const int paddingW,
real* tgtData,
- const int tgtStride) {
+ const int tgtStride,
+ real* maskData) {
int num_kernels = pooledH * pooledW * channels * frameCnt;
int blocks = (num_kernels + 1024 - 1) / 1024;
dim3 threads(1024, 1);
@@ -92,7 +100,8 @@ void hl_maxpool_forward(const int frameCnt,
paddingH,
paddingW,
tgtData,
- tgtStride);
+ tgtStride,
+ maskData);
CHECK_SYNC("hl_maxpool_forward failed");
}
diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt
index 1afc5242081e7f7b12527a15d29421cebeb3d3b8..c08e844847737b1172f6453767cc7f5e7b1a2bda 100644
--- a/paddle/framework/CMakeLists.txt
+++ b/paddle/framework/CMakeLists.txt
@@ -38,9 +38,9 @@ py_proto_compile(framework_py_proto SRCS framework.proto)
add_custom_target(framework_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
add_dependencies(framework_py_proto framework_py_proto_init)
add_custom_command(TARGET framework_py_proto POST_BUILD
- COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SOURCE_DIR}/python/paddle/v2/framework/proto
- COMMAND cp *.py ${PADDLE_SOURCE_DIR}/python/paddle/v2/framework/proto/
- COMMENT "Copy generated python proto into directory paddle/v2/framework/proto."
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/proto
+ COMMAND cp *.py ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/proto/
+ COMMENT "Copy generated python proto into directory paddle/v2/fluid/proto."
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
cc_library(backward SRCS backward.cc DEPS net_op)
diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc
index ed94540c268e5ed990c1d92859c6a2093c052868..b3b9c45ded95ce2e735b8898d47760956dcacdce 100644
--- a/paddle/framework/backward.cc
+++ b/paddle/framework/backward.cc
@@ -321,8 +321,6 @@ static void CreateGradVarInBlock(
auto* param = block_desc->FindVarRecursive(pname);
auto* grad = block_desc->FindVar(arg);
if (param == nullptr) {
- LOG(WARNING) << "Cannot find forward variable of " << arg
- << ". Set its gradient to FP32";
grad->SetDataType(DataType::FP32);
} else {
grad->SetDataType(param->GetDataType());
@@ -379,6 +377,12 @@ std::vector<std::unique_ptr<OpDescBind>> MakeOpGrad(
return grad_op_descs;
}
+static BlockDescBind* CreateStepBlock(
+ ProgramDescBind& program_desc,
+    std::unordered_set<std::string>* no_grad_vars,
+    std::unordered_map<std::string, std::string>* grad_to_var,
+ int step_block_idx);
+
std::vector<std::unique_ptr<OpDescBind>> MakeBlockBackward(
    ProgramDescBind& program_desc, int block_idx,
    std::unordered_set<std::string>* no_grad_vars,
@@ -394,13 +398,13 @@ std::vector> MakeBlockBackward(
if ((*it)->Type() == "recurrent") {
int step_block_idx = (*it)->GetBlockAttr("step_block");
- auto backward_block_op_descs = MakeBlockBackward(
- program_desc, step_block_idx, no_grad_vars, grad_to_var);
+ BlockDescBind* backward_block = CreateStepBlock(
+ program_desc, no_grad_vars, grad_to_var, step_block_idx);
+ op_grads = MakeOpGrad(*it, no_grad_vars, grad_to_var, {backward_block});
+ } else if ((*it)->Type() == "conditional_block") {
BlockDescBind* backward_block =
- program_desc.AppendBlock(*program_desc.MutableBlock(step_block_idx));
- for (auto& ptr : backward_block_op_descs) {
- backward_block->AppendAllocatedOp(std::move(ptr));
- }
+ CreateStepBlock(program_desc, no_grad_vars, grad_to_var,
+ (*it)->GetBlockAttr("block"));
op_grads = MakeOpGrad(*it, no_grad_vars, grad_to_var, {backward_block});
} else {
op_grads = MakeOpGrad(*it, no_grad_vars, grad_to_var);
@@ -408,6 +412,11 @@ std::vector<std::unique_ptr<OpDescBind>> MakeBlockBackward(
for (const auto& desc : op_grads) {
for (const std::string& out_name : desc->OutputArgumentNames()) {
+ if (out_name.find("@GRAD") == std::string::npos) {
+          // Not all outputs of a backward operator are gradients; only
+          // gradients need to be summed here, so skip non-gradient variables.
+ continue;
+ }
dup_out_ops[out_name].emplace_back(grad_desc_idx);
}
++grad_desc_idx;
@@ -446,6 +455,21 @@ std::vector<std::unique_ptr<OpDescBind>> MakeBlockBackward(
return backward_descs;
}
+static BlockDescBind* CreateStepBlock(
+ ProgramDescBind& program_desc,
+    std::unordered_set<std::string>* no_grad_vars,
+    std::unordered_map<std::string, std::string>* grad_to_var,
+ int step_block_idx) {
+ auto backward_block_op_descs = MakeBlockBackward(program_desc, step_block_idx,
+ no_grad_vars, grad_to_var);
+ BlockDescBind* backward_block =
+ program_desc.AppendBlock(*program_desc.MutableBlock(step_block_idx));
+ for (auto& ptr : backward_block_op_descs) {
+    backward_block->AppendAllocatedOp(std::move(ptr));
+ }
+ return backward_block;
+}
+
ParamGradInfoMap AppendBackward(
ProgramDescBind& program_desc, const VarDescBind& target,
    const std::unordered_set<std::string>& no_grad_vars) {
diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc
index 4e8d630c2634682ff63b38182108eadebb5c7ff9..d485cdf6109274377ad0057223bdd8401e964aa7 100644
--- a/paddle/framework/backward_test.cc
+++ b/paddle/framework/backward_test.cc
@@ -21,7 +21,7 @@
#include "paddle/framework/var_desc.h"
#include "paddle/operators/net_op.h"
-USE_OP(fill_constant);
+USE_NO_KERNEL_OP(fill_constant);
namespace paddle {
namespace framework {
diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc
index 9e3d597f3a2c84623a1ce9e4b6f4b956cffde211..11764810e1d40e5e6eb3cd0d8e9b4b63a79855b4 100644
--- a/paddle/framework/block_desc.cc
+++ b/paddle/framework/block_desc.cc
@@ -50,6 +50,15 @@ VarDescBind *BlockDescBind::FindVarRecursive(const std::string &name) const {
return it->second.get();
}
+VarDescBind *BlockDescBind::FindRecursiveOrCreateVar(
+ const std::string &name_bytes) {
+ VarDescBind *res = FindVarRecursive(name_bytes);
+ if (res == nullptr) {
+ res = Var(name_bytes);
+ }
+ return res;
+}
+
bool BlockDescBind::HasVarRecursive(const std::string &name) const {
return FindVarRecursive(name) != nullptr;
}
diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h
index 26adf6a20ff09483b84f479db08efcf402135053..8e967e5378eb47a7869efb59cc96a271f1cbb9a1 100644
--- a/paddle/framework/block_desc.h
+++ b/paddle/framework/block_desc.h
@@ -58,6 +58,8 @@ class BlockDescBind {
VarDescBind *FindVarRecursive(const std::string &name_bytes) const;
+ VarDescBind *FindRecursiveOrCreateVar(const std::string &name_bytes);
+
bool HasVarRecursive(const std::string &var_name) const;
  std::set<std::string> LocalVarNames() const {
diff --git a/paddle/framework/data_type.h b/paddle/framework/data_type.h
index c5ae7b185460c8b0d68ba38bb9db9bd3d3fb14ea..3ec88d7a72c3339bf5e7d0ca3957a3f608f039b7 100644
--- a/paddle/framework/data_type.h
+++ b/paddle/framework/data_type.h
@@ -34,6 +34,21 @@ inline DataType ToDataType(std::type_index type) {
}
}
+inline std::type_index ToTypeIndex(DataType type) {
+ switch (type) {
+ case DataType::FP32:
+ return typeid(float);
+ case DataType::FP64:
+ return typeid(double);
+ case DataType::INT32:
+ return typeid(int);
+ case DataType::INT64:
+ return typeid(int64_t);
+ default:
+ PADDLE_THROW("Not support type %d", type);
+ }
+}
+
+template <typename Visitor>
inline void VisitDataType(DataType type, Visitor visitor) {
switch (type) {
diff --git a/paddle/framework/ddim.cc b/paddle/framework/ddim.cc
index 10c785e04c4fa2192f9c95513009cf7d8c123868..53b899a23997b71e723a298ec360a4e018d89878 100644
--- a/paddle/framework/ddim.cc
+++ b/paddle/framework/ddim.cc
@@ -79,6 +79,13 @@ DDim make_ddim(const std::vector<int64_t>& dims) {
return result;
}
+DDim make_ddim(const std::vector<int>& dims) {
+  std::vector<int64_t> res(dims.size());
+  std::transform(dims.begin(), dims.end(), res.begin(),
+                 [](int d) { return static_cast<int64_t>(d); });
+ return make_ddim(res);
+}
+
/// @cond HIDDEN
// XXX For some reason, putting this in an anonymous namespace causes errors
class DynamicMutableIndexer : public boost::static_visitor<int64_t&> {
diff --git a/paddle/framework/ddim.h b/paddle/framework/ddim.h
index aa773868ab4b68acbc46dfa2cd2569d8b8b7789d..4ca5e49566b7ec006eba80f3f9808bacb1ff2615 100644
--- a/paddle/framework/ddim.h
+++ b/paddle/framework/ddim.h
@@ -81,6 +81,8 @@ struct DDim {
*/
DDim make_ddim(const std::vector<int64_t>& dims);
+DDim make_ddim(const std::vector<int>& dims);
+
/**
* \brief Make a DDim from an initializer list
*
diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc
index e7cba9e702ce0f96a9680169f0593130df2fd096..39c8def82e1ebb10a0e357a648af760099020c32 100644
--- a/paddle/framework/op_desc.cc
+++ b/paddle/framework/op_desc.cc
@@ -357,7 +357,8 @@ void OpDescBind::InferVarType(BlockDescBind *block) const {
"LOD_TENSOR";
for (auto &out_pair : this->outputs_) {
for (auto &out_var_name : out_pair.second) {
- block->Var(out_var_name)->SetType(VarDesc::LOD_TENSOR);
+ block->FindRecursiveOrCreateVar(out_var_name)
+ ->SetType(VarDesc::LOD_TENSOR);
}
}
}
diff --git a/paddle/framework/scope.cc b/paddle/framework/scope.cc
index fb2c69105627f663ddcce07d31526c9e4278e863..9428b8a07ea0af005f6e960ddaa02da624ad9d97 100644
--- a/paddle/framework/scope.cc
+++ b/paddle/framework/scope.cc
@@ -98,5 +98,23 @@ void Scope::DeleteScope(Scope* scope) {
delete scope;
}
+void Scope::Rename(const std::string& origin_name,
+ const std::string& new_name) const {
+ auto origin_it = vars_.find(origin_name);
+ PADDLE_ENFORCE(origin_it != vars_.end(),
+ "Cannot find original variable with name %s", origin_name);
+ auto new_it = vars_.find(new_name);
+ PADDLE_ENFORCE(new_it == vars_.end(),
+ "The variable with name %s is already in the scope", new_name);
+ vars_[new_name] = origin_it->second;
+ vars_.erase(origin_it);
+}
+
+std::string Scope::Rename(const std::string& origin_name) const {
+ auto var_name = string::Sprintf("%p.%d", this, vars_.size());
+ Rename(origin_name, var_name);
+ return var_name;
+}
+
} // namespace framework
} // namespace paddle
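Scope::Rename moves the map entry under a new key without touching the Variable it points to; the one-argument overload derives a unique name from the scope's address and current variable count. A standalone sketch of the rename-in-map idiom (stand-in int values instead of Variable pointers):

    #include <cassert>
    #include <map>
    #include <string>

    // Minimal stand-in for Scope::Rename: move a value under a new key,
    // enforcing the same preconditions the PADDLE_ENFORCE checks express.
    void Rename(std::map<std::string, int>& vars, const std::string& from,
                const std::string& to) {
      auto it = vars.find(from);
      assert(it != vars.end());             // original variable must exist
      assert(vars.find(to) == vars.end());  // target name must be free
      vars[to] = it->second;
      vars.erase(it);
    }

    int main() {
      std::map<std::string, int> vars{{"x", 42}};
      Rename(vars, "x", "x@RENAMED");
      assert(vars.count("x") == 0 && vars.at("x@RENAMED") == 42);
    }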
diff --git a/paddle/framework/scope.h b/paddle/framework/scope.h
index fb660949394149ebf2c6172a0ac3f4c7594f4286..c2aafb6ad825f9bd9ffef754923a15afdeaa8e5c 100644
--- a/paddle/framework/scope.h
+++ b/paddle/framework/scope.h
@@ -68,11 +68,18 @@ class Scope {
// enumerate all the variables current contains.
std::vector<std::string> GetAllNames(bool recursive = false) const;
+ // Rename variable to a new name
+ void Rename(const std::string& origin_name,
+ const std::string& new_name) const;
+
+ // Rename variable to a new name and return the new name
+ std::string Rename(const std::string& origin_name) const;
+
private:
// Call Scope::NewScope for a sub-scope.
explicit Scope(Scope const* parent) : parent_(parent) {}
- std::unordered_map<std::string, Variable*> vars_;
+ mutable std::unordered_map<std::string, Variable*> vars_;
mutable std::list<Scope*> kids_;
Scope const* parent_{nullptr};
diff --git a/paddle/framework/var_type.h b/paddle/framework/var_type.h
index d060196bb2c478b776851288cb71a1880d60660d..0f19870bec3e69d07278507cc556a86bbd25d12d 100644
--- a/paddle/framework/var_type.h
+++ b/paddle/framework/var_type.h
@@ -27,10 +27,32 @@ inline VarDesc::VarType ToVarType(std::type_index type) {
return VarDesc_VarType_LOD_RANK_TABLE;
} else if (type.hash_code() == typeid(LoDTensorArray).hash_code()) {
return VarDesc_VarType_LOD_TENSOR_ARRAY;
+ } else if (type.hash_code() == typeid(SelectedRows).hash_code()) {
+ return VarDesc_VarType_SELECTED_ROWS;
} else {
PADDLE_THROW("ToVarType:Unsupported type %s", type.name());
}
}
+template <typename Visitor>
+inline void VisitVarType(const Variable& var, Visitor visitor) {
+ switch (ToVarType(var.Type())) {
+ case VarDesc_VarType_LOD_TENSOR:
+ visitor(var.Get<LoDTensor>());
+ return;
+ case VarDesc_VarType_LOD_RANK_TABLE:
+ visitor(var.Get<LoDRankTable>());
+ return;
+ case VarDesc_VarType_LOD_TENSOR_ARRAY:
+ visitor(var.Get<LoDTensorArray>());
+ return;
+ case VarDesc_VarType_SELECTED_ROWS:
+ visitor(var.Get<SelectedRows>());
+ return;
+ default:
+ PADDLE_THROW("Not supported visit type, %d", ToVarType(var.Type()));
+ }
+}
+
} // namespace framework
} // namespace paddle
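VisitVarType gives generic code single-call dispatch on a variable's runtime type, the same pattern std::visit provides for std::variant. An illustrative standalone analogue with stand-in payload types (not Paddle's Variable):

    #include <iostream>
    #include <string>
    #include <variant>
    #include <vector>

    // Stand-in: a variable holding one of several tensor-like types.
    using Variable = std::variant<std::vector<float>, std::string>;

    struct PrintVisitor {
      void operator()(const std::vector<float>& t) const {
        std::cout << "tensor-like, " << t.size() << " elements\n";
      }
      void operator()(const std::string& s) const {
        std::cout << "string, length " << s.size() << "\n";
      }
    };

    int main() {
      Variable var = std::vector<float>{1.f, 2.f, 3.f};
      std::visit(PrintVisitor{}, var);  // dispatches on the held type
      var = std::string("rank table");
      std::visit(PrintVisitor{}, var);
    }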
diff --git a/paddle/function/CMakeLists.txt b/paddle/function/CMakeLists.txt
index 4fd72d64a90ae6f16dd1499ceb7fba6e40fe4cea..9b2779b42cad324253dadf27dbff20fd8e8c8e16 100644
--- a/paddle/function/CMakeLists.txt
+++ b/paddle/function/CMakeLists.txt
@@ -45,6 +45,7 @@ if(WITH_GPU)
add_simple_unittest(BlockExpandOpTest)
add_simple_unittest(CropOpTest)
add_simple_unittest(SwitchOpTest)
+ add_simple_unittest(ScaleSubRegionOpTest)
endif()
add_simple_unittest(Im2ColTest)
diff --git a/paddle/function/ConvOp.h b/paddle/function/ConvOp.h
index baf78bc6c88d0d294f4457b81c52b22e425d9fdb..062ea25a11470dd9ecdafb278dee9a2e0979f00b 100644
--- a/paddle/function/ConvOp.h
+++ b/paddle/function/ConvOp.h
@@ -61,6 +61,7 @@ public:
// function arguments
strides_ = config.get<std::vector<size_t>>("strides");
paddings_ = config.get<std::vector<size_t>>("paddings");
+ dilations_ = config.get<std::vector<size_t>>("dilations");
groups_ = config.get<size_t>("groups");
// number of inputs and outputs
@@ -118,6 +119,7 @@ protected:
std::vector<size_t> strides_;
std::vector<size_t> paddings_;
+ std::vector<size_t> dilations_;
/// Group size, refer to grouped convolution in
/// Alex Krizhevsky's paper: when group=2, the first half of the
@@ -133,6 +135,10 @@ protected:
inline int paddingW() const { return paddings_[1]; }
+ inline int dilationH() const { return dilations_[0]; }
+
+ inline int dilationW() const { return dilations_[1]; }
+
// A temporary memory in convolution calculation.
MemoryHandlePtr memory_;
diff --git a/paddle/function/ConvOpTest.h b/paddle/function/ConvOpTest.h
index cb02a96d0dbef6f64fd9e7576179572e68bf5513..d8d3c792df236ab0fd412b0cf77f275355848627 100644
--- a/paddle/function/ConvOpTest.h
+++ b/paddle/function/ConvOpTest.h
@@ -79,45 +79,59 @@ void Convolution(const std::string& conv1,
if (outputChannels < inputChannels) continue;
for (size_t stride : {1, 2}) {
for (size_t padding : {0, 1}) {
- if (padding >= filterSize) break;
+ for (size_t dilation : {1, 3}) {
+ if (padding >= filterSize) break;
+ size_t filterS = (filterSize - 1) * dilation + 1;
- // NNPACK only supports stride = 1 if batchSize > 1
- if ((conv1 == "NNPACKConv-CPU" || conv2 == "NNPACKConv-CPU") &&
- batchSize > 1 && stride > 1)
- break;
+ if (inputSize + 2 * padding < filterS) break;
- size_t outputSize =
- (inputSize - filterSize + 2 * padding + stride) / stride;
- VLOG(3) << " batchSize=" << batchSize
- << " inputChannels=" << inputChannels
- << " inputHeight=" << inputSize
- << " inputWidth=" << inputSize
- << " outputChannels=" << outputChannels
- << " filterHeight=" << filterSize
- << " filterWidth=" << filterSize
- << " outputHeight=" << outputSize
- << " outputWidth=" << outputSize << " stride=" << stride
- << " padding=" << padding;
+ if ((conv1 == "NaiveConv-CPU" || conv2 == "NaiveConv-CPU" ||
+ conv1 == "NNPACKConv-CPU" ||
+ conv2 == "NNPACKConv-CPU") &&
+ dilation > 1)
+ break;
- std::vector<size_t> paddings = {padding, padding};
- std::vector<size_t> strides = {stride, stride};
- Compare2Function<DType1, DType2> test(
- conv1,
- conv2,
- FuncConfig()
- .set("paddings", paddings)
- .set("strides", strides)
- .set("groups", (size_t)1)
- .set("algo", (std::string) "auto"));
+ // NNPACK only supports stride = 1 if batchSize > 1
+ if ((conv1 == "NNPACKConv-CPU" ||
+ conv2 == "NNPACKConv-CPU") &&
+ batchSize > 1 && stride > 1)
+ break;
- TensorShape input{
- batchSize, inputChannels, inputSize, inputSize};
- TensorShape filter{
- outputChannels, inputChannels, filterSize, filterSize};
- TensorShape output{
- batchSize, outputChannels, outputSize, outputSize};
+ size_t outputSize =
+ (inputSize - filterS + 2 * padding + stride) / stride;
+ VLOG(3) << " batchSize=" << batchSize
+ << " inputChannels=" << inputChannels
+ << " inputHeight=" << inputSize
+ << " inputWidth=" << inputSize
+ << " outputChannels=" << outputChannels
+ << " filterHeight=" << filterSize
+ << " filterWidth=" << filterSize
+ << " outputHeight=" << outputSize
+ << " outputWidth=" << outputSize
+ << " stride=" << stride << " padding=" << padding;
- function(test, input, filter, output);
+ std::vector<size_t> paddings = {padding, padding};
+ std::vector<size_t> strides = {stride, stride};
+ std::vector<size_t> dilations = {dilation, dilation};
+ Compare2Function<DType1, DType2> test(
+ conv1,
+ conv2,
+ FuncConfig()
+ .set("paddings", paddings)
+ .set("strides", strides)
+ .set("dilations", dilations)
+ .set("groups", (size_t)1)
+ .set("algo", (std::string) "auto"));
+
+ TensorShape input{
+ batchSize, inputChannels, inputSize, inputSize};
+ TensorShape filter{
+ outputChannels, inputChannels, filterSize, filterSize};
+ TensorShape output{
+ batchSize, outputChannels, outputSize, outputSize};
+
+ function(test, input, filter, output);
+ }
}
}
}
@@ -144,6 +158,7 @@ void Convolution2(const std::string& conv1,
for (size_t outputChannels : {7}) {
size_t stride = 1;
size_t padding = 0;
+ size_t dilation = 1;
size_t outputHeight =
(inputHeight - filterHeight + 2 * padding + stride) /
stride;
@@ -162,6 +177,7 @@ void Convolution2(const std::string& conv1,
std::vector<size_t> paddings = {padding, padding};
std::vector<size_t> strides = {stride, stride};
+ std::vector<size_t> dilations = {dilation, dilation};
Compare2Function<DType1, DType2> test(
conv1,
conv2,
@@ -169,6 +185,7 @@ void Convolution2(const std::string& conv1,
.set("paddings", paddings)
.set("strides", strides)
.set("groups", (size_t)1)
+ .set("dilations", dilations)
.set("algo", (std::string) "auto"));
TensorShape input{
@@ -223,6 +240,7 @@ void DepthwiseConvolution(const std::string& conv1,
std::vector<size_t> paddings = {padding, padding};
std::vector<size_t> strides = {stride, stride};
+ std::vector<size_t> dilations = {1, 1};
size_t groups = inputChannels;
Compare2Function<DType1, DType2> test(
conv1,
@@ -231,6 +249,7 @@ void DepthwiseConvolution(const std::string& conv1,
.set("paddings", paddings)
.set("strides", strides)
.set("groups", groups)
+ .set("dilations", dilations)
.set("algo", (std::string) "auto"));
TensorShape input{
diff --git a/paddle/function/FunctionTest.h b/paddle/function/FunctionTest.h
index ba446bf92da264fafa1fb47a2c30da9cb13176ce..370940532ef40335be54a3e6467de0409e923ec4 100644
--- a/paddle/function/FunctionTest.h
+++ b/paddle/function/FunctionTest.h
@@ -110,6 +110,7 @@ public:
function2_(FunctionBase::funcRegistrar_.createByType(name2)) {
function1_->init(config);
function2_->init(config);
+ initArgsCallback_ = nullptr;
}
~Compare2Function() {}
@@ -170,6 +171,10 @@ public:
*seq2_));
}
+ void registerInitCallback(std::function<void(BufferArg&, size_t)> callback) {
+ initArgsCallback_ = callback;
+ }
+
// output need only contains shape, do not contains data.
void addOutputs(const BufferArg& output, ArgType argType = ASSIGN_TO) {
size_t size =
@@ -340,6 +345,10 @@ protected:
initArg(*func1Inputs_[i]);
}
+ if (initArgsCallback_ != nullptr) {
+ initArgsCallback_(*func1Inputs_[i], i);
+ }
+
copyArg_(*func1Inputs_[i], *func2Inputs_[i]);
}
}
@@ -386,6 +395,7 @@ protected:
std::shared_ptr<SequenceIdArg> seq1_;
std::shared_ptr<SequenceIdArg> seq2_;
test::CopyArgument<DType1, DType2> copyArg_;
+ std::function<void(BufferArg&, size_t)> initArgsCallback_;
};
class CpuGpuFuncCompare
diff --git a/paddle/function/GemmConvOp.cpp b/paddle/function/GemmConvOp.cpp
index bdb56ddac38b91d756fc6f31282f29c0489fd660..8d34eee886a6202691e5dec2ab62e7c5b0ac7fb1 100644
--- a/paddle/function/GemmConvOp.cpp
+++ b/paddle/function/GemmConvOp.cpp
@@ -100,7 +100,9 @@ public:
strideH(),
strideW(),
paddingH(),
- paddingW());
+ paddingW(),
+ dilationH(),
+ dilationW());
} else {
colData = inputData + g * inputOffset;
}
@@ -223,7 +225,9 @@ public:
strideH(),
strideW(),
paddingH(),
- paddingW());
+ paddingW(),
+ dilationH(),
+ dilationW());
}
}
inputGrad += inputChannels * inputHeight * inputWidth;
@@ -310,7 +314,9 @@ public:
strideH(),
strideW(),
paddingH(),
- paddingW());
+ paddingW(),
+ dilationH(),
+ dilationW());
} else {
colData = inputData + g * inputOffset;
}
diff --git a/paddle/function/Im2Col.h b/paddle/function/Im2Col.h
index 1e0cff436ff60d5a029e89657d00af2b0bf8b454..0c37fc972484bfbede01d23652e384071bf883af 100644
--- a/paddle/function/Im2Col.h
+++ b/paddle/function/Im2Col.h
@@ -78,7 +78,9 @@ public:
int strideHeight,
int strideWidth,
int paddingHeight,
- int paddingWidth);
+ int paddingWidth,
+ int dilationHeight = 1,
+ int dilationWidth = 1);
};
template <ColFormat Format, DeviceType Device, class T>
@@ -91,7 +93,9 @@ public:
int strideHeight,
int strideWidth,
int paddingHeight,
- int paddingWidth);
+ int paddingWidth,
+ int dilationHeight = 1,
+ int dilationWidth = 1);
};
} // namespace paddle
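The dilation parameters default to 1, so existing callers keep their behavior. The arithmetic used throughout these changes: a k-tap filter with dilation d covers an effective span of (k - 1) * d + 1 input pixels, and the output size follows from that span. A small self-contained check of both formulas:

    #include <cstdio>

    // Effective span of a dilated filter: a k-tap filter with dilation d
    // covers (k - 1) * d + 1 input pixels.
    int effectiveFilterSize(int filterSize, int dilation) {
      return (filterSize - 1) * dilation + 1;
    }

    int outputSize(int inputSize, int filterSize, int padding, int stride,
                   int dilation) {
      return (inputSize - effectiveFilterSize(filterSize, dilation) +
              2 * padding) / stride + 1;
    }

    int main() {
      // 3-tap filter, dilation 3 -> spans 7 pixels
      printf("%d\n", effectiveFilterSize(3, 3));   // 7
      // input 32, padding 1, stride 1: (32 - 7 + 2) / 1 + 1 = 28
      printf("%d\n", outputSize(32, 3, 1, 1, 3));  // 28
    }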
diff --git a/paddle/function/Im2ColOp.cpp b/paddle/function/Im2ColOp.cpp
index b7d1eb1eded7a7471fd5833a649916d3ee3e598e..f864d42f8075209c70ca2e16a70e4f2c9d58eef4 100644
--- a/paddle/function/Im2ColOp.cpp
+++ b/paddle/function/Im2ColOp.cpp
@@ -31,7 +31,9 @@ public:
int strideHeight,
int strideWidth,
int paddingHeight,
- int paddingWidth) {
+ int paddingWidth,
+ int dilationHeight,
+ int dilationWidth) {
int inputChannels = imShape[0];
int inputHeight = imShape[1];
int inputWidth = imShape[2];
@@ -47,8 +49,8 @@ public:
int c_im = c / filterWidth / filterHeight;
for (int h = 0; h < outputHeight; ++h) {
for (int w = 0; w < outputWidth; ++w) {
- int imRowIdx = h * strideHeight + hOffset;
- int imColIdx = w * strideWidth + wOffset;
+ int imRowIdx = h * strideHeight + hOffset * dilationHeight;
+ int imColIdx = w * strideWidth + wOffset * dilationWidth;
if ((imRowIdx - paddingHeight) < 0 ||
(imRowIdx - paddingHeight) >= inputHeight ||
(imColIdx - paddingWidth) < 0 ||
@@ -81,7 +83,9 @@ public:
int strideHeight,
int strideWidth,
int paddingHeight,
- int paddingWidth) {
+ int paddingWidth,
+ int dilationHeight,
+ int dilationWidth) {
int inputChannels = imShape[0];
int inputHeight = imShape[1];
int inputWidth = imShape[2];
@@ -97,8 +101,8 @@ public:
int c_im = c / filterWidth / filterHeight;
for (int h = 0; h < outputHeight; ++h) {
for (int w = 0; w < outputWidth; ++w) {
- int imRowIdx = h * strideHeight + hOffset;
- int imColIdx = w * strideWidth + wOffset;
+ int imRowIdx = h * strideHeight + hOffset * dilationHeight;
+ int imColIdx = w * strideWidth + wOffset * dilationWidth;
if ((imRowIdx - paddingHeight) >= 0 &&
(imRowIdx - paddingHeight) < inputHeight &&
(imColIdx - paddingWidth) >= 0 &&
@@ -134,7 +138,9 @@ public:
int strideHeight,
int strideWidth,
int paddingHeight,
- int paddingWidth) {
+ int paddingWidth,
+ int dilationHeight = 1,
+ int dilationWidth = 1) {
int inputChannels = imShape[0];
int inputHeight = imShape[1];
int inputWidth = imShape[2];
@@ -147,9 +153,10 @@ public:
for (int channel = 0; channel < inputChannels; ++channel) {
for (int filterH = 0; filterH < filterHeight; ++filterH) {
for (int filterW = 0; filterW < filterWidth; ++filterW) {
- int imRowOffset =
- outputH * strideHeight + filterH - paddingHeight;
- int imColOffset = outputW * strideWidth + filterW - paddingWidth;
+ int imRowOffset = outputH * strideHeight +
+ filterH * dilationHeight - paddingHeight;
+ int imColOffset = outputW * strideWidth +
+ filterW * dilationWidth - paddingWidth;
int colDataOffset =
(((outputH * outputWidth + outputW) * inputChannels +
channel) *
@@ -189,7 +196,9 @@ public:
int strideHeight,
int strideWidth,
int paddingHeight,
- int paddingWidth) {
+ int paddingWidth,
+ int dilationHeight = 1,
+ int dilationWidth = 1) {
int inputChannels = imShape[0];
int inputHeight = imShape[1];
int inputWidth = imShape[2];
@@ -202,9 +211,10 @@ public:
for (int channel = 0; channel < inputChannels; ++channel) {
for (int filterH = 0; filterH < filterHeight; ++filterH) {
for (int filterW = 0; filterW < filterWidth; ++filterW) {
- int imRowOffset =
- outputH * strideHeight + filterH - paddingHeight;
- int imColOffset = outputW * strideWidth + filterW - paddingWidth;
+ int imRowOffset = outputH * strideHeight +
+ filterH * dilationHeight - paddingHeight;
+ int imColOffset = outputW * strideWidth +
+ filterW * dilationWidth - paddingWidth;
int colDataOffset =
(((outputH * outputWidth + outputW) * inputChannels +
channel) *
diff --git a/paddle/function/Im2ColOpGpu.cu b/paddle/function/Im2ColOpGpu.cu
index bd98610498b1af003574129118be4684d38e5813..71da11b95557d7b59de5ea6c65d1d43db42f211c 100644
--- a/paddle/function/Im2ColOpGpu.cu
+++ b/paddle/function/Im2ColOpGpu.cu
@@ -28,6 +28,8 @@ __global__ void im2col(const T* data_im,
int strideW,
int paddingH,
int paddingW,
+ int dilationH,
+ int dilationW,
int height_col,
int width_col,
T* data_col) {
@@ -44,8 +46,8 @@ __global__ void im2col(const T* data_im,
data_col += (channel_out * height_col + h_out) * width_col + w_out;
for (int i = 0; i < blockH; ++i) {
for (int j = 0; j < blockW; ++j) {
- int rIdx = int(h_in + i);
- int cIdx = int(w_in + j);
+ int rIdx = int(h_in + i * dilationH);
+ int cIdx = int(w_in + j * dilationW);
if ((rIdx - (int)paddingH) >= (int)height ||
(rIdx - (int)paddingH) < 0 ||
(cIdx - (int)paddingW) >= (int)width ||
@@ -77,7 +79,9 @@ public:
int strideHeight,
int strideWidth,
int paddingHeight,
- int paddingWidth) {
+ int paddingWidth,
+ int dilationHeight,
+ int dilationWidth) {
int inputChannels = imShape[0];
int inputHeight = imShape[1];
int inputWidth = imShape[2];
@@ -102,6 +106,8 @@ public:
strideWidth,
paddingHeight,
paddingWidth,
+ dilationHeight,
+ dilationWidth,
outputHeight,
outputWidth,
colData);
@@ -121,6 +127,8 @@ __global__ void col2im(size_t n,
size_t strideW,
size_t paddingH,
size_t paddingW,
+ size_t dilationH,
+ size_t dilationW,
size_t height_col,
size_t width_col,
T* data_im) {
@@ -131,23 +139,34 @@ __global__ void col2im(size_t n,
int w = int(index % width);
int h = int((index / width) % height);
int c = int(index / (width * height));
+ int filterH = (blockH - 1) * dilationH + 1;
+ int filterW = (blockW - 1) * dilationW + 1;
+
if ((w - (int)paddingW) >= 0 &&
(w - (int)paddingW) < (width - 2 * paddingW) &&
(h - (int)paddingH) >= 0 && (h - paddingH) < (height - 2 * paddingH)) {
// compute the start and end of the output
int w_col_start =
- (w < (int)blockW) ? 0 : (w - int(blockW)) / (int)strideW + 1;
+ (w < (int)filterW) ? 0 : (w - int(filterW)) / (int)strideW + 1;
int w_col_end = min((int)(w / (int)strideW + 1), (int)(width_col));
int h_col_start =
- (h < (int)blockH) ? 0 : (h - (int)blockH) / (int)strideH + 1;
+ (h < (int)filterH) ? 0 : (h - (int)filterH) / (int)strideH + 1;
int h_col_end = min(int(h / strideH + 1), int(height_col));
+
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// the col location: [c * width * height + h_out, w_out]
- int c_col = int(c * blockH * blockW) +
- (h - h_col * (int)strideH) * (int)blockW +
- (w - w_col * (int)strideW);
- val += data_col[(c_col * height_col + h_col) * width_col + w_col];
+ int h_k = (h - h_col * strideH);
+ int w_k = (w - w_col * strideW);
+ if (h_k % dilationH == 0 && w_k % dilationW == 0) {
+ h_k /= dilationH;
+ w_k /= dilationW;
+ int c_col =
+ (((c * blockH + h_k) * blockW + w_k) * height_col + h_col) *
+ width_col +
+ w_col;
+ val += data_col[c_col];
+ }
}
}
h -= paddingH;
@@ -173,7 +192,9 @@ public:
int strideHeight,
int strideWidth,
int paddingHeight,
- int paddingWidth) {
+ int paddingWidth,
+ int dilationHeight,
+ int dilationWidth) {
int inputChannels = imShape[0];
int inputHeight = imShape[1];
int inputWidth = imShape[2];
@@ -205,6 +226,8 @@ public:
strideWidth,
paddingHeight,
paddingWidth,
+ dilationHeight,
+ dilationWidth,
outputHeight,
outputWidth,
imData);
@@ -229,6 +252,8 @@ __global__ void im2colOCF(const T* imData,
int strideWidth,
int paddingHeight,
int paddingWidth,
+ int dilationHeight,
+ int dilationWidth,
int outputHeight,
int outputWidth) {
int swId = blockIdx.x;
@@ -237,8 +262,10 @@ __global__ void im2colOCF(const T* imData,
channelId += blockDim.z) {
for (int idy = threadIdx.y; idy < filterHeight; idy += blockDim.y) {
for (int idx = threadIdx.x; idx < filterWidth; idx += blockDim.x) {
- int widthOffset = idx + swId * strideWidth - paddingWidth;
- int heightOffset = idy + shId * strideHeight - paddingHeight;
+ int widthOffset =
+ idx * dilationWidth + swId * strideWidth - paddingWidth;
+ int heightOffset =
+ idy * dilationHeight + shId * strideHeight - paddingHeight;
int imOffset = widthOffset + heightOffset * inputWidth +
channelId * inputHeight * inputWidth;
@@ -273,7 +300,9 @@ public:
int strideHeight,
int strideWidth,
int paddingHeight,
- int paddingWidth) {
+ int paddingWidth,
+ int dilationHeight,
+ int dilationWidth) {
int inputChannels = imShape[0];
int inputHeight = imShape[1];
int inputWidth = imShape[2];
@@ -312,6 +341,8 @@ public:
strideWidth,
paddingHeight,
paddingWidth,
+ dilationHeight,
+ dilationWidth,
outputHeight,
outputWidth);
CHECK_SYNC("Im2ColFunctor GPU failed");
@@ -330,6 +361,8 @@ __global__ void col2imOCF(T* imData,
int strideWidth,
int paddingHeight,
int paddingWidth,
+ int dilationHeight,
+ int dilationWidth,
int outputHeight,
int outputWidth) {
int swId = blockIdx.x;
@@ -338,8 +371,10 @@ __global__ void col2imOCF(T* imData,
channelId += blockDim.z) {
for (int idy = threadIdx.y; idy < filterHeight; idy += blockDim.y) {
for (int idx = threadIdx.x; idx < filterWidth; idx += blockDim.x) {
- int widthOffset = idx + swId * strideWidth - paddingWidth;
- int heightOffset = idy + shId * strideHeight - paddingHeight;
+ int widthOffset =
+ idx * dilationWidth + swId * strideWidth - paddingWidth;
+ int heightOffset =
+ idy * dilationHeight + shId * strideHeight - paddingHeight;
int imOffset = widthOffset + heightOffset * inputWidth +
channelId * inputHeight * inputWidth;
@@ -372,7 +407,9 @@ public:
int strideHeight,
int strideWidth,
int paddingHeight,
- int paddingWidth) {
+ int paddingWidth,
+ int dilationHeight,
+ int dilationWidth) {
int inputChannels = imShape[0];
int inputHeight = imShape[1];
int inputWidth = imShape[2];
@@ -411,6 +448,8 @@ public:
strideWidth,
paddingHeight,
paddingWidth,
+ dilationHeight,
+ dilationWidth,
outputHeight,
outputWidth);
CHECK_SYNC("Col2ImFunctor GPU failed");
diff --git a/paddle/function/Im2ColTest.cpp b/paddle/function/Im2ColTest.cpp
index a0a01a5fc7fc055dce6ddb3ee51c7ab18f8a4ca7..1f085538d81904dbd5b5d6bcd014adaed22e37d7 100644
--- a/paddle/function/Im2ColTest.cpp
+++ b/paddle/function/Im2ColTest.cpp
@@ -29,82 +29,98 @@ void TestIm2ColFunctor() {
for (size_t filterWidth : {3, 7}) {
for (size_t stride : {1, 2}) {
for (size_t padding : {0, 1}) {
- if (inputHeight <= filterHeight || inputWidth <= filterWidth)
- break;
- if (padding >= filterHeight || padding >= filterWidth) break;
- size_t outputHeight =
- (inputHeight - filterHeight + 2 * padding + stride) /
- stride;
- size_t outputWidth =
- (inputWidth - filterWidth + 2 * padding + stride) / stride;
-
- TensorShape imShape =
- TensorShape({channels, inputHeight, inputWidth});
- TensorShape colShape1 = TensorShape({channels,
- filterHeight,
- filterWidth,
- outputHeight,
- outputWidth});
- TensorShape colShape2 = TensorShape({outputHeight,
- outputWidth,
- channels,
- filterHeight,
- filterWidth});
-
- size_t height = channels * filterHeight * filterWidth;
- size_t width = outputHeight * outputWidth;
- VectorPtr input1 = Vector::create(imShape.getElements(), false);
- VectorPtr input2 = Vector::create(imShape.getElements(), false);
- MatrixPtr output1 = Matrix::create(height, width, false, false);
- MatrixPtr output2 = Matrix::create(width, height, false, false);
- input1->uniform(0.001, 1);
- input2->copyFrom(*input1);
-
- Im2ColFunctor<kCFO, Device, T> im2Col1;
- Im2ColFunctor<kOCF, Device, T> im2Col2;
- im2Col1(input1->getData(),
- imShape,
- output1->getData(),
- colShape1,
- stride,
- stride,
- padding,
- padding);
- im2Col2(input2->getData(),
- imShape,
- output2->getData(),
- colShape2,
- stride,
- stride,
- padding,
- padding);
-
- // The transposition of the result of ColFormat == kCFO
- // is equal to the result of ColFormat == kOCF.
- MatrixPtr test;
- output2->transpose(test, true);
- autotest::TensorCheckErr(*output1, *test);
-
- Col2ImFunctor<kCFO, Device, T> col2Im1;
- Col2ImFunctor<kOCF, Device, T> col2Im2;
- col2Im1(input1->getData(),
- imShape,
- output1->getData(),
- colShape1,
- stride,
- stride,
- padding,
- padding);
- col2Im2(input2->getData(),
- imShape,
- output2->getData(),
- colShape2,
- stride,
- stride,
- padding,
- padding);
-
- autotest::TensorCheckErr(*input1, *input2);
+ for (size_t dilation : {1, 3}) {
+ size_t filterSizeH = (filterHeight - 1) * dilation + 1;
+ size_t filterSizeW = (filterWidth - 1) * dilation + 1;
+ if (inputHeight + 2 * padding < filterSizeH ||
+ inputWidth + 2 * padding < filterSizeW)
+ break;
+ if (padding >= filterSizeH || padding >= filterSizeW) break;
+ size_t outputHeight =
+ (inputHeight - filterSizeH + 2 * padding) / stride + 1;
+ size_t outputWidth =
+ (inputWidth - filterSizeW + 2 * padding) / stride + 1;
+
+ TensorShape imShape =
+ TensorShape({channels, inputHeight, inputWidth});
+ TensorShape colShape1 = TensorShape({channels,
+ filterHeight,
+ filterWidth,
+ outputHeight,
+ outputWidth});
+ TensorShape colShape2 = TensorShape({outputHeight,
+ outputWidth,
+ channels,
+ filterHeight,
+ filterWidth});
+
+ size_t height = channels * filterHeight * filterWidth;
+ size_t width = outputHeight * outputWidth;
+ VectorPtr input1 =
+ Vector::create(imShape.getElements(), false);
+ VectorPtr input2 =
+ Vector::create(imShape.getElements(), false);
+ MatrixPtr output1 =
+ Matrix::create(height, width, false, false);
+ MatrixPtr output2 =
+ Matrix::create(width, height, false, false);
+ input1->uniform(0.001, 1);
+ input2->copyFrom(*input1);
+
+ Im2ColFunctor<kCFO, Device, T> im2Col1;
+ Im2ColFunctor<kOCF, Device, T> im2Col2;
+ im2Col1(input1->getData(),
+ imShape,
+ output1->getData(),
+ colShape1,
+ stride,
+ stride,
+ padding,
+ padding,
+ dilation,
+ dilation);
+ im2Col2(input2->getData(),
+ imShape,
+ output2->getData(),
+ colShape2,
+ stride,
+ stride,
+ padding,
+ padding,
+ dilation,
+ dilation);
+
+ // The transposition of the result of ColFormat == kCFO
+ // is equal to the result of ColFormat == kOCF.
+ MatrixPtr test;
+ output2->transpose(test, true);
+ autotest::TensorCheckErr(*output1, *test);
+
+ Col2ImFunctor<kCFO, Device, T> col2Im1;
+ Col2ImFunctor<kOCF, Device, T> col2Im2;
+
+ col2Im1(input1->getData(),
+ imShape,
+ output1->getData(),
+ colShape1,
+ stride,
+ stride,
+ padding,
+ padding,
+ dilation,
+ dilation);
+ col2Im2(input2->getData(),
+ imShape,
+ output2->getData(),
+ colShape2,
+ stride,
+ stride,
+ padding,
+ padding,
+ dilation,
+ dilation);
+ autotest::TensorCheckErr(*input1, *input2);
+ }
}
}
}
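The indexing these tests exercise is imRow = h * stride + kh * dilation - padding, as in Im2ColOp.cpp above. A 1-D standalone sketch (illustrative only, not Paddle's functor) showing which input pixels a dilated filter tap reads:

    #include <cstdio>

    // Minimal 1-D im2col with dilation, mirroring the CPU indexing:
    // imRow = h * stride + kh * dilation - padding.
    int main() {
      const int in = 5, k = 3, stride = 1, pad = 0, dil = 2;
      const int filterS = (k - 1) * dil + 1;                  // 5
      const int out = (in - filterS + 2 * pad) / stride + 1;  // 1
      float im[in];
      for (int i = 0; i < in; ++i) im[i] = float(i);
      // "col" has shape [k, out]; out-of-range rows are zero-padded
      for (int kh = 0; kh < k; ++kh)
        for (int h = 0; h < out; ++h) {
          int row = h * stride + kh * dil - pad;
          float v = (row < 0 || row >= in) ? 0.f : im[row];
          printf("col[%d][%d] = %.0f\n", kh, h, v);  // reads im[0], im[2], im[4]
        }
    }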
diff --git a/paddle/function/ScaleSubRegionOp.cpp b/paddle/function/ScaleSubRegionOp.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a080505d7df83a6c0a9d88fbcb7863fc0e1f7b21
--- /dev/null
+++ b/paddle/function/ScaleSubRegionOp.cpp
@@ -0,0 +1,155 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "ScaleSubRegionOp.h"
+#include "paddle/function/TensorShape.h"
+
+namespace paddle {
+
+template <>
+void ScaleSubRegion<DEVICE_TYPE_CPU>(real* outputs,
+ const real* inputs,
+ const real* indices,
+ const TensorShape shape,
+ const FuncConfig& conf) {
+ real value = conf.get<real>("value");
+
+ int number = shape[0];
+ int channel = shape[1];
+ int height = shape[2];
+ int width = shape[3];
+
+ memcpy(outputs, inputs, number * channel * height * width * sizeof(real));
+
+ for (int n = 0; n < number; ++n) {
+ // indices start from 1
+ int offset = n * 6;
+ for (int c = indices[offset] - 1; c < indices[offset + 1]; ++c) {
+ for (int h = indices[offset + 2] - 1; h < indices[offset + 3]; ++h) {
+ for (int w = indices[offset + 4] - 1; w < indices[offset + 5]; ++w) {
+ int idx = ((n * channel + c) * height + h) * width + w;
+ outputs[idx] *= value;
+ }
+ }
+ }
+ }
+}
+
+template <>
+void ScaleSubRegionGrad<DEVICE_TYPE_CPU>(const real* inGrad,
+ real* outGrad,
+ const real* indices,
+ const TensorShape shape,
+ const FuncConfig& conf) {
+ real value = conf.get<real>("value");
+
+ int number = shape[0];
+ int channel = shape[1];
+ int height = shape[2];
+ int width = shape[3];
+
+ for (int n = 0; n < number; ++n) {
+ for (int c = 0; c < channel; ++c) {
+ for (int h = 0; h < height; ++h) {
+ for (int w = 0; w < width; ++w) {
+ int idx = ((n * channel + c) * height + h) * width + w;
+ int offset = n * 6;
+ if (c >= (indices[offset] - 1) && c <= (indices[offset + 1] - 1) &&
+ h >= (indices[offset + 2] - 1) &&
+ h <= (indices[offset + 3] - 1) &&
+ w >= (indices[offset + 4] - 1) &&
+ w <= (indices[offset + 5] - 1)) {
+ outGrad[idx] += inGrad[idx] * value;
+ } else {
+ outGrad[idx] += inGrad[idx];
+ }
+ }
+ }
+ }
+ }
+}
+
+/**
+ * \brief For each instance, ScaleSubRegion multiplies the values in a
+ * specified continuous sub-region by a scalar. By providing start and end
+ * indices for C/H/W, you can specify the location and shape of the region.
+ *
+ * Argument in this Function:
+ * \param inputs A 4-D tensor with shape [N, C, H, W], only one input.
+ * \param indices A 2-D tensor with shape [N, 6], indicates the sub region.
+ * \param outputs A 4-D tensor with same shape as inputs, output value.
+ */
+template <DeviceType Device>
+class ScaleSubRegionFunc : public FunctionBase {
+public:
+ void init(const FuncConfig& config) override { conf_ = config; }
+
+ void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
+ CHECK_EQ(2UL, inputs.size());
+ CHECK_EQ(1UL, outputs.size());
+ CHECK_EQ(outputs[0].getArgType(), ASSIGN_TO);
+
+ TensorShape shape = inputs[0].shape();
+
+    ScaleSubRegion<Device>(outputs[0].data<real>(),
+                           inputs[0].data<real>(),
+                           inputs[1].data<real>(),
+                           shape,
+                           conf_);
+ }
+
+private:
+ FuncConfig conf_;
+};
+
+/**
+ * \brief The backward propagation of ScaleSubRegion Function.
+ *
+ * Argument in this Function:
+ * \param inputs A 4-D tensor with shape [N, C, H, W], output gradient.
+ * \param indices A 2-D tensor with shape [N, 6], indicates the sub region.
+ * \param outputs A 4-D tensor with shape [N, C, H, W], gradient of input value.
+ */
+
+template <DeviceType Device>
+class ScaleSubRegionGradFunc : public FunctionBase {
+public:
+ void init(const FuncConfig& config) override { conf_ = config; }
+
+ void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
+ CHECK_EQ(2UL, inputs.size());
+ CHECK_EQ(1UL, outputs.size());
+ CHECK_EQ(outputs[0].getArgType(), ADD_TO);
+
+ TensorShape shape = inputs[0].shape();
+
+    ScaleSubRegionGrad<Device>(inputs[0].data<real>(),
+                               outputs[0].data<real>(),
+                               inputs[1].data<real>(),
+                               shape,
+                               conf_);
+ }
+
+private:
+ FuncConfig conf_;
+};
+
+REGISTER_TYPED_FUNC(ScaleSubRegion, CPU, ScaleSubRegionFunc);
+REGISTER_TYPED_FUNC(ScaleSubRegionGrad, CPU, ScaleSubRegionGradFunc);
+#ifdef PADDLE_WITH_CUDA
+REGISTER_TYPED_FUNC(ScaleSubRegion, GPU, ScaleSubRegionFunc);
+REGISTER_TYPED_FUNC(ScaleSubRegionGrad, GPU, ScaleSubRegionGradFunc);
+#endif
+
+} // namespace paddle
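Since the indices tensor is 1-based and inclusive on both ends, a concrete example helps. A standalone sketch of the forward pass on a toy 2 x 4 slice, restricting only the W range (illustrative, not the Paddle function itself):

    #include <cstdio>

    // Scale the sub-region w in [2, 3] (1-based, inclusive) of every row
    // by 10, mirroring ScaleSubRegion's CPU loop.
    int main() {
      const int H = 2, W = 4;
      float x[H][W] = {{1, 2, 3, 4}, {5, 6, 7, 8}};
      const int wStart = 2, wEnd = 3;  // 1-based, as in the indices tensor
      const float value = 10.f;
      for (int h = 0; h < H; ++h)
        for (int w = wStart - 1; w < wEnd; ++w)  // "indices start from 1" shift
          x[h][w] *= value;
      for (int h = 0; h < H; ++h)
        printf("%g %g %g %g\n", x[h][0], x[h][1], x[h][2], x[h][3]);
      // prints: 1 20 30 4  /  5 60 70 8
    }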
diff --git a/paddle/function/ScaleSubRegionOp.h b/paddle/function/ScaleSubRegionOp.h
new file mode 100644
index 0000000000000000000000000000000000000000..0480c8577f3fbf3bc9e94b635df96a31b103e9e3
--- /dev/null
+++ b/paddle/function/ScaleSubRegionOp.h
@@ -0,0 +1,55 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "Function.h"
+
+namespace paddle {
+
+/**
+ * \brief Function to multiply the values in a specified continuous
+ * sub-region by a scalar. Indices must be provided to indicate the location
+ * and shape of the region, and the multiplier is passed in through the
+ * configure variable.
+ *
+ *
+ * \param[out] outputs Output value.
+ * \param[in] inputs Input data which contains NCHW information.
+ * \param[in] indices Indices data to indicate the sub region.
+ * \param[in] shape Tensor shape of input value.
+ * \param[in] conf Configure variable which contains the multiplier.
+ */
+template <DeviceType Device>
+void ScaleSubRegion(real* outputs,
+ const real* inputs,
+ const real* indices,
+ const TensorShape shape,
+ const FuncConfig& conf);
+
+/**
+ * \brief Backward propagation function of ScaleSubRegion.
+ *
+ * \param[out] inGrad Gradients of previous layer.
+ * \param[in] outGrad Output gradient.
+ * \param[in] indices Indices data.
+ * \param[in] shape The Shape of input tensor.
+ * \param[in] conf Configure variable.
+ */
+template <DeviceType Device>
+void ScaleSubRegionGrad(const real* inGrad,
+ real* outGrad,
+ const real* indices,
+ const TensorShape shape,
+ const FuncConfig& conf);
+} // namespace paddle
diff --git a/paddle/function/ScaleSubRegionOpGpu.cu b/paddle/function/ScaleSubRegionOpGpu.cu
new file mode 100644
index 0000000000000000000000000000000000000000..8aae2e44c3fdc8b516e66ecfd2e04f466a17dde9
--- /dev/null
+++ b/paddle/function/ScaleSubRegionOpGpu.cu
@@ -0,0 +1,116 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "ScaleSubRegionOp.h"
+#include "hl_base.h"
+
+namespace paddle {
+
+__global__ void KeScaleSubRegion(real* outputs,
+ const real* inputs,
+ const real* indices,
+ real value,
+ int channel,
+ int height,
+ int width,
+ int nthreads) {
+ const int idx = threadIdx.x + blockIdx.x * blockDim.x;
+ if (idx < nthreads) {
+ const int w = idx % width;
+ const int h = (idx / width) % height;
+ const int c = (idx / width / height) % channel;
+ const int n = idx / width / height / channel;
+
+ const int offset = n * 6;
+ if (c >= (indices[offset] - 1) && c <= (indices[offset + 1] - 1) &&
+ h >= (indices[offset + 2] - 1) && h <= (indices[offset + 3] - 1) &&
+ w >= (indices[offset + 4] - 1) && w <= (indices[offset + 5] - 1)) {
+ outputs[idx] = inputs[idx] * value;
+ } else {
+ outputs[idx] = inputs[idx];
+ }
+ }
+}
+
+template <>
+void ScaleSubRegion<DEVICE_TYPE_GPU>(real* outputs,
+ const real* inputs,
+ const real* indices,
+ const TensorShape shape,
+ const FuncConfig& conf) {
+ real value = conf.get<real>("value");
+
+ int number = shape[0];
+ int channel = shape[1];
+ int height = shape[2];
+ int width = shape[3];
+
+ size_t nth = number * channel * height * width;
+ int blockSize = 1024;
+ int gridSize = (nth + blockSize - 1) / blockSize;
+
+  KeScaleSubRegion<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>(
+ outputs, inputs, indices, value, channel, height, width, nth);
+ CHECK_SYNC("ScaleSubRegion");
+}
+
+__global__ void KeScaleSubRegionDiff(const real* inGrad,
+ real* outGrad,
+ const real* indices,
+ real value,
+ int channel,
+ int height,
+ int width,
+ int nthreads) {
+ const int idx = threadIdx.x + blockIdx.x * blockDim.x;
+ if (idx < nthreads) {
+ const int w = idx % width;
+ const int h = (idx / width) % height;
+ const int c = (idx / width / height) % channel;
+ const int n = idx / width / height / channel;
+
+ const int offset = n * 6;
+ if (c >= (indices[offset] - 1) && c <= (indices[offset + 1] - 1) &&
+ h >= (indices[offset + 2] - 1) && h <= (indices[offset + 3] - 1) &&
+ w >= (indices[offset + 4] - 1) && w <= (indices[offset + 5] - 1)) {
+ outGrad[idx] += inGrad[idx] * value;
+ } else {
+ outGrad[idx] += inGrad[idx];
+ }
+ }
+}
+
+template <>
+void ScaleSubRegionGrad<DEVICE_TYPE_GPU>(const real* inGrad,
+ real* outGrad,
+ const real* indices,
+ const TensorShape shape,
+ const FuncConfig& conf) {
+ real value = conf.get<real>("value");
+
+ int number = shape[0];
+ int channel = shape[1];
+ int height = shape[2];
+ int width = shape[3];
+
+ size_t nth = number * channel * height * width;
+ int blockSize = 1024;
+ int gridSize = (nth + blockSize - 1) / blockSize;
+
+  KeScaleSubRegionDiff<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>(
+ inGrad, outGrad, indices, value, channel, height, width, nth);
+ CHECK_SYNC("ScaleSubRegionGrad");
+}
+
+} // namespace paddle
diff --git a/paddle/function/ScaleSubRegionOpTest.cpp b/paddle/function/ScaleSubRegionOpTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..43331f258dddaa43cbc8cc77519e299de7e98290
--- /dev/null
+++ b/paddle/function/ScaleSubRegionOpTest.cpp
@@ -0,0 +1,72 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <gtest/gtest.h>
+#include "FunctionTest.h"
+
+namespace paddle {
+
+TEST(ScaleSubRegion, real) {
+ for (size_t numSamples : {5, 32}) {
+ for (size_t channels : {5, 32}) {
+ for (size_t imgSizeH : {5, 33}) {
+ for (size_t imgSizeW : {5, 32}) {
+ for (real value : {-0.5, 0.0, 0.5}) {
+ for (bool firstHalf : {false, true}) {
+ VLOG(3) << " numSamples=" << numSamples
+ << " channels=" << channels << " imgSizeH=" << imgSizeH
+ << " imgSizeW=" << imgSizeW;
+
+ for (bool testGrad : {false, true}) {
+ CpuGpuFuncCompare compare(
+ testGrad ? "ScaleSubRegionGrad" : "ScaleSubRegion",
+ FuncConfig().set("value", value));
+
+ TensorShape shape{numSamples, channels, imgSizeH, imgSizeW};
+ TensorShape indicesShape{numSamples, 6};
+
+ compare.addInputs(BufferArg(VALUE_TYPE_FLOAT, shape));
+ compare.addInputs(BufferArg(VALUE_TYPE_FLOAT, indicesShape));
+
+ compare.registerInitCallback([=](BufferArg& arg, size_t index) {
+ if (index == 1) {
+ real* data = (real*)arg.data();
+
+ for (size_t i = 0; i < numSamples; ++i) {
+ size_t offset = i * 6;
+ data[offset] = firstHalf ? 1 : channels / 2;
+ data[offset + 1] = firstHalf ? channels / 2 : channels;
+ data[offset + 2] = firstHalf ? 1 : imgSizeH / 2;
+ data[offset + 3] = firstHalf ? imgSizeH / 2 : imgSizeH;
+ data[offset + 4] = firstHalf ? 1 : imgSizeW / 2;
+ data[offset + 5] = firstHalf ? imgSizeW / 2 : imgSizeW;
+ }
+ }
+ });
+
+ compare.addOutputs(
+ BufferArg(
+ VALUE_TYPE_FLOAT, shape, testGrad ? ADD_TO : ASSIGN_TO),
+ testGrad ? ADD_TO : ASSIGN_TO);
+ compare.run();
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+} // namespace paddle
diff --git a/paddle/gserver/CMakeLists.txt b/paddle/gserver/CMakeLists.txt
index 5f39167afc34affbea7858fa0794ef52b786a383..91d732641a4a5eed050841b59fd10da397eb732f 100644
--- a/paddle/gserver/CMakeLists.txt
+++ b/paddle/gserver/CMakeLists.txt
@@ -85,9 +85,49 @@ if(MOBILE_INFERENCE)
gradientmachines/GradientMachineMode.cpp
gradientmachines/MultiGradientMachine.cpp)
- # Remove useless layers
+ # Remove layers that are only used in training
list(REMOVE_ITEM GSERVER_SOURCES
- layers/RecurrentLayerGroup.cpp)
+ layers/RecurrentLayerGroup.cpp
+ layers/CostLayer.cpp
+ layers/MultiBoxLossLayer.cpp
+ layers/WarpCTCLayer.cpp
+ layers/CTCLayer.cpp
+ layers/LinearChainCTC.cpp
+ layers/PrintLayer.cpp)
+ list(REMOVE_ITEM GSERVER_SOURCES
+ layers/OuterProdLayer.cpp
+ layers/SumToOneNormLayer.cpp
+ layers/ConvShiftLayer.cpp
+ layers/InterpolationLayer.cpp
+ layers/AgentLayer.cpp
+ layers/DotMulOperator.cpp
+ layers/GruStepLayer.cpp
+ layers/LstmStepLayer.cpp
+ layers/ConvexCombinationLayer.cpp
+ layers/Conv3DLayer.cpp
+ layers/DeConv3DLayer.cpp
+ layers/CropLayer.cpp
+ layers/CrossEntropyOverBeam.cpp
+ layers/DataNormLayer.cpp
+ layers/FeatureMapExpandLayer.cpp
+ layers/HierarchicalSigmoidLayer.cpp
+ layers/MultinomialSampler.cpp
+ layers/NCELayer.cpp
+ layers/KmaxSeqScoreLayer.cpp
+ layers/MDLstmLayer.cpp
+ layers/MultiplexLayer.cpp
+ layers/PadLayer.cpp
+ layers/Pool3DLayer.cpp
+ layers/ResizeLayer.cpp
+ layers/RotateLayer.cpp
+ layers/RowConvLayer.cpp
+ layers/RowL2NormLayer.cpp
+ layers/SamplingIdLayer.cpp
+ layers/ScaleShiftLayer.cpp
+ layers/SelectiveFullyConnectedLayer.cpp
+ layers/SpatialPyramidPoolLayer.cpp
+ layers/BilinearInterpLayer.cpp
+ layers/ClipLayer.cpp)
endif()
if(WITH_GPU)
diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.cpp b/paddle/gserver/gradientmachines/NeuralNetwork.cpp
index dbadc352a4ccd7483bf67e1025c212f514e32a24..be112b41239cace3fa9b9ee97923f8c3c7a9a98f 100644
--- a/paddle/gserver/gradientmachines/NeuralNetwork.cpp
+++ b/paddle/gserver/gradientmachines/NeuralNetwork.cpp
@@ -16,7 +16,6 @@ limitations under the License. */
#include "NeuralNetwork.h"
#include "hl_gpu.h"
-#include "paddle/gserver/layers/AgentLayer.h"
#include "paddle/utils/CustomStackTrace.h"
#include "paddle/utils/Logging.h"
#include "paddle/utils/Stat.h"
@@ -28,6 +27,7 @@ limitations under the License. */
#ifndef PADDLE_MOBILE_INFERENCE
#include "MultiNetwork.h"
#include "RecurrentGradientMachine.h"
+#include "paddle/gserver/layers/AgentLayer.h"
#endif
namespace paddle {
@@ -192,9 +192,11 @@ void NeuralNetwork::init(const ModelConfig& config,
void NeuralNetwork::connect(LayerPtr agentLayer,
LayerPtr realLayer,
int height) {
+#ifndef PADDLE_MOBILE_INFERENCE
AgentLayer* agent = dynamic_cast<AgentLayer*>(agentLayer.get());
CHECK_NOTNULL(agent);
agent->setRealLayer(realLayer, height);
+#endif
}
void NeuralNetwork::connect(std::string agentLayerName,
diff --git a/paddle/gserver/layers/ExpandConvLayer.cpp b/paddle/gserver/layers/ExpandConvLayer.cpp
index 48dfcb49a4c2c46891bb5236fc1f8e644c03f327..7ff0c73721d3de93aa7fa5fae58876884592c51f 100644
--- a/paddle/gserver/layers/ExpandConvLayer.cpp
+++ b/paddle/gserver/layers/ExpandConvLayer.cpp
@@ -79,6 +79,10 @@ bool ExpandConvLayer::init(const LayerMap &layerMap,
for (int i = 0; i < config_.inputs_size(); i++) {
std::vector paddings = {(size_t)paddingY_[i], (size_t)padding_[i]};
std::vector strides = {(size_t)strideY_[i], (size_t)stride_[i]};
+ std::vector dilations = {(size_t)dilationY_[i],
+ (size_t)dilation_[i]};
+
+ bool useDilation = ((size_t)dilationY_[i] > 1 || (size_t)dilation_[i] > 1);
// Convolution Layer uses the GemmConv function by default.
convType = "GemmConv";
@@ -97,13 +101,14 @@ bool ExpandConvLayer::init(const LayerMap &layerMap,
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
if ((filterSize_[i] == filterSizeY_[i]) &&
(filterSize_[i] == 3 || filterSize_[i] == 4) &&
- (stride_[i] == strideY_[i]) && (stride_[i] == 1 || stride_[i] == 2)) {
+ (stride_[i] == strideY_[i]) && (stride_[i] == 1 || stride_[i] == 2) &&
+ !useDilation) {
convType = "NeonDepthwiseConv";
}
#endif
}
- if (FLAGS_use_nnpack && !isDeconv_) {
+ if (FLAGS_use_nnpack && !isDeconv_ && !useDilation) {
createFunction(forward_,
"NNPACKConv",
FuncConfig()
@@ -117,6 +122,7 @@ bool ExpandConvLayer::init(const LayerMap &layerMap,
FuncConfig()
.set("paddings", paddings)
.set("strides", strides)
+ .set("dilations", dilations)
.set("groups", (size_t)groups_[i]));
createFunction(backward_,
@@ -124,6 +130,7 @@ bool ExpandConvLayer::init(const LayerMap &layerMap,
FuncConfig()
.set("paddings", paddings)
.set("strides", strides)
+ .set("dilations", dilations)
.set("groups", (size_t)groups_[i]));
createFunction(backward_,
@@ -131,6 +138,7 @@ bool ExpandConvLayer::init(const LayerMap &layerMap,
FuncConfig()
.set("paddings", paddings)
.set("strides", strides)
+ .set("dilations", dilations)
.set("groups", (size_t)groups_[i]));
}
}
diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp
index 01f2aae6cf88d47296da804061b9b039cca593db..b55b86221cd411addfa8c5e93f8089f5ed9b0557 100644
--- a/paddle/gserver/layers/Layer.cpp
+++ b/paddle/gserver/layers/Layer.cpp
@@ -98,6 +98,7 @@ ClassRegistrar<Layer, LayerConfig> Layer::registrar_;
LayerPtr Layer::create(const LayerConfig& config) {
std::string type = config.type();
+#ifndef PADDLE_MOBILE_INFERENCE
// NOTE: As following types have illegal character '-',
// they can not use REGISTER_LAYER to registrar.
// Besides, to fit with old training models,
@@ -106,7 +107,6 @@ LayerPtr Layer::create(const LayerConfig& config) {
return LayerPtr(new MultiClassCrossEntropy(config));
else if (type == "rank-cost")
return LayerPtr(new RankingCost(config));
-#ifndef PADDLE_MOBILE_INFERENCE
else if (type == "auc-validation")
return LayerPtr(new AucValidation(config));
else if (type == "pnpair-validation")
diff --git a/paddle/gserver/layers/MKLDNNAddtoLayer.cpp b/paddle/gserver/layers/MKLDNNAddtoLayer.cpp
index 8eb700723f2cf7dda969739bb5e3d48358d278a0..0f2b67fd758ec1513f42c4cb1a36f2f3915f4740 100644
--- a/paddle/gserver/layers/MKLDNNAddtoLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNAddtoLayer.cpp
@@ -54,7 +54,6 @@ void MKLDNNAddtoLayer::reshape(
ow = iw;
reshapeOutput(oh, ow);
resizeOutput(bs, oc * oh * ow);
- printSizeInfo();
}
void MKLDNNAddtoLayer::resetFwd(std::vector<primitive>& pipeline,
@@ -62,16 +61,14 @@ void MKLDNNAddtoLayer::resetFwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
- if (biases_) {
- LOG(FATAL) << "not implemented yet";
- }
- resetFwdBuffers(inVals_, out);
+ resetFwdBuffers(inVals_, bias, out);
in = inVals_[0];
std::shared_ptr fwdPD;
- resetFwdPD(fwdPD, inVals_, out);
+ std::shared_ptr biasPD;
+ resetFwdPD(fwdPD, biasPD, inVals_, bias, out);
- resetFwdPipeline(pipeline, fwdPD, inVals_, out);
+ resetFwdPipeline(pipeline, fwdPD, biasPD, inVals_, bias, out);
}
void MKLDNNAddtoLayer::resetBwd(std::vector& pipeline,
@@ -79,7 +76,7 @@ void MKLDNNAddtoLayer::resetBwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
- resetBwdBuffers(inGrads_, out);
+ resetBwdBuffers(inGrads_, bias, out);
in = inGrads_[0];
// backward only need share output grad to input grad
@@ -89,6 +86,20 @@ void MKLDNNAddtoLayer::resetBwd(std::vector& pipeline,
inputLayers_[i]->getOutputGrad()->setData(inGrads_[i]->getData());
}
}
+
+ // backward bias
+ bwdBias_ = nullptr;
+ if (bias) {
+ std::vector<float> scales(bs_, 1.0);
+ std::vector<memory::primitive_desc> srcPDs(bs_, bias->getPrimitiveDesc());
+ auto biasPD = sum::primitive_desc(bias->getMemoryDesc(), scales, srcPDs);
+ std::vector<primitive::at> srcs;
+ for (size_t i = 0; i < grads_.size(); ++i) {
+ srcs.push_back(*(grads_[i]));
+ }
+ bwdBias_.reset(new sum(biasPD, srcs, *bias));
+ pipeline.push_back(*bwdBias_);
+ }
}
void MKLDNNAddtoLayer::updateWeights(const UpdateCallback& callback) {
@@ -97,7 +108,25 @@ void MKLDNNAddtoLayer::updateWeights(const UpdateCallback& callback) {
}
}
+void MKLDNNAddtoLayer::prepareBias(MKLDNNMatrixPtr& bias,
+ const MatrixPtr& biasMat,
+ const MKLDNNMatrixPtr& out,
+ std::vector<MKLDNNMatrixPtr>& outs) {
+ auto pd = MKLDNNMatrix::createPrimitiveDesc(
+ {(int)layerSize_}, memory::format::x, engine_);
+ bias = MKLDNNMatrix::create(pd, biasMat);
+ outs.clear();
+ real* data = out->getData();
+ CHECK_EQ(bs_ * layerSize_, out->getElementCnt());
+ for (int i = 0; i < bs_; ++i) {
+ MatrixPtr tmp =
+ Matrix::create(data + i * layerSize_, 1, layerSize_, false, false);
+ outs.push_back(MKLDNNMatrix::create(bias->getPrimitiveDesc(), tmp));
+ }
+}
+
void MKLDNNAddtoLayer::resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+ MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
inputs.resize(inputLayers_.size());
for (size_t i = 0; i < inputs.size(); i++) {
@@ -110,12 +139,20 @@ void MKLDNNAddtoLayer::resetFwdBuffers(std::vector& inputs,
}
resetOutValue(out, inputs[0]->getPrimitiveDesc());
+
+ if (biases_ && biases_->getW()) {
+ prepareBias(bias, biases_->getW(), out, vals_);
+ } else {
+ bias = nullptr;
+ }
}
void MKLDNNAddtoLayer::resetFwdPD(std::shared_ptr<sum::primitive_desc>& pd,
+ std::shared_ptr<sum::primitive_desc>& biasPD,
std::vector<MKLDNNMatrixPtr>& inputs,
+ MKLDNNMatrixPtr bias,
MKLDNNMatrixPtr out) {
- std::vector<double> scales(inputs.size(), 1.0);
+ std::vector<float> scales(inputs.size(), 1.0);
std::vector<memory::primitive_desc> srcPDs;
for (size_t i = 0; i < inputs.size(); i++) {
srcPDs.push_back(inputs[i]->getPrimitiveDesc());
@@ -123,12 +160,23 @@ void MKLDNNAddtoLayer::resetFwdPD(std::shared_ptr& pd,
CHECK(out);
pd.reset(new sum::primitive_desc(out->getMemoryDesc(), scales, srcPDs));
CHECK_PRIMITIVE_DESC_EQ(out, pd->dst_primitive_desc());
+
+ biasPD = nullptr;
+ if (bias) {
+ std::vector<float> scales(2, 1.0);
+ std::vector<memory::primitive_desc> srcPDs(2, bias->getPrimitiveDesc());
+ biasPD.reset(
+ new sum::primitive_desc(bias->getMemoryDesc(), scales, srcPDs));
+ CHECK_PRIMITIVE_DESC_EQ(bias, biasPD->dst_primitive_desc());
+ }
}
void MKLDNNAddtoLayer::resetFwdPipeline(
std::vector<primitive>& pipeline,
std::shared_ptr<sum::primitive_desc>& pd,
+ std::shared_ptr<sum::primitive_desc>& biasPD,
std::vector<MKLDNNMatrixPtr>& inputs,
+ MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
std::vector<primitive::at> srcs;
for (size_t i = 0; i < inputs.size(); i++) {
@@ -136,9 +184,23 @@ void MKLDNNAddtoLayer::resetFwdPipeline(
}
fwd_.reset(new sum(*pd, srcs, *out));
pipeline.push_back(*fwd_);
+
+ fwdBias_.clear();
+ if (biasPD == nullptr || bias == nullptr) {
+ return;
+ }
+ fwdBias_.resize(vals_.size());
+ for (size_t i = 0; i < vals_.size(); ++i) {
+ std::vector<primitive::at> srcs;
+ srcs.push_back(*(vals_[i]));
+ srcs.push_back(*bias);
+ fwdBias_[i].reset(new sum(*biasPD, srcs, *vals_[i]));
+ pipeline.push_back(*fwdBias_[i]);
+ }
}
void MKLDNNAddtoLayer::resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+ MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
CHECK(outVal_);
resetOutGrad(out, outVal_->getPrimitiveDesc());
@@ -149,6 +211,12 @@ void MKLDNNAddtoLayer::resetBwdBuffers(std::vector& inputs,
resetInGrad(inputs[i], inVal_->getPrimitiveDesc(), i);
CHECK_PRIMITIVE_DESC_EQ(inputs[i], out->getPrimitiveDesc());
}
+
+ if (biases_ && biases_->getWGrad()) {
+ prepareBias(bias, biases_->getWGrad(), out, grads_);
+ } else {
+ bias = nullptr;
+ }
}
} // namespace paddle
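What the added bias path computes, stripped of the MKL-DNN plumbing: the layer first sums its inputs element-wise, then adds the shared bias row to every sample (the per-sample sum primitives built in resetFwdPipeline). A plain C++ sketch of the same math:

    #include <cstdio>
    #include <vector>

    // out[b][j] = sum_i in_i[b][j] + bias[j]  for every sample b.
    int main() {
      const int bs = 2, size = 3;
      std::vector<std::vector<float>> in0 = {{1, 1, 1}, {2, 2, 2}};
      std::vector<std::vector<float>> in1 = {{4, 5, 6}, {7, 8, 9}};
      std::vector<float> bias = {10, 20, 30};
      for (int b = 0; b < bs; ++b) {
        for (int j = 0; j < size; ++j)
          printf("%g ", in0[b][j] + in1[b][j] + bias[j]);
        printf("\n");  // 15 26 37  /  19 30 41
      }
    }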
diff --git a/paddle/gserver/layers/MKLDNNAddtoLayer.h b/paddle/gserver/layers/MKLDNNAddtoLayer.h
index 15f74ec5bdf3d1e4ae5e09051be6be418590a67a..24504b7b4f50726e2b2757ca3029461cdc27b411 100644
--- a/paddle/gserver/layers/MKLDNNAddtoLayer.h
+++ b/paddle/gserver/layers/MKLDNNAddtoLayer.h
@@ -32,9 +32,15 @@ protected:
// layer size == ic * ih * iw == oc * oh *ow, and can not be changed
size_t layerSize_;
- // TODO(TJ): this part has not been optimized by MKL-DNN
std::unique_ptr<Weight> biases_;
+ // buffers for adding bias
+ std::vector<MKLDNNMatrixPtr> vals_;
+ std::vector<MKLDNNMatrixPtr> grads_;
+ // primitives for adding bias
+ std::vector<std::shared_ptr<mkldnn::sum>> fwdBias_;
+ std::shared_ptr<mkldnn::sum> bwdBias_;
+
public:
explicit MKLDNNAddtoLayer(const LayerConfig& config) : MKLDNNLayer(config) {}
@@ -91,20 +97,34 @@ protected:
* reset pipeline.
*/
void resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+ MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
void resetFwdPD(std::shared_ptr<sum::primitive_desc>& pd,
+ std::shared_ptr<sum::primitive_desc>& biasPD,
std::vector<MKLDNNMatrixPtr>& inputs,
+ MKLDNNMatrixPtr bias,
MKLDNNMatrixPtr out);
void resetFwdPipeline(std::vector<primitive>& pipeline,
std::shared_ptr<sum::primitive_desc>& pd,
+ std::shared_ptr<sum::primitive_desc>& biasPD,
std::vector<MKLDNNMatrixPtr>& inputs,
+ MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
/**
* Backward functions: reset buffers(inputs, output, bias)
*/
void resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+ MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
+
+ /**
+ * prepare for bias
+ */
+ void prepareBias(MKLDNNMatrixPtr& bias,
+ const MatrixPtr& biasMat,
+ const MKLDNNMatrixPtr& out,
+ std::vector<MKLDNNMatrixPtr>& outs);
};
} // namespace paddle
diff --git a/paddle/gserver/layers/MKLDNNBatchNormLayer.cpp b/paddle/gserver/layers/MKLDNNBatchNormLayer.cpp
index 9b0ae20f089e34a719883bc65e88e33ab9334e39..071bdf54d5dc9538d5ced580a73b9c0fbcea41fb 100644
--- a/paddle/gserver/layers/MKLDNNBatchNormLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNBatchNormLayer.cpp
@@ -119,13 +119,12 @@ void MKLDNNBatchNormLayer::reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) {
reshapeInput(bs, ih, iw);
oh = ih;
- ow = ow;
+ ow = iw;
// ic_ and oc can not be changed
CHECK_EQ(inputElemenCnt_ / bs / ih / iw, (size_t)ic)
<< "Input channel can not be changed";
reshapeOutput(oh, ow);
resizeOutput(bs, oc * oh * ow);
- printSizeInfo();
}
void MKLDNNBatchNormLayer::resetFwd(std::vector& pipeline,
diff --git a/paddle/gserver/layers/MKLDNNConvLayer.cpp b/paddle/gserver/layers/MKLDNNConvLayer.cpp
index b8120eda1e2dadab943869a05546351a369af6fd..8aa54e0a9efa7adb766cbb6009f6a29410c6ae7d 100644
--- a/paddle/gserver/layers/MKLDNNConvLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNConvLayer.cpp
@@ -102,8 +102,6 @@ void MKLDNNConvLayer::reshape(
reshapeOutput(oh, ow);
resizeOutput(bs, oc * oh * ow);
-
- printSizeInfo();
}
void MKLDNNConvLayer::resetFwd(std::vector& pipeline,
diff --git a/paddle/gserver/layers/MKLDNNConvLayer.h b/paddle/gserver/layers/MKLDNNConvLayer.h
index 1fed0e1c6565b763a3ee73a0853f560ddfbd44c6..9c69136684e5f9005860b476ec6ed1bbc9ceff6c 100644
--- a/paddle/gserver/layers/MKLDNNConvLayer.h
+++ b/paddle/gserver/layers/MKLDNNConvLayer.h
@@ -92,7 +92,7 @@ public:
void printSizeInfo() override {
MKLDNNLayer::printSizeInfo();
VLOG(MKLDNN_SIZES) << getName() << ": fh: " << fh_ << ", fw: " << fw_
- << ": ph: " << ph_ << ", pw: " << pw_ << ", sh: " << sh_
+ << ", ph: " << ph_ << ", pw: " << pw_ << ", sh: " << sh_
<< ", sw: " << sw_ << ", dh: " << dh_ << ", dw: " << dw_;
}
diff --git a/paddle/gserver/layers/MKLDNNFcLayer.cpp b/paddle/gserver/layers/MKLDNNFcLayer.cpp
index d82063a7130ca928ba042e210eb216f90c7207cd..350ec65fffbc73c3a6e4245f763f4c6aa868f574 100644
--- a/paddle/gserver/layers/MKLDNNFcLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNFcLayer.cpp
@@ -60,18 +60,16 @@ void MKLDNNFcLayer::convertWeightsFromPaddle() {
}
CHECK(wgtVal_) << "should have been initialized";
- bool hasNoSpatial_ = ih_ == 1 && iw_ == 1;
auto targetDim = wgtVal_->getDims();
- auto srcFmt = hasNoSpatial_ ? format::io : format::ihwo;
+ auto srcFmt = targetDim.size() == 2 ? format::io : format::ihwo;
wgtVal_->reorderDataFrom(wgtVal_, srcFmt, targetDim);
hasInitedWgt_ = true;
}
void MKLDNNFcLayer::convertWeightsToPaddle() {
CHECK(wgtVal_) << "should have been initialized";
- bool hasNoSpatial_ = ih_ == 1 && iw_ == 1;
auto targetDim = wgtVal_->getDims();
- auto dstFmt = hasNoSpatial_ ? format::io : format::ihwo;
+ auto dstFmt = targetDim.size() == 2 ? format::io : format::ihwo;
wgtVal_->reorderDataTo(wgtVal_, dstFmt, targetDim);
}
@@ -86,8 +84,6 @@ void MKLDNNFcLayer::reshape(
reshapeOutput(oh, ow);
resizeOutput(bs, oc);
-
- printSizeInfo();
}
void MKLDNNFcLayer::resetFwd(std::vector& pipeline,
diff --git a/paddle/gserver/layers/MKLDNNLayer.cpp b/paddle/gserver/layers/MKLDNNLayer.cpp
index 5fd62f4f73b18df683ccf74143e45054c3631c22..e75ac5ba4647a8267b7bc189893bd7adb5c3053f 100644
--- a/paddle/gserver/layers/MKLDNNLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNLayer.cpp
@@ -181,21 +181,17 @@ void MKLDNNLayer::resetInValue(
auto extPD = MKLDNNMatrix::createPrimitiveDesc(
{bs_, ic_, ih_, iw_}, format::nchw, engine_);
const MatrixPtr& inMat = inputLayers_[inputIdx]->getOutputValue();
- in = std::dynamic_pointer_cast<MKLDNNMatrix>(inMat);
- CHECK_EQ(inputIsOnlyMKLDNN(), in != nullptr);
- if (in == nullptr || in->getFormat() == format::nc) {
- in = MKLDNNMatrix::create(extPD, inMat);
- }
- extInVal_ = isPaddleFormat(in->getFormat()) ? in : nullptr;
- if (in->getFormat() == format::nc) {
- CHECK(ih_ == 1 && iw_ == 1);
+ extInVal_ = std::dynamic_pointer_cast<MKLDNNMatrix>(inMat);
+ CHECK_EQ(inputIsOnlyMKLDNN(), extInVal_ != nullptr);
+ if (extInVal_ == nullptr || extInVal_->getFormat() == format::nc) {
+ extInVal_ = MKLDNNMatrix::create(extPD, inMat);
}
+ in = extInVal_;
if (nullptr == intPD || in->getPrimitiveDesc() == *intPD) {
return;
}
// need create reorder
in = MKLDNNMatrix::create(*intPD);
- extInVal_ = extInVal_ ? extInVal_ : MKLDNNMatrix::create(extPD, inMat);
cvtInVal_ = MKLDNNMatrix::createReorder(extInVal_, in);
CHECK(cvtInVal_) << "should not be empty";
}
@@ -291,7 +287,7 @@ void MKLDNNLayer::resetMergeGrad(MKLDNNMatrixPtr& out) {
return;
}
CHECK(out) << "should have reset internal output grad";
- std::vector<double> scales(outputMap_.size(), 1.0);
+ std::vector<float> scales(outputMap_.size(), 1.0);
std::vector<memory::primitive_desc> srcPDs;
std::vector<primitive::at> srcs;
for (auto it = outputMap_.begin(); it != outputMap_.end(); ++it) {
diff --git a/paddle/gserver/layers/MKLDNNPoolLayer.cpp b/paddle/gserver/layers/MKLDNNPoolLayer.cpp
index 6e89260f49979d4edb4da138507a73dc2bf120de..a18c455beab96ef25b5545281bae4d48cec98d9e 100644
--- a/paddle/gserver/layers/MKLDNNPoolLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNPoolLayer.cpp
@@ -71,8 +71,6 @@ void MKLDNNPoolLayer::reshape(
reshapeOutput(oh, ow);
resizeOutput(bs, oc * oh * ow);
-
- printSizeInfo();
}
void MKLDNNPoolLayer::resetFwd(std::vector& pipeline,
diff --git a/paddle/gserver/layers/MaxPoolWithMaskLayer.cpp b/paddle/gserver/layers/MaxPoolWithMaskLayer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d810a58d9a3aea4333806dc9805d3444c3772ba3
--- /dev/null
+++ b/paddle/gserver/layers/MaxPoolWithMaskLayer.cpp
@@ -0,0 +1,109 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "MaxPoolWithMaskLayer.h"
+#include "paddle/utils/Logging.h"
+#include "paddle/utils/Stat.h"
+
+namespace paddle {
+
+bool MaxPoolWithMaskLayer::init(const LayerMap& layerMap,
+ const ParameterMap& parameterMap) {
+ PoolLayer::init(layerMap, parameterMap);
+ setOutput("mask", &mask_);
+ return true;
+}
+
+size_t MaxPoolWithMaskLayer::getSize() {
+ CHECK_EQ(inputLayers_.size(), 1UL);
+ size_t layerSize = 0;
+
+ outputY_ = outputSize(imgSizeY_,
+ sizeY_,
+ confPaddingY_,
+ strideY_,
+ /* caffeMode */ false);
+ outputX_ = outputSize(imgSize_,
+ sizeX_,
+ confPadding_,
+ stride_,
+ /* caffeMode */ false);
+
+ layerSize = outputX_ * outputY_ * channels_;
+ getOutput().setFrameHeight(outputY_);
+ getOutput().setFrameWidth(outputX_);
+
+ return layerSize;
+}
+
+void MaxPoolWithMaskLayer::forward(PassType passType) {
+ size_t size = getSize();
+ MatrixPtr inputV = inputLayers_[0]->getOutputValue();
+ int batchSize = inputV->getHeight();
+ resetOutput(batchSize, size);
+
+ MatrixPtr outV = getOutputValue();
+ CHECK_EQ(size, outV->getWidth());
+
+ resetSpecifyOutput(mask_,
+ batchSize,
+ size,
+ /* isValueClean */ false,
+ /* isGradClean */ true);
+
+ MatrixPtr maskV = mask_.value;
+ outV->maxPoolForward(*inputV,
+ imgSizeY_,
+ imgSize_,
+ channels_,
+ sizeX_,
+ sizeY_,
+ strideY_,
+ stride_,
+ outputY_,
+ outputX_,
+ confPaddingY_,
+ confPadding_,
+ maskV);
+}
+
+void MaxPoolWithMaskLayer::backward(const UpdateCallback& callback) {
+ (void)callback;
+ if (NULL == getInputGrad(0)) {
+ return;
+ }
+
+ MatrixPtr outGrad = getOutputGrad();
+ MatrixPtr inputV = inputLayers_[0]->getOutputValue();
+ MatrixPtr outV = getOutputValue();
+ MatrixPtr inputGrad = inputLayers_[0]->getOutputGrad();
+
+ inputGrad->maxPoolBackward(*inputV,
+ imgSizeY_,
+ imgSize_,
+ *outGrad,
+ *outV,
+ sizeX_,
+ sizeY_,
+ strideY_,
+ stride_,
+ outputY_,
+ outputX_,
+ 1,
+ 1,
+ confPaddingY_,
+ confPadding_);
+}
+
+} // namespace paddle
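The layer above differs from plain max pooling only in that it records, per output cell, the flat input index of the maximum, so the backward pass can route each gradient straight to its source. A self-contained single-channel sketch of the idea (padding and caffeMode handling omitted for brevity):

```cpp
#include <cassert>
#include <vector>

// Single-channel max pooling that also records, for each output cell,
// the flat input index of the maximum (the "mask").
void maxPoolWithMask(const std::vector<float>& in, int H, int W,
                     int k, int stride,
                     std::vector<float>& out, std::vector<int>& mask) {
  int oh = (H - k) / stride + 1, ow = (W - k) / stride + 1;
  out.assign(oh * ow, -1e30f);
  mask.assign(oh * ow, -1);
  for (int ph = 0; ph < oh; ++ph)
    for (int pw = 0; pw < ow; ++pw) {
      int o = ph * ow + pw;
      for (int h = ph * stride; h < ph * stride + k; ++h)
        for (int w = pw * stride; w < pw * stride + k; ++w) {
          int idx = h * W + w;
          if (in[idx] > out[o]) { out[o] = in[idx]; mask[o] = idx; }
        }
    }
}

int main() {
  std::vector<float> in = {1, 2, 3, 4};  // 2x2 image
  std::vector<float> out;
  std::vector<int> mask;
  maxPoolWithMask(in, 2, 2, /*k=*/2, /*stride=*/1, out, mask);
  assert(out[0] == 4 && mask[0] == 3);  // max sits at flat index 3
  return 0;
}
```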
diff --git a/paddle/operators/fill_constant_op.h b/paddle/gserver/layers/MaxPoolWithMaskLayer.h
similarity index 50%
rename from paddle/operators/fill_constant_op.h
rename to paddle/gserver/layers/MaxPoolWithMaskLayer.h
index 3668f42f1c29541e29463ff3969064e80703fa04..e0174add9d944930289f2bdf78d9f730fd1fcc7d 100644
--- a/paddle/operators/fill_constant_op.h
+++ b/paddle/gserver/layers/MaxPoolWithMaskLayer.h
@@ -13,25 +13,28 @@ See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
-#include "paddle/framework/eigen.h"
-#include "paddle/framework/op_registry.h"
+
+#include <vector>
+#include "PoolLayer.h"
+#include "paddle/math/Matrix.h"
namespace paddle {
-namespace operators {
-
-template <typename Place, typename T>
-class FillConstantOpKernel : public framework::OpKernel<T> {
- public:
- void Compute(const framework::ExecutionContext& ctx) const override {
- auto* out = ctx.Output<framework::Tensor>("Out");
- out->mutable_data<T>(ctx.GetPlace());
- auto value = ctx.Attr<float>("value");
-
- auto out_eigen = framework::EigenVector<T>::Flatten(*out);
- auto place = ctx.GetEigenDevice<Place>();
- out_eigen.device(place) = out_eigen.constant(static_cast<T>(value));
- }
+/**
+ * @brief Max pooling layer that also outputs the flat index (mask) of the
+ *        maximum value within each pooling region
+ */
+class MaxPoolWithMaskLayer : public PoolLayer {
+protected:
+ Argument mask_;
+
+public:
+ explicit MaxPoolWithMaskLayer(const LayerConfig& config)
+ : PoolLayer(config) {}
+
+ size_t getSize();
+
+ void forward(PassType passType) override;
+ void backward(const UpdateCallback& callback = nullptr) override;
+ bool init(const LayerMap& layerMap,
+ const ParameterMap& parameterMap) override;
};
-
-} // namespace operators
} // namespace paddle
diff --git a/paddle/gserver/layers/PoolLayer.cpp b/paddle/gserver/layers/PoolLayer.cpp
index 7b932d5a76e9c4fe7cbe5882bbc19eb3de4b503a..87613a96c5b3c2da212f63e9e678bcd22308b08e 100644
--- a/paddle/gserver/layers/PoolLayer.cpp
+++ b/paddle/gserver/layers/PoolLayer.cpp
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "PoolLayer.h"
+#include "MaxPoolWithMaskLayer.h"
#include "PoolProjectionLayer.h"
#include "paddle/utils/Logging.h"
#ifdef PADDLE_WITH_CUDA
@@ -44,7 +45,6 @@ bool PoolLayer::init(const LayerMap& layerMap,
strideY_ = conf.has_stride_y() ? conf.stride_y() : conf.stride();
confPaddingY_ = conf.has_padding_y() ? conf.padding_y() : conf.padding();
outputY_ = conf.has_output_y() ? conf.output_y() : conf.output_x();
-
return true;
}
@@ -57,6 +57,8 @@ Layer* PoolLayer::create(const LayerConfig& config) {
} else if (CudnnPoolLayer::typeCheck(pool)) {
return new CudnnPoolLayer(config);
#endif
+ } else if (pool == "max-pool-with-mask") {
+ return new MaxPoolWithMaskLayer(config);
} else {
LOG(FATAL) << "Unknown pool type: " << pool;
return nullptr;
diff --git a/paddle/gserver/layers/ROIPoolLayer.cpp b/paddle/gserver/layers/ROIPoolLayer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..35d4b12d3d357800fe72899069b5377c252fac5f
--- /dev/null
+++ b/paddle/gserver/layers/ROIPoolLayer.cpp
@@ -0,0 +1,220 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "ROIPoolLayer.h"
+
+namespace paddle {
+
+REGISTER_LAYER(roi_pool, ROIPoolLayer);
+
+bool ROIPoolLayer::init(const LayerMap& layerMap,
+ const ParameterMap& parameterMap) {
+ Layer::init(layerMap, parameterMap);
+
+ const ROIPoolConfig& layerConf = config_.inputs(0).roi_pool_conf();
+ pooledWidth_ = layerConf.pooled_width();
+ pooledHeight_ = layerConf.pooled_height();
+ spatialScale_ = layerConf.spatial_scale();
+
+ return true;
+}
+
+void ROIPoolLayer::forward(PassType passType) {
+ Layer::forward(passType);
+
+ const ROIPoolConfig& layerConf = config_.inputs(0).roi_pool_conf();
+ height_ = getInput(0).getFrameHeight();
+ if (!height_) height_ = layerConf.height();
+ width_ = getInput(0).getFrameWidth();
+ if (!width_) width_ = layerConf.width();
+ channels_ = getInputValue(0)->getWidth() / width_ / height_;
+
+ size_t batchSize = getInput(0).getBatchSize();
+ size_t numROIs = getInput(1).getBatchSize();
+
+ MatrixPtr dataValue = getInputValue(0);
+ MatrixPtr roiValue = getInputValue(1);
+ resetOutput(numROIs, channels_ * pooledHeight_ * pooledWidth_);
+ MatrixPtr outputValue = getOutputValue();
+
+ if (useGpu_) { // TODO(guosheng): implement on GPU later
+ MatrixPtr dataCpuBuffer;
+ Matrix::resizeOrCreate(dataCpuBuffer,
+ dataValue->getHeight(),
+ dataValue->getWidth(),
+ false,
+ false);
+ MatrixPtr roiCpuBuffer;
+ Matrix::resizeOrCreate(roiCpuBuffer,
+ roiValue->getHeight(),
+ roiValue->getWidth(),
+ false,
+ false);
+ dataCpuBuffer->copyFrom(*dataValue);
+ roiCpuBuffer->copyFrom(*roiValue);
+ dataValue = dataCpuBuffer;
+ roiValue = roiCpuBuffer;
+ MatrixPtr outputCpuBuffer;
+ Matrix::resizeOrCreate(outputCpuBuffer,
+ outputValue->getHeight(),
+ outputValue->getWidth(),
+ false,
+ false);
+ outputCpuBuffer->copyFrom(*outputValue);
+ outputValue = outputCpuBuffer;
+ }
+
+ real* bottomData = dataValue->getData();
+ size_t batchOffset = dataValue->getWidth();
+ size_t channelOffset = height_ * width_;
+ real* bottomROIs = roiValue->getData();
+ size_t roiOffset = roiValue->getWidth();
+ size_t poolChannelOffset = pooledHeight_ * pooledWidth_;
+
+ real* outputData = outputValue->getData();
+ Matrix::resizeOrCreate(maxIdxs_,
+ numROIs,
+ channels_ * pooledHeight_ * pooledWidth_,
+ false,
+ false);
+ real* argmaxData = maxIdxs_->getData();
+
+ for (size_t n = 0; n < numROIs; ++n) {
+ // the first five elements of each RoI should be:
+ // batch_idx, roi_x_start, roi_y_start, roi_x_end, roi_y_end
+ size_t roiBatchIdx = bottomROIs[0];
+ size_t roiStartW = round(bottomROIs[1] * spatialScale_);
+ size_t roiStartH = round(bottomROIs[2] * spatialScale_);
+ size_t roiEndW = round(bottomROIs[3] * spatialScale_);
+ size_t roiEndH = round(bottomROIs[4] * spatialScale_);
+ CHECK_GE(roiBatchIdx, 0UL);
+ CHECK_LT(roiBatchIdx, batchSize);
+ size_t roiHeight = std::max(roiEndH - roiStartH + 1, 1UL);
+ size_t roiWidth = std::max(roiEndW - roiStartW + 1, 1UL);
+ real binSizeH =
+ static_cast<real>(roiHeight) / static_cast<real>(pooledHeight_);
+ real binSizeW =
+ static_cast<real>(roiWidth) / static_cast<real>(pooledWidth_);
+ real* batchData = bottomData + batchOffset * roiBatchIdx;
+ for (size_t c = 0; c < channels_; ++c) {
+ for (size_t ph = 0; ph < pooledHeight_; ++ph) {
+ for (size_t pw = 0; pw < pooledWidth_; ++pw) {
+ size_t hstart = static_cast<size_t>(std::floor(ph * binSizeH));
+ size_t wstart = static_cast<size_t>(std::floor(pw * binSizeW));
+ size_t hend = static_cast<size_t>(std::ceil((ph + 1) * binSizeH));
+ size_t wend = static_cast<size_t>(std::ceil((pw + 1) * binSizeW));
+ hstart = std::min(std::max(hstart + roiStartH, 0UL), height_);
+ wstart = std::min(std::max(wstart + roiStartW, 0UL), width_);
+ hend = std::min(std::max(hend + roiStartH, 0UL), height_);
+ wend = std::min(std::max(wend + roiStartW, 0UL), width_);
+
+ bool isEmpty = (hend <= hstart) || (wend <= wstart);
+ size_t poolIndex = ph * pooledWidth_ + pw;
+ if (isEmpty) {
+ outputData[poolIndex] = 0;
+ argmaxData[poolIndex] = -1;
+ }
+
+ for (size_t h = hstart; h < hend; ++h) {
+ for (size_t w = wstart; w < wend; ++w) {
+ size_t index = h * width_ + w;
+ if (batchData[index] > outputData[poolIndex]) {
+ outputData[poolIndex] = batchData[index];
+ argmaxData[poolIndex] = index;
+ }
+ }
+ }
+ }
+ }
+ batchData += channelOffset;
+ outputData += poolChannelOffset;
+ argmaxData += poolChannelOffset;
+ }
+ bottomROIs += roiOffset;
+ }
+ if (useGpu_) {
+ getOutputValue()->copyFrom(*outputValue);
+ }
+}
+
+void ROIPoolLayer::backward(const UpdateCallback& callback) {
+ MatrixPtr inGradValue = getInputGrad(0);
+ MatrixPtr outGradValue = getOutputGrad();
+ MatrixPtr roiValue = getInputValue(1);
+
+ if (useGpu_) {
+ MatrixPtr inGradCpuBuffer;
+ Matrix::resizeOrCreate(inGradCpuBuffer,
+ inGradValue->getHeight(),
+ inGradValue->getWidth(),
+ false,
+ false);
+ MatrixPtr outGradCpuBuffer;
+ Matrix::resizeOrCreate(outGradCpuBuffer,
+ outGradValue->getHeight(),
+ outGradValue->getWidth(),
+ false,
+ false);
+ MatrixPtr roiCpuBuffer;
+ Matrix::resizeOrCreate(roiCpuBuffer,
+ roiValue->getHeight(),
+ roiValue->getWidth(),
+ false,
+ false);
+ inGradCpuBuffer->copyFrom(*inGradValue);
+ outGradCpuBuffer->copyFrom(*outGradValue);
+ roiCpuBuffer->copyFrom(*roiValue);
+ inGradValue = inGradCpuBuffer;
+ outGradValue = outGradCpuBuffer;
+ roiValue = roiCpuBuffer;
+ }
+
+ real* bottomROIs = roiValue->getData();
+ size_t numROIs = getInput(1).getBatchSize();
+ size_t roiOffset = getInputValue(1)->getWidth();
+
+ real* inDiffData = inGradValue->getData();
+ size_t batchOffset = getInputValue(0)->getWidth();
+ size_t channelOffset = height_ * width_;
+
+ real* outDiffData = outGradValue->getData();
+ size_t poolChannelOffset = pooledHeight_ * pooledWidth_;
+ real* argmaxData = maxIdxs_->getData();
+
+ for (size_t n = 0; n < numROIs; ++n) {
+ size_t roiBatchIdx = bottomROIs[0];
+ real* batchDiffData = inDiffData + batchOffset * roiBatchIdx;
+ for (size_t c = 0; c < channels_; ++c) {
+ for (size_t ph = 0; ph < pooledHeight_; ++ph) {
+ for (size_t pw = 0; pw < pooledWidth_; ++pw) {
+ size_t poolIndex = ph * pooledWidth_ + pw;
+ if (argmaxData[poolIndex] >= 0) {
+ size_t index = static_cast<size_t>(argmaxData[poolIndex]);
+ batchDiffData[index] += outDiffData[poolIndex];
+ }
+ }
+ }
+ batchDiffData += channelOffset;
+ outDiffData += poolChannelOffset;
+ argmaxData += poolChannelOffset;
+ }
+ bottomROIs += roiOffset;
+ }
+
+ if (useGpu_) {
+ getInputGrad(0)->copyFrom(*inGradValue);
+ }
+}
+
+} // namespace paddle
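The nested loops above map each ROI onto `pooledHeight_ x pooledWidth_` bins: bin boundaries come from flooring and ceiling the fractional bin size, then clipping to the image, which is why adjacent bins may overlap by a row or column. A standalone sketch of just that boundary computation (the helper name is illustrative):

```cpp
#include <algorithm>
#include <cassert>
#include <cmath>

// Bin (ph) of an ROI covers rows [floor(ph * binH), ceil((ph + 1) * binH)),
// shifted by the ROI start and clipped to the image.
void binRange(int ph, float binH, int roiStart, int imgSize,
              int* start, int* end) {
  *start = std::min(std::max(roiStart + (int)std::floor(ph * binH), 0), imgSize);
  *end = std::min(std::max(roiStart + (int)std::ceil((ph + 1) * binH), 0), imgSize);
}

int main() {
  // A 7-row ROI starting at row 3, pooled into 2 bins: binH = 3.5.
  float binH = 7.0f / 2.0f;
  int s, e;
  binRange(0, binH, 3, 14, &s, &e);
  assert(s == 3 && e == 7);   // rows 3..6
  binRange(1, binH, 3, 14, &s, &e);
  assert(s == 6 && e == 10);  // rows 6..9 (row 6 shared with bin 0)
  return 0;
}
```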
diff --git a/paddle/gserver/layers/ROIPoolLayer.h b/paddle/gserver/layers/ROIPoolLayer.h
new file mode 100644
index 0000000000000000000000000000000000000000..4f07e49d6fd1eda9fa7bd46e4cec771a75f571be
--- /dev/null
+++ b/paddle/gserver/layers/ROIPoolLayer.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "Layer.h"
+
+namespace paddle {
+
+/**
+ * A layer used by Fast R-CNN to extract feature maps of ROIs from the last
+ * feature map.
+ * - Input: This layer takes two input layers: the first is a convolution
+ * layer providing the feature map; the second contains the ROI data,
+ * e.g. the output of the ProposalLayer in Faster R-CNN, whose pooled
+ * features later feed the bbox-offset and classification branches.
+ * - Output: The ROIs' feature map.
+ * Reference:
+ * Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun.
+ * Faster R-CNN: Towards Real-Time Object Detection with Region Proposal
+ * Networks
+ */
+
+class ROIPoolLayer : public Layer {
+protected:
+ size_t channels_;
+ size_t width_;
+ size_t height_;
+ size_t pooledWidth_;
+ size_t pooledHeight_;
+ real spatialScale_;
+
+ // Since there is no int matrix, use a real matrix instead.
+ MatrixPtr maxIdxs_;
+
+public:
+ explicit ROIPoolLayer(const LayerConfig& config) : Layer(config) {}
+
+ bool init(const LayerMap& layerMap,
+ const ParameterMap& parameterMap) override;
+
+ void forward(PassType passType) override;
+ void backward(const UpdateCallback& callback = nullptr) override;
+};
+} // namespace paddle
diff --git a/paddle/gserver/layers/ScaleSubRegionLayer.cpp b/paddle/gserver/layers/ScaleSubRegionLayer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..aa6778aef4e893208fd064ca22e217c6c4d960f9
--- /dev/null
+++ b/paddle/gserver/layers/ScaleSubRegionLayer.cpp
@@ -0,0 +1,78 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "ScaleSubRegionLayer.h"
+#include "paddle/utils/Stat.h"
+namespace paddle {
+
+REGISTER_LAYER(scale_sub_region, ScaleSubRegionLayer);
+
+bool ScaleSubRegionLayer::init(const LayerMap& layerMap,
+ const ParameterMap& parameterMap) {
+ Layer::init(layerMap, parameterMap);
+ CHECK_EQ(static_cast<int>(inputLayers_.size()), 2);
+ auto& conf = config_.inputs(0).scale_sub_region_conf();
+ value_ = conf.value();
+
+ createFunction(forward_, "ScaleSubRegion", FuncConfig().set("value", value_));
+ createFunction(
+ backward_, "ScaleSubRegionGrad", FuncConfig().set("value", value_));
+
+ return true;
+}
+
+void ScaleSubRegionLayer::forward(PassType passType) {
+ Layer::forward(passType);
+ auto in0 = getInput(0);
+ imgH_ = in0.getFrameHeight();
+ imgW_ = in0.getFrameWidth();
+ if (imgH_ == 0 || imgW_ == 0) {
+ auto& conf = config_.inputs(0).scale_sub_region_conf();
+ imgH_ = conf.image_conf().img_size_y();
+ imgW_ = conf.image_conf().img_size();
+ }
+ MatrixPtr imgV = in0.value;
+ size_t batchSize = imgV->getHeight();
+ size_t spatialSize = imgH_ * imgW_;
+ channelsNum_ = imgV->getWidth() / spatialSize;
+ shape_ = TensorShape({batchSize, channelsNum_, imgH_, imgW_});
+
+ resetOutput(batchSize, imgV->getWidth());
+ auto& out = getOutput();
+ out.setFrameHeight(imgH_);
+ out.setFrameWidth(imgW_);
+
+ MatrixPtr indicesV = getInputValue(1);
+ indicesShape_ = TensorShape({batchSize, 6});
+
+ REGISTER_TIMER_INFO("ScaleSubRegionForward", getName().c_str());
+ BufferArgs inArgs;
+ BufferArgs outArgs;
+ inArgs.addArg(*imgV, shape_);
+ inArgs.addArg(*indicesV, indicesShape_);
+ outArgs.addArg(*out.value, shape_, ASSIGN_TO);
+ forward_[0]->calc(inArgs, outArgs);
+}
+
+void ScaleSubRegionLayer::backward(const UpdateCallback& callback) {
+ REGISTER_TIMER_INFO("ScaleSubRegionBackward", getName().c_str());
+ BufferArgs inArgs;
+ BufferArgs outArgs;
+ inArgs.addArg(*getOutputGrad(), shape_);
+ inArgs.addArg(*getInputValue(1), indicesShape_);
+ outArgs.addArg(*getInputGrad(0), shape_, ADD_TO);
+ backward_[0]->calc(inArgs, outArgs);
+}
+
+} // namespace paddle
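Summarizing the forward pass above: each sample's CHW image is scaled by `value_` inside the box given by its six indices. A CPU sketch of the intended element-wise effect, assuming (as the test later in this patch suggests) 1-based inclusive start/end indices for C, H and W:

```cpp
#include <cassert>
#include <vector>

// Multiply value over the sub-region [c0,c1] x [h0,h1] x [w0,w1] of one
// CHW tensor; indices are taken as 1-based inclusive (an assumption of
// this sketch, matching the test configuration).
void scaleSubRegion(std::vector<float>& img, int C, int H, int W,
                    int c0, int c1, int h0, int h1, int w0, int w1,
                    float value) {
  for (int c = c0 - 1; c < c1; ++c)
    for (int h = h0 - 1; h < h1; ++h)
      for (int w = w0 - 1; w < w1; ++w)
        img[(c * H + h) * W + w] *= value;
}

int main() {
  std::vector<float> img(1 * 2 * 2, 1.0f);  // C=1, H=2, W=2
  scaleSubRegion(img, 1, 2, 2, /*c*/ 1, 1, /*h*/ 1, 1, /*w*/ 1, 2, 2.0f);
  assert(img[0] == 2.0f && img[1] == 2.0f && img[2] == 1.0f);
  return 0;
}
```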
diff --git a/paddle/gserver/layers/ScaleSubRegionLayer.h b/paddle/gserver/layers/ScaleSubRegionLayer.h
new file mode 100644
index 0000000000000000000000000000000000000000..a27c56de93bb6fdde0f95cd4c5abe5dfabe4e858
--- /dev/null
+++ b/paddle/gserver/layers/ScaleSubRegionLayer.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "Layer.h"
+
+namespace paddle {
+
+/**
+ * \brief For each instance, this layer multiplies the values inside a
+ * specified continuous sub-region by a given factor. By providing
+ * start and end indices for C/H/W, you can specify the location
+ * and shape of the region.
+ *
+ * input_0: Input value.
+ * input_1: Indices specifying the location and shape of the
+ * region.
+ */
+class ScaleSubRegionLayer : public Layer {
+public:
+ explicit ScaleSubRegionLayer(const LayerConfig& config) : Layer(config) {}
+
+ ~ScaleSubRegionLayer() {}
+
+ bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
+
+ void forward(PassType passType);
+
+ void backward(const UpdateCallback& callback = nullptr);
+
+protected:
+ TensorShape shape_;
+ TensorShape indicesShape_;
+ size_t imgH_;
+ size_t imgW_;
+ size_t channelsNum_;
+ real value_;
+};
+
+} // namespace paddle
diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt
index aa94ee406e27c86e6d49b6d2b5327a3f86bcacd6..4bea348f637f39444e8aad89278e6366ecd73b1d 100644
--- a/paddle/gserver/tests/CMakeLists.txt
+++ b/paddle/gserver/tests/CMakeLists.txt
@@ -1,9 +1,12 @@
# gserver package unittests
add_simple_unittest(test_LinearChainCRF)
-add_simple_unittest(test_MultinomialSampler)
add_simple_unittest(test_RecurrentLayer)
+if(NOT MOBILE_INFERENCE)
+ add_simple_unittest(test_MultinomialSampler)
+endif()
+
function(gserver_test TARGET)
add_unittest_without_exec(${TARGET}
${TARGET}.cpp
@@ -24,6 +27,7 @@ gserver_test(test_ConvUnify)
gserver_test(test_BatchNorm)
gserver_test(test_KmaxSeqScore)
gserver_test(test_Expand)
+gserver_test(test_MaxPoolingWithMaskOutput)
########## test_Mkldnn layers and activations ##########
if(WITH_MKLDNN)
@@ -48,7 +52,7 @@ if(WITH_PYTHON)
endif()
############### test_WarpCTCLayer #######################
-if(NOT WITH_DOUBLE)
+if(NOT WITH_DOUBLE AND NOT MOBILE_INFERENCE)
add_unittest_without_exec(test_WarpCTCLayer
test_WarpCTCLayer.cpp)
diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp
index 1a46fb49153a0aa4228f58db481b950bc2d6de83..3517d293e3c901caaa19952b04e56d1ef0d2b46e 100644
--- a/paddle/gserver/tests/test_LayerGrad.cpp
+++ b/paddle/gserver/tests/test_LayerGrad.cpp
@@ -53,7 +53,7 @@ TEST(Operator, dot_mul) {
TEST(Projection, context) {
for (auto contextStart : {-5, -3, -1, 0, 3}) {
for (auto contextLength : {1, 2, 5, 7}) {
- for (auto batchSize : {1, 2, 5, 20, 50}) {
+ for (auto batchSize : {1, 2, 5, 20}) {
for (auto trainablePadding : {false, true}) {
LOG(INFO) << " contextStart=" << contextStart
<< " contextLength=" << contextLength
@@ -434,7 +434,7 @@ void testConvLayer(const string& type, bool trans, bool useGpu) {
config.layerConfig.set_partial_sum(1);
config.layerConfig.set_shared_biases(true);
- int dilation = 1;
+ int dilation = 2;
if (type == "cudnn_conv") {
#if CUDNN_VERSION >= 6000
dilation = 2;
@@ -585,14 +585,14 @@ TEST(Layer, maxoutLayer) {
}
void testFcLayer(string format, size_t nnz) {
TestConfig config;
- config.biasSize = 4096;
+ config.biasSize = 1024;
config.layerConfig.set_type("fc");
- config.layerConfig.set_size(4096);
+ config.layerConfig.set_size(1024);
config.layerConfig.set_active_type("sigmoid");
config.layerConfig.set_drop_rate(0.1);
config.inputDefs.push_back(
- {INPUT_DATA, "layer_0", 8192, nnz, ParaSparse(format)});
+ {INPUT_DATA, "layer_0", 2048, nnz, ParaSparse(format)});
config.layerConfig.add_inputs();
LOG(INFO) << config.inputDefs[0].sparse.sparse << " "
@@ -609,9 +609,9 @@ void testFcLayer(string format, size_t nnz) {
}
TEST(Layer, fcLayer) {
- testFcLayer("", 4096 * 4096 * 2);
- testFcLayer("csc", 4096 * 40);
- testFcLayer("csr", 4096 * 40);
+ testFcLayer("", 1024 * 1024 * 2);
+ testFcLayer("csc", 1024 * 10);
+ testFcLayer("csr", 1024 * 10);
}
TEST(Layer, SelectiveFullyConnectedLayer) {
@@ -1234,6 +1234,7 @@ void testPoolLayer2(const string& poolType, bool trans, bool useGpu) {
TEST(Layer, PoolLayer) {
testPoolLayer("avg-projection", /* trans= */ false, /* useGpu= */ false);
testPoolLayer("max-projection", /* trans= */ false, /* useGpu= */ false);
+ testPoolLayer("max-pool-with-mask", /* trans= */ false, /* useGpu= */ false);
#ifdef PADDLE_WITH_CUDA
testPoolLayer("avg-projection", /* trans= */ false, /* useGpu= */ true);
@@ -1242,6 +1243,7 @@ TEST(Layer, PoolLayer) {
testPoolLayer("cudnn-avg-pool", /* trans= */ false, /* useGpu= */ true);
testPoolLayer2("cudnn-max-pool", /* trans= */ false, /* useGpu= */ true);
testPoolLayer2("cudnn-avg-pool", /* trans= */ false, /* useGpu= */ true);
+ testPoolLayer("max-pool-with-mask", /* trans= */ false, /* useGpu= */ true);
#endif
}
@@ -1995,7 +1997,7 @@ TEST(Layer, multibox_loss) {
TEST(Layer, TransLayer) {
TestConfig config;
const int height = 128;
- const int width = 1028;
+ const int width = 256;
config.layerConfig.set_type("trans");
config.layerConfig.set_size(width);
@@ -2056,6 +2058,43 @@ TEST(Layer, CropLayer) {
}
}
+TEST(Layer, roi_pool) {
+ TestConfig config;
+ config.layerConfig.set_type("roi_pool");
+ config.biasSize = 0;
+ LayerInputConfig* input = config.layerConfig.add_inputs();
+ ROIPoolConfig* roiPoolConf = input->mutable_roi_pool_conf();
+ roiPoolConf->set_pooled_width(7);
+ roiPoolConf->set_pooled_height(7);
+ roiPoolConf->set_spatial_scale(1. / 16);
+ roiPoolConf->set_width(14);
+ roiPoolConf->set_height(14);
+
+ const size_t roiNum = 10;
+ const size_t roiDim = 10;
+ const size_t batchSize = 5;
+ MatrixPtr roiValue = Matrix::create(roiNum, roiDim, false, false);
+ roiValue->zeroMem();
+ real* roiData = roiValue->getData();
+ for (size_t i = 0; i < roiNum; ++i) {
+ roiData[i * roiDim + 0] = std::rand() % batchSize;
+ roiData[i * roiDim + 1] = std::rand() % 224; // xMin
+ roiData[i * roiDim + 2] = std::rand() % 224; // yMin
+ size_t xMin = static_cast<size_t>(roiData[i * roiDim + 1]);
+ size_t yMin = static_cast<size_t>(roiData[i * roiDim + 2]);
+ roiData[i * roiDim + 3] = xMin + std::rand() % (224 - xMin); // xMax
+ roiData[i * roiDim + 4] = yMin + std::rand() % (224 - yMin); // yMax
+ }
+
+ config.inputDefs.push_back({INPUT_DATA, "input", 3 * 14 * 14, {}});
+ config.inputDefs.push_back({INPUT_SELF_DEFINE_DATA, "rois", roiValue, {}});
+ config.layerConfig.add_inputs();
+
+ for (auto useGpu : {false, true}) {
+ testLayerGrad(config, "roi_pool", batchSize, false, useGpu, false);
+ }
+}
+
TEST(Layer, SwitchOrderLayer) {
TestConfig config;
// config input_0
@@ -2358,6 +2397,38 @@ TEST(Layer, ScaleShiftLayer) {
}
}
+TEST(Layer, ScaleSubRegionLayer) {
+ const size_t batchSize = 64;
+ const size_t size = 4096;
+ TestConfig config;
+ config.layerConfig.set_type("scale_sub_region");
+ config.inputDefs.push_back({INPUT_DATA, "input", size, 0});
+ MatrixPtr indicesV = Matrix::create(batchSize, 6, false, false);
+ auto* data = indicesV->getData();
+ for (size_t i = 0; i < batchSize; ++i) {
+ data[i * 6 + 0] = 2;
+ data[i * 6 + 1] = 4;
+ data[i * 6 + 2] = 16;
+ data[i * 6 + 3] = 32;
+ data[i * 6 + 4] = 16;
+ data[i * 6 + 5] = 32;
+ }
+ config.inputDefs.push_back({INPUT_SELF_DEFINE_DATA, "indices", indicesV, {}});
+ LayerInputConfig* input = config.layerConfig.add_inputs();
+ ScaleSubRegionConfig* scaleSubRegionConf =
+ input->mutable_scale_sub_region_conf();
+ ImageConfig* imgConf = scaleSubRegionConf->mutable_image_conf();
+ imgConf->set_img_size(32);
+ imgConf->set_img_size_y(32);
+ imgConf->set_channels(4);
+ scaleSubRegionConf->set_value(2.0);
+ config.layerConfig.add_inputs();
+
+ for (auto useGpu : {false, true}) {
+ testLayerGrad(config, "scale_sub_region", batchSize, false, useGpu, false);
+ }
+}
+
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
initMain(argc, argv);
diff --git a/paddle/gserver/tests/test_MKLDNN.cpp b/paddle/gserver/tests/test_MKLDNN.cpp
index 2e8d9f3333b36005c9b3b28449c76a4a44c74cc6..a859e34c8996d81f14bf1edcb6e23d5a4f687e6b 100644
--- a/paddle/gserver/tests/test_MKLDNN.cpp
+++ b/paddle/gserver/tests/test_MKLDNN.cpp
@@ -269,6 +269,7 @@ void testBatchNormLayer(const testBatchNormDesc& pm) {
TEST(MKLDNNLayer, BatchNormLayer) {
testBatchNormLayer({4, 10, 6, 6});
testBatchNormLayer({16, 32, 16, 16});
+ testBatchNormLayer({4, 16, 8, 10});
}
struct testImageDesc {
@@ -296,17 +297,12 @@ static void getAddtoConfig(TestConfig& cfg,
}
void testAddtoLayer(const testImageDesc& pm, const size_t nInputs) {
- CHECK_GE(nInputs, 1);
+ CHECK_GE(nInputs, 1UL);
TestConfig dnnConfig;
getAddtoConfig(dnnConfig, pm, nInputs);
dnnConfig.layerConfig.set_type("mkldnn_addto");
- // TODO(TJ): test with bias
- for (auto withBias : {false}) {
- if (withBias) {
- dnnConfig.biasSize = pm.ic * pm.ih * pm.iw;
- } else {
- dnnConfig.biasSize = 0;
- }
+ for (auto withBias : {false, true}) {
+ dnnConfig.biasSize = withBias ? pm.ic * pm.ih * pm.iw : 0;
RUN_MKLDNN_TEST_LAYER(dnnConfig, "addto", pm)
}
}
diff --git a/paddle/gserver/tests/test_MaxPoolingWithMaskOutput.cpp b/paddle/gserver/tests/test_MaxPoolingWithMaskOutput.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..16438886df94cab9d29d05924bb047e6c7f1f6fa
--- /dev/null
+++ b/paddle/gserver/tests/test_MaxPoolingWithMaskOutput.cpp
@@ -0,0 +1,117 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <gtest/gtest.h>
+#include <string>
+#include <vector>
+
+#include "LayerGradUtil.h"
+#include "paddle/math/MathUtils.h"
+#include "paddle/testing/TestUtil.h"
+
+using namespace paddle;
+
+void setPoolConfig(TestConfig* config,
+ PoolConfig* pool,
+ const string& poolType) {
+ (*config).biasSize = 0;
+ (*config).layerConfig.set_type("pool");
+ (*config).layerConfig.set_num_filters(1);
+
+ int kw = 3, kh = 3;
+ int pw = 0, ph = 0;
+ int sw = 2, sh = 2;
+ pool->set_pool_type(poolType);
+ pool->set_channels(1);
+ pool->set_size_x(kw);
+ pool->set_size_y(kh);
+ pool->set_start(0);
+ pool->set_padding(pw);
+ pool->set_padding_y(ph);
+ pool->set_stride(sw);
+ pool->set_stride_y(sh);
+
+ int ow = outputSize(pool->img_size(), kw, pw, sw, /* caffeMode */ false);
+ int oh = outputSize(pool->img_size_y(), kh, ph, sh, /* caffeMode */ false);
+ pool->set_output_x(ow);
+ pool->set_output_y(oh);
+}
+
+void doOneMaxPoolingWithMaskOutputTest(MatrixPtr& inputMat,
+ const string& poolType,
+ bool use_gpu,
+ MatrixPtr& maskMat) {
+ TestConfig config;
+ config.inputDefs.push_back({INPUT_DATA, "layer_0", 25, 0});
+ LayerInputConfig* input = config.layerConfig.add_inputs();
+ PoolConfig* pool = input->mutable_pool_conf();
+
+ pool->set_img_size(5);
+ pool->set_img_size_y(5);
+ setPoolConfig(&config, pool, poolType);
+ config.layerConfig.set_size(pool->output_x() * pool->output_y() *
+ pool->channels());
+
+ config.layerConfig.set_name("MaxPoolWithMask");
+
+ std::vector<DataLayerPtr> dataLayers;
+ LayerMap layerMap;
+ vector<Argument> datas;
+
+ initDataLayer(config,
+ &dataLayers,
+ &datas,
+ &layerMap,
+ "MaxPoolWithMask",
+ 1,
+ false,
+ use_gpu);
+
+ dataLayers[0]->getOutputValue()->copyFrom(*inputMat);
+
+ FLAGS_use_gpu = use_gpu;
+ std::vector<ParameterPtr> parameters;
+ LayerPtr maxPoolingWithMaskOutputLayer;
+ initTestLayer(config, &layerMap, &parameters, &maxPoolingWithMaskOutputLayer);
+ maxPoolingWithMaskOutputLayer->forward(PASS_GC);
+
+ checkMatrixEqual(maxPoolingWithMaskOutputLayer->getOutput("mask").value,
+ maskMat);
+}
+
+TEST(Layer, maxPoolingWithMaskOutputLayerFwd) {
+ bool useGpu = false;
+ MatrixPtr inputMat;
+ MatrixPtr maskMat;
+ real inputData[] = {0.1, 0.1, 0.5, 0.5, 1.1, 0.2, 0.2, 0.6, 0.1,
+ 0.1, 0.3, 0.3, 0.7, 0.1, 0.1, 0.4, 0.4, 0.8,
+ 0.8, 0.1, 1.0, 2.0, 3.0, 0.0, 9.0};
+ real maskData[] = {12, 4, 22, 24};
+
+ inputMat = Matrix::create(1, 25, false, useGpu);
+ maskMat = Matrix::create(1, 4, false, useGpu);
+ inputMat->setData(inputData);
+ maskMat->setData(maskData);
+ doOneMaxPoolingWithMaskOutputTest(
+ inputMat, "max-pool-with-mask", useGpu, maskMat);
+#ifdef PADDLE_WITH_CUDA
+ useGpu = true;
+ inputMat = Matrix::create(1, 25, false, useGpu);
+ maskMat = Matrix::create(1, 4, false, useGpu);
+ inputMat->copyFrom(inputData, 25);
+ maskMat->copyFrom(maskData, 4);
+ doOneMaxPoolingWithMaskOutputTest(
+ inputMat, "max-pool-with-mask", useGpu, maskMat);
+#endif
+}
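The expected mask `{12, 4, 22, 24}` follows directly from the pooling geometry: a 5x5 input with a 3x3 kernel, stride 2 and no padding yields a 2x2 output, and each mask entry is the flat index `row * 5 + col` of its window's maximum. A quick cross-check:

```cpp
#include <cassert>

// Flat argmax of a rectangular window [h0,h1) x [w0,w1) in a row-major
// W-wide image; mirrors how the mask entries above are defined.
int windowArgmax(const float* in, int W, int h0, int h1, int w0, int w1) {
  int best = h0 * W + w0;
  for (int h = h0; h < h1; ++h)
    for (int w = w0; w < w1; ++w)
      if (in[h * W + w] > in[best]) best = h * W + w;
  return best;
}

int main() {
  float in[25] = {0.1, 0.1, 0.5, 0.5, 1.1,
                  0.2, 0.2, 0.6, 0.1, 0.1,
                  0.3, 0.3, 0.7, 0.1, 0.1,
                  0.4, 0.4, 0.8, 0.8, 0.1,
                  1.0, 2.0, 3.0, 0.0, 9.0};
  assert(windowArgmax(in, 5, 0, 3, 0, 3) == 12);  // max 0.7
  assert(windowArgmax(in, 5, 0, 3, 2, 5) == 4);   // max 1.1
  assert(windowArgmax(in, 5, 2, 5, 0, 3) == 22);  // max 3.0
  assert(windowArgmax(in, 5, 2, 5, 2, 5) == 24);  // max 9.0
  return 0;
}
```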
diff --git a/paddle/math/BaseMatrix.cu b/paddle/math/BaseMatrix.cu
index 53dd5383601782231e6e742784007d1c9154dc6b..e3eff59dc575ee43552e401bc887f885a9804b61 100644
--- a/paddle/math/BaseMatrix.cu
+++ b/paddle/math/BaseMatrix.cu
@@ -1902,5 +1902,52 @@ void BaseMatrixT<T>::sumOfProducts(BaseMatrixT& b,
}
template class BaseMatrixT<real>;
+
+#ifndef PADDLE_MOBILE_INFERENCE
+
template class BaseMatrixT<int>;
+
+#else
+
+template <>
+void BaseMatrixT<int>::zero() {
+ applyUnary(unary::Zero<int>());
+}
+
+template <>
+void BaseMatrixT<int>::assign(int p) {
+ applyUnary(unary::Assign<int>(p));
+}
+
+template <>
+void BaseMatrixT<int>::isEqualTo(BaseMatrixT& b, int value) {
+ applyBinary(binary::IsEqual<int>(value), b);
+}
+
+template <>
+void BaseMatrixT<int>::neg() {
+ applyUnary(unary::Neg<int>());
+}
+
+template <>
+void BaseMatrixT<int>::abs2() {
+ applyUnary(unary::Abs<int>());
+}
+
+template <>
+void BaseMatrixT<int>::add(int p) {
+ applyUnary(unary::Add<int>(p));
+}
+
+template <>
+void BaseMatrixT<int>::add(int p1, int p2) {
+ applyUnary(unary::Add2<int>(p1, p2));
+}
+
+template <>
+void BaseMatrixT<int>::applyL1(int learningRate, int decayRate) {
+ applyUnary(unary::ApplyL1<int>(learningRate * decayRate));
+}
+
+#endif
} // namespace paddle
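The `#else` branch above avoids instantiating the whole of `BaseMatrixT<int>` on mobile builds, defining explicit specializations only for the handful of members that build actually uses. A toy model of the same instantiate-only-what-you-need pattern:

```cpp
#include <cassert>

// A class template whose member bodies are provided per-type, on demand.
template <class T>
struct MatrixT {
  T v;
  void zero();
};

// Full build: template struct MatrixT<int>;  // instantiates every member
// Mobile build: explicit specializations for just the needed members.
template <>
void MatrixT<int>::zero() {
  v = 0;
}

int main() {
  MatrixT<int> m{7};
  m.zero();
  assert(m.v == 0);
  return 0;
}
```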
diff --git a/paddle/math/CMakeLists.txt b/paddle/math/CMakeLists.txt
index 68b5296228cd733dc3cb7ca0f762e0a69187dbff..86bb270a4372841b3e6f4676e222d2190549c153 100644
--- a/paddle/math/CMakeLists.txt
+++ b/paddle/math/CMakeLists.txt
@@ -25,6 +25,19 @@ else()
message(STATUS "Compile with MKLDNNMatrix")
endif()
+if(MOBILE_INFERENCE)
+ list(REMOVE_ITEM MATH_SOURCES
+ ${CMAKE_CURRENT_SOURCE_DIR}/SIMDFunctions.cpp)
+ # Remove sparse
+ list(REMOVE_ITEM MATH_HEADERS
+ ${CMAKE_CURRENT_SOURCE_DIR}/CpuSparseMatrix.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/SparseMatrix.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/SparseRowMatrix.h)
+ list(REMOVE_ITEM MATH_SOURCES
+ ${CMAKE_CURRENT_SOURCE_DIR}/CpuSparseMatrix.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/SparseMatrix.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/SparseRowMatrix.cpp)
+endif()
set(MATH_SOURCES
"${PADDLE_SOURCE_DIR}/paddle/math/BaseMatrix.cu"
"${PADDLE_SOURCE_DIR}/paddle/math/TrainingAlgorithmOp.cu"
diff --git a/paddle/math/CpuSparseMatrix.h b/paddle/math/CpuSparseMatrix.h
index 36d57bbb65245de6b0de5909b55fbc4be3eccd78..aad1348353d558abca72ed0fa5cf943237e3ac78 100644
--- a/paddle/math/CpuSparseMatrix.h
+++ b/paddle/math/CpuSparseMatrix.h
@@ -13,6 +13,9 @@ See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
+
+#ifndef PADDLE_MOBILE_INFERENCE
+
#include <cstddef>
#include "Matrix.h"
@@ -309,3 +312,57 @@ private:
using Matrix::subMatrix;
};
} // namespace paddle
+
+#else
+
+#include "Matrix.h"
+
+namespace paddle {
+
+class CpuSparseMatrix : public Matrix {
+public:
+ CpuSparseMatrix(size_t height,
+ size_t width,
+ size_t nnz, /* used to allocate space */
+ SparseValueType valueType = FLOAT_VALUE,
+ SparseFormat format = SPARSE_CSR,
+ bool trans = false)
+ : Matrix(NULL, height, width, trans, false) {}
+
+ CpuSparseMatrix(real* data,
+ int* rows,
+ int* cols,
+ size_t height,
+ size_t width,
+ size_t nnz,
+ SparseValueType valueType,
+ SparseFormat format,
+ bool trans)
+ : Matrix(NULL, height, width, trans, false) {}
+
+ real* getValue() const { return nullptr; }
+ size_t getColStartIdx(size_t i) const { return 0; }
+ size_t getRowStartIdx(size_t i) const { return 0; }
+ size_t getColNum(size_t i) const { return 0; }
+ int* getRowCols(size_t i) const { return nullptr; }
+
+ CpuSparseMatrixPtr getTmpSparseMatrix(size_t height, size_t width) {
+ return nullptr;
+ }
+
+ void resize(size_t newHeight,
+ size_t newWidth,
+ size_t newNnz, /* used to allocate space */
+ SparseValueType valueType,
+ SparseFormat format) {}
+ void resize(size_t newHeight, size_t newWidth) {}
+ MatrixPtr getTranspose() { return nullptr; }
+ void setRow(size_t row,
+ size_t colNum,
+ const unsigned int* cols,
+ const real* values) {}
+};
+
+} // namespace paddle
+
+#endif
diff --git a/paddle/math/MKLDNNMatrix.cpp b/paddle/math/MKLDNNMatrix.cpp
index 21a8f73c3e650d4b3c3b86247594cd965f4ead35..a710479bab82ed52122cf59bb14a05ccbd4aa05c 100644
--- a/paddle/math/MKLDNNMatrix.cpp
+++ b/paddle/math/MKLDNNMatrix.cpp
@@ -152,12 +152,7 @@ void MKLDNNMatrix::downSpatial() {
}
memory::desc md = memory::desc(dstDims, getDtype(), dstFmt);
memory::primitive_desc pd = memory::primitive_desc(md, getEngine());
- mkldnn_primitive_t result;
- mkldnn::error::wrap_c_api(
- mkldnn_primitive_create(&result, pd.get(), nullptr, nullptr),
- "could not create a memory primitive");
- reset(result);
- set_data_handle(data_);
+ resetMKLDNNMemory(pd, data_);
}
} // namespace paddle
diff --git a/paddle/math/MKLDNNMatrix.h b/paddle/math/MKLDNNMatrix.h
index 54cfefe23b3dc70fd12fd2ca8886c941047b59f7..39d40a1f61609a649d3341c170d24b0604921ac2 100644
--- a/paddle/math/MKLDNNMatrix.h
+++ b/paddle/math/MKLDNNMatrix.h
@@ -145,6 +145,27 @@ public:
m_.reset();
}
+ /**
+ * override the CpuMatrix::resize
+ */
+ void resize(size_t newHeight, size_t newWidth) override {
+ m_->resize(newHeight, newWidth);
+ if (data_ == m_->getData() && elementCnt_ == newHeight * newWidth) {
+ return;
+ }
+ CpuMatrix::setData(data_);
+ height_ = newHeight;
+ width_ = newWidth;
+ elementCnt_ = newHeight * newWidth;
+ stride_ = width_;
+ auto pd = mkldnn::memory::primitive_desc(
+ mkldnn::memory::desc({(int)newHeight, (int)newWidth},
+ getDtype(),
+ mkldnn::memory::format::nc),
+ getEngine());
+ resetMKLDNNMemory(pd, data_);
+ }
+
/**
* override Matrix::getData
* check data before return
@@ -215,6 +236,17 @@ protected:
memory::format srcFmt,
memory::format dstFmt,
memory::dims dm);
+ /**
* Reset this MKLDNN memory from a primitive descriptor.
+ */
+ void resetMKLDNNMemory(memory::primitive_desc pd, real* data) {
+ mkldnn_primitive_t result;
+ mkldnn::error::wrap_c_api(
+ mkldnn_primitive_create(&result, pd.get(), nullptr, nullptr),
+ "could not create a memory primitive");
+ reset(result);
+ set_data_handle(data);
+ }
private:
// save the CpuMatrixPtr in case the buffer is released outside
diff --git a/paddle/math/MathFunctions.cpp b/paddle/math/MathFunctions.cpp
index c2f17beeb87942ea681f5d388659c0d280157b26..ba86eacbb5d53ee43a60d2cd1dd922333a5d48f0 100644
--- a/paddle/math/MathFunctions.cpp
+++ b/paddle/math/MathFunctions.cpp
@@ -206,7 +206,7 @@ double dotProduct(const int n, const double* x, const double* y) {
}
#endif
-#if defined(PADDLE_USE_MKL) || defined(PADDLE_USE_MKLML)
+#if defined(PADDLE_USE_MKLML)
template <>
void vExp(const int n, const float* a, float* r) {
@@ -295,38 +295,6 @@ template void vAdd(const int n, const double* a, const double* b, double* r);
#endif
-#ifdef PADDLE_USE_MKL
-template <>
-void vInvSqrt(const int n, const float* a, float* r) {
- vsInvSqrt(n, a, r);
-}
-
-template <>
-void vInvSqrt(const int n, const double* a, double* r) {
- vdInvSqrt(n, a, r);
-}
-
-template <>
-void vLog1p(const int n, const float* a, float* r) {
- vsLog1p(n, a, r);
-}
-
-template <>
-void vLog1p(const int n, const double* a, double* r) {
- vdLog1p(n, a, r);
-}
-
-template <>
-void vTanh(const int n, const float* a, float* r) {
- vsTanh(n, a, r);
-}
-
-template <>
-void vTanh(const int n, const double* a, double* r) {
- vdTanh(n, a, r);
-}
-#else
-
DEFINE_MATRIX_BINARY_OP(vInvSqrt, b = 1.0f / std::sqrt(a));
template
void vInvSqrt(const int n, const T* a, T* r) {
@@ -357,6 +325,4 @@ template void vLog1p(const int n, const double* a, double* r);
template void vTanh(const int n, const float* a, float* r);
template void vTanh(const int n, const double* a, double* r);
-#endif
-
} // namespace paddle
diff --git a/paddle/math/MathFunctions.h b/paddle/math/MathFunctions.h
index 8193aa4adffc0409d8ea68417c68fa153a2942d8..f6e77029bdd75a602f88b688ca810f47ba4ee615 100644
--- a/paddle/math/MathFunctions.h
+++ b/paddle/math/MathFunctions.h
@@ -21,11 +21,6 @@ limitations under the License. */
#include <mkl_vml_functions.h>
#endif
-#ifdef PADDLE_USE_MKL
-#include <mkl.h>
-#include <mkl_lapacke.h>
-#endif
-
#if defined(PADDLE_USE_ATLAS) || defined(PADDLE_USE_VECLIB)
extern "C" {
#include <cblas.h>
diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp
index c3e34d5309d9ca8a32d7b0a8043e668cdb5be54b..88e9180690606c92cf46c5b295d80f14e5d64567 100644
--- a/paddle/math/Matrix.cpp
+++ b/paddle/math/Matrix.cpp
@@ -451,6 +451,7 @@ void GpuMatrix::addSharedBias(Matrix& b, real scale) {
}
void GpuMatrix::collectBias(Matrix& a, real scale) {
+#ifdef PADDLE_WITH_CUDA
CHECK_EQ(getHeight(), (size_t)1);
CHECK_EQ(width_, a.getWidth());
GpuSparseMatrix* sMatPtr = dynamic_cast<GpuSparseMatrix*>(&a);
@@ -461,6 +462,7 @@ void GpuMatrix::collectBias(Matrix& a, real scale) {
hl_sparse_matrix_s A_d = sMatPtr->sMatrix_.get();
hl_sparse_matrix_column_sum(data, A_d, sMatPtr->getHeight(), width_, scale);
}
+#endif
}
void GpuMatrix::collectSharedBias(Matrix& a, real scale) {
@@ -552,6 +554,7 @@ void GpuMatrix::mul(const GpuSparseMatrix& a,
const GpuMatrix& b,
real scaleAB,
real scaleT) {
+#ifdef PADDLE_WITH_CUDA
CHECK(isContiguous());
CHECK(b.isContiguous());
CHECK(b.useGpu_ == true) << "Matrix type are not equal";
@@ -578,12 +581,14 @@ void GpuMatrix::mul(const GpuSparseMatrix& a,
b.height_,
scaleAB,
scaleT);
+#endif
}
void GpuMatrix::mul(const GpuMatrix& a,
const GpuSparseMatrix& b,
real scaleAB,
real scaleT) {
+#ifdef PADDLE_WITH_CUDA
CHECK(isContiguous());
CHECK(a.isContiguous());
CHECK(a.useGpu_ == true) << "Matrix type are not equal";
@@ -622,6 +627,7 @@ void GpuMatrix::mul(const GpuMatrix& a,
scaleAB,
scaleT);
}
+#endif
}
/* this = a*b */
@@ -1028,15 +1034,23 @@ void GpuMatrix::maxPoolForward(Matrix& inputMat,
size_t outputH,
size_t outputW,
size_t paddingH,
- size_t paddingW) {
+ size_t paddingW,
+ MatrixPtr maskMatP) {
CHECK(inputMat.useGpu_ == true) << "Matrix type are not equal";
real* inputData = inputMat.getData();
+ real* maskData = NULL;
size_t frameNum = inputMat.getHeight();
CHECK(imgSizeH * imgSizeW * channels == inputMat.getWidth());
CHECK(height_ == inputMat.getHeight());
CHECK(width_ == outputH * outputW * channels);
+ if (maskMatP != NULL) {
+ CHECK(maskMatP->useGpu_ == true) << "Matrix type are not equal";
+ CHECK(outputH * outputW * channels == maskMatP->getWidth());
+ maskData = maskMatP->getData();
+ }
+
hl_maxpool_forward(frameNum,
inputData,
channels,
@@ -1051,7 +1065,8 @@ void GpuMatrix::maxPoolForward(Matrix& inputMat,
paddingH,
paddingW,
data_,
- getStride());
+ getStride(),
+ maskData);
}
void GpuMatrix::maxPoolBackward(Matrix& inputMat,
@@ -1548,6 +1563,7 @@ void GpuMatrix::bilinearBackward(const Matrix& out,
}
void GpuMatrix::multiBinaryLabelCrossEntropy(Matrix& output, Matrix& label) {
+#ifdef PADDLE_WITH_CUDA
GpuMatrix* outputPtr = dynamic_cast<GpuMatrix*>(&output);
auto labelPtr = dynamic_cast<GpuSparseMatrix*>(&label);
@@ -1563,9 +1579,11 @@ void GpuMatrix::multiBinaryLabelCrossEntropy(Matrix& output, Matrix& label) {
hl_sparse_matrix_s mat_d = labelPtr->sMatrix_.get();
hl_matrix_multi_binary_cross_entropy(
output_d, entropy_d, mat_d, height_, outputPtr->width_);
+#endif
}
void GpuMatrix::multiBinaryLabelCrossEntropyBp(Matrix& output, Matrix& label) {
+#ifdef PADDLE_WITH_CUDA
GpuMatrix* outputPtr = dynamic_cast<GpuMatrix*>(&output);
auto labelPtr = dynamic_cast<GpuSparseMatrix*>(&label);
@@ -1581,6 +1599,7 @@ void GpuMatrix::multiBinaryLabelCrossEntropyBp(Matrix& output, Matrix& label) {
hl_sparse_matrix_s mat_d = labelPtr->sMatrix_.get();
hl_matrix_multi_binary_cross_entropy_bp(
output_d, grad_d, mat_d, height_, width_);
+#endif
}
void GpuMatrix::vol2Col(real* dataSrc,
@@ -1973,9 +1992,11 @@ void CpuMatrix::maxPoolForward(Matrix& inputMat,
size_t outputH,
size_t outputW,
size_t paddingH,
- size_t paddingW) {
+ size_t paddingW,
+ MatrixPtr maskMatP) {
real* inputData = inputMat.getData();
real* outData = data_;
+ real* maskData = NULL;
size_t num = inputMat.getHeight();
size_t inLength = imgSizeH * imgSizeW;
size_t outLength = outputH * outputW;
@@ -1984,6 +2005,11 @@ void CpuMatrix::maxPoolForward(Matrix& inputMat,
CHECK_EQ(channels * outLength, this->getWidth());
size_t outStride = getStride();
+ if (maskMatP != NULL) {
+ maskData = maskMatP->getData();
+ CHECK_EQ(channels * outLength, maskMatP->getWidth());
+ }
+
/* initialize the data_ */
for (size_t i = 0; i < height_; i++) {
for (size_t j = 0; j < width_; j++) {
@@ -2005,10 +2031,21 @@ void CpuMatrix::maxPoolForward(Matrix& inputMat,
int wstart = pw * strideW - paddingW;
int wend = std::min(wstart + sizeX, imgSizeW);
wstart = std::max(wstart, 0);
- for (int h = hstart; h < hend; ++h) {
- for (int w = wstart; w < wend; ++w) {
- outData[ph * outputW + pw] = std::max(
- outData[ph * outputW + pw], inputData[h * imgSizeW + w]);
+ if (maskData == NULL) {
+ for (int h = hstart; h < hend; ++h) {
+ for (int w = wstart; w < wend; ++w) {
+ outData[ph * outputW + pw] = std::max(
+ outData[ph * outputW + pw], inputData[h * imgSizeW + w]);
+ }
+ }
+ } else {
+ for (int h = hstart; h < hend; ++h) {
+ for (int w = wstart; w < wend; ++w) {
+ if (outData[ph * outputW + pw] < inputData[h * imgSizeW + w]) {
+ outData[ph * outputW + pw] = inputData[h * imgSizeW + w];
+ maskData[ph * outputW + pw] = h * imgSizeW + w;
+ }
+ }
}
}
}
@@ -2016,6 +2053,8 @@ void CpuMatrix::maxPoolForward(Matrix& inputMat,
// compute offset
inputData += inLength;
outData += outLength;
+
+ if (maskData != NULL) maskData += outLength;
}
}
}
@@ -3226,6 +3265,7 @@ template void CpuMatrix::mul(CpuSparseMatrix* a,
real scaleAB,
real scaleT);
+#ifndef PADDLE_MOBILE_INFERENCE
void SharedCpuMatrix::mul(CpuSparseMatrix* a,
CpuMatrix* b,
real scaleAB,
@@ -3354,6 +3394,7 @@ void SharedCpuMatrix::initBlock(int blockNum) {
}
}
+#endif
/* Add a (column) vector b to matrix a, column by column */
void CpuMatrix::addColumnVector(const Matrix& b) {
BaseMatrix::addColVector(const_cast(b));
diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h
index 44180bca8bca53e74d71ce7bed3516399c01c81d..e273f1123690e31984c97185c5a8bc5e7b92c38c 100644
--- a/paddle/math/Matrix.h
+++ b/paddle/math/Matrix.h
@@ -861,7 +861,8 @@ public:
/**
* Pooling forward operation, pick out the largest element
- * in the sizeX of value
+ * in the sizeX of value; if maskMatP is not NULL, it will
+ * also calculate the location indices (the mask).
*/
virtual void maxPoolForward(Matrix& inputMat,
size_t imgSizeH,
@@ -874,7 +875,8 @@ public:
size_t outputH,
size_t outputW,
size_t paddingH,
- size_t paddingW) {
+ size_t paddingW,
+ MatrixPtr maskMatP = NULL) {
LOG(FATAL) << "Not implemented";
}
@@ -1426,7 +1428,8 @@ public:
size_t outputH,
size_t outputW,
size_t paddingH,
- size_t paddingW);
+ size_t paddingW,
+ MatrixPtr maskMatP);
void maxPoolBackward(Matrix& image,
size_t imgSizeH,
@@ -1697,7 +1700,8 @@ public:
size_t outputH,
size_t outputW,
size_t paddingH,
- size_t paddingW);
+ size_t paddingW,
+ MatrixPtr maskMatP);
void maxPoolBackward(Matrix& image,
size_t imgSizeH,
@@ -2066,6 +2070,7 @@ public:
class SharedCpuMatrix : public CpuMatrix {
public:
+#ifndef PADDLE_MOBILE_INFERENCE
/* blockNum is number of partitions of the matrix */
SharedCpuMatrix(int blockNum, size_t height, size_t width, bool trans = false)
: CpuMatrix(height, width, trans) {
@@ -2111,6 +2116,7 @@ private:
ThreadLocal<CpuMatrixPtr> localBuf_;
ThreadLocal<std::vector<int>> localBufRows_;
ThreadLocal<std::vector<int>> blockSeq_;
+#endif
};
typedef struct { unsigned int col; } sparse_non_value_t;
diff --git a/paddle/math/SparseMatrix.h b/paddle/math/SparseMatrix.h
index 16300db081f89182faa82ea5798e8ec2f1cd93f9..e0a3c6d2286521f6030867b747099514a16df5cf 100644
--- a/paddle/math/SparseMatrix.h
+++ b/paddle/math/SparseMatrix.h
@@ -13,6 +13,9 @@ See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
+
+#ifndef PADDLE_MOBILE_INFERENCE
+
#include <cstddef>
#include "CpuSparseMatrix.h"
#include "Matrix.h"
@@ -237,3 +240,47 @@ private:
};
} // namespace paddle
+
+#else
+
+#include "CpuSparseMatrix.h"
+
+namespace paddle {
+
+class GpuSparseMatrix : public Matrix {
+public:
+ GpuSparseMatrix(size_t height,
+ size_t width,
+ size_t nnz, /* used to allocate space */
+ SparseValueType valueType = FLOAT_VALUE,
+ SparseFormat format_ = SPARSE_CSR,
+ bool trans = false)
+ : Matrix(NULL, height, width, trans, false) {}
+
+ GpuSparseMatrix(real* value,
+ int* rows,
+ int* cols,
+ size_t height,
+ size_t width,
+ size_t nnz,
+ SparseValueType valueType,
+ SparseFormat format,
+ bool trans)
+ : Matrix(NULL, height, width, trans, true) {}
+
+ void resize(size_t newHeight,
+ size_t newWidth,
+ size_t newNnz, /* used to allocate space */
+ SparseValueType valueType,
+ SparseFormat format) {}
+ void resize(size_t newHeight, size_t newWidth) {}
+ MatrixPtr getTranspose() { return nullptr; }
+ void setRow(size_t row,
+ size_t colNum,
+ const unsigned int* cols,
+ const real* values) {}
+};
+
+} // namespace paddle
+
+#endif
diff --git a/paddle/math/SparseRowMatrix.h b/paddle/math/SparseRowMatrix.h
index 8704eb038d5d42ca834d232c0a651e9ffb2b40f3..ca7a6806da3a58ad5fffdbb6505319964c25bc6f 100644
--- a/paddle/math/SparseRowMatrix.h
+++ b/paddle/math/SparseRowMatrix.h
@@ -14,6 +14,8 @@ limitations under the License. */
#pragma once
+#ifndef PADDLE_MOBILE_INFERENCE
+
#include
#include
#include
@@ -313,3 +315,27 @@ private:
};
} // namespace paddle
+
+#else
+namespace paddle {
+
+class SparseRowCpuMatrix : public CpuMatrix {
+public:
+ void reserveStore() {}
+ void clearIndices() {}
+};
+
+class SparsePrefetchRowCpuMatrix : public SparseRowCpuMatrix {
+public:
+ void setupIndices() {}
+ void addRows(MatrixPtr input) {}
+ void addRows(IVectorPtr ids) {}
+};
+
+class SparseAutoGrowRowCpuMatrix : public SparseRowCpuMatrix {};
+class CacheRowCpuMatrix : public SparseAutoGrowRowCpuMatrix {};
+class SparseRowIdsCpuMatrix : public CpuMatrix {};
+
+} // namespace paddle
+
+#endif
diff --git a/paddle/math/tests/CMakeLists.txt b/paddle/math/tests/CMakeLists.txt
index ceb96b2e250d8e04ffb2b1d8c77ad498dca91cf3..d8b7f9e3fc74040189ade83049e4a1c3348e08de 100644
--- a/paddle/math/tests/CMakeLists.txt
+++ b/paddle/math/tests/CMakeLists.txt
@@ -3,8 +3,10 @@
add_simple_unittest(test_ExecViaCpu)
add_simple_unittest(test_SIMDFunctions)
add_simple_unittest(test_TrainingAlgorithm)
-add_simple_unittest(test_SparseMatrix)
add_simple_unittest(test_RowBuffer)
+if(NOT MOBILE_INFERENCE)
+ add_simple_unittest(test_SparseMatrix)
+endif()
# TODO(yuyang18): Refactor TestUtil.cpp. Remove this cross module reference.
add_unittest(test_matrixCompare
diff --git a/paddle/math/tests/TensorCheck.h b/paddle/math/tests/TensorCheck.h
index 5bc4a03067a75527fa30e5bb5526f93dc7b9fdcc..b998e5772e70d0a0ec79dc4064dcbaa2c302efd2 100644
--- a/paddle/math/tests/TensorCheck.h
+++ b/paddle/math/tests/TensorCheck.h
@@ -169,7 +169,7 @@ void TensorCheck(AssertEq compare,
count++;
}
}
- EXPECT_EQ(count, 0) << "There are " << count << " different element.";
+ EXPECT_EQ(count, 0) << "There are " << count << " different elements.";
}
template
diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt
index eae87a5141ef1284630170b07d22a0cf9cd977b0..709f7de2e43093114d096cbfca5b5d49293a6d3e 100644
--- a/paddle/operators/CMakeLists.txt
+++ b/paddle/operators/CMakeLists.txt
@@ -195,8 +195,13 @@ op_library(sequence_pool_op DEPS sequence_pooling)
op_library(lstm_op DEPS sequence2batch lstm_compute)
op_library(conv_transpose_op DEPS vol2col)
op_library(gru_op DEPS sequence2batch gru_compute)
-op_library(dynamic_recurrent_op SRCS dynamic_recurrent_op.cc rnn/recurrent_op_utils.cc
- DEPS net_op tensor_array)
+if(WITH_TESTING)
+ op_library(dynamic_recurrent_op SRCS dynamic_recurrent_op.cc rnn/recurrent_op_utils.cc
+ DEPS net_op tensor_array gtest)
+else()
+ op_library(dynamic_recurrent_op SRCS dynamic_recurrent_op.cc rnn/recurrent_op_utils.cc
+ DEPS net_op tensor_array)
+endif()
op_library(recurrent_op SRCS recurrent_op.cc DEPS executor)
list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS})
@@ -209,6 +214,7 @@ set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library")
cc_test(gather_test SRCS gather_test.cc DEPS tensor)
cc_test(net_op_test SRCS net_op_test.cc DEPS net_op)
cc_test(scatter_test SRCS scatter_test.cc DEPS tensor)
+cc_test(beam_search_decode_op_test SRCS beam_search_decode_op_test.cc DEPS lod_tensor)
cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor paddle_memory)
cc_test(dynamic_recurrent_op_test SRCS dynamic_recurrent_op_test.cc
rnn/recurrent_op_utils.cc
diff --git a/paddle/operators/accuracy_op.cc b/paddle/operators/accuracy_op.cc
index 03c2fa945d94a522d25e65103c8842a93852ba3d..2785a8c6fb62527db4d203788be88ebead068a19 100644
--- a/paddle/operators/accuracy_op.cc
+++ b/paddle/operators/accuracy_op.cc
@@ -30,6 +30,10 @@ class AccuracyOp : public framework::OperatorWithKernel {
"Input (Label) of accuracy op should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Accuracy"),
"Output (Accuracy) of AccuracyOp should not be null.");
+ PADDLE_ENFORCE(ctx->HasOutput("Correct"),
+ "Output (Correct) of AccuracyOp should not be null.");
+ PADDLE_ENFORCE(ctx->HasOutput("Total"),
+ "Output (Total) of AccuracyOp should not be null.");
auto inference_dim = ctx->GetInputDim("Out");
auto label_dim = ctx->GetInputDim("Label");
@@ -43,6 +47,8 @@ class AccuracyOp : public framework::OperatorWithKernel {
" the same as label.");
ctx->SetOutputDim("Accuracy", {1});
+ ctx->SetOutputDim("Correct", {1});
+ ctx->SetOutputDim("Total", {1});
ctx->ShareLoD("Out", /*->*/ "Accuracy");
}
@@ -66,6 +72,8 @@ class AccuracyOpMaker : public framework::OpProtoAndCheckerMaker {
AddInput("Label", "Label of the training data");
// TODO(typhoonzero): AddInput("Weight", ...
AddOutput("Accuracy", "The accuracy of current batch");
+ AddOutput("Correct", "The correct samples count of current batch");
+ AddOutput("Total", "The samples count of current batch");
AddComment(R"DOC(
Accuracy Operator.
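The new `Correct` and `Total` outputs exist because per-batch accuracies cannot simply be averaged when batch sizes differ; running counts can. A small sketch of the aggregation they enable (the `Counter` type is illustrative, not Paddle API):

```cpp
#include <cassert>

// Accumulate per-batch (correct, total) counts and divide once at the end.
struct Counter {
  long correct = 0, total = 0;
  void add(long c, long t) { correct += c; total += t; }
  double accuracy() const { return total ? double(correct) / total : 0.0; }
};

int main() {
  Counter c;
  c.add(9, 10);   // batch 1: 90%
  c.add(40, 90);  // batch 2: ~44%
  // The naive mean of the two batch accuracies would be ~0.67;
  // the count-based aggregate gives the true value:
  assert(c.accuracy() == 49.0 / 100.0);
  return 0;
}
```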
diff --git a/paddle/operators/accuracy_op.cu b/paddle/operators/accuracy_op.cu
index d0c4c0d25d6f4e3ab7acd72d62a8a17fa102637b..b575c682f0d30678a72a33040cce6cc799da26cb 100644
--- a/paddle/operators/accuracy_op.cu
+++ b/paddle/operators/accuracy_op.cu
@@ -24,7 +24,8 @@ using platform::PADDLE_CUDA_NUM_THREADS;
template <int BlockSize>
__global__ void AccuracyCudaKernel(const int N, const int D,
const int64_t* Xdata,
- const int64_t* labeldata, float* accuracy) {
+ const int64_t* labeldata, int* correct_data,
+ float* accuracy) {
int count = 0;
__shared__ int total[BlockSize];
@@ -43,6 +44,7 @@ __global__ void AccuracyCudaKernel(const int N, const int D,
// reduce the count with init value 0, and output accuracy.
int result = thrust::reduce(thrust::device, total, total + BlockSize, 0);
if (threadIdx.x == 0) {
+ *correct_data = result;
*accuracy = static_cast<float>(result) / static_cast<float>(N);
}
}
class AccuracyOpCUDAKernel : public framework::OpKernel<T> {
auto* inference = ctx.Input<Tensor>("Out");
auto* indices = ctx.Input<Tensor>("Indices");
auto* label = ctx.Input<Tensor>("Label");
+
auto* accuracy = ctx.Output<Tensor>("Accuracy");
+ auto* correct = ctx.Output<Tensor>("Correct");
+ auto* total = ctx.Output<Tensor>("Total");
// FIXME(typhoonzero): only support indices currently
// if add support for output values, how to detect the data type?
const int64_t* indices_data = indices->data<int64_t>();
const int64_t* label_data = label->data<int64_t>();
+
+ int* correct_data = correct->mutable_data<int>(ctx.GetPlace());
+ int* total_data = total->mutable_data<int>(ctx.GetPlace());
float* accuracy_data = accuracy->mutable_data<float>(ctx.GetPlace());
- size_t num_samples = inference->dims()[0];
+ int num_samples = static_cast<int>(inference->dims()[0]);
size_t infer_width = inference->dims()[1];
- cudaMemset((void**)&accuracy_data, 0, sizeof(float));
+ PADDLE_ENFORCE(cudaMemset(accuracy_data, 0, sizeof(float)));
+ // cudaMemset((void**)&correct_data, 0, sizeof(float));
if (num_samples == 0) {
return;
}
+ cudaMemcpy(total_data, &num_samples, sizeof(int), cudaMemcpyHostToDevice);
AccuracyCudaKernel<<<
1, PADDLE_CUDA_NUM_THREADS, 0, ctx.cuda_device_context().stream()>>>(
- num_samples, infer_width, indices_data, label_data, accuracy_data);
+ num_samples, infer_width, indices_data, label_data, correct_data,
+ accuracy_data);
+
+ int d_num_samples, d_num_correct;
+ float d_accuracy;
+ cudaMemcpy(&d_num_correct, correct_data, sizeof(int),
+ cudaMemcpyDeviceToHost);
+ cudaMemcpy(&d_num_samples, total_data, sizeof(int), cudaMemcpyDeviceToHost);
+ cudaMemcpy(&d_accuracy, accuracy_data, sizeof(float),
+ cudaMemcpyDeviceToHost);
}
};
} // namespace operators
} // namespace paddle
-// FIXME(typhoonzero): types of T is for infernece data.
-// label data is always int
+// FIXME(typhoonzero): types of T is for inference data.
+// label data is always int64
REGISTER_OP_GPU_KERNEL(accuracy, paddle::operators::AccuracyOpCUDAKernel<float>,
paddle::operators::AccuracyOpCUDAKernel<double>);
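Besides threading `correct_data` through the kernel, the hunk above fixes a real bug: the old `cudaMemset((void**)&accuracy_data, ...)` zeroed the host-side pointer variable rather than the device buffer, since `cudaMemset` takes the device pointer by value. A host-only illustration of the difference, using `std::memset` as a stand-in:

```cpp
#include <cassert>
#include <cstring>

int main() {
  float buf[1] = {3.14f};
  float* dev_ptr = buf;  // stands in for a device pointer

  // Wrong (the old code): zeroes the pointer variable, not the buffer.
  // std::memset(&dev_ptr, 0, sizeof(float));  // dev_ptr becomes null

  // Right (the fixed code): pass the pointer value itself.
  std::memset(dev_ptr, 0, sizeof(float));
  assert(buf[0] == 0.0f);
  return 0;
}
```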
diff --git a/paddle/operators/accuracy_op.h b/paddle/operators/accuracy_op.h
index 1968b53d19acfddaa514eca6e24d98a298d8d311..d060e6edddb31ecc1a4d27836f80b8ac5fa7d36d 100644
--- a/paddle/operators/accuracy_op.h
+++ b/paddle/operators/accuracy_op.h
@@ -14,7 +14,6 @@ limitations under the License. */
#pragma once
#include
-#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
@@ -22,18 +21,6 @@ namespace operators {
using Tensor = framework::Tensor;
-template <typename T, int MajorType = Eigen::RowMajor,
- typename IndexType = Eigen::DenseIndex>
-using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
-
-template