Unverified commit 9202671d authored by ranqiu92, committed by GitHub

Merge branch 'develop' into doc

#!/usr/bin/env python
from paddle.trainer_config_helpers import *
height = 224
width = 224
num_class = 1000
batch_size = get_config_arg('batch_size', int, 64)
layer_num = get_config_arg("layer_num", int, 50)
is_test = get_config_arg("is_test", bool, False)
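# batch_size, layer_num and is_test can be overridden from the command line;
# the benchmark driver (run.sh) passes e.g. args="batch_size=${bs},layer_num=${layer_num}".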
args = {'height': height, 'width': width, 'color': True, 'num_class': num_class}
define_py_data_sources2(
"train.list", None, module="provider", obj="process", args=args)
settings(
batch_size=batch_size,
learning_rate=0.01 / batch_size,
learning_method=MomentumOptimizer(0.9),
regularization=L2Regularization(0.0005 * batch_size))
####################### Network Configuration #######################
def conv_bn_layer(name,
                  input,
                  filter_size,
                  num_filters,
                  stride,
                  padding,
                  channels=None,
                  active_type=ReluActivation()):
    """
    A wrapper for a convolution layer followed by batch normalization.
    Note:
        The conv layer itself has a linear (i.e. no) activation; the
        activation is applied after batch normalization.
    """
    tmp = img_conv_layer(
        name=name + "_conv",
        input=input,
        filter_size=filter_size,
        num_channels=channels,
        num_filters=num_filters,
        stride=stride,
        padding=padding,
        act=LinearActivation(),
        bias_attr=False)
    return batch_norm_layer(
        name=name + "_bn", input=tmp, act=active_type, use_global_stats=is_test)
def bottleneck_block(name, input, num_filters1, num_filters2):
    """
    A wrapper for the bottleneck building block in ResNet.
    The last conv_bn_layer has no activation;
    the addto layer applies a ReLU activation.
    """
    last_name = conv_bn_layer(
        name=name + '_branch2a',
        input=input,
        filter_size=1,
        num_filters=num_filters1,
        stride=1,
        padding=0)
    last_name = conv_bn_layer(
        name=name + '_branch2b',
        input=last_name,
        filter_size=3,
        num_filters=num_filters1,
        stride=1,
        padding=1)
    last_name = conv_bn_layer(
        name=name + '_branch2c',
        input=last_name,
        filter_size=1,
        num_filters=num_filters2,
        stride=1,
        padding=0,
        active_type=LinearActivation())
    return addto_layer(
        name=name + "_addto", input=[input, last_name], act=ReluActivation())
def mid_projection(name, input, num_filters1, num_filters2, stride=2):
    """
    A wrapper for the middle projection in ResNet.
    Projection shortcuts are used for increasing dimensions,
    while the other shortcuts are identity.
    branch1: projection shortcut used for increasing dimensions,
             has no activation.
    branch2x: bottleneck building block, whose shortcut is identity.
    """
    # stride defaults to 2; res2_1 passes stride=1 to keep the 56x56 size.
    branch1 = conv_bn_layer(
        name=name + '_branch1',
        input=input,
        filter_size=1,
        num_filters=num_filters2,
        stride=stride,
        padding=0,
        active_type=LinearActivation())
    last_name = conv_bn_layer(
        name=name + '_branch2a',
        input=input,
        filter_size=1,
        num_filters=num_filters1,
        stride=stride,
        padding=0)
    last_name = conv_bn_layer(
        name=name + '_branch2b',
        input=last_name,
        filter_size=3,
        num_filters=num_filters1,
        stride=1,
        padding=1)
    last_name = conv_bn_layer(
        name=name + '_branch2c',
        input=last_name,
        filter_size=1,
        num_filters=num_filters2,
        stride=1,
        padding=0,
        active_type=LinearActivation())
    return addto_layer(
        name=name + "_addto", input=[branch1, last_name], act=ReluActivation())
img = data_layer(name='image', size=height * width * 3)
def deep_res_net(res2_num=3, res3_num=4, res4_num=6, res5_num=3):
    """
    A wrapper for the 50-, 101- and 152-layer ResNets.
    res2_num: number of blocks stacked in conv2_x
    res3_num: number of blocks stacked in conv3_x
    res4_num: number of blocks stacked in conv4_x
    res5_num: number of blocks stacked in conv5_x
    """
    # For ImageNet
    # conv1: 112x112
    tmp = conv_bn_layer(
        "conv1",
        input=img,
        filter_size=7,
        channels=3,
        num_filters=64,
        stride=2,
        padding=3)
    tmp = img_pool_layer(name="pool1", input=tmp, pool_size=3, stride=2)

    # conv2_x: 56x56
    tmp = mid_projection(
        name="res2_1", input=tmp, num_filters1=64, num_filters2=256, stride=1)
    for i in xrange(2, res2_num + 1, 1):
        tmp = bottleneck_block(
            name="res2_" + str(i), input=tmp, num_filters1=64, num_filters2=256)

    # conv3_x: 28x28
    tmp = mid_projection(
        name="res3_1", input=tmp, num_filters1=128, num_filters2=512)
    for i in xrange(2, res3_num + 1, 1):
        tmp = bottleneck_block(
            name="res3_" + str(i),
            input=tmp,
            num_filters1=128,
            num_filters2=512)

    # conv4_x: 14x14
    tmp = mid_projection(
        name="res4_1", input=tmp, num_filters1=256, num_filters2=1024)
    for i in xrange(2, res4_num + 1, 1):
        tmp = bottleneck_block(
            name="res4_" + str(i),
            input=tmp,
            num_filters1=256,
            num_filters2=1024)

    # conv5_x: 7x7
    tmp = mid_projection(
        name="res5_1", input=tmp, num_filters1=512, num_filters2=2048)
    for i in xrange(2, res5_num + 1, 1):
        tmp = bottleneck_block(
            name="res5_" + str(i),
            input=tmp,
            num_filters1=512,
            num_filters2=2048)

    tmp = img_pool_layer(
        name='avgpool',
        input=tmp,
        pool_size=7,
        stride=1,
        pool_type=AvgPooling())

    return fc_layer(input=tmp, size=num_class, act=SoftmaxActivation())
if layer_num == 50:
    resnet = deep_res_net(3, 4, 6, 3)
elif layer_num == 101:
    resnet = deep_res_net(3, 4, 23, 3)
elif layer_num == 152:
    resnet = deep_res_net(3, 8, 36, 3)
else:
    raise ValueError("Wrong layer number: expected 50, 101 or 152.")

lbl = data_layer(name="label", size=num_class)
loss = cross_entropy(name='loss', input=resnet, label=lbl)
inputs(img, lbl)
outputs(loss)
@@ -5,22 +5,23 @@ function train() {
  export OMP_DYNAMIC="FALSE"
  export KMP_AFFINITY="granularity=fine,compact,0,0"
  topology=$1
-  bs=$2
-  use_mkldnn=$3
-  if [ $3 == "True" ]; then
  layer_num=$2
  bs=$3
  use_mkldnn=$4
  if [ $4 == "True" ]; then
    thread=1
-    log="logs/${topology}-mkldnn-${bs}.log"
-  elif [ $3 == "False" ]; then
    log="logs/${topology}-${layer_num}-mkldnn-${bs}.log"
  elif [ $4 == "False" ]; then
    thread=`nproc`
    # each trainer_count use only 1 core to avoid conflict
    export OMP_NUM_THREADS=1
    export MKL_NUM_THREADS=1
-    log="logs/${topology}-${thread}mklml-${bs}.log"
    log="logs/${topology}-${layer_num}-${thread}mklml-${bs}.log"
  else
    echo "Wrong input $3, use True or False."
    exit 0
  fi
-  args="batch_size=${bs}"
  args="batch_size=${bs},layer_num=${layer_num}"
  config="${topology}.py"
  paddle train --job=time \
    --config=$config \
@@ -40,12 +41,9 @@ if [ ! -d "logs" ]; then
  mkdir logs
fi

-#========== mkldnn ==========#
-train vgg 64 True
-train vgg 128 True
-train vgg 256 True
-#========== mklml ===========#
-train vgg 64 False
-train vgg 128 False
-train vgg 256 False
for use_mkldnn in True False; do
  for batchsize in 64 128 256; do
    train vgg 19 $batchsize $use_mkldnn
    train resnet 50 $batchsize $use_mkldnn
  done
done
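With the new signature, `train` takes `<topology> <layer_num> <batch_size> <use_mkldnn>`, so the loop above expands to calls such as:

    train vgg 19 64 True
    train resnet 50 256 False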
@@ -13,7 +13,7 @@ define_py_data_sources2(
settings(
    batch_size=batch_size,
-    learning_rate=0.01 / batch_size,
    learning_rate=0.001 / batch_size,
    learning_method=MomentumOptimizer(0.9),
    regularization=L2Regularization(0.0005 * batch_size))
......
# Find the CBlas and lapack libraries
#
-# It will search MKL, atlas, OpenBlas, reference-cblas in order.
# It will search MKLML, atlas, OpenBlas, reference-cblas in order.
#
# If any cblas implementation found, the following variable will be set.
-#    CBLAS_PROVIDER  # one of MKL, ATLAS, OPENBLAS, REFERENCE
#    CBLAS_PROVIDER  # one of MKLML, ATLAS, OPENBLAS, REFERENCE
#    CBLAS_INC_DIR   # the include directory for cblas.
#    CBLAS_LIBS      # a list of libraries should be linked by paddle.
#                    # Each library should be full path to object file.
-#
-# User should set one of MKL_ROOT, ATLAS_ROOT, OPENBLAS_ROOT, REFERENCE_CBLAS_ROOT
-# during cmake. If none of them set, it will try to find cblas implementation in
-# system paths.
-#

set(CBLAS_FOUND OFF)

@@ -30,44 +25,6 @@ if(WITH_MKLML AND MKLML_INC_DIR AND MKLML_LIB)
  return()
endif()

-## Then find MKL.
-set(INTEL_MKL_ROOT "/opt/intel/mkl" CACHE PATH "Folder contains intel mkl libs")
-set(MKL_ROOT $ENV{MKL_ROOT} CACHE PATH "Folder contains env MKL")
-set(MKL_INCLUDE_SEARCH_PATHS
-  ${MKL_ROOT}/include
-  ${INTEL_MKL_ROOT}/include)
-set(MKL_LIB_SEARCH_PATHS
-  ${MKL_ROOT}/lib
-  ${MKL_ROOT}/lib/intel64
-  ${INTEL_MKL_ROOT}/lib
-  ${INTEL_MKL_ROOT}/lib/intel64)
-find_path(MKL_INC_DIR mkl.h PATHS
-  ${MKL_INCLUDE_SEARCH_PATHS})
-find_path(MKL_LAPACK_INC_DIR mkl_lapacke.h PATHS
-  ${MKL_INCLUDE_SEARCH_PATHS})
-find_library(MKL_CORE_LIB NAMES mkl_core PATHS
-  ${MKL_LIB_SEARCH_PATHS})
-find_library(MKL_SEQUENTIAL_LIB NAMES mkl_sequential PATHS
-  ${MKL_LIB_SEARCH_PATHS})
-find_library(MKL_INTEL_LP64 NAMES mkl_intel_lp64 PATHS
-  ${MKL_LIB_SEARCH_PATHS})
-if(MKL_LAPACK_INC_DIR AND MKL_INC_DIR AND MKL_CORE_LIB AND MKL_SEQUENTIAL_LIB AND MKL_INTEL_LP64)
-  set(CBLAS_FOUND ON)
-  set(CBLAS_PROVIDER MKL)
-  set(CBLAS_INC_DIR ${MKL_INC_DIR} ${MKL_LAPACK_INC_DIR})
-  set(CBLAS_LIBRARIES ${MKL_INTEL_LP64} ${MKL_SEQUENTIAL_LIB} ${MKL_CORE_LIB})
-  add_definitions(-DPADDLE_USE_MKL)
-  add_definitions(-DLAPACK_FOUND)
-  message(STATUS "Found MKL (include: ${MKL_INC_DIR}, library: ${CBLAS_LIBRARIES})")
-  message(STATUS "Found lapack in MKL (include: ${MKL_LAPACK_INC_DIR})")
-  return()
-endif()

## Then find atlas.
set(ATLAS_ROOT $ENV{ATLAS_ROOT} CACHE PATH "Folder contains Atlas")
set(ATLAS_INCLUDE_SEARCH_PATHS
......
@@ -46,16 +46,20 @@ IF(${CBLAS_PROVIDER} STREQUAL "MKLML")
    MESSAGE(STATUS "Build MKLDNN with ${MKLDNN_MKLROOT}")
ENDIF()

SET(MKLDNN_CFLAG "${CMAKE_C_FLAGS} -Wno-error=strict-overflow")
SET(MKLDNN_CXXFLAG "${CMAKE_CXX_FLAGS} -Wno-error=strict-overflow")

ExternalProject_Add(
    ${MKLDNN_PROJECT}
    ${EXTERNAL_PROJECT_LOG_ARGS}
    DEPENDS             ${MKLDNN_DEPENDS}
    GIT_REPOSITORY      "https://github.com/01org/mkl-dnn.git"
-    GIT_TAG             "v0.10"
    GIT_TAG             "v0.11"
    PREFIX              ${MKLDNN_SOURCES_DIR}
    UPDATE_COMMAND      ""
    CMAKE_ARGS          -DCMAKE_INSTALL_PREFIX=${MKLDNN_INSTALL_DIR}
    CMAKE_ARGS          -DMKLROOT=${MKLDNN_MKLROOT}
    CMAKE_ARGS          -DCMAKE_C_FLAGS=${MKLDNN_CFLAG}
    CMAKE_ARGS          -DCMAKE_CXX_FLAGS=${MKLDNN_CXXFLAG}
    CMAKE_CACHE_ARGS    -DCMAKE_INSTALL_PREFIX:PATH=${MKLDNN_INSTALL_DIR}
                        -DMKLROOT:PATH=${MKLDNN_MKLROOT}
)
......
@@ -27,8 +27,8 @@ ENDIF()
INCLUDE(ExternalProject)

SET(MKLML_PROJECT       "extern_mklml")
-SET(MKLML_VER           "mklml_lnx_2018.0.20170720")
-SET(MKLML_URL           "https://github.com/01org/mkl-dnn/releases/download/v0.10/${MKLML_VER}.tgz")
SET(MKLML_VER           "mklml_lnx_2018.0.1.20171007")
SET(MKLML_URL           "https://github.com/01org/mkl-dnn/releases/download/v0.11/${MKLML_VER}.tgz")
SET(MKLML_SOURCE_DIR    "${THIRD_PARTY_PATH}/mklml")
SET(MKLML_DOWNLOAD_DIR  "${MKLML_SOURCE_DIR}/src/${MKLML_PROJECT}")
SET(MKLML_DST_DIR       "mklml")
......
@@ -86,7 +86,7 @@ IF(NOT ${CBLAS_FOUND})
        UPDATE_COMMAND  ""
        CONFIGURE_COMMAND ""
    )
    SET(CBLAS_PROVIDER openblas)
    IF(WITH_C_API)
        INSTALL(DIRECTORY ${CBLAS_INC_DIR} DESTINATION third_party/openblas)
        # Because libopenblas.a is a symbolic link of another library, thus need to
@@ -115,7 +115,7 @@ INCLUDE_DIRECTORIES(${CBLAS_INC_DIR})
# linear algebra libraries for cc_library(xxx SRCS xxx.c DEPS cblas)
SET(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/cblas_dummy.c)
FILE(WRITE ${dummyfile} "const char * dummy = \"${dummyfile}\";")
-IF(${CBLAS_PROVIDER} MATCHES MKL)
IF("${CBLAS_PROVIDER}" STREQUAL "MKLML")
    ADD_LIBRARY(cblas SHARED ${dummyfile})
ELSE()
    ADD_LIBRARY(cblas STATIC ${dummyfile})
......
@@ -93,7 +93,7 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR})
if(NOT APPLE AND NOT ANDROID)
    find_package(Threads REQUIRED)
    link_libraries(${CMAKE_THREAD_LIBS_INIT})
-    set(CMAKE_CXX_LINK_EXECUTABLE "${CMAKE_CXX_LINK_EXECUTABLE} -ldl -lrt")
    set(CMAKE_CXX_LINK_EXECUTABLE "${CMAKE_CXX_LINK_EXECUTABLE} -pthread -ldl -lrt")
endif(NOT APPLE AND NOT ANDROID)

function(merge_static_libs TARGET_NAME)
......
@@ -82,6 +82,11 @@ maxout
.. autoclass:: paddle.v2.layer.maxout
    :noindex:

roi_pool
--------
.. autoclass:: paddle.v2.layer.roi_pool
    :noindex:

Norm Layer
==========
......
@@ -2,112 +2,9 @@
Data Reader Interface and DataSets
==================================

.. toctree::
  :maxdepth: 1

  data/data_reader.rst
  data/image.rst
  data/dataset.rst
=====================
Data Reader Interface
=====================
DataTypes
=========
.. automodule:: paddle.v2.data_type
:members:
:noindex:
DataFeeder
==========
.. automodule:: paddle.v2.data_feeder
:members:
:noindex:
Reader
======
.. automodule:: paddle.v2.reader
:members:
:noindex:
.. automodule:: paddle.v2.reader.creator
:members:
:noindex:
minibatch
=========
.. automodule:: paddle.v2.minibatch
:members:
:noindex:
Dataset
=======
.. automodule:: paddle.v2.dataset
:members:
:noindex:
mnist
+++++
.. automodule:: paddle.v2.dataset.mnist
:members:
:noindex:
cifar
+++++
.. automodule:: paddle.v2.dataset.cifar
:members:
:noindex:
conll05
+++++++
.. automodule:: paddle.v2.dataset.conll05
:members: get_dict,get_embedding,test
:noindex:
imdb
++++
.. automodule:: paddle.v2.dataset.imdb
:members:
:noindex:
imikolov
++++++++
.. automodule:: paddle.v2.dataset.imikolov
:members:
:noindex:
movielens
+++++++++
.. automodule:: paddle.v2.dataset.movielens
:members:
:noindex:
.. autoclass:: paddle.v2.dataset.movielens.MovieInfo
:noindex:
.. autoclass:: paddle.v2.dataset.movielens.UserInfo
:noindex:
sentiment
+++++++++
.. automodule:: paddle.v2.dataset.sentiment
:members:
:noindex:
uci_housing
+++++++++++
.. automodule:: paddle.v2.dataset.uci_housing
:members:
:noindex:
wmt14
+++++
.. automodule:: paddle.v2.dataset.wmt14
:members:
:noindex:
Image Interface
===============
.. automodule:: paddle.v2.image
:members:
# Design Doc: float16
## Why float16
Half precision (float16) is a binary floating-point format that occupies 16 bits of memory. It is half the size of the traditional 32-bit single-precision format (float) and has lower precision and a smaller range.
When high-precision computation is not required, using the float16 data type could potentially
- reduce storage space, memory bandwidth, and power usage;
- increase the chance of data fitting into a smaller cache of lower latency;
- provide arithmetic speed up if supported by hardware.
## Survey of current float16 support
A brief survey of float16 support across compilers, hardware, and libraries can be found below. Interested readers can refer to [link1](https://github.com/PaddlePaddle/Paddle/issues/4853) and [link2](https://github.com/Xreki/Xreki.github.io/blob/master/multi_data_types_in_dl_framework/ppt/float16_and_quantized_type.md) for more info.
The goal of the float16 class is to serve as a key for the executor to find and run the correct version of the compute method specialized for float16 in an operator kernel. It should be compatible with the various natively supported float16 implementations, including `__half` for CUDA, `float16_t` for ARM, and `Eigen::half` for Eigen, to make writing customized float16 kernels easier.
### Compiler
- nvcc supports `__half` data type after CUDA 7.5.
- `__fp16` or `float16_t` is supported as storage type for gcc >= 6.1 and clang >= 3.4.
- `__fp16` or `float16_t` is supported as arithmetic type for gcc >= 7.1 and clang >= 3.9.
### Hardware
- `__half` is supported on GPU with compute capability >= 5.3.
- `__fp16` is supported as storage type for ARMv7-A, ARMv8-A, and above.
- `__fp16` is supported as arithmetic type from ARMv8.2-A onward (currently, the only microarchitecture implementing ARMv8.2-A is ARM Cortex-A75, which was announced in May 2017. There seem to be no application processors currently available on the market that adopt this architecture. It is reported that the Qualcomm Snapdragon 845 uses the Cortex-A75 design and will be available in mobile devices in early 2018).
### Libraries
- [Eigen](https://github.com/RLovelett/eigen) >= 3.3 supports float16 calculation on both GPU and CPU using the `Eigen::half` class. It is mostly useful for Nvidia GPUs because of the overloaded arithmetic operators that use CUDA intrinsics. On the CPU it falls back to software emulation, and there is no special treatment for ARM processors.
- [ARM compute library](https://github.com/ARM-software/ComputeLibrary) >= 17.02.01 supports NEON FP16 kernels (requires ARMv8.2-A CPU).
## Implementation
The float16 class internally holds a single 16-bit `uint16_t` value:
```
struct float16 {
uint16_t x;
};
```
float16 supports the following features:
- constructors / assignment operators that take input from primitive data types including bool, integers of various lengths, float, and double.
- constructors / assignment operators that take input from `__half` on CUDA, `float16_t` on ARM, and `Eigen::half` on Eigen.
- conversion operators to primitive data types and to the half-precision data types on CUDA, ARM and Eigen.
- overloaded arithmetic operators for CUDA, ARM, and non-ARM CPU, respectively. These operators take advantage of the CUDA and ARM intrinsics on the corresponding hardware.
To support the above features, two fundamental conversion functions are provided:
```
float16 float_to_half_rn(float f); // convert to half precision in round-to-nearest-even mode
float half_to_float(float16 h);
```
which provide one-to-one conversion between float32 and float16. These two functions use different conversion routines depending on the current hardware: CUDA/ARM intrinsics are used when the corresponding hardware is available, and if the hardware or compiler does not support float32-to-float16 conversion, software emulation performs the conversion.
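As an illustration of what the software-emulation path does, here is a minimal, self-contained sketch of a bit-level float-to-half conversion in round-to-nearest-even mode (an illustration of the idea only, not Paddle's implementation; subnormal and NaN handling are omitted):
```
#include <cstdint>
#include <cstdio>
#include <cstring>

struct float16 {
  uint16_t x;
};

// Simplified software emulation of float32 -> float16 (round-to-nearest-even).
float16 float_to_half_rn(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  uint16_t sign = static_cast<uint16_t>((bits >> 16) & 0x8000u);
  int32_t exp = static_cast<int32_t>((bits >> 23) & 0xFFu) - 127 + 15;
  uint32_t mant = bits & 0x7FFFFFu;
  float16 h;
  if (exp >= 31) {        // overflow -> infinity
    h.x = sign | 0x7C00u;
  } else if (exp <= 0) {  // underflow -> zero (subnormals omitted here)
    h.x = sign;
  } else {
    uint32_t half = (static_cast<uint32_t>(exp) << 10) | (mant >> 13);
    uint32_t rem = mant & 0x1FFFu;  // truncated bits decide the rounding
    if (rem > 0x1000u || (rem == 0x1000u && (half & 1u))) ++half;
    h.x = sign | static_cast<uint16_t>(half);
  }
  return h;
}

// Simplified software emulation of float16 -> float32 (normal numbers only).
float half_to_float(float16 h) {
  uint32_t sign = static_cast<uint32_t>(h.x & 0x8000u) << 16;
  uint32_t exp = (h.x >> 10) & 0x1Fu;
  uint32_t mant = h.x & 0x3FFu;
  uint32_t bits = sign | ((exp - 15 + 127) << 23) | (mant << 13);
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}

int main() {
  float16 h = float_to_half_rn(3.14159f);
  std::printf("%f -> 0x%04x -> %f\n", 3.14159f,
              static_cast<unsigned>(h.x), half_to_float(h));
}
```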
## To do
After the float16 class is available, some of the follow-up work items are:
- Update pybind/tensor_py.h to bind c++ float16 with numpy float16.
- Modify `GetKernelType()` method in `framework/operator.h` to make it compatible with float16.
- Create a type-casting operator that can convert the data type in tensor between float16 and other types.
@@ -15,6 +15,7 @@
- [CMake](#cmake)
- [Layers](#layers)
- [Activations](#activations)
- [Weights](#weights)
- [Unit Tests](#unit-tests)
- [Protobuf Messages](#protobuf-messages)
- [Python API](#python-api)
@@ -45,17 +46,23 @@ Figure 1. PaddlePaddle on IA.
### Layers
All MKL-DNN-related C++ layers will be placed under `paddle/gserver/layers`, following PaddlePaddle's directory layout,
-and every file name will start with *Mkldnn*.
and every file name will start with *MKLDNN*.

-All MKL-DNN layers will inherit from a parent class called `MkldnnLayer`, which itself inherits from PaddlePaddle's base class `Layer`.
All MKL-DNN layers will inherit from a parent class called `MKLDNNLayer`, which itself inherits from PaddlePaddle's base class `Layer`.
`MKLDNNLayer` provides the necessary interfaces and functions and already implements the basic `forward` and `backward` logic. Some functions are declared as pure virtual, so subclasses only need to implement those.

### Activations
-Since activation functions are independent of the layer concept in PaddlePaddle, a `MkldnnActivation.h` file will be added under `paddle/gserver/activations` to define some interfaces for MKL-DNN; the implementations will remain in the `ActivationFunction.cpp` file.
Since activation functions are independent of the layer concept in PaddlePaddle, `MKLDNNActivation.h` and `MKLDNNActivation.cpp` will be added under `paddle/gserver/activations` to define and use the MKL-DNN interfaces.

### Weights
Since some layers carry parameters, we will try to let the MKL-DNN parameters share one block of memory with PaddlePaddle's `parameter`.
Also, because the parameter layout that MKL-DNN uses during training may differ from PaddlePaddle's default `nchw`, we will convert the layout at the beginning and at the end of training, so that the parameters are ultimately saved in PaddlePaddle's format.

-### Unit Tests
-`test_Mkldnn.cpp` and `MkldnnTester.*` will be added under `paddle/gserver/test` for MKL-DNN testing.
-For activation tests, we plan to add new test types directly to PaddlePaddle's existing test files.
### Unit Tests
`test_MKLDNN.cpp` and `MKLDNNTester.*` will be added under `paddle/gserver/test` for MKL-DNN testing.
The tests consist of unit tests for each layer (or activation) and end-to-end tests of simple networks.
Each test compares the result computed on the CPU by PaddlePaddle against the MKL-DNN result; a test passes when the difference is below some small threshold (see the sketch below).
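Illustration only (the helper below and its signature are assumptions, not the actual `MKLDNNTester` API); the pass criterion described above amounts to an element-wise comparison such as:

```cpp
#include <cmath>
#include <cstddef>
#include <vector>

// Compare the CPU reference output with the MKL-DNN output; the test passes
// when every element differs by no more than a small threshold.
bool outputs_match(const std::vector<float>& cpu_out,
                   const std::vector<float>& mkldnn_out,
                   float threshold = 1e-5f) {
  if (cpu_out.size() != mkldnn_out.size()) return false;
  for (std::size_t i = 0; i < cpu_out.size(); ++i) {
    if (std::fabs(cpu_out[i] - mkldnn_out[i]) > threshold) return false;
  }
  return true;
}
```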
### Protobuf Messages
Depending on the needs of a specific layer, necessary options may be added to `proto/ModelConfig.proto`.

@@ -82,7 +89,7 @@ if use_mkldnn
A `mkldnn` folder will be added under `v1_api_demo`, holding demo scripts for MKL-DNN testing.

### Benchmarking
-Consider adding some logic to `benchmark/paddle/image/run.sh` to add tests that use MKL-DNN.
Add `benchmark/paddle/image/run_mkldnn.sh` for measuring the performance with MKL-DNN enabled.

### Others
1. When MKL-DNN is in use, CPU buffers will be aligned to 64 bytes.
@@ -94,14 +101,16 @@ if use_mkldnn
We have summarized some points that require special attention:

1. Use **deviceId_**. To add as few variables or functions as possible to the parent class Layer, we decided to reuse the existing `deviceId_` variable to distinguish layer properties, defining `-2` as the device ID specific to `MKLDNNLayer`.
2. Override the parent Layer's **init** function and set `deviceId_` to `-2`, meaning the layer runs in the MKL-DNN environment.
-3. Create `MkldnnMatrix` to manage the memory functions, interfaces, and format information that MKL-DNN uses.
3. Create `MKLDNNMatrix`, inheriting from both `CpuMatrix` and `mkldnn::memory`, to manage the memory functions, interfaces, and format information that MKL-DNN uses.
-4. Create `MkldnnBase` to define classes and functions beyond those for layers and memory, including the `MkldnnStream` and `CpuEngine` used by MKL-DNN, and perhaps `FPGAEngine` in the future.
4. Create `MKLDNNBase` to define classes and functions beyond those for layers and memory, including the `MKLDNNStream` and `CPUEngine` used by MKL-DNN, and perhaps `FPGAEngine` in the future.
-5. Add two `MkldnnMatrixPtr` members to **Argument**, named `mkldnnValue` and `mkldnnGrad`, to hold the memory buffers used by `MkldnnLayer`, plus a `cvt` function (to be given a more suitable name) that converts memory between the "CPU device" and the "MKL-DNN device".
-6. Add logic to the parent `Layer`'s `getOutput` function that checks `deviceId` and, when the device differs between MKL-DNN and CPU, performs an upfront conversion, i.e. calls `Argument`'s `cvt` to unify the output on the required device.
-7. Add a `use_mkldnn` flag to the existing `FLAGS` to choose whether to use the MKL-DNN functionality.
-8. On saving MKL-DNN parameters: since the MKL-DNN parameter format can differ from PaddlePaddle's original format, the format information must be saved together with the parameters. We plan to extend the `int32_t version` field in [Header](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/parameter/Parameter.h#L247); this value has always been 0 in both v1 and v2, so we can exploit it and define an enum covering all MKL-DNN parameter formats, so that `MKLDNNLayer` can obtain the needed format information from the loaded parameters.
5. Every `MKLDNNLayer` has `inVal_`, `inGrad_`, `outVal_` and `outGrad_`, representing the input value, input gradient, output value and output gradient respectively. They hold the internal memory used by MKL-DNN. `MKLDNNMatrix` members prefixed with *ext* (external memory) are also defined, mainly to convert memory whose format does not match PaddlePaddle's default `nchw`. The necessary conversion functions are defined in `MKLDNNLayer` in advance; each subclass only needs to call the predefined reset-buffer functions.
6. Every `MKLDNNLayer`'s reset-buffer functions (covering reset of the input/output value and gradient) reset the internal and external memory according to their input arguments; the two may also be identical, meaning no conversion is needed. The guiding principle is that every `MKLDNNLayer` subclass only ever uses the internal memory; all external conversion work is prepared beforehand in the parent class's reset functions.
7. In general, the external memory shares memory with PaddlePaddle's `value` and `grad` whenever possible. Moreover, every `MKLDNNLayer`'s external output value and gradient (`extOutVal_` and `extOutGrad_`) must share memory with `output_.value` and `output_.grad` respectively, because PaddlePaddle's activations use `output_.value` and `output_.grad` directly. If no external buffer is needed for conversion, the internal buffers share memory with them as well.
8. If an MKL-DNN layer is followed by a CPU device, `output_.value` shares memory with `extOutVal_` and the data format is `nchw`, so the next CPU device receives correct data. When a CPU device is present, the external memory format is always `nchw` or `nc`.
9. Since MKL-DNN output operations overwrite data instead of accumulating into it, the gradients of different layers must be merged during `backward` when the network branches. `MKLDNNLayer` implements the merge: the input gradient of each branch is temporarily stored in an `MKLDNNMatrix`, and the layer at the branch point sums them and puts the result into its own `output_.grad` (a small sketch follows this list). Overall, subclasses need not care about branching; this too is handled in the parent class.
10. Add a `use_mkldnn` flag to the existing `FLAGS` to choose whether to use the MKL-DNN functionality.
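A rough sketch of the merge described in point 9, over plain buffers (illustrative only, not the real `MKLDNNMatrix` code):

```cpp
#include <cstddef>
#include <vector>

// Each branch writes its gradient into its own temporary buffer, because
// MKL-DNN primitives overwrite their output instead of accumulating into it;
// the layer at the branch point then sums the buffers into its output grad.
void merge_branch_grads(const std::vector<std::vector<float>>& branch_grads,
                        std::vector<float>* out_grad) {
  out_grad->assign(branch_grads.front().size(), 0.0f);
  for (const std::vector<float>& g : branch_grads) {
    for (std::size_t i = 0; i < g.size(); ++i) {
      (*out_grad)[i] += g[i];
    }
  }
}
```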
## References
......
# Design: Sequence Decoder Generating LoDTensors
In tasks such as machine translation and image-to-text, a [sequence decoder](https://github.com/PaddlePaddle/book/blob/develop/08.machine_translation/README.md) is necessary to generate sequences.
This document describes how to implement the sequence decoder as an operator.

## Beam Search based Decoder

The [beam search algorithm](https://en.wikipedia.org/wiki/Beam_search) is necessary when generating sequences. It is a heuristic search algorithm that explores the paths by expanding the most promising nodes in a limited set.

In the old version of PaddlePaddle, a C++ class `RecurrentGradientMachine` implements the general sequence decoder based on beam search. Due to its complexity, the implementation relies on a lot of special data structures and is hard for users to customize.

There are a lot of heuristic tricks in sequence generation tasks, so the flexibility of the sequence decoder is very important to users.

During PaddlePaddle's refactoring work, new concepts such as [LoDTensor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/lod_tensor.md) and [TensorArray](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/tensor_array.md) were proposed that can better support sequence usage, and they can help make the implementation of the beam search based sequence decoder **more transparent and modular**.

For example, the RNN states, candidate IDs and probabilities of beam search can be represented as `LoDTensors`; the selected candidate IDs in each time step can be stored in a `TensorArray` and `Pack`ed into the translated sentences.
## Changing the LoD's absolute offsets to relative offsets

The current `LoDTensor` is designed to store levels of variable-length sequences. It stores several arrays of integers, each of which represents a level.
The integers in each level represent the begin and end (not inclusive) offsets of a sequence **in the underlying tensor**; let's call this format the **absolute-offset LoD** for clarity.

The absolute-offset LoD can retrieve any sequence quickly but fails to represent empty sequences; for example, a two-level LoD is as follows
```python
[[0, 3, 9]
[0, 2, 3, 3, 3, 9]]
```
The first level tells us that there are two sequences:
- the first's offset is `[0, 3)`
- the second's offset is `[3, 9)`

while on the second level, there are several empty sequences that both begin and end at `3`. It is impossible to tell how many empty second-level sequences exist in the first-level sequences.

There are many scenarios that rely on the representation of empty sequences, such as machine translation or image-to-text, where one instance has no translation or the candidate set for a prefix is empty.

So let's introduce another format of LoD: it stores **the offsets of the lower-level sequences** and is called the **relative-offset** LoD.
For example, to represent the same sequences of the above data
```python
[[0, 3, 6]
[0, 2, 3, 3, 3, 9]]
```
the first level represents that there are two sequences, whose offsets in the second-level LoD are `[0, 3)` and `[3, 5)`.
The second level is the same as in the absolute-offset example because the lower level is a tensor.
It is easy to see that the second sequence in the first-level LoD has two empty sequences.
The following demos are based on relative-offset LoD.
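The mapping between the two formats is mechanical. Here is a small illustrative sketch (with a fresh, self-consistent example; not part of the design) that converts a relative-offset LoD back to absolute offsets:

```python
def relative_to_absolute(lod):
    # The lowest level already stores absolute offsets into the tensor.
    result = [list(lod[-1])]
    # Walk the upper levels bottom-up; each upper-level entry indexes an
    # entry of the level below it.
    for level in reversed(lod[:-1]):
        below = result[0]
        result.insert(0, [below[i] for i in level])
    return result

# Two top-level sequences covering lower-level sequences [0, 2) and [2, 3):
print(relative_to_absolute([[0, 2, 3], [0, 2, 3, 9]]))
# -> [[0, 3, 9], [0, 2, 3, 9]]
```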
## Usage in a simple machine translation model
Let's start from a simple machine translation model that is simplified from [machine translation chapter](https://github.com/PaddlePaddle/book/tree/develop/08.machine_translation) to draw a simple blueprint of what a sequence decoder can do and how to use it.
The model has an encoder that learns the semantic vector from a sequence,
and a decoder which uses the sequence decoder to generate new sentences.
**Encoder**
```python
import paddle as pd
dict_size = 8000
source_dict_size = dict_size
target_dict_size = dict_size
word_vector_dim = 128
encoder_dim = 128
decoder_dim = 128
beam_size = 5
max_length = 120
# encoder
src_word_id = pd.data(
    name='source_language_word',
    type=pd.data.integer_value_sequence(source_dict_size))
# assumed embedding signature: vocabulary size and embedding dimension
src_embedding = pd.embedding(size=source_dict_size, dim=word_vector_dim)
src_word_vec = pd.lookup(src_embedding, src_word_id)
encoder_out_seq = pd.gru(input=src_word_vec, size=encoder_dim)
encoder_ctx = pd.last_seq(encoder_out_seq)
# encoder_ctx_proj is the learned semantic vector
encoder_ctx_proj = pd.fc(
encoder_ctx, size=decoder_dim, act=pd.activation.Tanh(), bias=None)
```
**Decoder**
```python
def generate():
    decoder = pd.while_loop()
    with decoder.step():
        decoder_mem = decoder.memory(init=encoder_ctx)  # mark the memory
        generated_ids = decoder.memory()  # TODO init to batch_size <s>s
        generated_scores = decoder.memory()  # TODO init to batch_size 1s or 0s

        # trg_embedding: the target-side embedding table, defined analogously
        # to src_embedding above.
        target_word = pd.lookup(trg_embedding, generated_ids)
        # expand encoder_ctx's batch to fit target_word's lod
        # for example
        # decoder_mem.lod is
        # [[0 1 3],
        #  [0 1 3 6]]
        # its tensor content is [a1 a2 a3 a4 a5]
        # which means there are 2 sentences to translate
        #   - the first sentence has 1 translation prefix, the offsets are [0, 1)
        #   - the second sentence has 2 translation prefixes, the offsets are [1, 3) and [3, 6)
        # the target_word.lod is
        # [[0, 1, 6]
        #  [0, 2, 4, 7, 9, 12]]
        # which means 2 sentences to translate, with 1 and 5 prefixes respectively
        # the first prefix has 2 candidates
        # the following prefixes have 2, 3, 2, 3 candidates
        # the encoder_ctx_expanded's content will be
        # [a1 a1 a2 a2 a3 a3 a3 a4 a4 a5 a5 a5]
        encoder_ctx_expanded = pd.lod_expand(encoder_ctx, target_word)
        decoder_input = pd.fc(
            act=pd.activation.Linear(),
            input=[target_word, encoder_ctx_expanded],
            size=3 * decoder_dim)
        gru_out, cur_mem = pd.gru_step(
            decoder_input, mem=decoder_mem, size=decoder_dim)
        scores = pd.fc(
            gru_out,
            size=target_dict_size,
            bias=None,
            act=pd.activation.Softmax())
        # K is a config parameter (the beam width)
        topk_scores, topk_ids = pd.top_k(scores, K)
        topk_generated_scores = pd.add_scalar(topk_scores, generated_scores)

        selected_ids, selected_generation_scores = decoder.beam_search(
            topk_ids, topk_generated_scores)

        # update the states
        decoder_mem.update(cur_mem)  # tells how to update state
        generated_ids.update(selected_ids)
        generated_scores.update(selected_generation_scores)

        decoder.output(selected_ids)
        decoder.output(selected_generation_scores)

translation_ids, translation_scores = decoder()
```
`decoder.beam_search` is an operator that, given the candidates and the scores of the translations including those candidates, returns the result of the beam search algorithm.

In this way, users can customize anything on the inputs or outputs of beam search; for example, here are several ways to prune translation prefixes (sketched below):

1. make the corresponding elements in `topk_generated_scores` zero or some small values; beam_search will then discard these candidates.
2. remove some specific candidates in `selected_ids`.
3. get the final `translation_ids` and remove the unwanted translation sequences from it.
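A tiny illustrative sketch of the first two options, using plain Python lists as stand-ins for the LoDTensors (the helper names are mine, not part of the design):

```python
def zero_prefix_scores(topk_generated_scores, prefix_idx):
    # Option 1: zero out a prefix's accumulated candidate scores so that
    # beam_search ranks these candidates at the bottom and discards them.
    topk_generated_scores[prefix_idx] = [0.0] * len(
        topk_generated_scores[prefix_idx])


def remove_candidates(selected_ids, banned_ids):
    # Option 2: drop specific candidate ids from the selected set.
    return [[i for i in ids if i not in banned_ids] for ids in selected_ids]
```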
The implementation of the sequence decoder can reuse the C++ class [RNNAlgorithm](https://github.com/Superjom/Paddle/blob/68cac3c0f8451fe62a4cdf156747d6dc0ee000b3/paddle/operators/dynamic_recurrent_op.h#L30), so the Python syntax is quite similar to that of an [RNN](https://github.com/Superjom/Paddle/blob/68cac3c0f8451fe62a4cdf156747d6dc0ee000b3/doc/design/block.md#blocks-with-for-and-rnnop).

Both of them are two-level `LoDTensors`:

- the first level represents the `batch_size` of (source) sentences;
- the second level represents the candidate ID sets for a translation prefix.

For example, with 3 source sentences to translate, which have 2, 3, and 1 candidates respectively.
Unlike an RNN, in a sequence decoder the previous state and the current state have different LoDs and shapes; a `lod_expand` operator is used to expand the LoD of the previous state to fit the current state.
For example, the previous state
* LoD is `[0, 1, 3][0, 2, 5, 6]`
* content of tensor is `a1 a2 b1 b2 b3 c1`
the current state stored in `encoder_ctx_expanded`
* LoD is `[0, 2, 7][0 3 5 8 9 11 11]`
* the content is
- a1 a1 a1 (a1 has 3 candidates, so the state should be copied 3 times, once for each candidate)
- a2 a2
- b1 b1 b1
- b2
- b3 b3
- None (c1 has 0 candidates, so c1 is dropped)
Benefiting from the relative-offset LoD, empty candidate sets can be represented naturally.

The state in each time step can be stored in a `TensorArray` and `Pack`ed into a final `LoDTensor`; the corresponding syntax is
```python
decoder.output(selected_ids)
decoder.output(selected_generation_scores)
```
the `selected_ids` are the candidate ids for the prefixes; they will be `Pack`ed by the `TensorArray` into a two-level `LoDTensor`, where the first level represents the source sequences and the second level represents the generated sequences.

Packing the `selected_scores` yields a `LoDTensor` that stores the score of each translation candidate.

Packing the `selected_generation_scores` yields a `LoDTensor`, and each of its tail elements is the probability of a translation.
## LoD and shape changes during decoding
<p align="center">
<img src="./images/LOD-and-shape-changes-during-decoding.jpg"/>
</p>
According to the image above, the only phase that changes the LoD is beam search.
## Beam search design
The beam search algorithm will be implemented as a method of the sequence decoder. It has 3 inputs:

1. `topk_ids`, the top-K candidate ids for each prefix.
2. `topk_scores`, the corresponding scores for `topk_ids`.
3. `generated_scores`, the scores of the prefixes.

All of these are LoDTensors, so the sequence affiliation is clear. Beam search will keep a beam for each prefix and select a smaller candidate set for each prefix.

It will return three variables:

1. `selected_ids`, the final candidates the beam search function selected for the next step.
2. `selected_scores`, the scores of these candidates.
3. `generated_scores`, the updated scores of each prefix (with the new candidates appended), as the sketch below illustrates.
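A minimal sketch of one such step over plain Python lists (illustrative only; the real operator works on LoDTensors):

```python
def beam_search_step(topk_ids, topk_scores, generated_scores, beam_size):
    """Keep a beam per prefix: topk_ids[i] / topk_scores[i] are the top-K
    candidates for prefix i, generated_scores[i] its accumulated score."""
    selected_ids, selected_scores, new_generated_scores = [], [], []
    for ids, scores, prefix_score in zip(topk_ids, topk_scores,
                                         generated_scores):
        # rank each candidate by the prefix score plus its own score
        candidates = sorted(zip(ids, scores),
                            key=lambda c: prefix_score + c[1],
                            reverse=True)[:beam_size]
        selected_ids.append([c[0] for c in candidates])
        selected_scores.append([c[1] for c in candidates])
        new_generated_scores.append(
            [prefix_score + c[1] for c in candidates])
    return selected_ids, selected_scores, new_generated_scores
```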
## Introducing the LoD-based `Pack` and `Unpack` methods in `TensorArray`
The `selected_ids`, `selected_scores` and `generated_scores` are LoDTensors that exist at each time step, so it is natural to store them in arrays.

Currently, PaddlePaddle has a module called `TensorArray` which can store an array of tensors. The results of beam search are better stored in a `TensorArray`.

`Pack` and `UnPack` in `TensorArray` are used to pack the tensors in the array into one `LoDTensor`, or to unpack one `LoDTensor` into an array of tensors. They need some extensions to support packing or unpacking an array of `LoDTensors`.
@@ -99,7 +99,7 @@ PaddlePaddle supports sparse training; sparse training requires the features to be :code:`spa
Making use of more computing resources
++++++++++++++++++
Making use of more computing resources can be divided into the following approaches:

* single-machine CPU training
......
@@ -214,7 +214,7 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs,
```cpp
// if use Eigen unsupported module before include head files
-#define EIGEN_USE_GPU
// #define EIGEN_USE_GPU

namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel<paddle::platform::GPUPlace, float>);
......
@@ -54,6 +54,46 @@ paddle_error paddle_matrix_set_row(paddle_matrix mat,
  return kPD_NO_ERROR;
}
PD_API paddle_error paddle_matrix_set_value(paddle_matrix mat,
paddle_real* value) {
if (mat == nullptr || value == nullptr) return kPD_NULLPTR;
auto ptr = cast(mat);
if (ptr->mat == nullptr) return kPD_NULLPTR;
paddle::real* buf = ptr->mat->getRowBuf(0);
size_t width = ptr->mat->getWidth();
size_t height = ptr->mat->getHeight();
if (ptr->mat->useGpu()) {
#ifdef PADDLE_WITH_CUDA
hl_memcpy(buf, value, sizeof(paddle::real) * width * height);
#else
return kPD_NOT_SUPPORTED;
#endif
} else {
std::copy(value, value + width * height, buf);
}
return kPD_NO_ERROR;
}
PD_API paddle_error paddle_matrix_get_value(paddle_matrix mat,
paddle_real* result) {
if (mat == nullptr || result == nullptr) return kPD_NULLPTR;
auto ptr = cast(mat);
if (ptr->mat == nullptr) return kPD_NULLPTR;
paddle::real* buf = ptr->mat->getRowBuf(0);
size_t width = ptr->mat->getWidth();
size_t height = ptr->mat->getHeight();
if (ptr->mat->useGpu()) {
#ifdef PADDLE_WITH_CUDA
hl_memcpy(result, buf, width * height * sizeof(paddle::real));
#else
return kPD_NOT_SUPPORTED;
#endif
} else {
std::copy(buf, buf + width * height, result);
}
return kPD_NO_ERROR;
}
paddle_error paddle_matrix_get_row(paddle_matrix mat,
                                   uint64_t rowID,
                                   paddle_real** rawRowBuffer) {
......
@@ -27,18 +27,20 @@ int main() {
  CHECK(paddle_arguments_resize(in_args, 1));

  // Create input matrix.
-  paddle_matrix mat = paddle_matrix_create(/* sample_num */ 1,
  paddle_matrix mat = paddle_matrix_create(/* sample_num */ 10,
                                           /* size */ 784,
                                           /* useGPU */ false);
  srand(time(0));

-  paddle_real* array;
-  // Get First row.
-  CHECK(paddle_matrix_get_row(mat, 0, &array));
-  for (int i = 0; i < 784; ++i) {
-    array[i] = rand() / ((float)RAND_MAX);
  std::vector<paddle_real> input;
  input.resize(784 * 10);
  for (int i = 0; i < input.size(); ++i) {
    input[i] = rand() / ((float)RAND_MAX);
  }

  // Set value for the input matrix.
  CHECK(paddle_matrix_set_value(mat, input.data()));

  CHECK(paddle_arguments_set_value(in_args, 0, mat));
@@ -51,11 +53,17 @@ int main() {
  CHECK(paddle_arguments_get_value(out_args, 0, prob));

-  CHECK(paddle_matrix_get_row(prob, 0, &array));
  std::vector<paddle_real> result;
  uint64_t height;
  uint64_t width;
  CHECK(paddle_matrix_get_shape(prob, &height, &width));
  result.resize(height * width);
  CHECK(paddle_matrix_get_value(prob, result.data()));

  printf("Prob: ");
-  for (int i = 0; i < 10; ++i) {
-    printf("%.2f ", array[i]);
  for (int i = 0; i < height * width; ++i) {
    printf("%.2f ", result[i]);
  }
  printf("\n");
......
@@ -70,6 +70,16 @@ PD_API paddle_error paddle_matrix_set_row(paddle_matrix mat,
                                          uint64_t rowID,
                                          paddle_real* rowArray);

/**
 * @brief paddle_matrix_set_value Set the values of a matrix.
 * @param mat Target matrix
 * @param value Row-major data.
 * @return paddle_error
 * @note value should contain enough elements of data to initialize the mat
 */
PD_API paddle_error paddle_matrix_set_value(paddle_matrix mat,
                                            paddle_real* value);
/**
 * @brief PDMatGetRow Get raw row buffer from matrix
 * @param [in] mat Target matrix
@@ -81,6 +91,15 @@ PD_API paddle_error paddle_matrix_get_row(paddle_matrix mat,
                                          uint64_t rowID,
                                          paddle_real** rawRowBuffer);

/**
 * @brief copy data from the matrix
 * @param [in] mat Target matrix
 * @param [out] result pointer to store the matrix data
 * @return paddle_error
 * @note the space for the result should be allocated before invoking this API
 */
PD_API paddle_error paddle_matrix_get_value(paddle_matrix mat,
                                            paddle_real* result);
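A minimal usage sketch of the two new calls (illustrative only; `CHECK` is the helper macro used by the C-API example program above):

    paddle_matrix mat = paddle_matrix_create(/* height */ 2, /* width */ 3,
                                             /* useGpu */ false);
    paddle_real in[6] = {0, 1, 2, 3, 4, 5};
    paddle_real out[6];
    CHECK(paddle_matrix_set_value(mat, in));   /* copy the buffer in */
    CHECK(paddle_matrix_get_value(mat, out));  /* copy the matrix back out */
    CHECK(paddle_matrix_destroy(mat));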
/**
 * @brief PDMatCreateNone Create None Matrix
 * @return
......
@@ -45,3 +45,49 @@ TEST(CAPIMatrix, createNone) {
  paddle_matrix mat = paddle_matrix_create_none();
  ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_destroy(mat));
}
TEST(CAPIMatrix, cpu_get_set_value) {
paddle_matrix mat = paddle_matrix_create(128, 32, false);
std::vector<paddle_real> sample;
std::vector<paddle_real> result;
sample.resize(128 * 32);
result.resize(128 * 32);
for (size_t i = 0; i < sample.size(); ++i) {
sample[i] = 1.0 / (i + 1.0);
}
ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_set_value(mat, sample.data()));
ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_value(mat, result.data()));
for (size_t i = 0; i < sample.size(); ++i) {
ASSERT_NEAR(sample[i], result[i], 1e-5);
}
uint64_t height, width;
ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_shape(mat, &height, &width));
ASSERT_EQ(128UL, height);
ASSERT_EQ(32UL, width);
ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_destroy(mat));
}
#ifdef PADDLE_WITH_CUDA
TEST(CAPIMatrix, gpu_get_set_value) {
paddle_matrix mat = paddle_matrix_create(128, 32, true);
std::vector<paddle_real> sample;
std::vector<paddle_real> result;
sample.resize(128 * 32);
result.resize(128 * 32);
for (size_t i = 0; i < sample.size(); ++i) {
sample[i] = 1.0 / (i + 1.0);
}
ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_set_value(mat, sample.data()));
ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_value(mat, result.data()));
for (size_t i = 0; i < sample.size(); ++i) {
ASSERT_NEAR(sample[i], result[i], 1e-5);
}
uint64_t height, width;
ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_shape(mat, &height, &width));
ASSERT_EQ(128UL, height);
ASSERT_EQ(32UL, width);
ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_destroy(mat));
}
#endif
@@ -321,8 +321,6 @@ static void CreateGradVarInBlock(
      auto* param = block_desc->FindVarRecursive(pname);
      auto* grad = block_desc->FindVar(arg);
      if (param == nullptr) {
-        LOG(WARNING) << "Cannot find forward variable of " << arg
-                     << ". Set its gradient to FP32";
        grad->SetDataType(DataType::FP32);
      } else {
        grad->SetDataType(param->GetDataType());
@@ -408,6 +406,11 @@ std::vector<std::unique_ptr<OpDescBind>> MakeBlockBackward(
    for (const auto& desc : op_grads) {
      for (const std::string& out_name : desc->OutputArgumentNames()) {
        if (out_name.find("@GRAD") == std::string::npos) {
          // Not all outputs of a backward operator are gradients; only
          // gradients need to be summed. Skip variables that are not
          // gradients.
          continue;
        }
        dup_out_ops[out_name].emplace_back(grad_desc_idx);
      }
      ++grad_desc_idx;
......
@@ -21,7 +21,7 @@
#include "paddle/framework/var_desc.h"
#include "paddle/operators/net_op.h"

-USE_OP(fill_constant);
USE_NO_KERNEL_OP(fill_constant);

namespace paddle {
namespace framework {
......
@@ -50,6 +50,15 @@ VarDescBind *BlockDescBind::FindVarRecursive(const std::string &name) const {
  return it->second.get();
}
VarDescBind *BlockDescBind::FindRecursiveOrCreateVar(
const std::string &name_bytes) {
VarDescBind *res = FindVarRecursive(name_bytes);
if (res == nullptr) {
res = Var(name_bytes);
}
return res;
}
bool BlockDescBind::HasVarRecursive(const std::string &name) const {
  return FindVarRecursive(name) != nullptr;
}
......
@@ -58,6 +58,8 @@ class BlockDescBind {
  VarDescBind *FindVarRecursive(const std::string &name_bytes) const;

  VarDescBind *FindRecursiveOrCreateVar(const std::string &name_bytes);

  bool HasVarRecursive(const std::string &var_name) const;

  std::set<std::string> LocalVarNames() const {
......
@@ -34,6 +34,21 @@ inline DataType ToDataType(std::type_index type) {
  }
}
inline std::type_index ToTypeIndex(DataType type) {
switch (type) {
case DataType::FP32:
return typeid(float);
case DataType::FP64:
return typeid(double);
case DataType::INT32:
return typeid(int);
case DataType::INT64:
return typeid(int64_t);
default:
PADDLE_THROW("Not support type %d", type);
}
}
template <typename Visitor>
inline void VisitDataType(DataType type, Visitor visitor) {
  switch (type) {
......
@@ -79,6 +79,13 @@ DDim make_ddim(const std::vector<int64_t>& dims) {
  return result;
}
DDim make_ddim(const std::vector<int>& dims) {
std::vector<int64_t> res(dims.size());
std::transform(dims.begin(), dims.end(), res.begin(),
[](int d) { return static_cast<int64_t>(d); });
return make_ddim(res);
}
/// @cond HIDDEN
// XXX For some reason, putting this in an anonymous namespace causes errors
class DynamicMutableIndexer : public boost::static_visitor<int64_t&> {
@@ -117,7 +124,7 @@ int64_t DDim::operator[](int idx) const {
  return boost::apply_visitor(DynamicConstIndexer(idx), var);
}

-int64_t DDim::size() const { return arity(*this); }
int DDim::size() const { return arity(*this); }

bool DDim::operator==(DDim d) const {
  if (var.which() != d.getVar().which()) {
......
@@ -71,7 +71,7 @@ struct DDim {
  DDim operator*(DDim d) const;

-  int64_t size() const;
  int size() const;
};

/**
@@ -81,6 +81,8 @@ struct DDim {
 */
DDim make_ddim(const std::vector<int64_t>& dims);

DDim make_ddim(const std::vector<int>& dims);

/**
 * \brief Make a DDim from an initializer list
 *
......
@@ -31,6 +31,7 @@ void LoDRankTable::Reset(const LoD& lod, size_t level) {
    TableItem item;
    item.index = i;
    item.length = vec[i + 1] - vec[i];
    VLOG(10) << "Add item to rank table " << item.index << " " << item.length;
    items_.emplace_back(item);
  }
  // NOTE(yuyang18):
......
@@ -27,6 +27,20 @@

namespace paddle {
namespace framework {
std::ostream& operator<<(std::ostream& os, const LoD& lod) {
os << "{";
for (auto& v : lod) {
os << "{";
for (auto& i : v) {
os << i << ",";
}
os << "}";
}
os << "}";
return os;
}
LoD SliceLevels(const LoD& in, size_t level_begin, size_t level_end) {
  LoD new_lod;
  new_lod.reserve(level_end - level_begin);
@@ -136,37 +150,35 @@ void LoDTensor::ShrinkInLevel(size_t level, size_t elem_begin,
  ShareDataWith(Slice(begin, end));
}

-void GetFineGrainedLoDLength(const LoD& lod, size_t start_idx, size_t end_idx,
-                             std::vector<std::vector<size_t>>* lod_length,
-                             size_t* start_offset) {
-  lod_length->clear();
-  PADDLE_ENFORCE(start_idx < lod.size() - 1,
-                 "start_idx should be >= 0 and < lod.size() - 1.");
-  PADDLE_ENFORCE(end_idx < lod.size(),
-                 "end_idx should be >= 0 and < lod.size().");
-  PADDLE_ENFORCE_LE(start_idx, end_idx,
-                    "start_idx should be less than end_idx.");
-  for (size_t level_idx = 0; level_idx < lod.size(); ++level_idx) {
using LoDAndOffset = std::pair<LoD, std::pair<size_t, size_t>>;
LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD& lod, size_t start_idx,
                                        size_t end_idx, size_t start_level) {
  LoD sub_lod;
  for (size_t level_idx = start_level; level_idx < lod.size(); ++level_idx) {
    PADDLE_ENFORCE_LE(start_idx, end_idx);
    PADDLE_ENFORCE_LT(end_idx, lod[level_idx].size());
    std::vector<size_t> level_lens;
    for (size_t i = start_idx; i < end_idx; ++i) {
      level_lens.push_back(lod[level_idx][i + 1] - lod[level_idx][i]);
    }
-    lod_length->emplace_back(level_lens);
    sub_lod.emplace_back(level_lens);
    start_idx = lod[level_idx][start_idx];
    end_idx = lod[level_idx][end_idx];
  }
-  *start_offset = start_idx;
  return LoDAndOffset{sub_lod, {start_idx, end_idx}};
}

-void AppendLoD(LoD* lod, const std::vector<std::vector<size_t>>& lod_length) {
-  PADDLE_ENFORCE_EQ(
-      lod->size(), lod_length.size(),
-      "The lod_length should has the same size with the appended lod.");
void AppendLoD(LoD* lod, const LoD& lod_length) {
  PADDLE_ENFORCE(
      lod->empty() || lod->size() == lod_length.size(),
      "The lod_length should has the same size with the appended lod.");
  if (lod->empty()) {
    *lod = LoD(lod_length.size(), std::vector<size_t>({0}));
  }
  for (size_t i = 0; i < lod->size(); ++i) {
    auto& level = (*lod)[i];
-    if (level.empty()) {
-      level.push_back(0);
-    }
    for (size_t len : lod_length[i]) {
      level.push_back(level.back() + len);
    }
......
@@ -56,6 +56,8 @@ using Vector = thrust::host_vector<
 */
using LoD = std::vector<Vector<size_t>>;

std::ostream& operator<<(std::ostream& os, const LoD& lod);

/*
 * Slice levels from a LoD.
 * NOTE the lowest level should always be the absolute offsets of the underlying
@@ -181,11 +183,10 @@ LoDTensor LodExpand(const LoDTensor& source, const LoD& lod, size_t level,
  return tensor;
}

-void GetFineGrainedLoDLength(const LoD& lod, size_t start_idx, size_t end_idx,
-                             std::vector<std::vector<size_t>>* lod_length,
-                             size_t* start_offset);
std::pair<LoD, std::pair<size_t, size_t>> GetSubLoDAndAbsoluteOffset(
    const LoD& lod, size_t start_idx, size_t end_idx, size_t start_level);

-void AppendLoD(LoD* lod, const std::vector<std::vector<size_t>>& lod_length);
void AppendLoD(LoD* lod, const LoD& lod_length);

}  // namespace framework
}  // namespace paddle
@@ -146,43 +146,44 @@ TEST(LodExpand, test) {

TEST(LoD, GetFineGrainedLoDLength) {
  LoD lod;
-  lod.push_back(std::vector<size_t>{0, 2, 4, 5});
-  lod.push_back(std::vector<size_t>{0, 1, 6, 8, 10, 11});
-  lod.push_back(
-      std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20, 24, 26, 29});
  lod.push_back(std::vector<size_t>({0, 2, 4, 5}));
  lod.push_back(std::vector<size_t>({0, 1, 6, 8, 10, 11}));
  lod.push_back(
      std::vector<size_t>({0, 2, 5, 7, 10, 12, 15, 17, 20, 24, 26, 29}));

-  std::vector<std::vector<size_t>> lod_length;
-  size_t start_offset;
-  paddle::framework::GetFineGrainedLoDLength(lod, 1, 2, &lod_length,
-                                             &start_offset);
  auto lod_and_offset =
      paddle::framework::GetSubLoDAndAbsoluteOffset(lod, 1, 2, 0);
  LoD lod_length = lod_and_offset.first;
  size_t start_offset = lod_and_offset.second.first;
  size_t end_offset = lod_and_offset.second.second;

-  std::vector<std::vector<size_t>> expected;
  LoD expected;
  expected.push_back(std::vector<size_t>{2});
  expected.push_back(std::vector<size_t>{2, 2});
  expected.push_back(std::vector<size_t>{2, 3, 4, 2});
  EXPECT_EQ(lod_length, expected);
  EXPECT_EQ(start_offset, 15UL);
  EXPECT_EQ(end_offset, 26UL);
}

TEST(LoD, AppendLoD) {
-  std::vector<std::vector<size_t>> lod_lens;
-  lod_lens.push_back(std::vector<size_t>{2});
-  lod_lens.push_back(std::vector<size_t>{2, 2});
-  lod_lens.push_back(std::vector<size_t>{2, 3, 4, 2});
  LoD lod_lens;
  lod_lens.push_back(std::vector<size_t>({2}));
  lod_lens.push_back(std::vector<size_t>({2, 2}));
  lod_lens.push_back(std::vector<size_t>({2, 3, 4, 2}));

  LoD origin;
-  origin.push_back(std::vector<size_t>{0, 2});
-  origin.push_back(std::vector<size_t>{0, 1, 6});
-  origin.push_back(std::vector<size_t>{0, 2, 5, 7, 10, 12, 15});
  origin.push_back(std::vector<size_t>({0, 2}));
  origin.push_back(std::vector<size_t>({0, 1, 6}));
  origin.push_back(std::vector<size_t>({0, 2, 5, 7, 10, 12, 15}));

  paddle::framework::AppendLoD(&origin, lod_lens);

  LoD expected;
-  expected.push_back(std::vector<size_t>{0, 2, 4});
-  expected.push_back(std::vector<size_t>{0, 1, 6, 8, 10});
-  expected.push_back(
-      std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20, 24, 26});
  expected.push_back(std::vector<size_t>({0, 2, 4}));
  expected.push_back(std::vector<size_t>({0, 1, 6, 8, 10}));
  expected.push_back(
      std::vector<size_t>({0, 2, 5, 7, 10, 12, 15, 17, 20, 24, 26}));
  EXPECT_EQ(origin, expected);
}
......
@@ -357,7 +357,8 @@ void OpDescBind::InferVarType(BlockDescBind *block) const {
          "LOD_TENSOR";
  for (auto &out_pair : this->outputs_) {
    for (auto &out_var_name : out_pair.second) {
-      block->Var(out_var_name)->SetType(VarDesc::LOD_TENSOR);
      block->FindRecursiveOrCreateVar(out_var_name)
          ->SetType(VarDesc::LOD_TENSOR);
    }
  }
}
......
@@ -92,8 +92,7 @@ struct OpKernelRegistrarFunctor<PlaceType, false, I, KernelTypes...> {
   void operator()(const char* op_type) const {
     using T = typename KERNEL_TYPE::ELEMENT_TYPE;
-    OperatorWithKernel::OpKernelKey key(ToDataType(std::type_index(typeid(T))),
-                                        PlaceType());
+    OpKernelType key(ToDataType(std::type_index(typeid(T))), PlaceType());
     OperatorWithKernel::AllOpKernels()[op_type][key].reset(new KERNEL_TYPE);
     constexpr auto size = std::tuple_size<std::tuple<KernelTypes...>>::value;
...
@@ -254,8 +254,7 @@ std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
   return res;
 }
 
-std::ostream& operator<<(std::ostream& os,
-                         const OperatorWithKernel::OpKernelKey& kernel_key) {
+std::ostream& operator<<(std::ostream& os, const OpKernelType& kernel_key) {
   os << "place[" << kernel_key.place_ << "]:data_type[" << kernel_key.data_type_
      << "]";
   return os;
@@ -432,7 +431,7 @@ void OperatorWithKernel::Run(const Scope& scope,
 
   // check if op[type] have kernel for kernel_key
   OpKernelMap& kernels = kernels_iter->second;
-  auto kernel_key = OpKernelKey(IndicateDataType(ctx), dev_ctx);
+  auto kernel_key = GetKernelType(ctx);
   auto kernel_iter = kernels.find(kernel_key);
 
   if (kernel_iter == kernels.end()) {
@@ -440,6 +439,41 @@ void OperatorWithKernel::Run(const Scope& scope,
   }
 
   kernel_iter->second->Compute(ctx);
+
+  // throw errors if any
+  dev_ctx.Finish();
+}
+
+OpKernelType OperatorWithKernel::GetKernelType(
+    const ExecutionContext& ctx) const {
+  return OpKernelType(IndicateDataType(ctx), ctx.device_context());
+}
+
+DataType OperatorWithKernel::IndicateDataType(
+    const ExecutionContext& ctx) const {
+  auto& scope = ctx.scope();
+  int data_type = -1;
+  for (auto& input : this->inputs_) {
+    for (auto& ipt_name : input.second) {
+      auto* var = scope.FindVar(ipt_name);
+      if (var != nullptr) {
+        const Tensor* t = nullptr;
+        if (var->IsType<Tensor>()) {
+          t = &var->Get<Tensor>();
+        } else if (var->IsType<LoDTensor>()) {
+          t = &var->Get<LoDTensor>();
+        } else if (var->IsType<SelectedRows>()) {
+          t = &(var->Get<SelectedRows>().value());
+        }
+        if (t != nullptr) {
+          int tmp = static_cast<int>(ToDataType(t->type()));
+          PADDLE_ENFORCE(tmp == data_type || data_type == -1,
+                         "DataType of Paddle Op %s must be the same.", Type());
+          data_type = tmp;
+        }
+      }
+    }
+  }
+  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
+  return static_cast<DataType>(data_type);
 }
 
 }  // namespace framework
...
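GetKernelType is the new override point: instead of reporting only a DataType, an operator now returns a full OpKernelType (data type plus place). A hypothetical sketch of an operator that pins its kernel key (the class name is illustrative; the updated test further below does the same thing):

    // Hypothetical operator: always select the FP32 kernel on whatever
    // device the execution context provides.
    class FixedTypeOp : public OperatorWithKernel {
     protected:
      OpKernelType GetKernelType(const ExecutionContext& ctx) const override {
        return OpKernelType(DataType::FP32, ctx.device_context());
      }
    };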
@@ -345,27 +345,10 @@ class OpKernel : public OpKernelBase {
   using ELEMENT_TYPE = T;
 };
 
-class OperatorWithKernel : public OperatorBase {
- public:
-  struct OpKernelKey {
-    platform::Place place_;
-    DataType data_type_;
-
-    OpKernelKey(DataType data_type, platform::Place place)
-        : place_(place), data_type_(data_type) {}
-
-    OpKernelKey(DataType data_type, const platform::DeviceContext& dev_ctx)
-        : place_(dev_ctx.GetPlace()), data_type_(data_type) {}
-
-    bool operator==(const OpKernelKey& o) const {
-      return platform::places_are_same_class(place_, o.place_) &&
-             data_type_ == o.data_type_;
-    }
-  };
-
-  struct OpKernelHash {
+struct OpKernelType {
+  struct Hash {
     std::hash<int> hash_;
-    size_t operator()(const OpKernelKey& key) const {
+    size_t operator()(const OpKernelType& key) const {
       int place = key.place_.which();
       int data_type = static_cast<int>(key.data_type_);
       int pre_hash = data_type << NUM_PLACE_TYPE_LIMIT_IN_BIT |
@@ -374,9 +357,26 @@ class OperatorWithKernel : public OperatorBase {
     }
   };
 
+  platform::Place place_;
+  DataType data_type_;
+
+  OpKernelType(DataType data_type, platform::Place place)
+      : place_(place), data_type_(data_type) {}
+
+  OpKernelType(DataType data_type, const platform::DeviceContext& dev_ctx)
+      : place_(dev_ctx.GetPlace()), data_type_(data_type) {}
+
+  bool operator==(const OpKernelType& o) const {
+    return platform::places_are_same_class(place_, o.place_) &&
+           data_type_ == o.data_type_;
+  }
+};
+
+class OperatorWithKernel : public OperatorBase {
+ public:
   using OpKernelMap =
-      std::unordered_map<OpKernelKey, std::unique_ptr<OpKernelBase>,
-                         OpKernelHash>;
+      std::unordered_map<OpKernelType, std::unique_ptr<OpKernelBase>,
+                         OpKernelType::Hash>;
 
   OperatorWithKernel(const std::string& type, const VariableNameMap& inputs,
                      const VariableNameMap& outputs, const AttributeMap& attrs)
@@ -404,40 +404,15 @@ class OperatorWithKernel : public OperatorBase {
   }
 
  protected:
+  virtual OpKernelType GetKernelType(const ExecutionContext& ctx) const;
+
+ private:
   // indicate kernel DataType by input data. By default all input data must
   // be the same.
-  virtual DataType IndicateDataType(const ExecutionContext& ctx) const {
-    auto& scope = ctx.scope();
-    int data_type = -1;
-    for (auto& input : this->inputs_) {
-      for (auto& ipt_name : input.second) {
-        auto* var = scope.FindVar(ipt_name);
-        if (var != nullptr) {
-          const Tensor* t = nullptr;
-          if (var->IsType<Tensor>()) {
-            t = &var->Get<Tensor>();
-          } else if (var->IsType<LoDTensor>()) {
-            t = &var->Get<LoDTensor>();
-          } else if (var->IsType<SelectedRows>()) {
-            t = &(var->Get<SelectedRows>().value());
-          }
-          if (t != nullptr) {
-            int tmp = static_cast<int>(ToDataType(t->type()));
-            PADDLE_ENFORCE(tmp == data_type || data_type == -1,
-                           "DataType of Paddle Op %s must be the same.",
-                           Type());
-            data_type = tmp;
-          }
-        }
-      }
-    }
-    PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
-    return static_cast<DataType>(data_type);
-  }
+  DataType IndicateDataType(const ExecutionContext& ctx) const;
 };
 
-std::ostream& operator<<(std::ostream& os,
-                         const OperatorWithKernel::OpKernelKey& kernel_key);
+std::ostream& operator<<(std::ostream& os, const OpKernelType& kernel_key);
 
 extern bool OpSupportGPU(const std::string& op_type);
...
@@ -114,8 +114,8 @@ class OpWithKernelTest : public OperatorWithKernel {
 protected:
   void InferShape(framework::InferShapeContext* ctx) const override {}
 
-  DataType IndicateDataType(const ExecutionContext& ctx) const override {
-    return DataType::FP32;
+  OpKernelType GetKernelType(const ExecutionContext& ctx) const override {
+    return OpKernelType(DataType::FP32, ctx.device_context());
   }
 };
...
@@ -98,5 +98,23 @@ void Scope::DeleteScope(Scope* scope) {
   delete scope;
 }
 
+void Scope::Rename(const std::string& origin_name,
+                   const std::string& new_name) const {
+  auto origin_it = vars_.find(origin_name);
+  PADDLE_ENFORCE(origin_it != vars_.end(),
+                 "Cannot find original variable with name %s", origin_name);
+  auto new_it = vars_.find(new_name);
+  PADDLE_ENFORCE(new_it == vars_.end(),
+                 "The variable with name %s is already in the scope", new_name);
+  vars_[new_name] = origin_it->second;
+  vars_.erase(origin_it);
+}
+
+std::string Scope::Rename(const std::string& origin_name) const {
+  auto var_name = string::Sprintf("%p.%d", this, vars_.size());
+  Rename(origin_name, var_name);
+  return var_name;
+}
+
 }  // namespace framework
 }  // namespace paddle
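A short usage sketch for the two overloads (assuming a Scope `scope` that already owns a variable "x"; the generated name follows the "%p.%d" pattern above, so the exact string varies per run):

    scope.Rename("x", "y");               // explicit target; enforces that
                                          // "x" exists and "y" does not
    std::string tmp = scope.Rename("y");  // collision-free generated name,
                                          // e.g. "0x7f...48.3", returned back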
@@ -68,11 +68,18 @@ class Scope {
   // enumerate all the variables current contains.
   std::vector<std::string> GetAllNames(bool recursive = false) const;
 
+  // Rename variable to a new name
+  void Rename(const std::string& origin_name,
+              const std::string& new_name) const;
+
+  // Rename variable to a new name and return the new name
+  std::string Rename(const std::string& origin_name) const;
+
  private:
   // Call Scope::NewScope for a sub-scope.
   explicit Scope(Scope const* parent) : parent_(parent) {}
 
-  std::unordered_map<std::string, Variable*> vars_;
+  mutable std::unordered_map<std::string, Variable*> vars_;
   mutable std::list<Scope*> kids_;
   Scope const* parent_{nullptr};
...
@@ -52,7 +52,7 @@ struct SizeOfTypeFunctor<HEAD, TAIL...> {
 };
 
 static inline size_t SizeOfType(std::type_index type) {
-  SizeOfTypeFunctor<int, float, double, int16_t, int64_t> functor;
+  SizeOfTypeFunctor<int, float, double, int16_t, int64_t, bool> functor;
   size_t size = functor(type);
   PADDLE_ENFORCE(size != 0UL, "Cannot get size of type %s", type.name());
   return size;
...
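The change above only appends bool to the list of types the functor chain can size, so sizing a bool tensor no longer trips the PADDLE_ENFORCE. As a standalone sketch of the chain pattern itself (plain C++, independent of Paddle; all names are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <typeindex>

    // Recursive functor chain: each link answers for one type or defers to
    // the tail; an empty chain answers 0, meaning "unknown type".
    template <typename... T>
    struct SizeOfTypeFunctor;

    template <typename T>
    struct SizeOfTypeFunctor<T> {
      std::size_t operator()(std::type_index type) const {
        return type == std::type_index(typeid(T)) ? sizeof(T) : 0UL;
      }
    };

    template <>
    struct SizeOfTypeFunctor<> {
      std::size_t operator()(std::type_index) const { return 0UL; }
    };

    template <typename HEAD, typename... TAIL>
    struct SizeOfTypeFunctor<HEAD, TAIL...> {
      std::size_t operator()(std::type_index type) const {
        std::size_t sz = SizeOfTypeFunctor<HEAD>()(type);
        return sz != 0 ? sz : SizeOfTypeFunctor<TAIL...>()(type);
      }
    };

    // Usage: with bool in the chain, typeid(bool) resolves to sizeof(bool).
    // SizeOfTypeFunctor<int, float, double, int16_t, int64_t, bool>()(
    //     std::type_index(typeid(bool)));  // == 1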
@@ -45,7 +45,8 @@ void VarDescBind::SetLoDLevel(int32_t lod_level) {
       desc_.mutable_tensor_array()->set_lod_level(lod_level);
       break;
     default:
-      PADDLE_THROW("Tensor type=%d does not support LoDLevel", desc_.type());
+      PADDLE_THROW("Tensor type=%d does not support LoDLevel",
+                   desc_.tensor_array().lod_level());
   }
 }
@@ -56,7 +57,8 @@ int32_t VarDescBind::GetLodLevel() const {
     case VarDesc::LOD_TENSOR_ARRAY:
       return desc_.tensor_array().lod_level();
     default:
-      PADDLE_THROW("Tensor type=%d does not support LoDLevel", desc_.type());
+      PADDLE_THROW("Tensor type=%d does not support LoDLevel",
+                   desc_.tensor_array().lod_level());
   }
 }
...
@@ -45,6 +45,7 @@ if(WITH_GPU)
     add_simple_unittest(BlockExpandOpTest)
     add_simple_unittest(CropOpTest)
     add_simple_unittest(SwitchOpTest)
+    add_simple_unittest(ScaleSubRegionOpTest)
 endif()
 
 add_simple_unittest(Im2ColTest)
...
@@ -110,6 +110,7 @@ public:
         function2_(FunctionBase::funcRegistrar_.createByType(name2)) {
     function1_->init(config);
     function2_->init(config);
+    initArgsCallback_ = nullptr;
   }
 
   ~Compare2Function() {}
@@ -170,6 +171,10 @@ public:
                                           *seq2_));
   }
 
+  void registerInitCallback(std::function<void(BufferArg&, size_t)> callback) {
+    initArgsCallback_ = callback;
+  }
+
   // output only needs to contain the shape, not the data.
   void addOutputs(const BufferArg& output, ArgType argType = ASSIGN_TO) {
     size_t size =
@@ -340,6 +345,10 @@ protected:
         initArg(*func1Inputs_[i]);
       }
 
+      if (initArgsCallback_ != nullptr) {
+        initArgsCallback_(*func1Inputs_[i], i);
+      }
+
       copyArg_(*func1Inputs_[i], *func2Inputs_[i]);
     }
   }
@@ -386,6 +395,7 @@ protected:
   std::shared_ptr<SequenceIdArg> seq1_;
   std::shared_ptr<SequenceIdArg> seq2_;
   test::CopyArgument<DType1, DType2> copyArg_;
+  std::function<void(BufferArg&, size_t)> initArgsCallback_;
 };
 
 class CpuGpuFuncCompare
...
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "ScaleSubRegionOp.h"
#include "paddle/function/TensorShape.h"
namespace paddle {
template <>
void ScaleSubRegion<DEVICE_TYPE_CPU>(real* outputs,
const real* inputs,
const real* indices,
const TensorShape shape,
const FuncConfig& conf) {
real value = conf.get<real>("value");
int number = shape[0];
int channel = shape[1];
int height = shape[2];
int width = shape[3];
memcpy(outputs, inputs, number * channel * height * width * sizeof(real));
for (int n = 0; n < number; ++n) {
// indices start from 1
int offset = n * 6;
for (int c = indices[offset] - 1; c < indices[offset + 1]; ++c) {
for (int h = indices[offset + 2] - 1; h < indices[offset + 3]; ++h) {
for (int w = indices[offset + 4] - 1; w < indices[offset + 5]; ++w) {
int idx = ((n * channel + c) * height + h) * width + w;
outputs[idx] *= value;
}
}
}
}
}
template <>
void ScaleSubRegionGrad<DEVICE_TYPE_CPU>(const real* inGrad,
real* outGrad,
const real* indices,
const TensorShape shape,
const FuncConfig& conf) {
real value = conf.get<real>("value");
int number = shape[0];
int channel = shape[1];
int height = shape[2];
int width = shape[3];
for (int n = 0; n < number; ++n) {
for (int c = 0; c < channel; ++c) {
for (int h = 0; h < height; ++h) {
for (int w = 0; w < width; ++w) {
int idx = ((n * channel + c) * height + h) * width + w;
int offset = n * 6;
if (c >= (indices[offset] - 1) && c <= (indices[offset + 1] - 1) &&
h >= (indices[offset + 2] - 1) &&
h <= (indices[offset + 3] - 1) &&
w >= (indices[offset + 4] - 1) &&
w <= (indices[offset + 5] - 1)) {
outGrad[idx] += inGrad[idx] * value;
} else {
outGrad[idx] += inGrad[idx];
}
}
}
}
}
}
/**
 * \brief For each instance, ScaleSubRegion can be used to multiply the values
 *        in a specified continuous sub-region by a given value. By providing
 *        start and end indices for C/H/W, you can specify the location and
 *        shape of the region.
 *
 * Argument in this Function:
 * \param inputs A 4-D tensor with shape [N, C, H, W], only one input.
 * \param indices A 2-D tensor with shape [N, 6], indicates the sub region.
 * \param outputs A 4-D tensor with same shape as inputs, output value.
 */
template <DeviceType Device>
class ScaleSubRegionFunc : public FunctionBase {
public:
void init(const FuncConfig& config) override { conf_ = config; }
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
CHECK_EQ(2UL, inputs.size());
CHECK_EQ(1UL, outputs.size());
CHECK_EQ(outputs[0].getArgType(), ASSIGN_TO);
TensorShape shape = inputs[0].shape();
ScaleSubRegion<Device>(outputs[0].data<real>(),
inputs[0].data<real>(),
inputs[1].data<real>(),
shape,
conf_);
}
private:
FuncConfig conf_;
};
/**
* \brief The backward propagation of ScaleSubRegion Function.
*
* Argument in this Function:
* \param inputs A 4-D tensor with shape [N, C, H, W], output gradient.
* \param indices A 2-D tensor with shape [N, 6], indicates the sub region.
* \param outputs A 4-D tensor with shape [N, C, H, W], gradient of input value.
*/
template <DeviceType Device>
class ScaleSubRegionGradFunc : public FunctionBase {
public:
void init(const FuncConfig& config) override { conf_ = config; }
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
CHECK_EQ(2UL, inputs.size());
CHECK_EQ(1UL, outputs.size());
CHECK_EQ(outputs[0].getArgType(), ADD_TO);
TensorShape shape = inputs[0].shape();
ScaleSubRegionGrad<Device>(inputs[0].data<real>(),
outputs[0].data<real>(),
inputs[1].data<real>(),
shape,
conf_);
}
private:
FuncConfig conf_;
};
REGISTER_TYPED_FUNC(ScaleSubRegion, CPU, ScaleSubRegionFunc);
REGISTER_TYPED_FUNC(ScaleSubRegionGrad, CPU, ScaleSubRegionGradFunc);
#ifdef PADDLE_WITH_CUDA
REGISTER_TYPED_FUNC(ScaleSubRegion, GPU, ScaleSubRegionFunc);
REGISTER_TYPED_FUNC(ScaleSubRegionGrad, GPU, ScaleSubRegionGradFunc);
#endif
} // namespace paddle
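The indices both kernels read are one-based and inclusive at both ends for each of C/H/W. A standalone sketch of the same convention on a single CHW sample (plain C++, no Paddle types; all names are illustrative):

    #include <cstring>

    // Mirror of the CPU kernel's region loop for one sample.
    // idx = {cStart, cEnd, hStart, hEnd, wStart, wEnd}, 1-based, inclusive.
    void scaleRegion(float* out, const float* in, const float* idx,
                     int C, int H, int W, float value) {
      std::memcpy(out, in, sizeof(float) * C * H * W);
      for (int c = int(idx[0]) - 1; c < int(idx[1]); ++c)
        for (int h = int(idx[2]) - 1; h < int(idx[3]); ++h)
          for (int w = int(idx[4]) - 1; w < int(idx[5]); ++w)
            out[(c * H + h) * W + w] *= value;  // scale only inside region
    }

For example, indices {1, 2, 1, 2, 1, 2} scale the 2x2x2 corner of the sample while everything else is copied through unchanged.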
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "Function.h"
namespace paddle {
/**
 * \brief Function to multiply the values in a specified continuous
 *        sub-region by a given value. Indices must be provided to indicate
 *        the location and shape of the region, and the multiplier is passed
 *        in through the configure variable.
 *
 * \param[out] outputs Output value.
 * \param[in]  inputs  Input data which contains NCHW information.
 * \param[in]  indices Indices data to indicate the sub-region.
 * \param[in]  shape   Tensor shape of input value.
 * \param[in]  conf    Configure variable which contains the multiplied value.
 */
template <DeviceType Device>
void ScaleSubRegion(real* outputs,
const real* inputs,
const real* indices,
const TensorShape shape,
const FuncConfig& conf);
/**
 * \brief Backward propagation function of ScaleSubRegion.
 *
 * \param[in]  inGrad  Gradient of this layer's output (from the next layer).
 * \param[out] outGrad Gradient with respect to the input value, accumulated.
 * \param[in]  indices Indices data.
 * \param[in]  shape   The shape of the input tensor.
 * \param[in]  conf    Configure variable.
 */
template <DeviceType Device>
void ScaleSubRegionGrad(const real* inGrad,
real* outGrad,
const real* indices,
const TensorShape shape,
const FuncConfig& conf);
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "ScaleSubRegionOp.h"
#include "hl_base.h"
namespace paddle {
__global__ void KeScaleSubRegion(real* outputs,
const real* inputs,
const real* indices,
real value,
int channel,
int height,
int width,
int nthreads) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int w = idx % width;
const int h = (idx / width) % height;
const int c = (idx / width / height) % channel;
const int n = idx / width / height / channel;
const int offset = n * 6;
if (c >= (indices[offset] - 1) && c <= (indices[offset + 1] - 1) &&
h >= (indices[offset + 2] - 1) && h <= (indices[offset + 3] - 1) &&
w >= (indices[offset + 4] - 1) && w <= (indices[offset + 5] - 1)) {
outputs[idx] = inputs[idx] * value;
} else {
outputs[idx] = inputs[idx];
}
}
}
template <>
void ScaleSubRegion<DEVICE_TYPE_GPU>(real* outputs,
const real* inputs,
const real* indices,
const TensorShape shape,
const FuncConfig& conf) {
real value = conf.get<real>("value");
int number = shape[0];
int channel = shape[1];
int height = shape[2];
int width = shape[3];
size_t nth = number * channel * height * width;
int blockSize = 1024;
int gridSize = (nth + blockSize - 1) / blockSize;
KeScaleSubRegion<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>(
outputs, inputs, indices, value, channel, height, width, nth);
CHECK_SYNC("ScaleSubRegion");
}
__global__ void KeScaleSubRegionDiff(const real* inGrad,
real* outGrad,
const real* indices,
real value,
int channel,
int height,
int width,
int nthreads) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int w = idx % width;
const int h = (idx / width) % height;
const int c = (idx / width / height) % channel;
const int n = idx / width / height / channel;
const int offset = n * 6;
if (c >= (indices[offset] - 1) && c <= (indices[offset + 1] - 1) &&
h >= (indices[offset + 2] - 1) && h <= (indices[offset + 3] - 1) &&
w >= (indices[offset + 4] - 1) && w <= (indices[offset + 5] - 1)) {
outGrad[idx] += inGrad[idx] * value;
} else {
outGrad[idx] += inGrad[idx];
}
}
}
template <>
void ScaleSubRegionGrad<DEVICE_TYPE_GPU>(const real* inGrad,
real* outGrad,
const real* indices,
const TensorShape shape,
const FuncConfig& conf) {
real value = conf.get<real>("value");
int number = shape[0];
int channel = shape[1];
int height = shape[2];
int width = shape[3];
size_t nth = number * channel * height * width;
int blockSize = 1024;
int gridSize = (nth + blockSize - 1) / blockSize;
KeScaleSubRegionDiff<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>(
inGrad, outGrad, indices, value, channel, height, width, nth);
CHECK_SYNC("ScaleSubRegionGrad");
}
} // namespace paddle
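Both CUDA kernels recover (n, c, h, w) from the flat thread index by successive division and modulo, the inverse of idx = ((n * C + c) * H + h) * W + w. The same decomposition in a plain C++ sketch (illustrative only):

    // Invert idx = ((n * C + c) * H + h) * W + w for an NCHW layout.
    void unflatten(int idx, int C, int H, int W,
                   int* n, int* c, int* h, int* w) {
      *w = idx % W;
      *h = (idx / W) % H;
      *c = (idx / (W * H)) % C;
      *n = idx / (W * H * C);
    }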
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include "FunctionTest.h"
namespace paddle {
TEST(ScaleSubRegion, real) {
for (size_t numSamples : {5, 32}) {
for (size_t channels : {5, 32}) {
for (size_t imgSizeH : {5, 33}) {
for (size_t imgSizeW : {5, 32}) {
for (real value : {-0.5, 0.0, 0.5}) {
for (bool firstHalf : {false, true}) {
VLOG(3) << " numSamples=" << numSamples
<< " channels=" << channels << " imgSizeH=" << imgSizeH
<< " imgSizeW=" << imgSizeW;
for (bool testGrad : {false, true}) {
CpuGpuFuncCompare compare(
testGrad ? "ScaleSubRegionGrad" : "ScaleSubRegion",
FuncConfig().set<real>("value", value));
TensorShape shape{numSamples, channels, imgSizeH, imgSizeW};
TensorShape indicesShape{numSamples, 6};
compare.addInputs(BufferArg(VALUE_TYPE_FLOAT, shape));
compare.addInputs(BufferArg(VALUE_TYPE_FLOAT, indicesShape));
compare.registerInitCallback([=](BufferArg& arg, size_t index) {
if (index == 1) {
real* data = (real*)arg.data();
for (size_t i = 0; i < numSamples; ++i) {
size_t offset = i * 6;
data[offset] = firstHalf ? 1 : channels / 2;
data[offset + 1] = firstHalf ? channels / 2 : channels;
data[offset + 2] = firstHalf ? 1 : imgSizeH / 2;
data[offset + 3] = firstHalf ? imgSizeH / 2 : imgSizeH;
data[offset + 4] = firstHalf ? 1 : imgSizeW / 2;
data[offset + 5] = firstHalf ? imgSizeW / 2 : imgSizeW;
}
}
});
compare.addOutputs(
BufferArg(
VALUE_TYPE_FLOAT, shape, testGrad ? ADD_TO : ASSIGN_TO),
testGrad ? ADD_TO : ASSIGN_TO);
compare.run();
}
}
}
}
}
}
}
}
} // namespace paddle
@@ -62,16 +62,14 @@ void MKLDNNAddtoLayer::resetFwd(std::vector<primitive>& pipeline,
                                 MKLDNNMatrixPtr& wgt,
                                 MKLDNNMatrixPtr& bias,
                                 MKLDNNMatrixPtr& out) {
-  if (biases_) {
-    LOG(FATAL) << "not implemented yet";
-  }
-  resetFwdBuffers(inVals_, out);
+  resetFwdBuffers(inVals_, bias, out);
   in = inVals_[0];
 
   std::shared_ptr<sum::primitive_desc> fwdPD;
-  resetFwdPD(fwdPD, inVals_, out);
+  std::shared_ptr<sum::primitive_desc> biasPD;
+  resetFwdPD(fwdPD, biasPD, inVals_, bias, out);
 
-  resetFwdPipeline(pipeline, fwdPD, inVals_, out);
+  resetFwdPipeline(pipeline, fwdPD, biasPD, inVals_, bias, out);
 }
 
 void MKLDNNAddtoLayer::resetBwd(std::vector<primitive>& pipeline,
@@ -79,7 +77,7 @@ void MKLDNNAddtoLayer::resetBwd(std::vector<primitive>& pipeline,
                                 MKLDNNMatrixPtr& wgt,
                                 MKLDNNMatrixPtr& bias,
                                 MKLDNNMatrixPtr& out) {
-  resetBwdBuffers(inGrads_, out);
+  resetBwdBuffers(inGrads_, bias, out);
   in = inGrads_[0];
 
   // backward only need share output grad to input grad
@@ -89,6 +87,20 @@ void MKLDNNAddtoLayer::resetBwd(std::vector<primitive>& pipeline,
       inputLayers_[i]->getOutputGrad()->setData(inGrads_[i]->getData());
     }
   }
+
+  // backward bias
+  bwdBias_ = nullptr;
+  if (bias) {
+    std::vector<float> scales(bs_, 1.0);
+    std::vector<memory::primitive_desc> srcPDs(bs_, bias->getPrimitiveDesc());
+    auto biasPD = sum::primitive_desc(bias->getMemoryDesc(), scales, srcPDs);
+    std::vector<primitive::at> srcs;
+    for (size_t i = 0; i < grads_.size(); ++i) {
+      srcs.push_back(*(grads_[i]));
+    }
+    bwdBias_.reset(new sum(biasPD, srcs, *bias));
+    pipeline.push_back(*bwdBias_);
+  }
 }
 
 void MKLDNNAddtoLayer::updateWeights(const UpdateCallback& callback) {
@@ -97,7 +109,25 @@ void MKLDNNAddtoLayer::updateWeights(const UpdateCallback& callback) {
   }
 }
 
+void MKLDNNAddtoLayer::prepareBias(MKLDNNMatrixPtr& bias,
+                                   const MatrixPtr& biasMat,
+                                   const MKLDNNMatrixPtr& out,
+                                   std::vector<MKLDNNMatrixPtr>& outs) {
+  auto pd = MKLDNNMatrix::createPrimitiveDesc(
+      {(int)layerSize_}, memory::format::x, engine_);
+  bias = MKLDNNMatrix::create(pd, biasMat);
+  outs.clear();
+  real* data = out->getData();
+  CHECK_EQ(bs_ * layerSize_, out->getElementCnt());
+  for (int i = 0; i < bs_; ++i) {
+    MatrixPtr tmp =
+        Matrix::create(data + i * layerSize_, 1, layerSize_, false, false);
+    outs.push_back(MKLDNNMatrix::create(bias->getPrimitiveDesc(), tmp));
+  }
+}
+
 void MKLDNNAddtoLayer::resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+                                       MKLDNNMatrixPtr& bias,
                                        MKLDNNMatrixPtr& out) {
   inputs.resize(inputLayers_.size());
   for (size_t i = 0; i < inputs.size(); i++) {
@@ -110,12 +140,20 @@ void MKLDNNAddtoLayer::resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
   }
 
   resetOutValue(out, inputs[0]->getPrimitiveDesc());
+
+  if (biases_ && biases_->getW()) {
+    prepareBias(bias, biases_->getW(), out, vals_);
+  } else {
+    bias = nullptr;
+  }
 }
 
 void MKLDNNAddtoLayer::resetFwdPD(std::shared_ptr<sum::primitive_desc>& pd,
+                                  std::shared_ptr<sum::primitive_desc>& biasPD,
                                  std::vector<MKLDNNMatrixPtr>& inputs,
+                                  MKLDNNMatrixPtr bias,
                                  MKLDNNMatrixPtr out) {
-  std::vector<double> scales(inputs.size(), 1.0);
+  std::vector<float> scales(inputs.size(), 1.0);
   std::vector<memory::primitive_desc> srcPDs;
   for (size_t i = 0; i < inputs.size(); i++) {
     srcPDs.push_back(inputs[i]->getPrimitiveDesc());
@@ -123,12 +161,23 @@ void MKLDNNAddtoLayer::resetFwdPD(std::shared_ptr<sum::primitive_desc>& pd,
   CHECK(out);
   pd.reset(new sum::primitive_desc(out->getMemoryDesc(), scales, srcPDs));
   CHECK_PRIMITIVE_DESC_EQ(out, pd->dst_primitive_desc());
+
+  biasPD = nullptr;
+  if (bias) {
+    std::vector<float> scales(2, 1.0);
+    std::vector<memory::primitive_desc> srcPDs(2, bias->getPrimitiveDesc());
+    biasPD.reset(
+        new sum::primitive_desc(bias->getMemoryDesc(), scales, srcPDs));
+    CHECK_PRIMITIVE_DESC_EQ(bias, biasPD->dst_primitive_desc());
+  }
 }
 
 void MKLDNNAddtoLayer::resetFwdPipeline(
     std::vector<primitive>& pipeline,
     std::shared_ptr<sum::primitive_desc>& pd,
+    std::shared_ptr<sum::primitive_desc>& biasPD,
    std::vector<MKLDNNMatrixPtr>& inputs,
+    MKLDNNMatrixPtr& bias,
     MKLDNNMatrixPtr& out) {
   std::vector<primitive::at> srcs;
   for (size_t i = 0; i < inputs.size(); i++) {
@@ -136,9 +185,23 @@ void MKLDNNAddtoLayer::resetFwdPipeline(
   }
   fwd_.reset(new sum(*pd, srcs, *out));
   pipeline.push_back(*fwd_);
+
+  fwdBias_.clear();
+  if (biasPD == nullptr || bias == nullptr) {
+    return;
+  }
+  fwdBias_.resize(vals_.size());
+  for (size_t i = 0; i < vals_.size(); ++i) {
+    std::vector<primitive::at> srcs;
+    srcs.push_back(*(vals_[i]));
+    srcs.push_back(*bias);
+    fwdBias_[i].reset(new sum(*biasPD, srcs, *vals_[i]));
+    pipeline.push_back(*fwdBias_[i]);
+  }
 }
 
 void MKLDNNAddtoLayer::resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+                                       MKLDNNMatrixPtr& bias,
                                        MKLDNNMatrixPtr& out) {
   CHECK(outVal_);
   resetOutGrad(out, outVal_->getPrimitiveDesc());
@@ -149,6 +212,12 @@ void MKLDNNAddtoLayer::resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
     resetInGrad(inputs[i], inVal_->getPrimitiveDesc(), i);
     CHECK_PRIMITIVE_DESC_EQ(inputs[i], out->getPrimitiveDesc());
   }
+
+  if (biases_ && biases_->getWGrad()) {
+    prepareBias(bias, biases_->getWGrad(), out, grads_);
+  } else {
+    bias = nullptr;
+  }
 }
 
 }  // namespace paddle
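Since MKL-DNN's sum primitive does not broadcast, prepareBias above views the bs_ x layerSize_ output as bs_ separate 1 x layerSize_ slices, and resetFwdPipeline sums the bias into each slice. The arithmetic being scheduled, in a plain C++ sketch (illustrative, outside MKL-DNN):

    // out: bs x layerSize, row-major; bias: 1 x layerSize.
    // Equivalent of the per-slice fwdBias_ sum primitives above.
    void addBiasPerRow(float* out, const float* bias, int bs, int layerSize) {
      for (int i = 0; i < bs; ++i) {
        float* row = out + i * layerSize;  // the i-th 1 x layerSize view
        for (int j = 0; j < layerSize; ++j) {
          row[j] += bias[j];
        }
      }
    }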
@@ -32,9 +32,15 @@ protected:
   // layer size == ic * ih * iw == oc * oh * ow, and can not be changed
   size_t layerSize_;
 
-  // TODO(TJ): this part has not been optimized by MKL-DNN
   std::unique_ptr<Weight> biases_;
 
+  // buffers for adding bias
+  std::vector<MKLDNNMatrixPtr> vals_;
+  std::vector<MKLDNNMatrixPtr> grads_;
+  // primitives for adding bias
+  std::vector<std::shared_ptr<mkldnn::primitive>> fwdBias_;
+  std::shared_ptr<mkldnn::primitive> bwdBias_;
+
 public:
   explicit MKLDNNAddtoLayer(const LayerConfig& config) : MKLDNNLayer(config) {}
 
@@ -91,20 +97,34 @@ protected:
    * reset pipeline.
    */
   void resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+                       MKLDNNMatrixPtr& bias,
                        MKLDNNMatrixPtr& out);
   void resetFwdPD(std::shared_ptr<mkldnn::sum::primitive_desc>& pd,
+                  std::shared_ptr<mkldnn::sum::primitive_desc>& biasPD,
                  std::vector<MKLDNNMatrixPtr>& inputs,
+                  MKLDNNMatrixPtr bias,
                  MKLDNNMatrixPtr out);
   void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
                         std::shared_ptr<mkldnn::sum::primitive_desc>& pd,
+                        std::shared_ptr<mkldnn::sum::primitive_desc>& biasPD,
                         std::vector<MKLDNNMatrixPtr>& inputs,
+                        MKLDNNMatrixPtr& bias,
                         MKLDNNMatrixPtr& out);
 
   /**
    * Backward functions: reset buffers(inputs, output, bias)
    */
   void resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
+                       MKLDNNMatrixPtr& bias,
                        MKLDNNMatrixPtr& out);
+
+  /**
+   * prepare for bias
+   */
+  void prepareBias(MKLDNNMatrixPtr& bias,
+                   const MatrixPtr& biasMat,
+                   const MKLDNNMatrixPtr& out,
+                   std::vector<MKLDNNMatrixPtr>& outs);
 };
 
 }  // namespace paddle
@@ -119,7 +119,7 @@ void MKLDNNBatchNormLayer::reshape(
     int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) {
   reshapeInput(bs, ih, iw);
   oh = ih;
-  ow = ow;
+  ow = iw;
   // ic_ and oc can not be changed
   CHECK_EQ(inputElemenCnt_ / bs / ih / iw, (size_t)ic)
       << "Input channel can not be changed";
...
@@ -60,18 +60,16 @@ void MKLDNNFcLayer::convertWeightsFromPaddle() {
   }
 
   CHECK(wgtVal_) << "should have been initialized";
-  bool hasNoSpatial_ = ih_ == 1 && iw_ == 1;
   auto targetDim = wgtVal_->getDims();
-  auto srcFmt = hasNoSpatial_ ? format::io : format::ihwo;
+  auto srcFmt = targetDim.size() == 2 ? format::io : format::ihwo;
   wgtVal_->reorderDataFrom(wgtVal_, srcFmt, targetDim);
   hasInitedWgt_ = true;
 }
 
 void MKLDNNFcLayer::convertWeightsToPaddle() {
   CHECK(wgtVal_) << "should have been initialized";
-  bool hasNoSpatial_ = ih_ == 1 && iw_ == 1;
   auto targetDim = wgtVal_->getDims();
-  auto dstFmt = hasNoSpatial_ ? format::io : format::ihwo;
+  auto dstFmt = targetDim.size() == 2 ? format::io : format::ihwo;
   wgtVal_->reorderDataTo(wgtVal_, dstFmt, targetDim);
 }
...
@@ -181,21 +181,17 @@ void MKLDNNLayer::resetInValue(
     auto extPD = MKLDNNMatrix::createPrimitiveDesc(
         {bs_, ic_, ih_, iw_}, format::nchw, engine_);
     const MatrixPtr& inMat = inputLayers_[inputIdx]->getOutputValue();
-    in = std::dynamic_pointer_cast<MKLDNNMatrix>(inMat);
-    CHECK_EQ(inputIsOnlyMKLDNN(), in != nullptr);
-    if (in == nullptr || in->getFormat() == format::nc) {
-      in = MKLDNNMatrix::create(extPD, inMat);
-    }
-    extInVal_ = isPaddleFormat(in->getFormat()) ? in : nullptr;
-    if (in->getFormat() == format::nc) {
-      CHECK(ih_ == 1 && iw_ == 1);
-    }
+    extInVal_ = std::dynamic_pointer_cast<MKLDNNMatrix>(inMat);
+    CHECK_EQ(inputIsOnlyMKLDNN(), extInVal_ != nullptr);
+    if (extInVal_ == nullptr || extInVal_->getFormat() == format::nc) {
+      extInVal_ = MKLDNNMatrix::create(extPD, inMat);
+    }
+    in = extInVal_;
     if (nullptr == intPD || in->getPrimitiveDesc() == *intPD) {
       return;
     }
     // need create reorder
     in = MKLDNNMatrix::create(*intPD);
+    extInVal_ = extInVal_ ? extInVal_ : MKLDNNMatrix::create(extPD, inMat);
     cvtInVal_ = MKLDNNMatrix::createReorder(extInVal_, in);
     CHECK(cvtInVal_) << "should not be empty";
 }
@@ -291,7 +287,7 @@ void MKLDNNLayer::resetMergeGrad(MKLDNNMatrixPtr& out) {
     return;
   }
   CHECK(out) << "should have reset internal output grad";
-  std::vector<double> scales(outputMap_.size(), 1.0);
+  std::vector<float> scales(outputMap_.size(), 1.0);
   std::vector<memory::primitive_desc> srcPDs;
   std::vector<primitive::at> srcs;
   for (auto it = outputMap_.begin(); it != outputMap_.end(); ++it) {
...
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "ROIPoolLayer.h"
namespace paddle {
REGISTER_LAYER(roi_pool, ROIPoolLayer);
bool ROIPoolLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
Layer::init(layerMap, parameterMap);
const ROIPoolConfig& layerConf = config_.inputs(0).roi_pool_conf();
pooledWidth_ = layerConf.pooled_width();
pooledHeight_ = layerConf.pooled_height();
spatialScale_ = layerConf.spatial_scale();
return true;
}
void ROIPoolLayer::forward(PassType passType) {
Layer::forward(passType);
const ROIPoolConfig& layerConf = config_.inputs(0).roi_pool_conf();
height_ = getInput(0).getFrameHeight();
if (!height_) height_ = layerConf.height();
width_ = getInput(0).getFrameWidth();
if (!width_) width_ = layerConf.width();
channels_ = getInputValue(0)->getWidth() / width_ / height_;
size_t batchSize = getInput(0).getBatchSize();
size_t numROIs = getInput(1).getBatchSize();
MatrixPtr dataValue = getInputValue(0);
MatrixPtr roiValue = getInputValue(1);
resetOutput(numROIs, channels_ * pooledHeight_ * pooledWidth_);
MatrixPtr outputValue = getOutputValue();
if (useGpu_) { // TODO(guosheng): implement on GPU later
MatrixPtr dataCpuBuffer;
Matrix::resizeOrCreate(dataCpuBuffer,
dataValue->getHeight(),
dataValue->getWidth(),
false,
false);
MatrixPtr roiCpuBuffer;
Matrix::resizeOrCreate(roiCpuBuffer,
roiValue->getHeight(),
roiValue->getWidth(),
false,
false);
dataCpuBuffer->copyFrom(*dataValue);
roiCpuBuffer->copyFrom(*roiValue);
dataValue = dataCpuBuffer;
roiValue = roiCpuBuffer;
MatrixPtr outputCpuBuffer;
Matrix::resizeOrCreate(outputCpuBuffer,
outputValue->getHeight(),
outputValue->getWidth(),
false,
false);
outputCpuBuffer->copyFrom(*outputValue);
outputValue = outputCpuBuffer;
}
real* bottomData = dataValue->getData();
size_t batchOffset = dataValue->getWidth();
size_t channelOffset = height_ * width_;
real* bottomROIs = roiValue->getData();
size_t roiOffset = roiValue->getWidth();
size_t poolChannelOffset = pooledHeight_ * pooledWidth_;
real* outputData = outputValue->getData();
Matrix::resizeOrCreate(maxIdxs_,
numROIs,
channels_ * pooledHeight_ * pooledWidth_,
false,
false);
real* argmaxData = maxIdxs_->getData();
for (size_t n = 0; n < numROIs; ++n) {
// the first five elements of each RoI should be:
// batch_idx, roi_x_start, roi_y_start, roi_x_end, roi_y_end
size_t roiBatchIdx = bottomROIs[0];
size_t roiStartW = round(bottomROIs[1] * spatialScale_);
size_t roiStartH = round(bottomROIs[2] * spatialScale_);
size_t roiEndW = round(bottomROIs[3] * spatialScale_);
size_t roiEndH = round(bottomROIs[4] * spatialScale_);
CHECK_GE(roiBatchIdx, 0);
CHECK_LT(roiBatchIdx, batchSize);
size_t roiHeight = std::max(roiEndH - roiStartH + 1, 1UL);
size_t roiWidth = std::max(roiEndW - roiStartW + 1, 1UL);
real binSizeH =
static_cast<real>(roiHeight) / static_cast<real>(pooledHeight_);
real binSizeW =
static_cast<real>(roiWidth) / static_cast<real>(pooledWidth_);
real* batchData = bottomData + batchOffset * roiBatchIdx;
for (size_t c = 0; c < channels_; ++c) {
for (size_t ph = 0; ph < pooledHeight_; ++ph) {
for (size_t pw = 0; pw < pooledWidth_; ++pw) {
size_t hstart = static_cast<size_t>(std::floor(ph * binSizeH));
size_t wstart = static_cast<size_t>(std::floor(pw * binSizeW));
size_t hend = static_cast<size_t>(std::ceil((ph + 1) * binSizeH));
size_t wend = static_cast<size_t>(std::ceil((pw + 1) * binSizeW));
hstart = std::min(std::max(hstart + roiStartH, 0UL), height_);
wstart = std::min(std::max(wstart + roiStartW, 0UL), width_);
hend = std::min(std::max(hend + roiStartH, 0UL), height_);
wend = std::min(std::max(wend + roiStartW, 0UL), width_);
bool isEmpty = (hend <= hstart) || (wend <= wstart);
size_t poolIndex = ph * pooledWidth_ + pw;
if (isEmpty) {
outputData[poolIndex] = 0;
argmaxData[poolIndex] = -1;
}
for (size_t h = hstart; h < hend; ++h) {
for (size_t w = wstart; w < wend; ++w) {
size_t index = h * width_ + w;
if (batchData[index] > outputData[poolIndex]) {
outputData[poolIndex] = batchData[index];
argmaxData[poolIndex] = index;
}
}
}
}
}
batchData += channelOffset;
outputData += poolChannelOffset;
argmaxData += poolChannelOffset;
}
bottomROIs += roiOffset;
}
if (useGpu_) {
getOutputValue()->copyFrom(*outputValue);
}
}
void ROIPoolLayer::backward(const UpdateCallback& callback) {
MatrixPtr inGradValue = getInputGrad(0);
MatrixPtr outGradValue = getOutputGrad();
MatrixPtr roiValue = getInputValue(1);
if (useGpu_) {
MatrixPtr inGradCpuBuffer;
Matrix::resizeOrCreate(inGradCpuBuffer,
inGradValue->getHeight(),
inGradValue->getWidth(),
false,
false);
MatrixPtr outGradCpuBuffer;
Matrix::resizeOrCreate(outGradCpuBuffer,
outGradValue->getHeight(),
outGradValue->getWidth(),
false,
false);
MatrixPtr roiCpuBuffer;
Matrix::resizeOrCreate(roiCpuBuffer,
roiValue->getHeight(),
roiValue->getWidth(),
false,
false);
inGradCpuBuffer->copyFrom(*inGradValue);
outGradCpuBuffer->copyFrom(*outGradValue);
roiCpuBuffer->copyFrom(*roiValue);
inGradValue = inGradCpuBuffer;
outGradValue = outGradCpuBuffer;
roiValue = roiCpuBuffer;
}
real* bottomROIs = roiValue->getData();
size_t numROIs = getInput(1).getBatchSize();
size_t roiOffset = getInputValue(1)->getWidth();
real* inDiffData = inGradValue->getData();
size_t batchOffset = getInputValue(0)->getWidth();
size_t channelOffset = height_ * width_;
real* outDiffData = outGradValue->getData();
size_t poolChannelOffset = pooledHeight_ * pooledWidth_;
real* argmaxData = maxIdxs_->getData();
for (size_t n = 0; n < numROIs; ++n) {
size_t roiBatchIdx = bottomROIs[0];
real* batchDiffData = inDiffData + batchOffset * roiBatchIdx;
for (size_t c = 0; c < channels_; ++c) {
for (size_t ph = 0; ph < pooledHeight_; ++ph) {
for (size_t pw = 0; pw < pooledWidth_; ++pw) {
size_t poolIndex = ph * pooledWidth_ + pw;
if (argmaxData[poolIndex] >= 0) {  // index 0 is a valid argmax position
size_t index = static_cast<size_t>(argmaxData[poolIndex]);
batchDiffData[index] += outDiffData[poolIndex];
}
}
}
batchDiffData += channelOffset;
outDiffData += poolChannelOffset;
argmaxData += poolChannelOffset;
}
bottomROIs += roiOffset;
}
if (useGpu_) {
getInputGrad(0)->copyFrom(*inGradValue);
}
}
} // namespace paddle
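The core of the forward pass above is mapping each pooled cell (ph, pw) back to a clamped window of the scaled ROI on the feature map. The bin arithmetic in isolation (plain C++ sketch; names are illustrative):

    #include <algorithm>
    #include <cmath>
    #include <cstddef>

    // Window [hs, he) x [ws, we) on the feature map for pooled cell (ph, pw),
    // given the ROI's top-left corner and the per-cell bin sizes.
    void roiBin(size_t ph, size_t pw, double binSizeH, double binSizeW,
                size_t roiStartH, size_t roiStartW, size_t height, size_t width,
                size_t* hs, size_t* he, size_t* ws, size_t* we) {
      *hs = std::min(size_t(std::floor(ph * binSizeH)) + roiStartH, height);
      *ws = std::min(size_t(std::floor(pw * binSizeW)) + roiStartW, width);
      *he = std::min(size_t(std::ceil((ph + 1) * binSizeH)) + roiStartH, height);
      *we = std::min(size_t(std::ceil((pw + 1) * binSizeW)) + roiStartW, width);
      // An empty window (*he <= *hs || *we <= *ws) pools to 0 with argmax -1.
    }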
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "Layer.h"
namespace paddle {
/**
 * A layer used by Fast R-CNN to extract feature maps of ROIs from the last
 * feature map.
 * - Input: This layer needs two input layers: the first is a convolution
 *          layer, and the second contains the ROI data, which is the output
 *          of the ProposalLayer in Faster R-CNN and feeds the layers that
 *          generate bbox location offsets and classification confidences.
 * - Output: The ROIs' feature map.
 * Reference:
 *    Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun.
 *    Faster R-CNN: Towards Real-Time Object Detection with Region Proposal
 *    Networks
 */
class ROIPoolLayer : public Layer {
protected:
size_t channels_;
size_t width_;
size_t height_;
size_t pooledWidth_;
size_t pooledHeight_;
real spatialScale_;
// Since there is no int matrix, use a real matrix instead.
MatrixPtr maxIdxs_;
public:
explicit ROIPoolLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
};
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "ScaleSubRegionLayer.h"
#include "paddle/utils/Stat.h"
namespace paddle {
REGISTER_LAYER(scale_sub_region, ScaleSubRegionLayer);
bool ScaleSubRegionLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
Layer::init(layerMap, parameterMap);
CHECK_EQ(static_cast<int>(inputLayers_.size()), 2);
auto& conf = config_.inputs(0).scale_sub_region_conf();
value_ = conf.value();
createFunction(forward_, "ScaleSubRegion", FuncConfig().set("value", value_));
createFunction(
backward_, "ScaleSubRegionGrad", FuncConfig().set("value", value_));
return true;
}
void ScaleSubRegionLayer::forward(PassType passType) {
Layer::forward(passType);
auto in0 = getInput(0);
imgH_ = in0.getFrameHeight();
imgW_ = in0.getFrameWidth();
if (imgH_ == 0 || imgW_ == 0) {
auto& conf = config_.inputs(0).scale_sub_region_conf();
imgH_ = conf.image_conf().img_size_y();
imgW_ = conf.image_conf().img_size();
}
MatrixPtr imgV = in0.value;
size_t batchSize = imgV->getHeight();
size_t spatialSize = imgH_ * imgW_;
channelsNum_ = imgV->getWidth() / spatialSize;
shape_ = TensorShape({batchSize, channelsNum_, imgH_, imgW_});
resetOutput(batchSize, imgV->getWidth());
auto& out = getOutput();
out.setFrameHeight(imgH_);
out.setFrameWidth(imgW_);
MatrixPtr indicesV = getInputValue(1);
indicesShape_ = TensorShape({batchSize, 6});
REGISTER_TIMER_INFO("ScaleSubRegionForward", getName().c_str());
BufferArgs inArgs;
BufferArgs outArgs;
inArgs.addArg(*imgV, shape_);
inArgs.addArg(*indicesV, indicesShape_);
outArgs.addArg(*out.value, shape_, ASSIGN_TO);
forward_[0]->calc(inArgs, outArgs);
}
void ScaleSubRegionLayer::backward(const UpdateCallback& callback) {
REGISTER_TIMER_INFO("ScaleSubRegionBackward", getName().c_str());
BufferArgs inArgs;
BufferArgs outArgs;
inArgs.addArg(*getOutputGrad(), shape_);
inArgs.addArg(*getInputValue(1), indicesShape_);
outArgs.addArg(*getInputGrad(0), shape_, ADD_TO);
backward_[0]->calc(inArgs, outArgs);
}
} // namespace paddle
@@ -13,25 +13,40 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #pragma once
-#include "paddle/framework/eigen.h"
-#include "paddle/framework/op_registry.h"
+
+#include "Layer.h"
 
 namespace paddle {
-namespace operators {
 
-template <typename Place, typename T>
-class FillConstantOpKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
-    auto* out = ctx.Output<framework::Tensor>("Out");
-    out->mutable_data<T>(ctx.GetPlace());
-    auto value = ctx.Attr<float>("value");
-
-    auto out_eigen = framework::EigenVector<T>::Flatten(*out);
-    auto place = ctx.GetEigenDevice<Place>();
-    out_eigen.device(place) = out_eigen.constant(static_cast<T>(value));
-  }
+/**
+ * \brief For each instance, this layer can be used to multiply the values in
+ *        a specified continuous sub-region by a given value. By providing
+ *        start and end indices for C/H/W, you can specify the location and
+ *        shape of the region.
+ *
+ *        input_0: Input value.
+ *        input_1: Indices value to specify the location and shape of the
+ *                 region.
+ */
+class ScaleSubRegionLayer : public Layer {
+ public:
+  explicit ScaleSubRegionLayer(const LayerConfig& config) : Layer(config) {}
+
+  ~ScaleSubRegionLayer() {}
+
+  bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
+
+  void forward(PassType passType);
+
+  void backward(const UpdateCallback& callback = nullptr);
+
+ protected:
+  TensorShape shape_;
+  TensorShape indicesShape_;
+  size_t imgH_;
+  size_t imgW_;
+  size_t channelsNum_;
+  real value_;
 };
-}  // namespace operators
+
 }  // namespace paddle
@@ -53,7 +53,7 @@ TEST(Operator, dot_mul) {
 TEST(Projection, context) {
   for (auto contextStart : {-5, -3, -1, 0, 3}) {
     for (auto contextLength : {1, 2, 5, 7}) {
-      for (auto batchSize : {1, 2, 5, 20, 50}) {
+      for (auto batchSize : {1, 2, 5, 20}) {
         for (auto trainablePadding : {false, true}) {
           LOG(INFO) << " contextStart=" << contextStart
                     << " contextLength=" << contextLength
@@ -585,14 +585,14 @@ TEST(Layer, maxoutLayer) {
 }
 
 void testFcLayer(string format, size_t nnz) {
   TestConfig config;
-  config.biasSize = 4096;
+  config.biasSize = 1024;
 
   config.layerConfig.set_type("fc");
-  config.layerConfig.set_size(4096);
+  config.layerConfig.set_size(1024);
   config.layerConfig.set_active_type("sigmoid");
   config.layerConfig.set_drop_rate(0.1);
 
   config.inputDefs.push_back(
-      {INPUT_DATA, "layer_0", 8192, nnz, ParaSparse(format)});
+      {INPUT_DATA, "layer_0", 2048, nnz, ParaSparse(format)});
   config.layerConfig.add_inputs();
 
   LOG(INFO) << config.inputDefs[0].sparse.sparse << " "
@@ -609,9 +609,9 @@ void testFcLayer(string format, size_t nnz) {
 }
 
 TEST(Layer, fcLayer) {
-  testFcLayer("", 4096 * 4096 * 2);
-  testFcLayer("csc", 4096 * 40);
-  testFcLayer("csr", 4096 * 40);
+  testFcLayer("", 1024 * 1024 * 2);
+  testFcLayer("csc", 1024 * 10);
+  testFcLayer("csr", 1024 * 10);
 }
 
 TEST(Layer, SelectiveFullyConnectedLayer) {
@@ -1995,7 +1995,7 @@ TEST(Layer, multibox_loss) {
 TEST(Layer, TransLayer) {
   TestConfig config;
   const int height = 128;
-  const int width = 1028;
+  const int width = 256;
   config.layerConfig.set_type("trans");
   config.layerConfig.set_size(width);
@@ -2056,6 +2056,43 @@ TEST(Layer, CropLayer) {
   }
 }
 
+TEST(Layer, roi_pool) {
+  TestConfig config;
+  config.layerConfig.set_type("roi_pool");
+  config.biasSize = 0;
+  LayerInputConfig* input = config.layerConfig.add_inputs();
+  ROIPoolConfig* roiPoolConf = input->mutable_roi_pool_conf();
+  roiPoolConf->set_pooled_width(7);
+  roiPoolConf->set_pooled_height(7);
+  roiPoolConf->set_spatial_scale(1. / 16);
+  roiPoolConf->set_width(14);
+  roiPoolConf->set_height(14);
+
+  const size_t roiNum = 10;
+  const size_t roiDim = 10;
+  const size_t batchSize = 5;
+  MatrixPtr roiValue = Matrix::create(roiNum, roiDim, false, false);
+  roiValue->zeroMem();
+  real* roiData = roiValue->getData();
+  for (size_t i = 0; i < roiNum; ++i) {
+    roiData[i * roiDim + 0] = std::rand() % batchSize;
+    roiData[i * roiDim + 1] = std::rand() % 224;  // xMin
+    roiData[i * roiDim + 2] = std::rand() % 224;  // yMin
+    size_t xMin = static_cast<size_t>(roiData[i * roiDim + 1]);
+    size_t yMin = static_cast<size_t>(roiData[i * roiDim + 2]);
+    roiData[i * roiDim + 3] = xMin + std::rand() % (224 - xMin);  // xMax
+    roiData[i * roiDim + 4] = yMin + std::rand() % (224 - yMin);  // yMax
+  }
+
+  config.inputDefs.push_back({INPUT_DATA, "input", 3 * 14 * 14, {}});
+  config.inputDefs.push_back({INPUT_SELF_DEFINE_DATA, "rois", roiValue, {}});
+  config.layerConfig.add_inputs();
+
+  for (auto useGpu : {false, true}) {
+    testLayerGrad(config, "roi_pool", batchSize, false, useGpu, false);
+  }
+}
+
 TEST(Layer, SwitchOrderLayer) {
   TestConfig config;
   // config input_0
@@ -2358,6 +2395,38 @@ TEST(Layer, ScaleShiftLayer) {
   }
 }
 
+TEST(Layer, ScaleSubRegionLayer) {
+  const size_t batchSize = 64;
+  const size_t size = 4096;
+  TestConfig config;
+  config.layerConfig.set_type("scale_sub_region");
+  config.inputDefs.push_back({INPUT_DATA, "input", size, 0});
+  MatrixPtr indicesV = Matrix::create(batchSize, 6, false, false);
+  auto* data = indicesV->getData();
+  for (size_t i = 0; i < batchSize; ++i) {
+    // each row holds {cStart, cEnd, hStart, hEnd, wStart, wEnd};
+    // the row stride must be 6 to match the batchSize x 6 matrix
+    data[i * 6] = 2;
+    data[i * 6 + 1] = 4;
+    data[i * 6 + 2] = 16;
+    data[i * 6 + 3] = 32;
+    data[i * 6 + 4] = 16;
+    data[i * 6 + 5] = 32;
+  }
+  config.inputDefs.push_back({INPUT_SELF_DEFINE_DATA, "indices", indicesV, {}});
+  LayerInputConfig* input = config.layerConfig.add_inputs();
+  ScaleSubRegionConfig* scaleSubRegionConf =
+      input->mutable_scale_sub_region_conf();
+  ImageConfig* imgConf = scaleSubRegionConf->mutable_image_conf();
+  imgConf->set_img_size(32);
+  imgConf->set_img_size_y(32);
+  imgConf->set_channels(4);
+  scaleSubRegionConf->set_value(2.0);
+  config.layerConfig.add_inputs();
+
+  for (auto useGpu : {false, true}) {
+    testLayerGrad(config, "scale_sub_region", batchSize, false, useGpu, false);
+  }
+}
+
 int main(int argc, char** argv) {
   testing::InitGoogleTest(&argc, argv);
   initMain(argc, argv);
...
@@ -269,6 +269,7 @@ void testBatchNormLayer(const testBatchNormDesc& pm) {
TEST(MKLDNNLayer, BatchNormLayer) {
  testBatchNormLayer({4, 10, 6, 6});
  testBatchNormLayer({16, 32, 16, 16});
testBatchNormLayer({4, 16, 8, 10});
}
struct testImageDesc {
@@ -300,13 +301,8 @@ void testAddtoLayer(const testImageDesc& pm, const size_t nInputs) {
  TestConfig dnnConfig;
  getAddtoConfig(dnnConfig, pm, nInputs);
  dnnConfig.layerConfig.set_type("mkldnn_addto");
  for (auto withBias : {false, true}) {
    dnnConfig.biasSize = withBias ? pm.ic * pm.ih * pm.iw : 0;
    RUN_MKLDNN_TEST_LAYER(dnnConfig, "addto", pm)
  }
}
...
@@ -152,12 +152,7 @@ void MKLDNNMatrix::downSpatial() {
  }
  memory::desc md = memory::desc(dstDims, getDtype(), dstFmt);
  memory::primitive_desc pd = memory::primitive_desc(md, getEngine());
  resetMKLDNNMemory(pd, data_);
}
} // namespace paddle
@@ -145,6 +145,27 @@ public:
    m_.reset();
  }
/**
* override the CpuMatrix::resize
*/
void resize(size_t newHeight, size_t newWidth) override {
m_->resize(newHeight, newWidth);
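    // If the underlying buffer is unchanged and the element count still
    // matches, the existing MKLDNN memory descriptor can be reused as is.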
if (data_ == m_->getData() && elementCnt_ == newHeight * newWidth) {
return;
}
CpuMatrix::setData(data_);
height_ = newHeight;
width_ = newWidth;
elementCnt_ = newHeight * newWidth;
stride_ = width_;
auto pd = mkldnn::memory::primitive_desc(
mkldnn::memory::desc({(int)newHeight, (int)newWidth},
getDtype(),
mkldnn::memory::format::nc),
getEngine());
resetMKLDNNMemory(pd, data_);
}
  /**
   * override Matrix::getData
   * check data before return
@@ -215,6 +236,17 @@ protected:
                    memory::format srcFmt,
                    memory::format dstFmt,
                    memory::dims dm);
/**
   * reset this MKLDNN Memory from a primitive desc
*/
void resetMKLDNNMemory(memory::primitive_desc pd, real* data) {
mkldnn_primitive_t result;
mkldnn::error::wrap_c_api(
mkldnn_primitive_create(&result, pd.get(), nullptr, nullptr),
"could not create a memory primitive");
reset(result);
set_data_handle(data);
}
private:
  // save the CpuMatrixPtr in case the buffer is released outside
...
@@ -206,7 +206,7 @@ double dotProduct<double>(const int n, const double* x, const double* y) {
}
#endif

#if defined(PADDLE_USE_MKLML)
template <>
void vExp<float>(const int n, const float* a, float* r) {
@@ -295,38 +295,6 @@ template void vAdd(const int n, const double* a, const double* b, double* r);
#endif
#ifdef PADDLE_USE_MKL
template <>
void vInvSqrt<float>(const int n, const float* a, float* r) {
vsInvSqrt(n, a, r);
}
template <>
void vInvSqrt<double>(const int n, const double* a, double* r) {
vdInvSqrt(n, a, r);
}
template <>
void vLog1p<float>(const int n, const float* a, float* r) {
vsLog1p(n, a, r);
}
template <>
void vLog1p<double>(const int n, const double* a, double* r) {
vdLog1p(n, a, r);
}
template <>
void vTanh<float>(const int n, const float* a, float* r) {
vsTanh(n, a, r);
}
template <>
void vTanh<double>(const int n, const double* a, double* r) {
vdTanh(n, a, r);
}
#else
DEFINE_MATRIX_BINARY_OP(vInvSqrt, b = 1.0f / std::sqrt(a));
template <class T>
void vInvSqrt(const int n, const T* a, T* r) {
@@ -357,6 +325,4 @@ template void vLog1p(const int n, const double* a, double* r);
template void vTanh(const int n, const float* a, float* r);
template void vTanh(const int n, const double* a, double* r);
#endif
} // namespace paddle
@@ -21,11 +21,6 @@ limitations under the License. */
#include <mkl_vml_functions.h>
#endif
#ifdef PADDLE_USE_MKL
#include <mkl.h>
#include <mkl_lapacke.h>
#endif
#if defined(PADDLE_USE_ATLAS) || defined(PADDLE_USE_VECLIB)
extern "C" {
#include <cblas.h>
...
@@ -169,7 +169,7 @@ void TensorCheck(AssertEq compare,
      count++;
    }
  }
  EXPECT_EQ(count, 0) << "There are " << count << " different elements.";
}

template <typename AssertEq, typename Tensor1, typename Tensor2>
...
@@ -62,6 +62,11 @@ function(op_library TARGET)
    file(APPEND ${pybind_file} "USE_OP(pool2d);\n")
  endif()
if ("${TARGET}" STREQUAL "compare_op")
set(pybind_flag 1)
file(APPEND ${pybind_file} "USE_OP(less_than);\nUSE_OP(equal);\n")
endif()
  # pool_with_index_op contains several operators
  if ("${TARGET}" STREQUAL "pool_with_index_op")
    set(pybind_flag 1)
@@ -165,6 +170,8 @@ set(DEPS_OPS
  sequence_conv_op
  sequence_pool_op
  lod_rank_table_op
lod_tensor_to_array_op
array_to_lod_tensor_op
  lstm_op
  tensor_array_read_write_op
  gru_op)
@@ -177,6 +184,8 @@ op_library(sum_op DEPS net_op selected_rows_functor)
op_library(pool_op DEPS pooling)
op_library(pool_with_index_op DEPS pooling)
op_library(lod_rank_table_op SRCS lod_rank_table_op.cc DEPS lod_rank_table)
op_library(lod_tensor_to_array_op SRCS lod_tensor_to_array_op.cc DEPS lod_rank_table_op)
op_library(array_to_lod_tensor_op SRCS array_to_lod_tensor_op.cc DEPS lod_rank_table_op)
op_library(tensor_array_read_write_op SRCS tensor_array_read_write_op.cc)
if(WITH_GPU)
  op_library(nccl_op DEPS nccl_common)
@@ -186,8 +195,13 @@ op_library(sequence_pool_op DEPS sequence_pooling)
op_library(lstm_op DEPS sequence2batch lstm_compute)
op_library(conv_transpose_op DEPS vol2col)
op_library(gru_op DEPS sequence2batch gru_compute)
if(WITH_TESTING)
  op_library(dynamic_recurrent_op SRCS dynamic_recurrent_op.cc rnn/recurrent_op_utils.cc
DEPS net_op tensor_array gtest)
else()
op_library(dynamic_recurrent_op SRCS dynamic_recurrent_op.cc rnn/recurrent_op_utils.cc
DEPS net_op tensor_array)
endif()
op_library(recurrent_op SRCS recurrent_op.cc DEPS executor)
list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS})
...
@@ -47,10 +47,11 @@ class AccuracyOp : public framework::OperatorWithKernel {
  }

 protected:
  framework::OpKernelType GetKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<Tensor>("Out")->type()),
        ctx.device_context());
  }
};
...
@@ -65,7 +65,7 @@ class AccuracyOpCUDAKernel : public framework::OpKernel<T> {
    size_t num_samples = inference->dims()[0];
    size_t infer_width = inference->dims()[1];
    PADDLE_ENFORCE(cudaMemset(accuracy_data, 0, sizeof(float)));
    if (num_samples == 0) {
      return;
...
@@ -14,7 +14,6 @@ limitations under the License. */
#pragma once
#include <algorithm>
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h" #include "paddle/framework/op_registry.h"
namespace paddle { namespace paddle {
...@@ -22,18 +21,6 @@ namespace operators { ...@@ -22,18 +21,6 @@ namespace operators {
using Tensor = framework::Tensor; using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenScalar = framework::EigenScalar<T, MajorType, IndexType>;
template <typename Place, typename T>
class AccuracyKernel : public framework::OpKernel<T> {
 public:
...
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/lod_tensor_array.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
class ArrayOp : public framework::OperatorBase {
public:
ArrayOp(const std::string &type, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: OperatorBase(type, inputs, outputs, attrs) {}
protected:
size_t GetOffset(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const {
auto *i = scope.FindVar(Input("I"));
PADDLE_ENFORCE(i != nullptr, "I must be set");
auto &i_tensor = i->Get<framework::LoDTensor>();
PADDLE_ENFORCE_EQ(i_tensor.numel(), 1);
size_t offset;
if (platform::is_gpu_place(i_tensor.place())) {
// FIXME: Avoid copy from GPU to CPU
framework::Tensor t;
t.CopyFrom(i_tensor, platform::CPUPlace(), dev_ctx);
dev_ctx.Wait();
offset = static_cast<size_t>(*t.data<int64_t>());
} else {
offset = static_cast<size_t>(*i_tensor.data<int64_t>());
}
return offset;
}
};
} // namespace operators
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <numeric>
#include "paddle/framework/lod_rank_table.h"
#include "paddle/framework/lod_tensor_array.h"
#include "paddle/framework/op_registry.h"
#include "paddle/memory/memcpy.h"
namespace paddle {
namespace operators {
using LoD = framework::LoD;
class ArrayToLoDTensorOp : public framework::OperatorBase {
public:
ArrayToLoDTensorOp(const std::string &type,
const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: OperatorBase(type, inputs, outputs, attrs) {}
void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override {
auto &x = scope.FindVar(Input("X"))->Get<framework::LoDTensorArray>();
auto &rank_table =
scope.FindVar(Input("RankTable"))->Get<framework::LoDRankTable>();
auto *out =
scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensor>();
// Check dims, place and data type of input's elements and infer output's
// dim
PADDLE_ENFORCE(!x.empty(), "There's no element in the input array.");
int rank = x[0].dims().size();
platform::Place place = x[0].place();
std::type_index data_type = x[0].type();
framework::DDim ins_dims = framework::slice_ddim(x[0].dims(), 1, rank);
int64_t batch_size = x[0].dims()[0];
for (size_t i = 1; i < x.size(); ++i) {
PADDLE_ENFORCE_EQ(framework::slice_ddim(x[i].dims(), 1, rank), ins_dims,
"The dimension of the %zu'th element in LoDTensorArray "
"differs from previous ones.",
i);
PADDLE_ENFORCE(platform::places_are_same_class(x[i].place(), place),
"The place class of the %zu'th element in LoDTensorArray "
"differs from previous ones.",
i);
PADDLE_ENFORCE(x[i].type() == data_type,
"The date type of the %zu'th element in LoDTensorArray "
"differs from previous ones.",
i);
batch_size += x[i].dims()[0];
}
auto ins_dim_vec = framework::vectorize(ins_dims);
ins_dim_vec.insert(ins_dim_vec.begin(), batch_size);
framework::DDim out_dims = framework::make_ddim(ins_dim_vec);
out->Resize(out_dims);
out->mutable_data(place, data_type);
auto &table_items = rank_table.items();
std::vector<size_t> table_item_idx(table_items.size());
    // table_item_idx = range(table_items.size())
std::iota(table_item_idx.begin(), table_item_idx.end(), 0);
std::sort(table_item_idx.begin(), table_item_idx.end(),
[&](size_t a, size_t b) {
return table_items[a].index < table_items[b].index;
});
// Build LoDTensor `out`
framework::LoD *out_lod = out->mutable_lod();
out_lod->clear();
size_t out_offset = 0;
auto prefix_lod = rank_table.coarse_lod();
prefix_lod.emplace_back();
auto &cur_level_lod = prefix_lod.back();
cur_level_lod.push_back(0);
for (size_t idx : table_item_idx) {
cur_level_lod.push_back(cur_level_lod.back() + table_items[idx].length);
for (size_t x_idx = 0; x_idx < table_items[idx].length; ++x_idx) {
auto lod_and_offset = framework::GetSubLoDAndAbsoluteOffset(
x[x_idx].lod(), idx, idx + 1, 0);
auto &lod_length = lod_and_offset.first;
framework::AppendLoD(out_lod, lod_length);
size_t start_offset = lod_and_offset.second.first;
size_t end_offset = lod_and_offset.second.second;
VLOG(10) << "idx=" << idx << " x_idx=" << x_idx << " ["
<< ", " << end_offset << "]";
// Copy data
PADDLE_ENFORCE_GE(end_offset, start_offset);
size_t len = end_offset - start_offset;
if (len == 0) {
continue;
}
out->Slice(out_offset, out_offset + len)
.CopyFrom(x[x_idx].Slice(start_offset, end_offset), place, dev_ctx);
out_offset += len;
}
}
out_lod->insert(out_lod->begin(), prefix_lod.begin(), prefix_lod.end());
}
};
class ArrayToLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker {
public:
ArrayToLoDTensorOpProtoMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X",
"(std::vector<LodTensor>) A vector of tensors that is going to "
"be casted to a big LoDTensor.");
AddInput("RankTable",
"(LoDRankTable) RankTable provides the coarse lod infomation to "
"build the output LoDTensor. See "
"'paddle/framework/lod_rank_table.h' for more details.");
AddOutput("Out", "(LoDTensor) The LoDTensor formed by input tensor array.");
    AddComment(
        R"DOC(This op builds a big LoDTensor from a std::vector<LoDTensor>
and a LoDRankTable. It is supposed to be used to get a dynamic RNN's
outputs back into a normal LoDTensor. The std::vector<LoDTensor>
would be the output of the RNN op, and the LoDRankTable would be built
from the RNN's input.)DOC");
}
};
class ArrayToLoDTensorInferShape : public framework::InferShapeBase {
public:
void operator()(framework::InferShapeContext *context) const override {
PADDLE_ENFORCE(context->HasInput("X"),
"ArrayToLoDTensorOp must has input X.");
PADDLE_ENFORCE(context->HasInput("RankTable"),
"ArrayToLoDTensorOp must has input RankTable.");
context->SetOutputDim("Out", context->GetInputDim("X"));
}
};
class ArrayToLoDTensorGradMaker : public framework::SingleGradOpDescMaker {
public:
using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
protected:
std::unique_ptr<framework::OpDescBind> Apply() const override {
auto *grad_op = new framework::OpDescBind();
grad_op->SetType("lod_tensor_to_array");
grad_op->SetInput("X", OutputGrad("Out"));
grad_op->SetInput("RankTable", Input("RankTable"));
grad_op->SetOutput("Out", InputGrad("X"));
grad_op->SetAttrMap(Attrs());
return std::unique_ptr<framework::OpDescBind>(grad_op);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OPERATOR(array_to_lod_tensor, ops::ArrayToLoDTensorOp,
ops::ArrayToLoDTensorOpProtoMaker,
ops::ArrayToLoDTensorInferShape,
ops::ArrayToLoDTensorGradMaker);
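// A standalone sketch (not part of the op) of the index-sorting pattern used
// above to restore batch order: std::iota builds [0, n), then the positions
// are sorted by each rank-table item's original index. The `index` values
// here are made up for illustration.
#include <algorithm>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
  std::vector<size_t> index = {2, 0, 1};  // original slot of each sorted item
  std::vector<size_t> order(index.size());
  std::iota(order.begin(), order.end(), 0);
  std::sort(order.begin(), order.end(),
            [&](size_t a, size_t b) { return index[a] < index[b]; });
  for (size_t idx : order) std::cout << idx << " ";  // prints: 1 2 0
  std::cout << "\n";
}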
@@ -39,10 +39,11 @@ class AucOp : public framework::OperatorWithKernel {
  }

 protected:
  framework::OpKernelType GetKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<Tensor>("Out")->type()),
        ctx.device_context());
  }
};
...
@@ -19,9 +19,6 @@ namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
template <typename T>
using EigenArrayMap =
@@ -303,7 +300,8 @@ class BatchNormGradOp : public framework::OperatorWithKernel {
    ctx->SetOutputDim(framework::GradVarName("Bias"), {C});
  }
 protected:
framework::OpKernelType GetKernelType(
      const framework::ExecutionContext &ctx) const override {
    const auto *var = ctx.InputVar(framework::GradVarName("Y"));
    if (var == nullptr) {
@@ -318,7 +316,8 @@ class BatchNormGradOp : public framework::OperatorWithKernel {
    if (t == nullptr) {
      PADDLE_THROW("can't find Y@GRAD");
    }
    return framework::OpKernelType(framework::ToDataType(t->type()),
                                   ctx.device_context());
  }
};
...
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/chunk_eval_op.h"
namespace paddle {
namespace operators {
class ChunkEvalOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("Inference"),
"Input(Inference) of ChunkEvalOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Label"),
"Input(Label) of ChunkEvalOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Precision"),
"Output(Precision) of ChunkEvalOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Recall"),
"Output(Recall) of ChunkEvalOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("F1-Score"),
"Output(F1-Score) of ChunkEvalOp should not be null.");
auto inference_dim = ctx->GetInputDim("Inference");
auto label_dim = ctx->GetInputDim("Label");
PADDLE_ENFORCE(inference_dim == label_dim,
"Inference's shape must be the same as Label's shape.");
ctx->SetOutputDim("Precision", {1});
ctx->SetOutputDim("Recall", {1});
ctx->SetOutputDim("F1-Score", {1});
}
protected:
framework::OpKernelType GetKernelType(
const framework::ExecutionContext &ctx) const override {
return framework::OpKernelType(framework::DataType::FP32,
ctx.device_context());
}
};
class ChunkEvalOpMaker : public framework::OpProtoAndCheckerMaker {
public:
ChunkEvalOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("Inference",
"(Tensor, default: Tensor<int>). Predictions from the network.");
AddInput("Label",
"(Tensor, default: Tensor<int>). The true tag sequences.");
AddOutput("Precision",
"(float). The evaluated precision (called positive predictive "
"value) of chunks on the given mini-batch.");
AddOutput("Recall",
"(float). The evaluated recall (true positive rate or "
"sensitivity) of chunks on the given mini-batch.");
AddOutput("F1-Score",
"(float). The evaluated F1-Score on the given mini-batch.");
AddAttr<int>("num_chunk_types",
"(int). The number of chunk type. See below for details.");
AddAttr<std::string>(
"chunk_scheme",
"(string, default IOB). The labeling scheme indicating "
"how to encode the chunks. Must be IOB, IOE, IOBES or plain. See below "
"for details.")
.SetDefault("IOB");
AddAttr<std::vector<int>>("excluded_chunk_types",
"(list<int>) A list including chunk type ids "
"indicating chunk types that are not counted. "
"See below for details.")
.SetDefault(std::vector<int>{});
AddComment(R"DOC(
For some basics of chunking, please refer to
'Chunking with Support Vector Machines <https://aclanthology.info/pdf/N/N01/N01-1025.pdf>'.
ChunkEvalOp computes the precision, recall, and F1-score of chunk detection,
and supports IOB, IOE, IOBES and IO (also known as plain) tagging schemes.
Here is a NER example of labeling for these tagging schemes:
Li Ming works at Agricultural Bank of China in Beijing.
IO: I-PER I-PER O O I-ORG I-ORG I-ORG I-ORG O I-LOC
IOB: B-PER I-PER O O B-ORG I-ORG I-ORG I-ORG O B-LOC
IOE: I-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O E-LOC
IOBES: B-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O S-LOC
There are three chunk types (named entity types) here: PER (person), ORG (organization)
and LOC (location), and we can see that the labels have the form <tag type>-<chunk type>.
Since the calculations actually use label ids rather than labels, extra attention
should be paid when mapping labels to ids to make ChunkEvalOp work. The key point
is that the listed equations are satisfied by the ids.
tag_type = label % num_tag_type
chunk_type = label / num_tag_type
where `num_tag_type` is the number of tag types in the tagging scheme, `num_chunk_type`
is the number of chunk types, and `tag_type` gets its value from the following table.
Scheme Begin Inside End Single
plain 0 - - -
IOB 0 1 - -
IOE - 0 1 -
IOBES 0 1 2 3
Still using NER as an example, assume the tagging scheme is IOB and the chunk types are ORG,
PER and LOC. To satisfy the above equations, the label map can be like this:
B-ORG 0
I-ORG 1
B-PER 2
I-PER 3
B-LOC 4
I-LOC 5
O 6
It's not hard to verify the equations, noting that the number of chunk types
is 3 and the number of tag types in the IOB scheme is 2. For example, the label
id of I-LOC is 5, so the tag type id of I-LOC is 5 % 2 = 1 and the chunk type id
of I-LOC is 5 / 2 = 2, which is consistent with the results from the equations.
)DOC");
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(chunk_eval, ops::ChunkEvalOp,
ops::ChunkEvalOpMaker);
REGISTER_OP_CPU_KERNEL(chunk_eval,
ops::ChunkEvalKernel<paddle::platform::CPUPlace, float>);
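// A standalone sketch of the label-to-id equations in the op comment above,
// using the IOB / three-chunk-type label map from that comment (illustrative
// only, not part of the op).
#include <cstdio>

int main() {
  const int num_tag_types = 2;  // IOB scheme: B = 0, I = 1
  const char* tag_names[] = {"B", "I"};
  const char* chunk_names[] = {"ORG", "PER", "LOC"};
  for (int label = 0; label < 6; ++label) {
    int tag_type = label % num_tag_types;
    int chunk_type = label / num_tag_types;
    std::printf("label %d -> %s-%s\n", label, tag_names[tag_type],
                chunk_names[chunk_type]);  // label 5 -> I-LOC, as in the table
  }
}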
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <set>
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
template <typename Place, typename T>
class ChunkEvalKernel : public framework::OpKernel<T> {
public:
struct Segment {
int begin;
int end;
int type;
bool operator==(const Segment& y) const {
return begin == y.begin && end == y.end && type == y.type;
}
};
void GetSegments(const int* label, int length, std::vector<Segment>& segments,
int num_chunk_types, int num_tag_types, int other_chunk_type,
int tag_begin, int tag_inside, int tag_end,
int tag_single) const {
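    // Scan the tag sequence once: close the current chunk whenever the scheme
    // says it ends, and open a new one whenever the scheme says one begins.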
segments.clear();
segments.reserve(length);
int chunk_start = 0;
bool in_chunk = false;
int tag = -1;
int type = other_chunk_type;
for (int i = 0; i < length; ++i) {
int prev_tag = tag;
int prev_type = type;
PADDLE_ENFORCE_LE(label[i], num_chunk_types * num_tag_types);
tag = label[i] % num_tag_types;
type = label[i] / num_tag_types;
if (in_chunk && ChunkEnd(prev_tag, prev_type, tag, type, other_chunk_type,
tag_begin, tag_inside, tag_end, tag_single)) {
Segment segment{
chunk_start, // begin
i - 1, // end
prev_type,
};
segments.push_back(segment);
in_chunk = false;
}
if (ChunkBegin(prev_tag, prev_type, tag, type, other_chunk_type,
tag_begin, tag_inside, tag_end, tag_single)) {
chunk_start = i;
in_chunk = true;
}
}
if (in_chunk) {
Segment segment{
chunk_start, // begin
length - 1, // end
type,
};
segments.push_back(segment);
}
}
bool ChunkEnd(int prev_tag, int prev_type, int tag, int type,
int other_chunk_type, int tag_begin, int tag_inside,
int tag_end, int tag_single) const {
if (prev_type == other_chunk_type) return false;
if (type == other_chunk_type) return true;
if (type != prev_type) return true;
if (prev_tag == tag_begin) return tag == tag_begin || tag == tag_single;
if (prev_tag == tag_inside) return tag == tag_begin || tag == tag_single;
if (prev_tag == tag_end) return true;
if (prev_tag == tag_single) return true;
return false;
}
bool ChunkBegin(int prev_tag, int prev_type, int tag, int type,
int other_chunk_type, int tag_begin, int tag_inside,
int tag_end, int tag_single) const {
if (prev_type == other_chunk_type) return type != other_chunk_type;
if (type == other_chunk_type) return false;
if (type != prev_type) return true;
if (tag == tag_begin) return true;
if (tag == tag_inside) return prev_tag == tag_end || prev_tag == tag_single;
if (tag == tag_end) return prev_tag == tag_end || prev_tag == tag_single;
if (tag == tag_single) return true;
return false;
}
void Compute(const framework::ExecutionContext& context) const override {
// initialize to parse configurations
int num_chunk_types, num_tag_types;
int other_chunk_type;
int tag_begin, tag_inside, tag_end, tag_single;
std::vector<Segment> label_segments;
std::vector<Segment> output_segments;
std::set<int> excluded_chunk_types;
int64_t num_output_segments = 0;
int64_t num_label_segments = 0;
int64_t num_correct = 0;
if (context.Attr<std::string>("chunk_scheme") == "IOB") {
num_tag_types = 2;
tag_begin = 0;
tag_inside = 1;
tag_end = -1;
tag_single = -1;
} else if (context.Attr<std::string>("chunk_scheme") == "IOE") {
num_tag_types = 2;
tag_begin = -1;
tag_inside = 0;
tag_end = 1;
tag_single = -1;
} else if (context.Attr<std::string>("chunk_scheme") == "IOBES") {
num_tag_types = 4;
tag_begin = 0;
tag_inside = 1;
tag_end = 2;
tag_single = 3;
} else if (context.Attr<std::string>("chunk_scheme") == "plain") {
num_tag_types = 1;
tag_begin = -1;
tag_inside = -1;
tag_end = -1;
tag_single = -1;
} else {
PADDLE_THROW("Unknown chunk scheme.");
}
other_chunk_type = num_chunk_types = context.Attr<int>("num_chunk_types");
excluded_chunk_types.insert(
context.Attr<std::vector<int>>("excluded_chunk_types").begin(),
context.Attr<std::vector<int>>("excluded_chunk_types").end());
auto* inference = context.Input<LoDTensor>("Inference");
auto* label = context.Input<LoDTensor>("Label");
auto* precision = context.Output<Tensor>("Precision");
auto* recall = context.Output<Tensor>("Recall");
auto* f1 = context.Output<Tensor>("F1-Score");
const int* inference_data = inference->data<int>();
const int* label_data = label->data<int>();
T* precision_data = precision->mutable_data<T>(context.GetPlace());
    T* recall_data = recall->mutable_data<T>(context.GetPlace());
T* f1_data = f1->mutable_data<T>(context.GetPlace());
auto lod = label->lod();
PADDLE_ENFORCE_EQ(lod.size(), 1UL, "Only support one level sequence now.");
PADDLE_ENFORCE(lod == inference->lod(),
"LoD must be same between Inference and Label.");
int num_sequences = lod[0].size() - 1;
for (int i = 0; i < num_sequences; ++i) {
int seq_length = lod[0][i + 1] - lod[0][i];
EvalOneSeq(inference_data + lod[0][i], label_data + lod[0][i], seq_length,
output_segments, label_segments, num_output_segments,
num_label_segments, num_correct, num_chunk_types,
num_tag_types, other_chunk_type, tag_begin, tag_inside,
tag_end, tag_single, excluded_chunk_types);
}
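    // precision = correct / predicted, recall = correct / labeled,
    // F1 = 2PR / (P + R); each result falls back to 0 when its denominator
    // would be 0.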
    *precision_data = !num_output_segments ? 0 : static_cast<T>(num_correct) /
                                                     num_output_segments;
    *recall_data = !num_label_segments ? 0 : static_cast<T>(num_correct) /
                                                 num_label_segments;
    *f1_data = !num_correct ? 0 : 2 * (*precision_data) * (*recall_data) /
                                      ((*precision_data) + (*recall_data));
}
void EvalOneSeq(const int* output, const int* label, int length,
std::vector<Segment>& output_segments,
std::vector<Segment>& label_segments,
int64_t& num_output_segments, int64_t& num_label_segments,
int64_t& num_correct, int num_chunk_types, int num_tag_types,
int other_chunk_type, int tag_begin, int tag_inside,
int tag_end, int tag_single,
const std::set<int>& excluded_chunk_types) const {
GetSegments(output, length, output_segments, num_chunk_types, num_tag_types,
other_chunk_type, tag_begin, tag_inside, tag_end, tag_single);
GetSegments(label, length, label_segments, num_chunk_types, num_tag_types,
other_chunk_type, tag_begin, tag_inside, tag_end, tag_single);
size_t i = 0, j = 0;
while (i < output_segments.size() && j < label_segments.size()) {
if (output_segments[i] == label_segments[j] &&
excluded_chunk_types.count(output_segments[i].type) != 1) {
++num_correct;
}
if (output_segments[i].end < label_segments[j].end) {
++i;
} else if (output_segments[i].end > label_segments[j].end) {
++j;
} else {
++i;
++j;
}
}
for (auto& segment : label_segments) {
if (excluded_chunk_types.count(segment.type) != 1) ++num_label_segments;
}
for (auto& segment : output_segments) {
if (excluded_chunk_types.count(segment.type) != 1) ++num_output_segments;
}
}
};
} // namespace operators
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/clip_by_norm_op.h"
namespace paddle {
namespace operators {
class ClipByNormOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of ClipByNormOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of ClipByNormOp should not be null.");
auto max_norm = ctx->Attrs().Get<float>("max_norm");
PADDLE_ENFORCE_GT(max_norm, 0, "max_norm should be greater than 0.");
auto x_dims = ctx->GetInputDim("X");
ctx->SetOutputDim("Out", x_dims);
ctx->ShareLoD("X", /*->*/ "Out");
}
};
class ClipByNormOpMaker : public framework::OpProtoAndCheckerMaker {
public:
ClipByNormOpMaker(framework::OpProto* proto,
framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X",
"(Tensor) The input of clip_by_norm op."
"The number of dimensions must be between [1, 9].");
AddOutput("Out",
"(Tensor) The output of clip_by_norm op with shape as input(X)");
AddAttr<float>("max_norm", "(float) The maximum norm value.");
AddComment(R"DOC(
ClipByNorm operator limits the L2 norm of the input 'X' within 'max_norm'.
If the L2 norm of 'X' is less than or equal to 'max_norm', 'Out' will be
the same as 'X'. If the L2 norm of 'X' is greater than 'max_norm', 'X' will
be linearly scaled to make the L2 norm of 'Out' equal to 'max_norm', as
shown in the following formula:
'Out' = 'max_norm' * 'X' / norm('X'),
where norm('X') represents the L2 norm of 'X'.
)DOC");
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(clip_by_norm, ops::ClipByNormOp,
ops::ClipByNormOpMaker);
REGISTER_OP_CPU_KERNEL(
clip_by_norm, ops::ClipByNormKernel<paddle::platform::CPUPlace, float>);
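// A minimal numeric sketch of the clipping formula in the op comment above
// (plain C++, independent of the kernel): with X = {3, 4} and max_norm = 1,
// norm(X) = 5, so Out = max_norm * X / norm(X) = {0.6, 0.8}.
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  std::vector<float> x = {3.f, 4.f};
  const float max_norm = 1.f;
  float norm = 0.f;
  for (float v : x) norm += v * v;
  norm = std::sqrt(norm);
  const float scale = norm > max_norm ? max_norm / norm : 1.f;
  for (float v : x) std::printf("%g ", v * scale);  // prints: 0.6 0.8
  std::printf("\n");
}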
@@ -12,11 +12,8 @@
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/clip_by_norm_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(
    clip_by_norm, ops::ClipByNormKernel<paddle::platform::GPUPlace, float>);
@@ -16,23 +16,35 @@

#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/platform/transform.h"

namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;

template <typename Place, typename T>
class ClipByNormKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
auto max_norm = context.Attr<T>("max_norm");
auto* input = context.Input<Tensor>("X");
auto* output = context.Output<Tensor>("Out");
output->mutable_data<T>(context.GetPlace());
auto x = EigenVector<T>::Flatten(*input);
auto out = EigenVector<T>::Flatten(*output);
auto x_norm = x.square().sum().sqrt();
auto place = context.GetEigenDevice<Place>();
auto temp = (x_norm <= max_norm).template cast<T>().eval();
auto scaling = temp + (static_cast<T>(1) - temp) * max_norm / x_norm;
Eigen::array<int, 1> one_dim{{1}};
Eigen::DSizes<int, 1> m_dsize(input->numel());
out.device(place) = x * scaling.reshape(one_dim).broadcast(m_dsize);
  }
};
...
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/compare_op.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
template <typename OpComment>
class CompareOpProtoMaker : public framework::OpProtoAndCheckerMaker {
public:
CompareOpProtoMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
OpComment comment;
AddInput("X",
string::Sprintf("(LoDTensor) the left hand operand of %s operator",
comment.type));
AddInput("Y", string::Sprintf(
"(LoDTensor) the right hand operand of %s operator",
comment.type));
AddOutput("Out", string::Sprintf(
"(LoDTensor) n-dim bool tensor. Each element is %s",
comment.equation));
    AddComment(string::Sprintf(R"DOC(%s Operator
It operates element-wise on X and Y and returns Out. Each of them is an
N-dim tensor. X and Y can be of any numeric type. Each element of the Out
tensor is calculated by %s
)DOC",
                               comment.type, comment.equation));
}
};
template <typename OpComment>
class CompareOpInferShape : public framework::InferShapeBase {
public:
void operator()(framework::InferShapeContext *context) const override {
OpComment comment;
PADDLE_ENFORCE(context->HasInput("X"), "%s operator must has input X",
comment.type);
PADDLE_ENFORCE(context->HasInput("Y"), "%s operator must has input Y",
comment.type);
auto dim_x = context->GetInputDim("X");
auto dim_y = context->GetInputDim("Y");
    PADDLE_ENFORCE_EQ(framework::product(dim_x), framework::product(dim_y),
                      "The number of elements in X and Y should be the same");
context->SetOutputDim("Out", context->GetInputDim("X"));
context->ShareLoD("X", "Out");
}
};
class CompareOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
framework::OpKernelType GetKernelType(
const framework::ExecutionContext &ctx) const override {
framework::OpKernelType kt = OperatorWithKernel::GetKernelType(ctx);
// CompareOp kernel's device type is decided by input tensor place
kt.place_ = ctx.Input<framework::LoDTensor>("X")->place();
return kt;
}
};
} // namespace operators
} // namespace paddle
#define REGISTER_LOGICAL_OP(op_type, _equation) \
struct _##op_type##Comment { \
static char type[]; \
static char equation[]; \
}; \
char _##op_type##Comment::type[]{#op_type}; \
char _##op_type##Comment::equation[]{_equation}; \
REGISTER_OPERATOR( \
op_type, ::paddle::operators::CompareOp, \
::paddle::operators::CompareOpProtoMaker<_##op_type##Comment>, \
::paddle::operators::CompareOpInferShape<_##op_type##Comment>, \
::paddle::framework::EmptyGradOpMaker);
REGISTER_LOGICAL_OP(less_than, "Out = X < Y");
REGISTER_LOGICAL_KERNEL(less_than, CPU, paddle::operators::LessThanFunctor);
REGISTER_LOGICAL_OP(equal, "Out = X == Y");
REGISTER_LOGICAL_KERNEL(equal, CPU, paddle::operators::EqualFunctor);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/compare_op.h"
REGISTER_LOGICAL_KERNEL(less_than, GPU, paddle::operators::LessThanFunctor);
REGISTER_LOGICAL_KERNEL(equal, GPU, paddle::operators::EqualFunctor);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <math.h>
#include <type_traits>
#include "paddle/framework/op_registry.h"
#include "paddle/platform/transform.h"
namespace paddle {
namespace operators {
template <typename T>
struct LessThanFunctor {
using ELEM_TYPE = T;
HOSTDEVICE bool operator()(const T& a, const T& b) const { return a < b; }
};
template <typename T>
struct EqualFunctor {
using ELEM_TYPE = T;
HOSTDEVICE bool operator()(const T& a, const T& b) const {
    if (std::is_floating_point<T>::value) {
      // This branch will be optimized away at compile time when T is an
      // integer type, so casting a and b to double here is safe.
      return fabs(static_cast<double>(a - b)) < 1e-8;
} else {
return (a == b);
}
}
};
template <typename Place, typename Functor>
class CompareOpKernel
: public framework::OpKernel<typename Functor::ELEM_TYPE> {
public:
void Compute(const framework::ExecutionContext& context) const override {
using T = typename Functor::ELEM_TYPE;
auto* x = context.Input<framework::Tensor>("X");
auto* y = context.Input<framework::Tensor>("Y");
auto* out = context.Output<framework::Tensor>("Out");
Functor binary_func;
platform::Transform<Place> trans;
trans(context.device_context(), x->data<T>(), x->data<T>() + x->numel(),
y->data<T>(), out->mutable_data<bool>(context.GetPlace()),
binary_func);
}
};
} // namespace operators
} // namespace paddle
#define REGISTER_LOGICAL_KERNEL(op_type, dev, functor) \
REGISTER_OP_##dev##_KERNEL( \
op_type, \
::paddle::operators::CompareOpKernel<::paddle::platform::dev##Place, \
functor<int>>, \
::paddle::operators::CompareOpKernel<::paddle::platform::dev##Place, \
functor<int64_t>>, \
::paddle::operators::CompareOpKernel<::paddle::platform::dev##Place, \
functor<float>>, \
::paddle::operators::CompareOpKernel<::paddle::platform::dev##Place, \
functor<double>>);
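// A small standalone sketch of the functor semantics above (illustrative
// only): LessThanFunctor is a plain a < b, and EqualFunctor compares floating
// point values within an absolute tolerance of 1e-8, which absorbs rounding
// noise such as 0.1 + 0.2 vs 0.3.
#include <cassert>
#include <cmath>

int main() {
  assert(1 < 2);                    // LessThanFunctor(1, 2)
  double a = 0.1 + 0.2, b = 0.3;    // not equal bit-for-bit
  assert(std::fabs(a - b) < 1e-8);  // but EqualFunctor treats them as equal
}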
@@ -120,9 +120,11 @@ class CRFDecodingOp : public framework::OperatorWithKernel {
  }

 protected:
  framework::OpKernelType GetKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
framework::ToDataType(ctx.Input<LoDTensor>("Emission")->type()),
ctx.device_context());
  }
};
} // namespace operators
...
@@ -51,9 +51,11 @@ class CrossEntropyOp : public framework::OperatorWithKernel {

 protected:
  // Explicitly set that the data type of computation kernel of cross_entropy
  // is determined by its input "X".
  framework::OpKernelType GetKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
framework::ToDataType(ctx.Input<Tensor>("X")->type()),
ctx.device_context());
  }
};
@@ -98,9 +100,11 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel {

 protected:
  // Explicitly set that the data type of computation kernel of cross_entropy
  // is determined by its input "X".
  framework::OpKernelType GetKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
framework::ToDataType(ctx.Input<Tensor>("X")->type()),
ctx.device_context());
  }
};
...
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/expand_op.h"
namespace paddle {
namespace operators {
using framework::Tensor;
class ExpandOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null.");
std::vector<int> expand_times =
ctx->Attrs().Get<std::vector<int>>("expand_times");
auto x_dims = ctx->GetInputDim("X");
    PADDLE_ENFORCE_EQ(static_cast<size_t>(x_dims.size()), expand_times.size(),
                      "The number of values in Attr(expand_times) must be "
                      "equal to the rank of Input(X).");
PADDLE_ENFORCE_LE(x_dims.size(), 6,
"The rank of Input(X) must not be greater than 6.");
std::vector<int64_t> out_shape(x_dims.size());
for (size_t i = 0; i < expand_times.size(); ++i) {
PADDLE_ENFORCE_GE(expand_times[i], 1,
"Each value of Attr(expand_times) should not be "
"less than 1.");
out_shape[i] = x_dims[i] * expand_times[i];
}
ctx->SetOutputDim("Out", framework::make_ddim(out_shape));
if (out_shape[0] == x_dims[0]) {
ctx->ShareLoD("X", "Out");
}
}
};
class ExpandOpMaker : public framework::OpProtoAndCheckerMaker {
public:
ExpandOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X",
"(Tensor, default Tensor<float>) A tensor with rank in [1, 6]."
"X is the input tensor to be expanded.");
AddOutput("Out",
"(Tensor, default Tensor<float>) A tensor with rank in [1, 6]."
"The rank of Output(Out) is same as Input(X) except that each "
"dimension size of Output(Out) is equal to corresponding "
"dimension size of Input(X) multiplying corresponding value of "
"Attr(expand_times).");
AddAttr<std::vector<int>>("expand_times",
"Expand times number for each dimension.");
AddComment(R"DOC(
Expand operator tiles the input by given times number. You should set times
number for each dimension by providing attribute 'expand_times'. The rank of X
should be in [1, 6]. Please notice that size of 'expand_times' must be same with
X's rank. Following is a using case:
Input(X) is a 3-D tensor with shape [2, 3, 1]:
[
[[1], [2], [3]],
[[4], [5], [6]]
]
Attr(expand_times): [1, 2, 2]
Output(Out) is a 3-D tensor with shape [2, 6, 2]:
[
[[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
[[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
]
)DOC");
}
};
class ExpandGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
"Input(Out@GRAD) should not be null.");
auto x_dims = ctx->GetInputDim("X");
std::vector<int> expand_times =
ctx->Attrs().Get<std::vector<int>>("expand_times");
auto out_dims = ctx->GetInputDim(framework::GradVarName("Out"));
for (size_t i = 0; i < expand_times.size(); ++i) {
      PADDLE_ENFORCE_EQ(x_dims[i] * expand_times[i], out_dims[i],
                        "Each dimension size of Input(Out@GRAD) should be "
                        "equal to the multiplication of the corresponding "
                        "dimension size of Input(X) and the Attr(expand_times) "
                        "value.");
}
auto x_grad_name = framework::GradVarName("X");
if (ctx->HasOutput(x_grad_name)) {
ctx->SetOutputDim(x_grad_name, x_dims);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(expand, ops::ExpandOp, ops::ExpandOpMaker, expand_grad,
ops::ExpandGradOp);
REGISTER_OP_CPU_KERNEL(expand,
ops::ExpandKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
expand_grad, ops::ExpandGradKernel<paddle::platform::CPUPlace, float>);
@@ -13,12 +13,11 @@
limitations under the License. */

#define EIGEN_USE_GPU
#include "paddle/operators/expand_op.h"

namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(expand,
ops::ExpandKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
    expand_grad, ops::ExpandGradKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <boost/preprocessor/arithmetic/div.hpp>
#include <boost/preprocessor/arithmetic/mod.hpp>
#include <boost/preprocessor/comparison/greater.hpp>
#include <boost/preprocessor/comparison/greater_equal.hpp>
#include <boost/preprocessor/control/if.hpp>
#include <boost/preprocessor/repetition/repeat.hpp>
#include <iostream>
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"
#define MAX_RANK_SUPPORTED 6
#define EXPAND_TEMPLATE(z, n, data) \
case n + 1: { \
Expand<n + 1>(context); \
break; \
}
#define REP_EXPAND_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_TEMPLATE, ~)
#define COND(n) \
BOOST_PP_GREATER_EQUAL(BOOST_PP_DIV(n, MAX_RANK_SUPPORTED), \
BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
#define EXPAND_GRAD_CASE(n) \
case n: { \
ExpandBackward<n>(context, reshape_dims_vec, reduce_dims_vec); \
break; \
}
#define EXPAND_GRAD_TEMPLATE(z, n, data) \
BOOST_PP_IF(COND(n), EXPAND_GRAD_CASE(n), )
#define REP_EXPAND_GRAD_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_GRAD_TEMPLATE, ~)
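// The BOOST_PP macros above generate the switch cases that dispatch a
// runtime rank (or encoded reshape/reduce size pair) to the matching
// compile-time template instantiation of Expand / ExpandBackward.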
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
template <typename T, size_t D, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
template <typename Place, typename T>
class ExpandKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto rank = context.Input<Tensor>("X")->dims().size();
switch (rank) {
REP_EXPAND_TEMPLATE(MAX_RANK_SUPPORTED)
default:
PADDLE_ENFORCE(false,
"Only support tensor with rank being between 1 and 6.");
}
}
protected:
template <int Rank>
void Expand(const framework::ExecutionContext& context) const {
auto* in0 = context.Input<Tensor>("X");
auto& expand_times = context.Attr<std::vector<int>>("expand_times");
auto* out0 = context.Output<Tensor>("Out");
Eigen::DSizes<int, Rank> bcast_dims;
auto x_dims = in0->dims();
for (size_t i = 0; i < expand_times.size(); ++i) {
bcast_dims[i] = expand_times[i];
}
auto x = EigenTensor<T, Rank>::From(*in0);
out0->mutable_data<T>(context.GetPlace());
auto y = EigenTensor<T, Rank>::From(*out0);
auto place = context.GetEigenDevice<Place>();
y.device(place) = x.broadcast(bcast_dims);
}
};
template <typename Place, typename T>
class ExpandGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* in0 = context.Input<Tensor>("X");
auto& expand_times = context.Attr<std::vector<int>>("expand_times");
auto x_dims = in0->dims();
    // 1. reshape_dims_vec is the broadcast parameter. For each dimension i,
    //    if expand_times[i] > 1 and x_dims[i] > 1, dimension i will be split
    //    into the two dimensions [expand_times[i], x_dims[i]].
    // 2. reduce_dims_vec is the dimension parameter used to compute the
    //    gradients. For each expanded dimension, the gradients are summed
    //    back to the original size.
std::vector<int> reshape_dims_vec;
std::vector<int> reduce_dims_vec;
for (size_t i = 0; i < expand_times.size(); ++i) {
if (expand_times[i] == 1) {
reshape_dims_vec.push_back(x_dims[i]);
} else {
if (x_dims[i] == 1) {
reduce_dims_vec.push_back(reshape_dims_vec.size());
reshape_dims_vec.push_back(expand_times[i]);
} else {
reduce_dims_vec.push_back(reshape_dims_vec.size());
reshape_dims_vec.push_back(expand_times[i]);
reshape_dims_vec.push_back(x_dims[i]);
}
}
}
int dims = reshape_dims_vec.size() * MAX_RANK_SUPPORTED +
reduce_dims_vec.size() - MAX_RANK_SUPPORTED - 1;
    // no reduction is needed, just copy
if (reduce_dims_vec.size() == 0) {
auto* in0 = context.Input<Tensor>(framework::GradVarName("Out"));
auto* out0 = context.Output<Tensor>(framework::GradVarName("X"));
out0->mutable_data<T>(context.GetPlace());
out0->CopyFrom(*in0, context.GetPlace(), context.device_context());
} else {
switch (dims) {
REP_EXPAND_GRAD_TEMPLATE(72)
default:
PADDLE_ENFORCE(
false, "Only support tensor with rank being between 1 and 6.");
}
}
}
protected:
template <int Dims>
void ExpandBackward(const framework::ExecutionContext& context,
const std::vector<int>& reshape_dims_vec,
const std::vector<int>& reduce_dims_vec) const {
size_t reshape_size = Dims / MAX_RANK_SUPPORTED + 1;
size_t reduce_size = Dims % MAX_RANK_SUPPORTED + 1;
PADDLE_ENFORCE_EQ(reshape_size, reshape_dims_vec.size(),
"Inconsistent size between template Dims and "
"reshape dimensions.");
PADDLE_ENFORCE_EQ(reduce_size, reduce_dims_vec.size(),
"Inconsistent size between template Dims and "
"reduce dimensions.");
auto* in0 = context.Input<Tensor>(framework::GradVarName("Out"));
auto* out0 = context.Output<Tensor>(framework::GradVarName("X"));
auto x = EigenVector<T>::Flatten(*(context.Input<Tensor>("X")));
out0->mutable_data<T>(context.GetPlace());
auto x_grad = EigenVector<T>::Flatten(*out0);
Eigen::DSizes<int, Dims / MAX_RANK_SUPPORTED + 1> reshape_dims;
for (size_t i = 0; i < reshape_size; ++i) {
reshape_dims[i] = reshape_dims_vec[i];
}
Eigen::DSizes<int, Dims % MAX_RANK_SUPPORTED + 1> reduce_dims;
for (size_t i = 0; i < reduce_size; ++i) {
reduce_dims[i] = reduce_dims_vec[i];
}
auto out_grad = EigenVector<T>::Flatten(*in0);
x_grad.device(context.GetEigenDevice<Place>()) =
out_grad.reshape(reshape_dims).sum(reduce_dims).reshape(x.dimensions());
}
};
} // namespace operators
} // namespace paddle
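The macro machinery at the top of this file exists because Expand and ExpandBackward are templated on ranks that are only known at run time, so a switch over every legal (reshape rank, reduce rank) pair must be generated. Below is a minimal standalone sketch of the integer encoding those macros and ExpandBackward agree on; kMaxRank mirrors MAX_RANK_SUPPORTED, and the example shapes are made up for illustration.

#include <cassert>
#include <cstdio>

constexpr int kMaxRank = 6;  // mirrors MAX_RANK_SUPPORTED

// Same arithmetic as `dims` in ExpandGradKernel::Compute:
// reshape_size * MAX_RANK_SUPPORTED + reduce_size - MAX_RANK_SUPPORTED - 1.
int EncodeDims(int reshape_size, int reduce_size) {
  return reshape_size * kMaxRank + reduce_size - kMaxRank - 1;
}

int main() {
  // Example: x_dims = [2, 3], expand_times = [1, 4] gives
  // reshape_dims_vec = [2, 4, 3] and reduce_dims_vec = [1],
  // i.e. reshape_size = 3 and reduce_size = 1.
  int dims = EncodeDims(3, 1);
  // ExpandBackward<Dims> decodes the pair back out:
  assert(dims / kMaxRank + 1 == 3);  // reshape_size recovered
  assert(dims % kMaxRank + 1 == 1);  // reduce_size recovered
  std::printf("encoded dims = %d\n", dims);  // prints 12
  return 0;
}

The largest legal pair is (12, 6), which encodes to 71; that is why the switch in the gradient kernel is generated with REP_EXPAND_GRAD_TEMPLATE(72).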
@@ -49,9 +49,11 @@ class FillConstantBatchSizeLikeOp : public framework::OperatorWithKernel {
   }

  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext &ctx) const override {
-    return static_cast<framework::DataType>(ctx.Attr<int>("data_type"));
+    return framework::OpKernelType(
+        static_cast<framework::DataType>(ctx.Attr<int>("data_type")),
+        ctx.device_context());
   }
 };

@@ -73,10 +75,10 @@ class FillConstantBatchSizeLikeOpMaker
              "with the specified value");
     AddAttr<std::vector<int>>("shape", "(vector<int>) The shape of the output");
     AddAttr<int>("input_dim_idx",
-                 "(int, default 0) the index of input's batch size dimension")
+                 "(int, default 0) The index of input's batch size dimension")
         .SetDefault(0);
     AddAttr<int>("output_dim_idx",
-                 "(int, default 0) the index of output's batch size dimension")
+                 "(int, default 0) The index of output's batch size dimension")
         .SetDefault(0);
     AddAttr<float>("value", "(float, default 0) The value to be filled")
         .SetDefault(0.0f);
......
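The IndicateDataType-to-GetKernelType rewrite above recurs throughout this diff (gather, gaussian_random, linear_chain_crf, lookup_table, lstm). A minimal standalone sketch of the idea, using hypothetical simplified types rather than Paddle's real headers: the kernel dispatch key becomes a (data type, place) pair instead of a bare data type.

#include <cstdio>

// Hypothetical stand-ins for framework::DataType, platform::Place, and
// framework::OpKernelType; not the real Paddle definitions.
enum class DataType { FP32, FP64 };
enum class Place { CPU, GPU };

struct OpKernelType {
  DataType data_type;
  Place place;  // in the real code this comes from ctx.device_context()
};

int main() {
  // Old scheme: the op reported only DataType::FP32.
  // New scheme: the op reports the full (type, place) dispatch key.
  OpKernelType key{DataType::FP32, Place::CPU};
  std::printf("key = (%d, %d)\n", static_cast<int>(key.data_type),
              static_cast<int>(key.place));
  return 0;
}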
@@ -12,7 +12,6 @@
 See the License for the specific language governing permissions and
 limitations under the License. */

-#define EIGEN_USE_GPU
 #include "paddle/framework/op_registry.h"
 #include "paddle/operators/fill_constant_batch_size_like_op.h"
......
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #pragma once
-#include "paddle/framework/eigen.h"
 #include "paddle/framework/op_registry.h"
+#include "paddle/operators/math/math_function.h"

 namespace paddle {
 namespace operators {

@@ -27,9 +27,8 @@ class FillConstantBatchSizeLikeOpKernel : public framework::OpKernel<T> {
     out->mutable_data<T>(ctx.GetPlace());
     auto value = ctx.Attr<float>("value");

-    auto out_eigen = framework::EigenVector<T>::Flatten(*out);
-    auto place = ctx.GetEigenDevice<Place>();
-    out_eigen.device(place) = out_eigen.constant(static_cast<T>(value));
+    math::SetConstant<Place, T> setter;
+    setter(ctx.device_context(), out, static_cast<T>(value));
   }
 };
......
@@ -12,32 +12,41 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/operators/fill_constant_op.h"
+#include "paddle/framework/data_type.h"
+#include "paddle/framework/op_registry.h"
+#include "paddle/operators/math/math_function.h"

 namespace paddle {
 namespace operators {

-class FillConstantOp : public framework::OperatorWithKernel {
+class FillConstantInferShape : public framework::InferShapeBase {
  public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext *ctx) const override {
+  void operator()(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of FillConstantOp should not be null.");
     auto &shape = ctx->Attrs().Get<std::vector<int>>("shape");
-    std::vector<int64_t> shape_int64(shape.size(), 0);
-    std::transform(shape.begin(), shape.end(), shape_int64.begin(),
-                   [](int a) { return static_cast<int64_t>(a); });
-    auto dims = framework::make_ddim(shape_int64);
-    ctx->SetOutputDim("Out", dims);
+    ctx->SetOutputDim("Out", framework::make_ddim(shape));
   }
+};

- protected:
-  framework::DataType IndicateDataType(
-      const framework::ExecutionContext &ctx) const override {
-    int data_type = ctx.Attr<int>("data_type");
-    VLOG(10) << " FillConstant data_type = " << data_type;
-    return static_cast<framework::DataType>(data_type);
+class FillConstantOp : public framework::OperatorBase {
+ public:
+  using framework::OperatorBase::OperatorBase;
+  void Run(const framework::Scope &scope,
+           const platform::DeviceContext &dev_ctx) const override {
+    auto data_type = static_cast<framework::DataType>(Attr<int>("data_type"));
+    auto value = Attr<float>("value");
+    auto force_cpu = Attr<bool>("force_cpu");
+    auto &out =
+        *scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensor>();
+    out.Resize(framework::make_ddim(Attr<std::vector<int>>("shape")));
+    if (force_cpu) {
+      auto cpu = platform::CPUPlace();
+      out.mutable_data(cpu, framework::ToTypeIndex(data_type));
+    } else {
+      out.mutable_data(dev_ctx.GetPlace(), framework::ToTypeIndex(data_type));
+    }
+    math::set_constant(dev_ctx, &out, value);
   }
 };

@@ -53,6 +62,11 @@ class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<std::vector<int>>("shape", "(vector<int>) The shape of the output");
     AddAttr<float>("value", "(float, default 0) The value to be filled")
         .SetDefault(0.0f);
+    AddAttr<bool>("force_cpu",
+                  "(bool, default false) Force fill output variable to cpu "
+                  "memory. Otherwise, fill output variable to the running "
+                  "device")
+        .SetDefault(false);
     AddOutput("Out",
               "(Tensor) Tensor of specified shape will be filled "
               "with the specified value");

@@ -68,10 +82,6 @@ Fill up a variable with specified constant value.
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP_WITHOUT_GRADIENT(fill_constant, ops::FillConstantOp,
-                             ops::FillConstantOpMaker);
-REGISTER_OP_CPU_KERNEL(
-    fill_constant, ops::FillConstantOpKernel<paddle::platform::CPUPlace, float>,
-    ops::FillConstantOpKernel<paddle::platform::CPUPlace, double>,
-    ops::FillConstantOpKernel<paddle::platform::CPUPlace, int>,
-    ops::FillConstantOpKernel<paddle::platform::CPUPlace, int64_t>);
+REGISTER_OPERATOR(fill_constant, ops::FillConstantOp,
+                  ops::FillConstantInferShape, ops::FillConstantOpMaker,
+                  paddle::framework::EmptyGradOpMaker);
@@ -12,7 +12,6 @@
 See the License for the specific language governing permissions and
 limitations under the License. */

-#define EIGEN_USE_GPU
 #include "paddle/framework/op_registry.h"
 #include "paddle/operators/fill_zeros_like_op.h"
......
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #pragma once
-#include "paddle/framework/eigen.h"
 #include "paddle/framework/op_registry.h"
+#include "paddle/operators/math/math_function.h"

 namespace paddle {
 namespace operators {

@@ -23,10 +23,11 @@ template <typename Place, typename T>
 class FillZerosLikeKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* output = context.Output<framework::Tensor>("Y");
-    output->mutable_data<T>(context.GetPlace());
-    auto t = framework::EigenVector<T>::Flatten(*output);
-    t.device(context.GetEigenDevice<Place>()) = t.constant(static_cast<T>(0));
+    auto* out = context.Output<framework::Tensor>("Y");
+    out->mutable_data<T>(context.GetPlace());
+
+    math::SetConstant<Place, T> setter;
+    setter(context.device_context(), out, static_cast<T>(0));
   }
 };
......
@@ -40,9 +40,11 @@ class GatherOp : public framework::OperatorWithKernel {
   }

  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::ToDataType(ctx.Input<Tensor>("X")->type());
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<Tensor>("X")->type()),
+        ctx.device_context());
   }
 };

@@ -55,9 +57,11 @@ class GatherGradOp : public framework::OperatorWithKernel {
   }

  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::ToDataType(ctx.Input<Tensor>("X")->type());
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<Tensor>("X")->type()),
+        ctx.device_context());
   }
 };
......
@@ -57,9 +57,11 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
   }

  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return static_cast<framework::DataType>(ctx.Attr<int>("data_type"));
+    return framework::OpKernelType(
+        static_cast<framework::DataType>(ctx.Attr<int>("data_type")),
+        ctx.device_context());
   }
 };
......
@@ -12,22 +12,57 @@
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/operators/increment_op.h"
+#include "paddle/framework/op_registry.h"

 namespace paddle {
 namespace operators {

-class IncrementOp : public framework::OperatorWithKernel {
+class IncrementInferShape : public framework::InferShapeBase {
  public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext *ctx) const override {
+  void operator()(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of IncrementOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of IncrementOp should not be null.");
+    PADDLE_ENFORCE_EQ(1, framework::product(ctx->GetInputDim("X")));
     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
-    ctx->ShareLoD("X", /*->*/ "Out");
   }
 };

+struct IncrementFunctor {
+  IncrementFunctor(const framework::LoDTensor &x, framework::LoDTensor *out,
+                   float value)
+      : x_(x), out_(out), value_(value) {}
+
+  template <typename T>
+  void operator()() const {
+    *out_->data<T>() = *x_.data<T>() + static_cast<T>(value_);
+  }
+
+  const framework::LoDTensor &x_;
+  framework::LoDTensor *out_;
+  float value_;
+};
+
+class IncrementOp : public framework::OperatorBase {
+ public:
+  IncrementOp(const std::string &type, const framework::VariableNameMap &inputs,
+              const framework::VariableNameMap &outputs,
+              const framework::AttributeMap &attrs)
+      : OperatorBase(type, inputs, outputs, attrs) {}
+
+  void Run(const framework::Scope &scope,
+           const platform::DeviceContext &dev_ctx) const override {
+    auto &x = scope.FindVar(Input("X"))->Get<framework::LoDTensor>();
+    auto &out =
+        *scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensor>();
+
+    PADDLE_ENFORCE(platform::is_cpu_place(x.place()));
+    out.Resize(x.dims());
+    out.mutable_data(x.place(), x.type());
+    float value = Attr<float>("step");
+    framework::VisitDataType(framework::ToDataType(out.type()),
+                             IncrementFunctor(x, &out, value));
+  }
+};

@@ -59,10 +94,10 @@ class IncrementGradOpMaker : public framework::SingleGradOpDescMaker {
   std::unique_ptr<framework::OpDescBind> Apply() const override {
     auto *grad_op = new framework::OpDescBind();
-    grad_op->SetType("scale");
-    grad_op->SetInput("X", OutputGrad("Out"));
-    grad_op->SetOutput("Out", InputGrad("X"));
-    grad_op->SetAttr("scale", 1.0f);
+    grad_op->SetType("increment");
+    grad_op->SetInput("X", Output("Out"));
+    grad_op->SetOutput("Out", Input("X"));
+    grad_op->SetAttr("step", -boost::get<float>(GetAttr("step")));
     return std::unique_ptr<framework::OpDescBind>(grad_op);
   }
 };

@@ -71,11 +106,5 @@ class IncrementGradOpMaker : public framework::SingleGradOpDescMaker {
 }  // namespace paddle

 namespace ops = paddle::operators;
-
-REGISTER_OPERATOR(increment, ops::IncrementOp, ops::IncrementOpMaker,
-                  ops::IncrementGradOpMaker);
-REGISTER_OP_CPU_KERNEL(
-    increment, ops::IncrementKernel<paddle::platform::CPUPlace, float>,
-    ops::IncrementKernel<paddle::platform::CPUPlace, double>,
-    ops::IncrementKernel<paddle::platform::CPUPlace, int>,
-    ops::IncrementKernel<paddle::platform::CPUPlace, int64_t>);
+REGISTER_OPERATOR(increment, ops::IncrementOp, ops::IncrementInferShape,
+                  ops::IncrementOpMaker, ops::IncrementGradOpMaker);
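The new gradient maker above no longer emits a scale op that propagates dL/dOut; it emits another increment with the step negated, so the "backward" op literally steps the counter back. A minimal standalone sketch of that inverse relationship, using plain floats rather than Paddle tensors:

#include <cassert>

int main() {
  float step = 2.0f;
  float x = 5.0f;
  float out = x + step;          // forward: increment by step
  float x_back = out + (-step);  // backward: increment by -step
  assert(x_back == x);           // recovers the forward input exactly
  return 0;
}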
@@ -183,9 +183,11 @@ class LinearChainCRFOp : public framework::OperatorWithKernel {
  protected:
   // Explicitly set that the data type of computation kernel of linear_chain_crf
   // is determined by its input "Emission".
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::ToDataType(ctx.Input<LoDTensor>("Emission")->type());
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<LoDTensor>("Emission")->type()),
+        ctx.device_context());
   }
 };

@@ -240,10 +242,13 @@ class LinearChainCRFGradOp : public framework::OperatorWithKernel {
  protected:
   // Explicitly set that the data type of output of the linear_chain_crf_grad
   // operator is determined by its input: gradients of LogLikelihood.
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::ToDataType(
-        ctx.Input<LoDTensor>(framework::GradVarName("LogLikelihood"))->type());
+    return framework::OpKernelType(
+        framework::ToDataType(
+            ctx.Input<LoDTensor>(framework::GradVarName("LogLikelihood"))
+                ->type()),
+        ctx.device_context());
   }
 };
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/lod_tensor_array.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
class LoDArrayLengthOp : public framework::OperatorBase {
public:
LoDArrayLengthOp(const std::string &type,
const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: OperatorBase(type, inputs, outputs, attrs) {}
void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override {
auto &x = scope.FindVar(Input("X"))->Get<framework::LoDTensorArray>();
auto &out =
*scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensor>();
out.Resize({1});
auto cpu = platform::CPUPlace();
*out.mutable_data<int64_t>(cpu) = static_cast<int64_t>(x.size());
}
};
class LoDArrayLengthProtoMaker : public framework::OpProtoAndCheckerMaker {
public:
LoDArrayLengthProtoMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "(LoDTensorArray) The input tensor array.");
AddOutput("Out", "(Tensor) 1x1 CPU Tensor of length, int64_t");
AddComment(R"DOC(Get the length of lod tensor array
Out = len(X)
NOTE: The output is a CPU Tensor since the control variable should be only in
CPU and the length of LoDTensorArray should be used as control variables.
)DOC");
}
};
class LoDArrayLengthInferShape : public framework::InferShapeBase {
public:
void operator()(framework::InferShapeContext *context) const override {
PADDLE_ENFORCE(context->HasInput("X"));
PADDLE_ENFORCE(context->HasOutput("Out"));
context->SetOutputDim("Out", {1});
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OPERATOR(lod_array_length, ops::LoDArrayLengthOp,
ops::LoDArrayLengthInferShape, ops::LoDArrayLengthProtoMaker,
paddle::framework::EmptyGradOpMaker);
@@ -28,6 +28,7 @@ class LoDRankTableOp : public framework::OperatorBase {
     auto x = scope.FindVar(Input("X"))->Get<framework::LoDTensor>();
     auto *out =
         scope.FindVar(Output("Out"))->GetMutable<framework::LoDRankTable>();
+    VLOG(10) << "Level = " << static_cast<size_t>(Attr<int>("level"));
     out->Reset(x.lod(), static_cast<size_t>(Attr<int>("level")));
   }
 };

@@ -65,7 +66,8 @@ class LoDRankTableInferVarType : public framework::VarTypeInference {
   void operator()(const framework::OpDescBind &op_desc,
                   framework::BlockDescBind *block) const override {
     for (auto &o : op_desc.Output("Out")) {
-      block->Var(o)->SetType(framework::VarDesc::LOD_RANK_TABLE);
+      block->FindRecursiveOrCreateVar(o)->SetType(
+          framework::VarDesc::LOD_RANK_TABLE);
     }
   }
 };
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/lod_rank_table.h"
#include "paddle/framework/lod_tensor_array.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
struct CopyRange {
size_t begin;
size_t end;
};
class LoDTensorToArrayOp : public framework::OperatorBase {
public:
LoDTensorToArrayOp(const std::string &type,
const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: OperatorBase(type, inputs, outputs, attrs) {}
void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override {
auto &x = scope.FindVar(Input("X"))->Get<framework::LoDTensor>();
auto &rank_table =
scope.FindVar(Input("RankTable"))->Get<framework::LoDRankTable>();
auto &out =
*scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensorArray>();
auto &items = rank_table.items();
auto max_seq_len = items[0].length;
auto rank_level = rank_table.level();
out.resize(max_seq_len);
std::vector<std::vector<CopyRange>> copy_ranges(max_seq_len);
// set out[i] lod
for (size_t t = 0; t < max_seq_len; t++) {
auto &lod = *out[t].mutable_lod();
lod.clear();
for (auto &item : items) {
if (t >= item.length) {
break;
}
size_t start_idx = x.lod()[rank_level][item.index] + t;
auto lod_and_offset = framework::GetSubLoDAndAbsoluteOffset(
x.lod(), start_idx, start_idx + 1, rank_level + 1);
auto &lod_length = lod_and_offset.first;
framework::AppendLoD(&lod, lod_length);
size_t start_offset = lod_and_offset.second.first;
size_t end_offset = lod_and_offset.second.second;
copy_ranges[t].emplace_back(CopyRange{start_offset, end_offset});
}
}
for (size_t i = 0; i < max_seq_len; ++i) {
auto &ranges = copy_ranges[i];
size_t height = std::accumulate(
ranges.begin(), ranges.end(), 0UL,
[](size_t a, const CopyRange &b) { return a + b.end - b.begin; });
auto x_dim = x.dims();
x_dim[0] = static_cast<int64_t>(height);
out[i].Resize(x_dim);
out[i].mutable_data(x.place(), x.type());
size_t offset = 0;
for (auto &each_range : ranges) {
size_t len = each_range.end - each_range.begin;
if (len == 0) {
continue;
}
// out[i][offset: offset+len] = x[each_range.begin: each_range.end]
out[i]
.Slice(static_cast<int>(offset), static_cast<int>(offset + len))
.CopyFrom(x.Slice(static_cast<int>(each_range.begin),
static_cast<int>(each_range.end)),
x.place(), dev_ctx);
offset += len;
}
}
}
};
class LoDTensorToArrayOpProtoMaker : public framework::OpProtoAndCheckerMaker {
public:
LoDTensorToArrayOpProtoMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "");
AddInput("RankTable", "");
AddOutput("Out", "");
AddComment("");
}
};
class LoDTensorToArrayInferShape : public framework::InferShapeBase {
public:
void operator()(framework::InferShapeContext *context) const override {
PADDLE_ENFORCE(context->HasInput("X"),
"Input(X) of LoDTensorToArrayOp should not be null.");
PADDLE_ENFORCE(
context->HasInput("RankTable"),
"Input(RankTable) of LoDTensorToArrayOp should not be null.");
PADDLE_ENFORCE(context->HasOutput("Out"),
"Output(Out) of LoDTensorToArrayOp should not be null.");
auto x_dim = context->GetInputDim("X");
    // The first dim of each LoDTensor in Output can only be set at run-time;
    // we still have to Resize each LoDTensor in Output here.
context->SetOutputDim("Out", x_dim);
}
};
class LoDTensorToArrayInferVarType : public framework::VarTypeInference {
public:
void operator()(const framework::OpDescBind &op_desc,
framework::BlockDescBind *block) const override {
for (auto &out_var : op_desc.Output("Out")) {
block->Var(out_var)->SetType(framework::VarDesc::LOD_TENSOR_ARRAY);
}
}
};
class LoDTensorToArrayGradMaker : public framework::SingleGradOpDescMaker {
public:
using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
protected:
std::unique_ptr<framework::OpDescBind> Apply() const override {
auto *grad_op = new framework::OpDescBind();
grad_op->SetType("array_to_lod_tensor");
grad_op->SetInput("X", OutputGrad("Out"));
grad_op->SetInput("RankTable", Input("RankTable"));
grad_op->SetOutput("Out", InputGrad("X"));
grad_op->SetAttrMap(Attrs());
return std::unique_ptr<framework::OpDescBind>(grad_op);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OPERATOR(lod_tensor_to_array, ops::LoDTensorToArrayOp,
ops::LoDTensorToArrayOpProtoMaker,
ops::LoDTensorToArrayInferShape,
ops::LoDTensorToArrayInferVarType,
ops::LoDTensorToArrayGradMaker);
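To make the copy-range logic above concrete, here is a simplified standalone sketch of the gather pattern LoDTensorToArrayOp implements: out[t] collects the t-th step of every sequence that is still alive at step t, walking sequences in rank-table order (longest first). It assumes one LoD level, rank level 0, and scalar time steps instead of sub-sequence slices, so each CopyRange has width 1.

#include <cstdio>
#include <vector>

int main() {
  // x = three sequences, already sorted by length: [a0 a1 a2 | b0 b1 | c0]
  std::vector<int> x = {0, 1, 2, 10, 11, 20};
  std::vector<size_t> starts = {0, 3, 5};   // sequence offsets into x
  std::vector<size_t> lengths = {3, 2, 1};  // rank_table item lengths
  size_t max_seq_len = lengths[0];
  std::vector<std::vector<int>> out(max_seq_len);
  for (size_t t = 0; t < max_seq_len; ++t) {
    // lengths is sorted descending, so stopping at the first dead
    // sequence mirrors the `break` in the real operator.
    for (size_t i = 0; i < lengths.size() && t < lengths[i]; ++i) {
      out[t].push_back(x[starts[i] + t]);
    }
  }
  // Prints: "0 10 20", "1 11", "2"
  for (size_t t = 0; t < out.size(); ++t) {
    for (int v : out[t]) std::printf("%d ", v);
    std::printf("\n");
  }
  return 0;
}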
@@ -41,9 +41,11 @@ class LookupTableOp : public framework::OperatorWithKernel {
   }

  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::ToDataType(ctx.Input<LoDTensor>("W")->type());
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<LoDTensor>("W")->type()),
+        ctx.device_context());
   }
 };

@@ -97,9 +99,11 @@ class LookupTableOpGrad : public framework::OperatorWithKernel {
   }

  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::ToDataType(ctx.Input<LoDTensor>("W")->type());
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<LoDTensor>("W")->type()),
+        ctx.device_context());
   }
 };
......
@@ -74,8 +74,9 @@ class LookupTableCUDAKernel : public framework::OpKernel<T> {
     dim3 threads(128, 8);
     dim3 grids(8, 1);
-    LookupTable<T, 128, 8,
-                8><<<grids, threads, 0, context.device_context().stream()>>>(
+    LookupTable<
+        T, 128, 8,
+        8><<<grids, threads, 0, context.cuda_device_context().stream()>>>(
         output, table, ids, N, K, D);
   }
 };

@@ -135,7 +136,7 @@ class LookupTableGradCUDAKernel : public framework::OpKernel<T> {
     dim3 grids(8, 1);
     LookupTableGrad<
         T, 128, 8,
-        8><<<grids, threads, 0, context.device_context().stream()>>>(
+        8><<<grids, threads, 0, context.cuda_device_context().stream()>>>(
         d_table, d_output, ids, N, K, D);
   }
......
@@ -24,6 +24,11 @@ class LSTMOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("Input"),
                    "Input(Input) of LSTM should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Weight"),
+                   "Input(Weight) of LSTM should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Bias"),
+                   "Input(Bias) of LSTM should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Hidden"),
                    "Output(Hidden) of LSTM should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Cell"),

@@ -59,11 +64,13 @@ class LSTMOp : public framework::OperatorWithKernel {
                       "The second dimension of Input(Weight) "
                       "should be 4 * %d.",
                       frame_size);
+
     auto b_dims = ctx->GetInputDim("Bias");
     PADDLE_ENFORCE_EQ(b_dims.size(), 2, "The rank of Input(Bias) should be 2.");
     PADDLE_ENFORCE_EQ(b_dims[0], 1,
                       "The first dimension of Input(Bias) should be 1.");
-    if (ctx->Attrs().Get<bool>("usePeepholes")) {
+
+    if (ctx->Attrs().Get<bool>("use_peepholes")) {
       PADDLE_ENFORCE_EQ(b_dims[1], 7 * frame_size,
                         "The second dimension of Input(Bias) should be "
                         "7 * %d if enable peepholes connection",

@@ -74,6 +81,7 @@ class LSTMOp : public framework::OperatorWithKernel {
                         "4 * %d if disable peepholes connection",
                         frame_size);
     }
+
     framework::DDim out_dims({in_dims[0], frame_size});
     ctx->SetOutputDim("Hidden", out_dims);
     ctx->SetOutputDim("Cell", out_dims);

@@ -84,10 +92,11 @@ class LSTMOp : public framework::OperatorWithKernel {
   }

  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::ToDataType(
-        ctx.Input<framework::LoDTensor>("Input")->type());
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<framework::LoDTensor>("Input")->type()),
+        ctx.device_context());
   }
 };

@@ -117,14 +126,13 @@ class LSTMOpMaker : public framework::OpProtoAndCheckerMaker {
     AddInput("Bias",
              "(Tensor) the learnable weights, which contains two parts: "
              "input-hidden bias weight and peephole connections weight if "
-             "setting `usePeepholes` True. "
-             "1. `usePeepholes = False` "
+             "setting `use_peepholes` True. "
+             "1. `use_peepholes = False` "
              " - The shape is (1 x 4D). "
              " - Bias = {b_c, b_i, b_f, b_o}."
-             "2. `usePeepholes = True` "
+             "2. `use_peepholes = True` "
              " - The shape is (1 x 7D). "
-             " - Bias = {b_c, b_i, b_f, b_o, W_ic, W_fc, W_oc}.")
-        .AsDispensable();
+             " - Bias = {b_c, b_i, b_f, b_o, W_ic, W_fc, W_oc}.");
     AddOutput("Hidden",
               "(LoDTensor) the hidden state of LSTM operator. "
               "The shape is (T x D), and lod is the same with the `Input`.");

@@ -144,29 +152,32 @@ class LSTMOpMaker : public framework::OpProtoAndCheckerMaker {
               "(LoDTensor) This LoDTensor is obtained in the forward and used "
               "in the backward.")
         .AsIntermediate();
-    AddAttr<bool>("usePeepholes",
+    AddAttr<bool>("use_peepholes",
                   "(bool, default: True) "
                   "whether to enable diagonal/peephole connections.")
         .SetDefault(true);
-    AddAttr<bool>("isReverse",
+    AddAttr<bool>("is_reverse",
                   "(bool, default: False) "
                   "whether to compute reversed LSTM.")
         .SetDefault(false);
     AddAttr<std::string>(
-        "gateActivation",
-        "(string, default sigmoid)"
+        "gate_activation",
+        "(string, default: sigmoid)"
         "The activation for input gate, forget gate and output "
         "gate, `sigmoid` by default.")
-        .SetDefault("sigmoid");
-    AddAttr<std::string>("cellActivation",
-                         "(string, default tanh)"
+        .SetDefault("sigmoid")
+        .InEnum({"sigmoid", "tanh", "relu", "identity"});
+    AddAttr<std::string>("cell_activation",
+                         "(string, default: tanh)"
                          "The activation for cell output, `tanh` by default.")
-        .SetDefault("tanh");
-    AddAttr<std::string>("candidateActivation",
-                         "(string, default tanh)"
+        .SetDefault("tanh")
+        .InEnum({"sigmoid", "tanh", "relu", "identity"});
+    AddAttr<std::string>("candidate_activation",
+                         "(string, default: tanh)"
                          "The activation for candidate hidden state, "
                          "`tanh` by default.")
-        .SetDefault("tanh");
+        .SetDefault("tanh")
+        .InEnum({"sigmoid", "tanh", "relu", "identity"});
     AddComment(R"DOC(
 Long-Short Term Memory (LSTM) Operator.

@@ -202,7 +213,7 @@ are the cell input and cell output activation functions and `tanh` is usually
 used for them. \f$\tilde{c_t}\f$ is also called candidate hidden state,
 which is computed based on the current input and the previous hidden state.

-Set usePeepholes False to disable peephole connection
+Set `use_peepholes` False to disable peephole connection
 (http://www.bioinf.jku.at/publications/older/2604.pdf). The formula
 is omitted here.

@@ -225,30 +236,35 @@ class LSTMGradOp : public framework::OperatorWithKernel {
                    "Input(Hidden) of LSTM should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("Cell"),
                    "Input(Cell) of LSTM should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Weight"),
+                   "Input(Weight) of LSTM should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Bias"),
+                   "Input(Bias) of LSTM should not be null.");

     PADDLE_ENFORCE(ctx->HasInput("BatchGate"),
                    "Input(BatchGate) of LSTM should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("BatchCellPreAct"),
                    "Input(BatchCellPreAct) of LSTM should not be null.");

-    auto in_g_name = framework::GradVarName("Input");
-    if (ctx->HasOutput(in_g_name))
-      ctx->SetOutputDim(in_g_name, ctx->GetInputDim("Input"));
-
-    auto w_g_name = framework::GradVarName("Weight");
-    if (ctx->HasOutput(w_g_name))
-      ctx->SetOutputDim(w_g_name, ctx->GetInputDim("Weight"));
-
-    auto b_g_name = framework::GradVarName("Bias");
-    if (ctx->HasOutput(b_g_name))
-      ctx->SetOutputDim(b_g_name, ctx->GetInputDim("Bias"));
+    auto SetOutGradDim = [&ctx](const std::string& name) {
+      auto g_name = framework::GradVarName(name);
+      if (ctx->HasOutput(g_name))
+        ctx->SetOutputDim(g_name, ctx->GetInputDim(name));
+    };
+
+    SetOutGradDim("Input");
+    SetOutGradDim("Weight");
+    SetOutGradDim("Bias");
+    SetOutGradDim("H0");
+    SetOutGradDim("C0");
   }

  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::ToDataType(
-        ctx.Input<framework::LoDTensor>("Input")->type());
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<framework::LoDTensor>("Input")->type()),
+        ctx.device_context());
   }
 };
......
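Several of the checks above hinge on the bias layout: with use_peepholes the bias is (1 x 7D), the four gate biases followed by the three peephole vectors, which is exactly how the kernels later carve checkIg/checkFg/checkOg out of the raw bias pointer. A minimal standalone sketch of that pointer arithmetic, using a plain vector in place of a Tensor:

#include <cassert>
#include <vector>

int main() {
  const int D = 3;                         // frame_size
  std::vector<float> bias(7 * D, 0.0f);    // {b_c,b_i,b_f,b_o,W_ic,W_fc,W_oc}
  float* data = bias.data();
  float* check_ig = data + 4 * D;          // W_ic, as in lstm_value.checkIg
  float* check_fg = check_ig + D;          // W_fc, as in lstm_value.checkFg
  float* check_og = check_fg + D;          // W_oc, as in lstm_value.checkOg
  assert(check_og + D == data + 7 * D);    // the layout exactly fills 7D
  return 0;
}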
@@ -28,6 +28,15 @@ template <typename T, int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
 using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;

+template <typename Place, typename T>
+inline void ReorderInitState(const platform::DeviceContext& ctx,
+                             const framework::Tensor& src, const size_t* index,
+                             framework::Tensor* dst, bool indexed_src) {
+  math::CopyMatrixRowsFunctor<Place, T> row_shuffle;
+  dst->mutable_data<T>(src.dims(), ctx.GetPlace());
+  row_shuffle(ctx, src, index, *dst, indexed_src);
+}
+
 template <typename Place, typename T>
 class LSTMKernel : public framework::OpKernel<T> {
  public:

@@ -36,6 +45,9 @@ class LSTMKernel : public framework::OpKernel<T> {
     auto* weight = ctx.Input<Tensor>("Weight");
     auto* bias = ctx.Input<Tensor>("Bias");

+    auto* hidden_t0 = ctx.Input<Tensor>("H0");
+    auto* cell_t0 = ctx.Input<Tensor>("C0");
+
     auto* batch_gate = ctx.Output<LoDTensor>("BatchGate");
     batch_gate->mutable_data<T>(ctx.GetPlace());
     auto* hidden_out = ctx.Output<LoDTensor>("Hidden");

@@ -43,12 +55,7 @@ class LSTMKernel : public framework::OpKernel<T> {
     auto* cell_out = ctx.Output<LoDTensor>("Cell");
     cell_out->mutable_data<T>(ctx.GetPlace());

-    // Now the function ShareLoD in InferShape is not implemented.
-    // So copy LoD here.
-    ctx.ShareLoD("Input", "Hidden");
-    ctx.ShareLoD("Input", "Cell");
-
-    bool is_reverse = ctx.Attr<bool>("isReverse");
+    bool is_reverse = ctx.Attr<bool>("is_reverse");
     math::LoDTensor2BatchFunctor<Place, T> to_batch;
     auto& device_ctx = ctx.device_context();
     to_batch(device_ctx, *input, *batch_gate, true, is_reverse);

@@ -71,7 +78,7 @@ class LSTMKernel : public framework::OpKernel<T> {
     }

     math::LstmMetaValue<T> lstm_value;
-    if (bias) {
+    if (bias && ctx.Attr<bool>("use_peepholes")) {
       T* bias_data = const_cast<T*>(bias->data<T>());
       // the code style in LstmMetaValue will be updated later.

@@ -84,6 +91,16 @@ class LSTMKernel : public framework::OpKernel<T> {
       lstm_value.checkOg = nullptr;
     }
     lstm_value.prevStateValue = nullptr;
+    Tensor ordered_c0;
+    const size_t* order = batch_gate->lod()[2].data();
+    if (cell_t0) {
+      // Batch computing for LSTM reorders the input sequences by length,
+      // so the initial cell state also needs to be reordered.
+      ReorderInitState<Place, T>(device_ctx, *cell_t0, order, &ordered_c0,
+                                 true);
+      lstm_value.prevStateValue = ordered_c0.data<T>();
+    }

     // Use local variables here.
     LoDTensor batch_hidden, batch_cell;

@@ -94,9 +111,9 @@ class LSTMKernel : public framework::OpKernel<T> {
     auto batch_starts = batch_gate->lod()[0];
     size_t num_batch = batch_starts.size() - 1;
-    auto gate_act = ctx.Attr<std::string>("gateActivation");
-    auto cell_act = ctx.Attr<std::string>("cellActivation");
-    auto cand_act = ctx.Attr<std::string>("candidateActivation");
+    auto gate_act = ctx.Attr<std::string>("gate_activation");
+    auto cell_act = ctx.Attr<std::string>("cell_activation");
+    auto cand_act = ctx.Attr<std::string>("candidate_activation");

     for (size_t n = 0; n < num_batch; n++) {
       int bstart = static_cast<int>(batch_starts[n]);

@@ -109,15 +126,28 @@ class LSTMKernel : public framework::OpKernel<T> {
       int cur_batch_size = bend - bstart;

-      if (n != 0) {
+      if (n > 0) {
         int pre_h_start = static_cast<int>(batch_starts[n - 1]);
         int pre_h_end = pre_h_start + cur_batch_size;
         auto pre_hidden_t = batch_hidden.Slice(pre_h_start, pre_h_end);
         math::matmul<Place, T>(device_ctx, pre_hidden_t, false, *weight, false,
                                static_cast<T>(1.0), &gate_t,
                                static_cast<T>(1.0));
+      } else if (hidden_t0) {
+        // If n == 0 and there is no initial hidden state (H0 is all zeros),
+        // the calculation of W_h * H0 is skipped.
+        // If n == 0 and an initial hidden state is given, calculate W_h * H0.
+        // Batch computing for LSTM reorders the input sequences by length,
+        // so the initial hidden state also needs to be reordered.
+        Tensor ordered_h0;
+        ReorderInitState<Place, T>(device_ctx, *hidden_t0, order, &ordered_h0,
+                                   true);
+        math::matmul<Place, T>(device_ctx, ordered_h0, false, *weight, false,
+                               static_cast<T>(1.0), &gate_t,
+                               static_cast<T>(1.0));
       }
-      // else if : FIXME support the initial hidden and cell

       lstm_value.gateValue = gate_t.data<T>();
       lstm_value.outputValue = out_t.data<T>();

@@ -160,6 +190,12 @@ class LSTMGradKernel : public framework::OpKernel<T> {
     auto* weight_g = ctx.Output<Tensor>(framework::GradVarName("Weight"));
     auto* bias_g = ctx.Output<Tensor>(framework::GradVarName("Bias"));

+    auto* h0 = ctx.Input<Tensor>("H0");
+    auto* c0 = ctx.Input<Tensor>("C0");
+
+    auto* h0_g = ctx.Output<Tensor>(framework::GradVarName("H0"));
+    auto* c0_g = ctx.Output<Tensor>(framework::GradVarName("C0"));
+
     auto& device_ctx = ctx.device_context();
     math::SetConstant<Place, T> zero;
     if (weight_g) {

@@ -167,13 +203,25 @@ class LSTMGradKernel : public framework::OpKernel<T> {
       zero(device_ctx, weight_g, static_cast<T>(0.0));
     }

+    // ordered_h0/ordered_c0 hold the reordered initial hidden/cell states;
+    // ordered_h0_g/ordered_c0_g hold the reordered gradients of those
+    // initial states.
+    Tensor ordered_h0, ordered_c0, ordered_h0_g, ordered_c0_g;
+    const size_t* order = batch_gate->lod()[2].data();
+
+    if (c0) {
+      ReorderInitState<Place, T>(device_ctx, *c0, order, &ordered_c0, true);
+    }
+    if (c0 && c0_g) {
+      ordered_c0_g.mutable_data<T>(c0_g->dims(), ctx.GetPlace());
+    }
+
     auto in_dims = input->dims();
     auto out_dims = hidden_g->dims();
     int frame_size = static_cast<int>(in_dims[1] / 4);
     PADDLE_ENFORCE_EQ(frame_size, out_dims[1]);

     math::LstmMetaValue<T> lstm_value;
-    if (bias) {
+    if (bias && ctx.Attr<bool>("use_peepholes")) {
       T* bias_data = const_cast<T*>(bias->data<T>());
       lstm_value.checkIg = bias_data + 4 * frame_size;
       lstm_value.checkFg = lstm_value.checkIg + frame_size;

@@ -185,9 +233,13 @@ class LSTMGradKernel : public framework::OpKernel<T> {
     }

     math::LstmMetaGrad<T> lstm_grad;
+
     if (bias && bias_g) {
-      T* bias_g_data = const_cast<T*>(bias_g->mutable_data<T>(ctx.GetPlace()));
+      bias_g->mutable_data<T>(ctx.GetPlace());
       zero(device_ctx, bias_g, static_cast<T>(0.0));
+    }
+    if (bias && bias_g && ctx.Attr<bool>("use_peepholes")) {
+      T* bias_g_data = bias_g->data<T>();
       lstm_grad.checkIgGrad = bias_g_data + 4 * frame_size;
       lstm_grad.checkFgGrad = lstm_grad.checkIgGrad + frame_size;
       lstm_grad.checkOgGrad = lstm_grad.checkFgGrad + frame_size;

@@ -199,36 +251,30 @@ class LSTMGradKernel : public framework::OpKernel<T> {
     math::LoDTensor2BatchFunctor<Place, T> to_batch;

-    // use the local variable as here.
-    LoDTensor batch_hidden;
-    batch_hidden.mutable_data<T>(out_dims, ctx.GetPlace());
-    batch_hidden.set_lod(batch_gate->lod());
-    to_batch(device_ctx, *hidden_out, batch_hidden, false);
-
-    LoDTensor batch_hidden_g;
-    batch_hidden_g.mutable_data<T>(out_dims, ctx.GetPlace());
-    batch_hidden_g.set_lod(batch_gate->lod());
-    to_batch(device_ctx, *hidden_g, batch_hidden_g, false);
-
-    LoDTensor batch_cell;
-    batch_cell.mutable_data<T>(out_dims, ctx.GetPlace());
-    batch_cell.set_lod(batch_gate->lod());
-    to_batch(device_ctx, *cell_out, batch_cell, false);
-
-    LoDTensor batch_cell_g;
+    auto ToBatch = [&batch_gate, &to_batch](
+        const platform::DeviceContext& ctx, const framework::LoDTensor& src,
+        const framework::DDim& dims, framework::LoDTensor& dst) {
+      dst.mutable_data<T>(dims, ctx.GetPlace());
+      dst.set_lod(batch_gate->lod());
+      to_batch(ctx, src, dst, false);
+    };
+
+    LoDTensor batch_hidden, batch_hidden_g, batch_cell;
+    ToBatch(device_ctx, *hidden_out, out_dims, batch_hidden);
+    ToBatch(device_ctx, *hidden_g, out_dims, batch_hidden_g);
+    ToBatch(device_ctx, *cell_out, out_dims, batch_cell);
+
+    LoDTensor batch_cell_g, batch_gate_g;
     batch_cell_g.mutable_data<T>(out_dims, ctx.GetPlace());
-    batch_cell_g.set_lod(batch_gate->lod());
     // TODO(qingqing) support the case output cell has gradient.
     // to_batch(device_ctx, *cell_g, batch_cell_g, false);
     zero(device_ctx, &batch_cell_g, static_cast<T>(0.0));

-    LoDTensor batch_gate_g;
     batch_gate_g.mutable_data<T>(batch_gate->dims(), ctx.GetPlace());
     batch_gate_g.set_lod(batch_gate->lod());

-    auto gate_act = ctx.Attr<std::string>("gateActivation");
-    auto cell_act = ctx.Attr<std::string>("cellActivation");
-    auto cand_act = ctx.Attr<std::string>("candidateActivation");
+    auto gate_act = ctx.Attr<std::string>("gate_activation");
+    auto cell_act = ctx.Attr<std::string>("cell_activation");
+    auto cand_act = ctx.Attr<std::string>("candidate_activation");

     auto batch_starts = batch_gate->lod()[0];
     size_t num_batch = batch_starts.size() - 1;

@@ -250,15 +296,15 @@ class LSTMGradKernel : public framework::OpKernel<T> {
       lstm_grad.gateGrad = gate_g.data<T>();
       lstm_grad.outputGrad = out_g.data<T>();

-      if (n) {
+      if (n > 0) {
         int bstart_pre = static_cast<int>(batch_starts[n - 1]);
         Tensor cell_pre = batch_cell.Slice(bstart_pre, bstart);
         Tensor cell_pre_g = batch_cell_g.Slice(bstart_pre, bstart);
         lstm_value.prevStateValue = cell_pre.data<T>();
         lstm_grad.prevStateGrad = cell_pre_g.data<T>();
       } else {
-        lstm_value.prevStateValue = nullptr;
-        lstm_grad.prevStateGrad = nullptr;
+        lstm_value.prevStateValue = c0 ? ordered_c0.data<T>() : nullptr;
+        lstm_grad.prevStateGrad = c0_g ? ordered_c0_g.data<T>() : nullptr;
       }

       int cur_batch_size = bend - bstart;

@@ -266,7 +312,7 @@ class LSTMGradKernel : public framework::OpKernel<T> {
           device_ctx, lstm_value, lstm_grad, frame_size, cur_batch_size,
           gate_act, cell_act, cand_act);

-      if (n != 0) {
+      if (n > 0) {
         int pre_h_start = static_cast<int>(batch_starts[n - 1]);
         int pre_h_end = pre_h_start + cur_batch_size;
         auto pre_hidden_g = batch_hidden_g.Slice(pre_h_start, pre_h_end);

@@ -280,6 +326,19 @@ class LSTMGradKernel : public framework::OpKernel<T> {
                                  static_cast<T>(1.0), weight_g,
                                  static_cast<T>(1.0));
         }
+      } else {
+        if (h0 && weight_g) {
+          ReorderInitState<Place, T>(device_ctx, *h0, order, &ordered_h0, true);
+          math::matmul<Place, T>(device_ctx, ordered_h0, true, gate_g, false,
+                                 static_cast<T>(1.0), weight_g,
+                                 static_cast<T>(1.0));
+        }
+        if (h0 && h0_g) {
+          ordered_h0_g.mutable_data<T>(h0_g->dims(), ctx.GetPlace());
+          math::matmul<Place, T>(device_ctx, gate_g, false, *weight, true,
+                                 static_cast<T>(1.0), &ordered_h0_g,
+                                 static_cast<T>(0.0));
+        }
       }
     }

@@ -302,6 +361,13 @@ class LSTMGradKernel : public framework::OpKernel<T> {
       math::gemv<Place, T>(device_ctx, true, m, n, 1., batch_gate_g.data<T>(),
                            ones.data<T>(), 0., bias_g->data<T>());
     }
+
+    if (h0 && h0_g) {
+      ReorderInitState<Place, T>(device_ctx, ordered_h0_g, order, h0_g, false);
+    }
+    if (c0 && c0_g) {
+      ReorderInitState<Place, T>(device_ctx, ordered_c0_g, order, c0_g, false);
+    }
   }
 };
......
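The new ReorderInitState helper is used in two directions: gather during the forward and backward passes (indexed_src = true) and scatter when restoring h0_g/c0_g at the end of the backward pass (indexed_src = false). A standalone sketch of that round trip, under the assumption that CopyMatrixRowsFunctor gathers dst[i] = src[index[i]] when indexed_src is true and scatters dst[index[i]] = src[i] otherwise:

#include <cassert>
#include <cstddef>
#include <vector>

void Reorder(const std::vector<int>& src, const size_t* index,
             std::vector<int>* dst, bool indexed_src) {
  dst->resize(src.size());
  for (size_t i = 0; i < src.size(); ++i) {
    if (indexed_src) {
      (*dst)[i] = src[index[i]];    // gather: forward/backward compute order
    } else {
      (*dst)[index[i]] = src[i];    // scatter: restore the original order
    }
  }
}

int main() {
  std::vector<int> h0 = {10, 20, 30};     // initial states, original order
  size_t order[] = {2, 0, 1};             // sequences sorted by length
  std::vector<int> ordered, restored;
  Reorder(h0, order, &ordered, true);     // ordered = {30, 10, 20}
  Reorder(ordered, order, &restored, false);
  assert(restored == h0);                 // round-trips back to h0
  return 0;
}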
@@ -34,10 +34,10 @@ class LstmUnitOp : public framework::OperatorWithKernel {
     auto c_prev_dims = ctx->GetInputDim("C_prev");

     PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank must be 2.");
-    PADDLE_ENFORCE(x_dims[0] == c_prev_dims[0],
-                   "Batch size of inputs and states must be equal");
-    PADDLE_ENFORCE(x_dims[1] == c_prev_dims[1] * 4,
-                   "Dimension of FC should equal to prev state * 4");
+    PADDLE_ENFORCE_EQ(x_dims[0], c_prev_dims[0],
+                      "Batch size of inputs and states must be equal");
+    PADDLE_ENFORCE_EQ(x_dims[1], c_prev_dims[1] * 4,
+                      "Dimension of FC should equal to prev state * 4");

     int b_size = c_prev_dims[0];  // batch size
     int s_dim = c_prev_dims[1];   // state dim
......
@@ -13,7 +13,7 @@ if(WITH_GPU)
     nv_library(context_project SRCS context_project.cc context_project.cu DEPS device_context)
     nv_library(sequence2batch SRCS sequence2batch.cc sequence2batch.cu DEPS device_context)
     nv_library(lstm_compute SRCS lstm_compute.cc lstm_compute.cu DEPS device_context activation_functions)
-    nv_library(gru_compute SRCS gru_compute.cc gru_compute.cu DEPS device_context activation_functions)
+    nv_library(gru_compute SRCS gru_compute.cc gru_compute.cu DEPS device_context activation_functions math_function)
 else()
     cc_library(math_function SRCS math_function.cc im2col.cc DEPS cblas device_context operator)
     cc_library(selected_rows_functor SRCS selected_rows_functor.cc DEPS selected_rows math_function)
......
@@ -52,9 +52,9 @@ void naive_lstm_forward_one_sequence(Op op, LstmMetaValue<T> value,
     rValueIg = valueIg[i];
     rValueFg = valueFg[i];
     rValueOg = valueOg[i];
-    rCheckI = value.checkIg[i];
-    rCheckF = value.checkFg[i];
-    rCheckO = value.checkOg[i];
+    rCheckI = value.checkIg ? value.checkIg[i] : 0;
+    rCheckF = value.checkFg ? value.checkFg[i] : 0;
+    rCheckO = value.checkOg ? value.checkOg[i] : 0;

     if (value.prevStateValue) {
       rPrevState = value.prevStateValue[i];

@@ -114,9 +114,9 @@ void naive_lstm_backward_one_sequence(Op op, LstmMetaValue<T> value,
     rValueIg = valueIg[i];
     rValueFg = valueFg[i];
     rValueOg = valueOg[i];
-    rCheckI = value.checkIg[i];
-    rCheckF = value.checkFg[i];
-    rCheckO = value.checkOg[i];
+    rCheckI = value.checkIg ? value.checkIg[i] : 0;
+    rCheckF = value.checkFg ? value.checkFg[i] : 0;
+    rCheckO = value.checkOg ? value.checkOg[i] : 0;
     rState = value.stateValue[i];
     rStateAtv = value.stateActiveValue[i];
     rOutputGrad = grad.outputGrad[i];

@@ -155,9 +155,9 @@ void avx_lstm_forward_one_sequence(Op op, LstmMetaValue<T> value, int frameSize,
   __m256 rValueIg;
   __m256 rValueFg;
   __m256 rValueOg;
-  __m256 rCheckI;
-  __m256 rCheckF;
-  __m256 rCheckO;
+  __m256 rCheckI = _mm256_set1_ps(0.0f);
+  __m256 rCheckF = _mm256_set1_ps(0.0f);
+  __m256 rCheckO = _mm256_set1_ps(0.0f);
   __m256 rState;
   __m256 rPrevState = _mm256_set1_ps(0.0f);
   __m256 rStateAtv;

@@ -173,9 +173,11 @@ void avx_lstm_forward_one_sequence(Op op, LstmMetaValue<T> value, int frameSize,
     rValueIg = valueIg[i];
     rValueFg = valueFg[i];
     rValueOg = valueOg[i];
-    rCheckI = ((__m256 *)value.checkIg)[i];
-    rCheckF = ((__m256 *)value.checkFg)[i];
-    rCheckO = ((__m256 *)value.checkOg)[i];
+    if (value.checkIg) {
+      rCheckI = ((__m256 *)value.checkIg)[i];
+      rCheckF = ((__m256 *)value.checkFg)[i];
+      rCheckO = ((__m256 *)value.checkOg)[i];
+    }

     if (value.prevStateValue) {
       rPrevState = ((__m256 *)value.prevStateValue)[i];

@@ -216,9 +218,9 @@ void avx_lstm_backward_one_sequence(Op op, LstmMetaValue<T> value,
   __m256 rState;
   __m256 rStateAtv;
   __m256 rOutputGrad;
-  __m256 rCheckI;
-  __m256 rCheckF;
-  __m256 rCheckO;
+  __m256 rCheckI = _mm256_set1_ps(0.0f);
+  __m256 rCheckF = _mm256_set1_ps(0.0f);
+  __m256 rCheckO = _mm256_set1_ps(0.0f);
   __m256 rCheckIGrad;
   __m256 rCheckFGrad;
   __m256 rCheckOGrad;

@@ -237,9 +239,11 @@ void avx_lstm_backward_one_sequence(Op op, LstmMetaValue<T> value,
     rValueIg = valueIg[i];
     rValueFg = valueFg[i];
     rValueOg = valueOg[i];
-    rCheckI = ((__m256 *)value.checkIg)[i];
-    rCheckF = ((__m256 *)value.checkFg)[i];
-    rCheckO = ((__m256 *)value.checkOg)[i];
+    if (value.checkIg) {
+      rCheckI = ((__m256 *)value.checkIg)[i];
+      rCheckF = ((__m256 *)value.checkFg)[i];
+      rCheckO = ((__m256 *)value.checkOg)[i];
+    }
     rState = ((__m256 *)value.stateValue)[i];
     rStateAtv = ((__m256 *)value.stateActiveValue)[i];
     rOutputGrad = ((__m256 *)grad.outputGrad)[i];
......
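Editor's note: the guards above make the peephole weight pointers optional; when a check pointer is null, the loaded value defaults to zero and the peephole term vanishes from the gate pre-activation. A minimal scalar sketch of that behavior (the function name and signature here are hypothetical, not the Paddle kernel):

#include <cassert>

// Input-gate pre-activation with an optional peephole term: a null `check`
// contributes exactly zero, which is what the guarded loads achieve.
float InputGatePreactivation(float gate_in, float prev_state,
                             const float* check, int idx) {
  float r_check = check ? check[idx] : 0.0f;
  return gate_in + r_check * prev_state;
}

int main() {
  float with_peephole[] = {0.5f};
  assert(InputGatePreactivation(1.0f, 2.0f, with_peephole, 0) == 2.0f);
  assert(InputGatePreactivation(1.0f, 2.0f, nullptr, 0) == 1.0f);
  return 0;
}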
@@ -55,9 +55,10 @@ __global__ void KeLstmForward(Op op, LstmMetaValue<T> value, int frameSize,
   T rValueIg;
   T rValueFg;
   T rValueOg;
-  T rCheckI = value.checkIg[frameIdx];
-  T rCheckF = value.checkFg[frameIdx];
-  T rCheckO = value.checkOg[frameIdx];
+
+  T rCheckI = value.checkIg ? value.checkIg[frameIdx] : 0;
+  T rCheckF = value.checkFg ? value.checkFg[frameIdx] : 0;
+  T rCheckO = value.checkOg ? value.checkOg[frameIdx] : 0;

   rValueIn = value.gateValue[frameIdx];
   rValueIg = value.gateValue[frameIdx + frameSize];
@@ -121,9 +122,10 @@ __global__ void KeLstmBackward(Op op, LstmMetaValue<T> value,
   T rStateGrad;
   T rStateAtv;
   T rOutputGrad;
-  T rCheckI = value.checkIg[frameIdx];
-  T rCheckF = value.checkFg[frameIdx];
-  T rCheckO = value.checkOg[frameIdx];
+  T rCheckI = value.checkIg ? value.checkIg[frameIdx] : 0;
+  T rCheckF = value.checkFg ? value.checkFg[frameIdx] : 0;
+  T rCheckO = value.checkOg ? value.checkOg[frameIdx] : 0;
+
   T rCheckIGrad;
   T rCheckFGrad;
   T rCheckOGrad;
@@ -244,11 +246,6 @@ void gpu_lstm_backward(const platform::DeviceContext& context, Op op,
         op, value, grad, frameSize, batchSize, active_node, active_gate,
         active_state);
   }
-
-  cudaStreamSynchronize(stream);
-  // TODO(qingqing): Add cuda error check for each kernel.
-  cudaError_t err = cudaGetLastError();
-  PADDLE_ENFORCE(err, cudaGetErrorString(err));
 }

 }  // namespace detail
...
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/operators/math/math_function.h"
+#include "paddle/framework/data_type.h"

 namespace paddle {
 namespace operators {
@@ -233,6 +234,52 @@ void gemv<platform::CPUPlace, double>(const platform::DeviceContext& context,

 template struct SetConstant<platform::CPUPlace, float>;

+struct TensorSetConstantCPU {
+  TensorSetConstantCPU(framework::Tensor* tensor, float value)
+      : tensor_(tensor), value_(value) {}
+  template <typename T>
+  void operator()() const {
+    auto cpu = platform::CPUPlace();
+    auto* begin = tensor_->mutable_data<T>(cpu);
+    std::fill(begin, begin + tensor_->numel(), static_cast<T>(value_));
+  }
+  framework::Tensor* tensor_;
+  float value_;
+};
+
+template <>
+void set_constant_with_place<platform::CPUPlace>(
+    const platform::DeviceContext& context, framework::Tensor* tensor,
+    float value) {
+  framework::VisitDataType(framework::ToDataType(tensor->type()),
+                           TensorSetConstantCPU(tensor, value));
+}
+
+struct TensorSetConstantWithPlace : public boost::static_visitor<void> {
+  TensorSetConstantWithPlace(const platform::DeviceContext& context,
+                             framework::Tensor* tensor, float value)
+      : context_(context), tensor_(tensor), value_(value) {}
+  template <typename Place>
+  void operator()(Place place) const {
+    set_constant_with_place<Place>(context_, tensor_, value_);
+  }
+  const platform::DeviceContext& context_;
+  framework::Tensor* tensor_;
+  float value_;
+};
+
+void set_constant(const platform::DeviceContext& context,
+                  framework::Tensor* tensor, float value) {
+  TensorSetConstantWithPlace func(context, tensor, value);
+#ifdef PADDLE_WITH_CUDA
+  tensor->place().apply_visitor(func);
+#else
+  func(platform::CPUPlace());
+#endif
+}
+
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle
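Editor's note: the heart of the addition above is type erasure over the tensor's runtime dtype: VisitDataType turns a dtype tag into the right template instantiation of a fill functor. A stripped-down, self-contained sketch of that dispatch (the enum and Visit helper are illustrative stand-ins for framework::DataType / framework::VisitDataType, not the real API):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

enum class DataType { FP32, INT32 };

// Select the template instantiation of `visitor` from a runtime type tag.
template <typename Visitor>
void VisitDataType(DataType type, Visitor visitor) {
  switch (type) {
    case DataType::FP32:
      visitor.template operator()<float>();
      break;
    case DataType::INT32:
      visitor.template operator()<int>();
      break;
  }
}

struct FillVisitor {
  void* data;
  size_t n;
  float value;
  template <typename T>
  void operator()() const {
    T* begin = static_cast<T*>(data);
    std::fill(begin, begin + n, static_cast<T>(value));
  }
};

int main() {
  std::vector<int> buf(4);
  VisitDataType(DataType::INT32, FillVisitor{buf.data(), buf.size(), 10.f});
  assert(buf[2] == 10);
  return 0;
}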
@@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

+#include "paddle/framework/data_type.h"
 #include "paddle/operators/math/math_function.h"

 namespace paddle {
@@ -232,6 +233,30 @@ void gemv<platform::GPUPlace, double>(const platform::DeviceContext& context,

 template struct SetConstant<platform::GPUPlace, float>;

+struct TensorSetConstantGPU {
+  TensorSetConstantGPU(const platform::DeviceContext& context,
+                       framework::Tensor* tensor, float value)
+      : context_(context), tensor_(tensor), value_(value) {}
+  template <typename T>
+  void operator()() const {
+    SetConstant<platform::GPUPlace, T> functor;
+    functor(context_, tensor_, static_cast<T>(value_));
+  }
+  const platform::DeviceContext& context_;
+  framework::Tensor* tensor_;
+  float value_;
+};
+
+template <>
+void set_constant_with_place<platform::GPUPlace>(
+    const platform::DeviceContext& context, framework::Tensor* tensor,
+    float value) {
+  framework::VisitDataType(framework::ToDataType(tensor->type()),
+                           TensorSetConstantGPU(context, tensor, value));
+}
+
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle
@@ -19,11 +19,6 @@ limitations under the License. */
 #include <mkl_vml_functions.h>
 #endif

-#ifdef PADDLE_USE_MKL
-#include <mkl.h>
-#include <mkl_lapacke.h>
-#endif
-
 #ifdef PADDLE_USE_ATLAS
 extern "C" {
 #include <cblas.h>
@@ -108,6 +103,13 @@ struct SetConstant {
   }
 };

+template <typename Place>
+void set_constant_with_place(const platform::DeviceContext& context,
+                             framework::Tensor* tensor, float value);
+
+void set_constant(const platform::DeviceContext& context,
+                  framework::Tensor* tensor, float value);
+
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle
@@ -139,3 +139,15 @@ TEST(math_function, gemv) {
   GemvTest<float>(12, 7, true);
   GemvTest<double>(7, 9, true);
 }
+
+TEST(math_funciton, set_constant) {
+  paddle::framework::Tensor t;
+  t.Resize({10, 10});
+  t.mutable_data<int>(paddle::platform::CPUPlace());
+  auto* ctx = new paddle::platform::CPUDeviceContext();
+  paddle::operators::math::set_constant(*ctx, &t, 10);
+  for (int64_t i = 0; i < t.numel(); ++i) {
+    PADDLE_ENFORCE_EQ(10, t.data<int>()[i]);
+  }
+  delete ctx;
+}
@@ -22,8 +22,8 @@ template <typename T>
 class CopyMatrixRowsFunctor<platform::CPUPlace, T> {
  public:
   void operator()(const platform::DeviceContext& context,
-                  const framework::LoDTensor& src, const size_t* index,
-                  framework::LoDTensor& dst, bool is_src_index) {
+                  const framework::Tensor& src, const size_t* index,
+                  framework::Tensor& dst, bool is_src_index) {
     auto src_dims = src.dims();
     auto dst_dims = dst.dims();
     PADDLE_ENFORCE_EQ(src_dims.size(), 2UL,
...
@@ -41,8 +41,8 @@ template <typename T>
 class CopyMatrixRowsFunctor<platform::GPUPlace, T> {
  public:
   void operator()(const platform::DeviceContext& context,
-                  const framework::LoDTensor& src, const size_t* index,
-                  framework::LoDTensor& dst, bool is_src_index) {
+                  const framework::Tensor& src, const size_t* index,
+                  framework::Tensor& dst, bool is_src_index) {
     auto src_dims = src.dims();
     auto dst_dims = dst.dims();
     PADDLE_ENFORCE_EQ(src_dims.size(), 2,
...
@@ -30,8 +30,8 @@ class CopyMatrixRowsFunctor {
   // copy the input src to the indexed rows of output dst.
   // The indexed rows are based on the input index.
   void operator()(const platform::DeviceContext& context,
-                  const framework::LoDTensor& src, const size_t* index,
-                  framework::LoDTensor& dst, bool is_src_index);
+                  const framework::Tensor& src, const size_t* index,
+                  framework::Tensor& dst, bool is_src_index);
 };

 template <typename Place, typename T>
@@ -57,7 +57,7 @@ class LoDTensor2BatchFunctor {
                   bool is_reverse = false) const {
     if (!is_cal_batch_lod) {
       auto lods = batch.lod();
-      PADDLE_ENFORCE_EQ(lods.size(), 2UL);
+      PADDLE_ENFORCE_GT(lods.size(), 2UL);
       PADDLE_ENFORCE_EQ(lods[1].size(),
                         static_cast<size_t>(lod_tensor.dims()[0]));
       CopyMatrixRowsFunctor<Place, T> to_batch;
@@ -66,8 +66,8 @@ class LoDTensor2BatchFunctor {
     }

     auto lods = lod_tensor.lod();
-    PADDLE_ENFORCE_EQ(lods.size(), 1UL, "Only support one level sequence now.");
     auto lod = lods[0];
+    PADDLE_ENFORCE_EQ(lods.size(), 1UL, "Only support one level sequence now.");

     std::vector<SeqInfo> seq_info;
     for (size_t seq_id = 0; seq_id < lod.size() - 1; ++seq_id) {
@@ -78,8 +78,7 @@ class LoDTensor2BatchFunctor {
     std::sort(seq_info.begin(), seq_info.end(),
               [](SeqInfo a, SeqInfo b) { return a.length > b.length; });

-    // calculate the start position of each batch
-    // (numBatch equal the maxLength of sequences)
+    // Calculate the start position of each batch.
     // example: sequences = {s0, s1, s2}
     //            s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2
     //            num_batch = 5,
@@ -95,19 +94,25 @@ class LoDTensor2BatchFunctor {
     //                6, 2, 11,
     //                7, 3,
     //                8}
-    // The batch number represents batch size after rearranging the
+    // seq_order = {1, 0, 2}, the sort order.
+    //             where 1 is the second sequence,
+    //                   0 is the first sequence,
+    //                   2 is the third sequence.
+    // The num_batch represents batch size after rearranging the
     // input LodTensor. It is also the maximum length of input sequence.

     paddle::framework::LoD batch_lods;
     batch_lods.emplace_back(std::vector<size_t>{0});
     batch_lods.emplace_back(std::vector<size_t>{0});
+    batch_lods.emplace_back(std::vector<size_t>{0});

     // batch_lods[0] is the start positions for batch LoDTensor
     int num_batch = seq_info[0].length;
     batch_lods[0].resize(static_cast<size_t>(num_batch + 1));
     // batch_lods[1] is the raw index in the input LoDTensor
-    auto dims = lod_tensor.dims();
-    batch_lods[1].resize(static_cast<size_t>(dims[0]));
+    batch_lods[1].resize(static_cast<size_t>(lod_tensor.dims()[0]));
+    // batch_lods[2] is the sort order for the input LoDTensor.
+    batch_lods[2].resize(seq_info.size());

     size_t* batch_starts = batch_lods[0].data();
     size_t* seq2batch_idx = batch_lods[1].data();
@@ -127,6 +132,10 @@ class LoDTensor2BatchFunctor {
       }
       batch_starts[n + 1] = static_cast<size_t>(batch_id);
     }
+    size_t* seq_order = batch_lods[2].data();
+    for (size_t i = 0; i < seq_info.size(); ++i) {
+      seq_order[i] = seq_info[i].seq_idx;
+    }
     batch.set_lod(batch_lods);

     CopyMatrixRowsFunctor<Place, T> to_batch;
@@ -141,8 +150,7 @@ class Batch2LoDTensorFunctor {
                   const framework::LoDTensor& batch,
                   framework::LoDTensor& lod_tensor) const {
     auto in_lod = batch.lod();
-    PADDLE_ENFORCE_EQ(in_lod.size(), 2UL,
-                      "The LoD size of input `batch` should be 2.");
+    PADDLE_ENFORCE_GT(in_lod.size(), 2UL);
     PADDLE_ENFORCE_EQ(in_lod[1].size(),
                       static_cast<size_t>(lod_tensor.dims()[0]));
     CopyMatrixRowsFunctor<Place, T> to_seq;
...
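Editor's note: the worked example in the comments above (s0 length 4, s1 length 5, s2 length 3) can be reproduced mechanically: after sorting by decreasing length, batch n gathers the n-th step of every sequence still alive. A standalone sketch of just the index construction (function name and the starts/lengths encoding are my own; this is not the Paddle functor):

#include <cassert>
#include <cstddef>
#include <vector>

// starts[i] is the begin offset of sorted sequence i in the concatenated
// input, lengths[i] its length (sorted in decreasing order).
std::vector<size_t> BuildSeq2BatchIdx(const std::vector<size_t>& starts,
                                      const std::vector<size_t>& lengths) {
  std::vector<size_t> seq2batch_idx;
  const size_t num_batch = lengths.front();  // longest sequence comes first
  for (size_t n = 0; n < num_batch; ++n) {
    // lengths is sorted descending, so stop at the first exhausted sequence.
    for (size_t i = 0; i < lengths.size() && lengths[i] > n; ++i) {
      seq2batch_idx.push_back(starts[i] + n);
    }
  }
  return seq2batch_idx;
}

int main() {
  // Sorted order {s1, s0, s2} with lengths {5, 4, 3}; in the input, s0
  // occupies rows 0..3, s1 rows 4..8, s2 rows 9..11.
  auto idx = BuildSeq2BatchIdx({4, 0, 9}, {5, 4, 3});
  std::vector<size_t> expected = {4, 0, 9, 5, 1, 10, 6, 2, 11, 7, 3, 8};
  assert(idx == expected);  // matches the seq2batch_idx in the comment above
  return 0;
}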
@@ -51,6 +51,7 @@ class MeanGradOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext* ctx) const override {
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
+    ctx->ShareLoD("X", framework::GradVarName("X"));
   }
 };
...
@@ -75,7 +75,7 @@ class MomentumOpMaker : public framework::OpProtoAndCheckerMaker {
     AddOutput("VelocityOut", "(Tensor) Output updated velocity");

     AddAttr<float>("mu", "(float) Momentum coefficient");
-    AddAttr<bool>("useNesterov",
+    AddAttr<bool>("use_nesterov",
                   "(bool, default false) "
                   "Use Nesterov Momentum")
         .SetDefault(false);
...
@@ -34,7 +34,7 @@ class MomentumOpKernel : public framework::OpKernel<T> {
     velocity_out->mutable_data<T>(ctx.GetPlace());

     float mu = ctx.Attr<float>("mu");
-    bool use_nesterov = ctx.Attr<bool>("useNesterov");
+    bool use_nesterov = ctx.Attr<bool>("use_nesterov");

     auto p_out = framework::EigenVector<T>::Flatten(*param_out);
     auto v_out = framework::EigenVector<T>::Flatten(*velocity_out);
...
@@ -12,7 +12,6 @@
 See the License for the specific language governing permissions and
 limitations under the License. */

-#define EIGEN_USE_GPU
 #include "paddle/operators/mul_op.h"

 namespace ops = paddle::operators;
...
@@ -16,16 +16,12 @@

 #include "paddle/operators/math/math_function.h"

-#include "paddle/framework/eigen.h"
 #include "paddle/framework/op_registry.h"

 namespace paddle {
 namespace operators {

 using Tensor = framework::Tensor;
-template <typename T, int MajorType = Eigen::RowMajor,
-          typename IndexType = Eigen::DenseIndex>
-using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;

 template <typename Place, typename T>
 class MulKernel : public framework::OpKernel<T> {
...
@@ -51,9 +51,11 @@ class MultiplexOp : public framework::OperatorWithKernel {
   }

  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::ToDataType(ctx.MultiInput<Tensor>("X")[0]->type());
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.MultiInput<Tensor>("X")[0]->type()),
+        ctx.device_context());
   }
 };

@@ -107,9 +109,11 @@ class MultiplexGradOp : public framework::OperatorWithKernel {
   }

  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::ToDataType(ctx.MultiInput<Tensor>("X")[0]->type());
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.MultiInput<Tensor>("X")[0]->type()),
+        ctx.device_context());
   }
 };
...
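Editor's note: this IndicateDataType-to-GetKernelType migration (repeated below for several operators) widens the kernel-selection key from a bare data type to a (data type, device) pair. A toy sketch of why the richer key matters; the struct here is a stand-in for framework::OpKernelType, whose real definition carries more state:

#include <iostream>
#include <string>

enum class DataType { FP32, FP64 };
enum class Place { CPU, GPU };

// Kernel dispatch now keys on both fields instead of the data type alone.
struct OpKernelType {
  DataType data_type;
  Place place;
};

std::string DescribeKernel(const OpKernelType& key) {
  std::string s = key.data_type == DataType::FP32 ? "float32" : "float64";
  return s + (key.place == Place::CPU ? " on CPU" : " on GPU");
}

int main() {
  OpKernelType key{DataType::FP32, Place::GPU};
  std::cout << DescribeKernel(key) << "\n";  // float32 on GPU
  return 0;
}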
@@ -71,7 +71,7 @@ class MultiplexGradGPUKernel : public framework::OpKernel<T> {
     index_t_cpu.CopyFrom(*ids, platform::CPUPlace(), ctx.device_context());
     auto* index = index_t_cpu.data<int32_t>();

-    auto stream = ctx.device_context().stream();
+    auto stream = ctx.cuda_device_context().stream();
     Place place = boost::get<Place>(ctx.GetPlace());
     for (auto i = 0; i < rows; i++) {
       size_t k = static_cast<size_t>(index[i]);
...
@@ -35,6 +35,7 @@ constexpr int kInvalidGPUId = -1;
 struct Communicator {
   std::vector<ncclComm_t> comms_;
   std::unordered_map<int, int> comm_id_map_;
+  bool inited_;

   Communicator() {}

@@ -42,17 +43,21 @@ struct Communicator {
   void InitAll(const std::vector<int>& gpus) {
     comms_.resize(gpus.size());
+    inited_ = false;
     for (size_t i = 0; i < gpus.size(); ++i) {
       comm_id_map_[gpus[i]] = i;
     }
     PADDLE_ENFORCE(
         dynload::ncclCommInitAll(comms_.data(), gpus.size(), gpus.data()));
+    inited_ = true;
   }

   ~Communicator() {
-    for (size_t i = 0; i < comms_.size(); ++i) {
-      // FIXME(dzh) : PADDLE_ENFORCE return void
-      dynload::ncclCommDestroy(comms_[i]);
+    if (inited_) {
+      for (size_t i = 0; i < comms_.size(); ++i) {
+        // FIXME(dzh) : PADDLE_ENFORCE return void
+        dynload::ncclCommDestroy(comms_[i]);
+      }
     }
   }
...
@@ -26,7 +26,6 @@
 #include "paddle/framework/op_registry.h"
 #include "paddle/framework/program_desc.h"
 #include "paddle/framework/var_desc.h"
-#include "paddle/operators/math/math_function.h"
 #include "paddle/operators/nccl/nccl_gpu_common.h"
 #include "paddle/platform/device_context.h"
 #include "paddle/platform/enforce.h"
...
@@ -37,11 +37,11 @@ class PoolCudnnOpKernel : public framework::OpKernel<T> {
     const T *input_data = input->data<T>();
     T *output_data = output->mutable_data<T>(ctx.GetPlace());

-    std::string pooling_type = ctx.Attr<std::string>("poolingType");
+    std::string pooling_type = ctx.Attr<std::string>("pooling_type");
     std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
-    if (ctx.Attr<bool>("globalPooling")) {
+    if (ctx.Attr<bool>("global_pooling")) {
       for (size_t i = 0; i < ksize.size(); ++i) {
         paddings[i] = 0;
         ksize[i] = static_cast<int>(input->dims()[i + 2]);
@@ -92,12 +92,12 @@ class PoolCudnnGradOpKernel : public framework::OpKernel<T> {
         ctx.Input<Tensor>(framework::GradVarName("Out"));
     Tensor *input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));

-    std::string pooling_type = ctx.Attr<std::string>("poolingType");
+    std::string pooling_type = ctx.Attr<std::string>("pooling_type");
     std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
-    if (ctx.Attr<bool>("globalPooling")) {
+    if (ctx.Attr<bool>("global_pooling")) {
       for (size_t i = 0; i < ksize.size(); ++i) {
         paddings[i] = 0;
         ksize[i] = static_cast<int>(input->dims()[i + 2]);
...
@@ -29,7 +29,7 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const {
   auto in_x_dims = ctx->GetInputDim("X");

-  std::string pooling_type = ctx->Attrs().Get<std::string>("poolingType");
+  std::string pooling_type = ctx->Attrs().Get<std::string>("pooling_type");
   std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize");
   std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
   std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
@@ -37,7 +37,7 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const {
   PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5,
                  "Pooling intput should be 4-D or 5-D tensor.");

-  if (ctx->Attrs().Get<bool>("globalPooling")) {
+  if (ctx->Attrs().Get<bool>("global_pooling")) {
     ksize.resize(static_cast<size_t>(in_x_dims.size()) - 2);
     for (size_t i = 0; i < ksize.size(); ++i) {
       paddings[i] = 0;
@@ -83,20 +83,20 @@ Pool2dOpMaker::Pool2dOpMaker(framework::OpProto *proto,
            "H is the height of the feature, "
            "and W is the width of the feature.");

-  AddAttr<std::string>("poolingType",
+  AddAttr<std::string>("pooling_type",
                        "(string), pooling type, can be \"max\" for max-pooling "
                        "and \"avg\" for average-pooling.")
       .InEnum({"max", "avg"});
   AddAttr<std::vector<int>>("ksize",
                             "(vector<int>) The pooling window "
                             "size(height, width) of the pooling operator. "
-                            "If globalPooling = true, ksize and paddings will "
+                            "If global_pooling = true, ksize and paddings will "
                             "be ignored.");  // TODO(Chengduo): Add checker.
                                              // (Currently,
                                              // TypedAttrChecker don't support vector type.)
-  AddAttr<bool>("globalPooling",
+  AddAttr<bool>("global_pooling",
                 "(bool, default false) Whether to use the global pooling. "
-                "If globalPooling = true, ksize and paddings will be ignored.")
+                "If global_pooling = true, ksize and paddings will be ignored.")
       .SetDefault(false);
   AddAttr<std::vector<int>>("strides",
                             "(vector<int>, default {1, 1}), strides(height, "
@@ -107,7 +107,7 @@ Pool2dOpMaker::Pool2dOpMaker(framework::OpProto *proto,
       "paddings",
       "(vector<int>, defalut {0,0}), paddings(height, width) of pooling "
       "operator."
-      "If globalPooling = true, paddings and ksize will be ignored.")
+      "If global_pooling = true, paddings and ksize will be ignored.")
       .SetDefault({0, 0});  // TODO(Chengduo): Add checker. (Currently,
                             // TypedAttrChecker don't support vector type.)
@@ -115,7 +115,7 @@ Pool2dOpMaker::Pool2dOpMaker(framework::OpProto *proto,
 Pool2d Operator.

 The pooling2d operation calculates the output based on
-the input, poolingType and ksize, strides, paddings parameters.
+the input, pooling_type and ksize, strides, paddings parameters.
 Input(X) and output(Out) are in NCHW format, where N is batch size, C is the
 number of channels, H is the height of the feature, and W is the width of the feature.
 Parameters(ksize, strides, paddings) are two elements.
@@ -152,7 +152,7 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto,
            "the number of channels, and D, H and W is the depth, height and "
            "width of the feature, respectively.");

-  AddAttr<std::string>("poolingType",
+  AddAttr<std::string>("pooling_type",
                        "(string) Pooling type, can be \"max\" for max-pooling "
                        "and \"avg\" for average-pooling.")
       .InEnum({"max", "avg"});
@@ -160,13 +160,14 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto,
       "ksize",
       "(vector<int>) The pooling window size(depth, height, "
       "width) of pooling operator. "
-      "If globalPooling = true, ksize and paddings will "
+      "If global_pooling = true, ksize and paddings will "
       "be ignored.");  // TODO(Chengduo): Add checker.
                        // (Currently,
                        // TypedAttrChecker don't support vector type.)
-  AddAttr<bool>("globalPooling",
-                "(bool, default false) Whether to use the global pooling. "
-                "If globalPooling = true, ksize and paddings wille be ignored.")
+  AddAttr<bool>(
+      "global_pooling",
+      "(bool, default false) Whether to use the global pooling. "
+      "If global_pooling = true, ksize and paddings wille be ignored.")
       .SetDefault(false);
   AddAttr<std::vector<int>>(
       "strides",
@@ -178,7 +179,7 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto,
       "paddings",
       "(vector<int>, defalut {0,0,0}), paddings(depth, height, "
       "width) of pooling operator. "
-      "If globalPooling = true, ksize and paddings will be ignored.")
+      "If global_pooling = true, ksize and paddings will be ignored.")
       .SetDefault({0, 0, 0});  // TODO(Chengduo): Add checker. (Currently,
                                // TypedAttrChecker don't support vector type.)
@@ -186,7 +187,7 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto,
 Pool3d Operator.

 The pooling3d operation calculates the output based on
-the input, poolingType, ksize, strides, and paddings parameters.
+the input, pooling_type, ksize, strides, and paddings parameters.
 Input(X) and output(Out) are in NCDHW format, where N is batch
 size, C is the number of channels, and D, H and W are the depth, height and
 width of the feature, respectively. Parameters(ksize, strides, paddings)
...
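Editor's note: besides the attribute rename, the global_pooling branches repeated through these pool kernels all perform the same adjustment: the window is stretched to the input's full spatial extent and paddings are zeroed. A standalone sketch of just that adjustment (function name is my own, not Paddle's):

#include <cassert>
#include <cstddef>
#include <vector>

// For an NCHW/NCDHW input, global pooling replaces ksize with the spatial
// dims of the input and zeroes the paddings, so ksize/paddings attributes
// are effectively ignored.
void ApplyGlobalPooling(const std::vector<int>& input_dims,  // e.g. {N,C,H,W}
                        std::vector<int>* ksize, std::vector<int>* paddings) {
  ksize->resize(input_dims.size() - 2);
  paddings->assign(ksize->size(), 0);
  for (size_t i = 0; i < ksize->size(); ++i) {
    (*ksize)[i] = input_dims[i + 2];  // skip the batch and channel dims
  }
}

int main() {
  std::vector<int> ksize = {3, 3}, paddings = {1, 1};
  ApplyGlobalPooling({8, 16, 32, 64}, &ksize, &paddings);
  assert(ksize[0] == 32 && ksize[1] == 64 && paddings[0] == 0);
  return 0;
}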
@@ -57,11 +57,11 @@ class PoolKernel : public framework::OpKernel<T> {
     const Tensor* in_x = context.Input<Tensor>("X");
     Tensor* out = context.Output<Tensor>("Out");

-    std::string pooling_type = context.Attr<std::string>("poolingType");
+    std::string pooling_type = context.Attr<std::string>("pooling_type");
     std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = context.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
-    if (context.Attr<bool>("globalPooling")) {
+    if (context.Attr<bool>("global_pooling")) {
       for (size_t i = 0; i < ksize.size(); ++i) {
         paddings[i] = 0;
         ksize[i] = static_cast<int>(in_x->dims()[i + 2]);
@@ -119,12 +119,12 @@ class PoolGradKernel : public framework::OpKernel<T> {
         context.Input<Tensor>(framework::GradVarName("Out"));
     Tensor* in_x_grad = context.Output<Tensor>(framework::GradVarName("X"));

-    std::string pooling_type = context.Attr<std::string>("poolingType");
+    std::string pooling_type = context.Attr<std::string>("pooling_type");
     std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = context.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
-    if (context.Attr<bool>("globalPooling")) {
+    if (context.Attr<bool>("global_pooling")) {
       for (size_t i = 0; i < ksize.size(); ++i) {
         paddings[i] = 0;
         ksize[i] = static_cast<int>(in_x->dims()[i + 2]);
...
@@ -44,7 +44,7 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5,
                    "Pooling intput should be 4-D or 5-D tensor.");

-    if (ctx->Attrs().Get<bool>("globalPooling")) {
+    if (ctx->Attrs().Get<bool>("global_pooling")) {
       ksize.resize(static_cast<size_t>(in_x_dims.size()) - 2);
       for (size_t i = 0; i < ksize.size(); ++i) {
         paddings[i] = 0;
@@ -110,14 +110,14 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
   AddAttr<std::vector<int>>("ksize",
                             "(vector<int>) The pooling window size(height, "
                             "width) of pooling operator. "
-                            "If globalPooling = true, ksize and paddings "
+                            "If global_pooling = true, ksize and paddings "
                             "will be ignored.");  // TODO(Chengduo): Add
                                                   // checker. (Currently,
                                                   // TypedAttrChecker don't support vector type.)
   AddAttr<bool>(
-      "globalPooling",
+      "global_pooling",
       "(bool, default false) Whether to use the global pooling. "
-      "If globalPooling = true, ksize and paddings will be ignored.")
+      "If global_pooling = true, ksize and paddings will be ignored.")
       .SetDefault(false);
   AddAttr<std::vector<int>>("strides",
                             "(vector<int>, default {1, 1}), strides(height, "
@@ -128,7 +128,7 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
       "paddings",
       "(vector<int>, defalut {0, 0}), paddings(height, width) of pooling "
       "operator. "
-      "If globalPooling = true, paddings and will be ignored.")
+      "If global_pooling = true, paddings and will be ignored.")
       .SetDefault({0, 0});  // TODO(Chengduo): Add checker. (Currently,
                             // TypedAttrChecker don't support vector type.)
@@ -188,14 +188,14 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
   AddAttr<std::vector<int>>("ksize",
                             "(vector<int>) The pooling window size(depth, "
                             "height, width) of pooling operator. "
-                            "If globalPooling = true, ksize and paddings "
+                            "If global_pooling = true, ksize and paddings "
                             "will be ignored.");  // TODO(Chengduo): Add
                                                   // checker. (Currently,
                                                   // TypedAttrChecker don't support vector type.)
   AddAttr<bool>(
-      "globalPooling",
+      "global_pooling",
       "(bool, default false) Whether to use the global pooling. "
-      "If globalPooling = true, ksize and paddings will be ignored.")
+      "If global_pooling = true, ksize and paddings will be ignored.")
       .SetDefault(false);
   AddAttr<std::vector<int>>("strides",
                             "(vector<int>, default {1,1,1}), strides(depth, "
@@ -206,7 +206,7 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
       "paddings",
       "(vector, defalut {0,0,0}), paddings(depth, "
       "height, width) of pooling operator. "
-      "If globalPooling = true, paddings and ksize will be ignored.")
+      "If global_pooling = true, paddings and ksize will be ignored.")
       .SetDefault({0, 0, 0});  // TODO(Chengduo): Add checker. (Currently,
                                // TypedAttrChecker don't support vector type.)
...
@@ -35,7 +35,7 @@ class MaxPoolWithIndexKernel : public framework::OpKernel<T> {
     std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = context.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
-    if (context.Attr<bool>("globalPooling")) {
+    if (context.Attr<bool>("global_pooling")) {
       for (size_t i = 0; i < ksize.size(); ++i) {
         paddings[i] = 0;
         ksize[i] = static_cast<int>(in_x->dims()[i + 2]);
@@ -72,7 +72,7 @@ class MaxPoolWithIndexGradKernel : public framework::OpKernel<T> {
     std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = context.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
-    if (context.Attr<bool>("globalPooling")) {
+    if (context.Attr<bool>("global_pooling")) {
       for (size_t i = 0; i < ksize.size(); ++i) {
         paddings[i] = 0;
         ksize[i] = static_cast<int>(in_x_grad->dims()[i + 2]);
...
@@ -85,9 +85,11 @@ class PositiveNegativePairOp : public framework::OperatorWithKernel {
   }

  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext &ctx) const override {
-    return framework::ToDataType(ctx.Input<Tensor>("Score")->type());
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<Tensor>("Score")->type()),
+        ctx.device_context());
   }
 };
...
@@ -80,9 +80,11 @@ class PrecisionRecallOp : public framework::OperatorWithKernel {
   }

  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext &ctx) const override {
-    return framework::ToDataType(ctx.Input<Tensor>("MaxProbs")->type());
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<Tensor>("MaxProbs")->type()),
+        ctx.device_context());
   }
 };
...
@@ -387,8 +387,8 @@ class RecurrentGradOp : public RecurrentBase {
       auto &p_names = Inputs(kParameters);
       PADDLE_ENFORCE_EQ(pg_names.size(), p_names.size());

-      for (size_t prog_id = 0; prog_id < pg_names.size(); ++prog_id) {
-        auto inside_grad_name = framework::GradVarName(p_names[prog_id]);
+      for (size_t param_id = 0; param_id < pg_names.size(); ++param_id) {
+        auto inside_grad_name = framework::GradVarName(p_names[param_id]);

         // If does not compute gradient of that variable inside rnn, just
         // continue
@@ -406,27 +406,19 @@ class RecurrentGradOp : public RecurrentBase {
           attrs["value"] = 0.0f;

           auto zero_op = framework::OpRegistry::CreateOp(
-              "fill_constant", {}, {{"Out", {pg_names[prog_id]}}}, attrs);
+              "fill_constant", {}, {{"Out", {pg_names[param_id]}}}, attrs);
           zero_op->Run(scope, dev_ctx);
         }

+        auto new_inside_name = cur_scope.Rename(inside_grad_name);
         // sum gradient
-        auto *outside_var = scope.FindVar(pg_names[prog_id]);
-        PADDLE_ENFORCE(outside_var != nullptr);
-        auto &outside_tensor =
-            *outside_var->GetMutable<framework::LoDTensor>();
-
-        std::string result_var_name;
-        auto *local_result_var = cur_scope.Var(&result_var_name);
-        auto &local_result_tensor =
-            *local_result_var->GetMutable<framework::LoDTensor>();
-
-        local_result_tensor.ShareDataWith(outside_tensor);

         auto sum_op = framework::OpRegistry::CreateOp(
-            "sum", {{"X", {result_var_name, inside_grad_name}}},
-            {{"Out", {result_var_name}}}, {});
+            "sum", {{"X", {pg_names[param_id], new_inside_name}}},
+            {{"Out", {pg_names[param_id]}}}, {});
         sum_op->Run(cur_scope, dev_ctx);
+
+        cur_scope.Rename(new_inside_name, inside_grad_name);
       }
     }
     VLOG(5) << "Accumulate Parameter finished ";
...
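Editor's note: the rewrite above accumulates parameter gradients with a rename trick: the inner gradient is temporarily renamed so the sum op can write straight into the outer gradient variable, avoiding the intermediate copy the old code built by hand. A toy illustration under the assumption that Scope::Rename behaves like a key move in a map (Scope here is just a std::map, not the framework class; the @GRAD suffix follows GradVarName's convention):

#include <cassert>
#include <map>
#include <string>
#include <vector>

using Scope = std::map<std::string, std::vector<float>>;

// Move the value stored under `from` to the key `to`.
void Rename(Scope* s, const std::string& from, const std::string& to) {
  (*s)[to] = (*s)[from];
  s->erase(from);
}

int main() {
  Scope scope = {{"W@GRAD", {1, 2}}, {"W@GRAD@inner", {10, 20}}};
  Rename(&scope, "W@GRAD@inner", "tmp_name");
  // sum_op equivalent: W@GRAD += tmp_name, accumulated in place.
  for (size_t i = 0; i < scope["W@GRAD"].size(); ++i)
    scope["W@GRAD"][i] += scope["tmp_name"][i];
  Rename(&scope, "tmp_name", "W@GRAD@inner");  // restore the inner name
  assert(scope["W@GRAD"][1] == 22);
  return 0;
}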
@@ -49,9 +49,11 @@ class ScatterOp : public framework::OperatorWithKernel {
   }

  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::ToDataType(ctx.Input<Tensor>("Ref")->type());
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<Tensor>("Ref")->type()),
+        ctx.device_context());
   }
 };

@@ -66,9 +68,11 @@ class ScatterGradOp : public framework::OperatorWithKernel {
   }

  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::ToDataType(ctx.Input<Tensor>("Ref")->type());
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<Tensor>("Ref")->type()),
+        ctx.device_context());
   }
 };
...
@@ -68,38 +68,42 @@ class SequenceConcatOpMaker : public framework::OpProtoAndCheckerMaker {
              "The level should be less than the level number of inputs.")
         .SetDefault(0);
     AddComment(R"DOC(
-Sequence Concat Operator.
-
-The sequence_concat operator concatenates multiple LoDTensors.
-It supports a sequence (LoD Tensor with level number is 1)
+The sequence_concat operator concatenates multiple LoDTensors.
+It only supports sequence (LoD Tensor with level number is 1)
 or a nested sequence (LoD tensor with level number is 2) as its input.
-The following examples explain how the operator works:
 - Case1:
   If the axis is other than 0(here, axis is 1 and level is 1),
   each input should have the same LoD information and the LoD
   information of the output keeps the same as the input.

   LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
   LoD(x1) = {{0,2,4}, {0,1,2,3,4}}; Dims(x1) = (4,4,4)
   LoD(Out) = {{0,2,4}, {0,1,2,3,4}}; Dims(Out) = (4,7,4)

 - Case2:
   If the axis is 0(here, leve is 0), the inputs are concatenated along
   time steps, the LoD information of the output need to re-compute.
+  The LoD information of level-1 should be same.

-  LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
-  LoD(x1) = {{0,3,5}, {0,1,2,3,5}}; Dims(x1) = (5,3,4)
-  LoD(Out) = {{0,5,9}, {0,1,2,3,4,5,6,7,9}}; Dims(Out) = (9,3,4)
+  LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
+  LoD(x1) = {{0,2,4}, {0,1,3,5,7}}; Dims(x1) = (7,3,4)
+  LoD(Out) = {{0,2,4}, {0,2,5,8,11}}; Dims(Out) = (11,3,4)

 - Case3:
   If the axis is 0(here, level is 1).

-  LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
-  LoD(x1) = {{0,3,5}, {0,1,3,4,5}}; Dims(x1) = (5,3,4)
-  LoD(Out) = {{0,5,9}, {0,2,5,7,9}}; Dims(Out) = (9,3,4)
+  LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
+  LoD(x1) = {{0,3,4}, {0,1,3,5,7}}; Dims(x1) = (7,3,4)
+  LoD(Out) = {{0,5,8}, {0,1,2,3,5,7,8,9,11}}; Dims(Out) = (11,3,4)

-NOTE: The levels of all the inputs should be the same.
+- Case4:
+  If the LoD number is 1, axis is 0, level is 0
+
+  LoD(x0) = {{0,1,2,3,4}}; Dims(x0) = (4,3,4)
+  LoD(x1) = {{0,1,3,5,7}}; Dims(x1) = (7,3,4)
+  LoD(Out) = {{0,2,5,8,11}}; Dims(Out) = (11,3,4)
+
+NOTE: The levels of all the inputs should be the same.
     )DOC");
   }
 };
...
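Editor's note: the new Case4 numbers check out mechanically: concatenating along axis 0 sums the per-sequence lengths, and the result is re-accumulated into offsets. A standalone sketch under that reading (function name is my own):

#include <cassert>
#include <vector>

// Concatenate two level-0 LoDs along axis 0 (Case4 above): per-sequence
// lengths add up, then lengths are turned back into offsets.
std::vector<size_t> ConcatOffsets(const std::vector<size_t>& a,
                                  const std::vector<size_t>& b) {
  std::vector<size_t> out = {0};
  for (size_t i = 1; i < a.size(); ++i) {
    size_t len = (a[i] - a[i - 1]) + (b[i] - b[i - 1]);
    out.push_back(out.back() + len);
  }
  return out;
}

int main() {
  // LoD(x0) = {0,1,2,3,4}, LoD(x1) = {0,1,3,5,7}  ->  LoD(Out) = {0,2,5,8,11}
  auto out = ConcatOffsets({0, 1, 2, 3, 4}, {0, 1, 3, 5, 7});
  assert((out == std::vector<size_t>{0, 2, 5, 8, 11}));
  return 0;
}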
@@ -12,8 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#define EIGEN_USE_GPU
-
 #include "paddle/operators/sequence_concat_op.h"

 namespace ops = paddle::operators;
...
@@ -24,28 +24,38 @@ using LoDTensor = framework::LoDTensor;
 using LoD = framework::LoD;

 template <typename T>
-LoD concatLoD(const std::vector<const T*> ins, const size_t axis,
-              const size_t level) {
+LoD ConcatLoD(const std::vector<const T*> ins, const size_t level) {
   auto out_lod = ins[0]->lod();
+  auto numLevels = ins[0]->NumLevels();
   const size_t n = ins.size();
-  if (axis == 0UL) {
-    for (size_t i = 1; i < n; ++i) {
-      for (size_t j = 0; j < ins[i]->lod()[0].size(); ++j) {
-        out_lod[0][j] += ins[i]->lod()[0][j];
-      }
+  const size_t level_idx = ins[0]->NumLevels() - 1 - level;
+  for (size_t i = 1; i < n; ++i) {
+    for (size_t j = 0; j < ins[i]->lod()[level_idx].size(); ++j) {
+      out_lod[level_idx][j] += ins[i]->lod()[level_idx][j];
+    }
+  }

-      if (ins[0]->NumLevels() == 2) {
-        for (size_t j = 1; j < ins[i]->lod()[1].size(); ++j) {
-          if (level == 0UL) {
-            out_lod[1].push_back(out_lod[1].back() + ins[i]->lod()[1][j] -
-                                 ins[i]->lod()[1][j - 1]);
-          } else if (level == 1UL) {
-            out_lod[1][j] += ins[1]->lod()[1][j];
-          }
+  for (size_t i = level_idx; i < numLevels - 1; ++i) {
+    size_t lod_len = 1;
+    for (size_t j = 0; j < n; ++j) {
+      lod_len += ins[j]->lod()[i + 1].size() - 1;
+    }
+    out_lod[i + 1].clear();
+    out_lod[i + 1].resize(lod_len);
+
+    size_t idx = 1;
+    for (size_t j = 0; j < ins[0]->lod()[i].size() - 1; ++j) {
+      for (size_t k = 0; k < n; ++k) {
+        for (size_t m = ins[k]->lod()[i][j]; m < ins[k]->lod()[i][j + 1]; ++m) {
+          out_lod[i + 1][idx] = out_lod[i + 1][idx - 1] +
+                                ins[k]->lod()[i + 1][m + 1] -
+                                ins[k]->lod()[i + 1][m];
+          idx++;
         }
       }
     }
   }
   return out_lod;
 }
@@ -82,18 +92,21 @@ class SequenceConcatOpKernel : public framework::OpKernel<T> {
                    "should be greater than the specify level");

     out->mutable_data<T>(ctx.GetPlace());
-    auto out_lod = concatLoD<LoDTensor>(ins, axis, level);
+    auto out_lod = ins[0]->lod();
+    if (axis == 0) {
+      out_lod = ConcatLoD<LoDTensor>(ins, level);
+    }
     out->set_lod(out_lod);

-    auto out_lod_level = out_lod[level];
+    const size_t level_idx = out_lod.size() - level - 1;
+    auto out_lod_level = framework::ToAbsOffset(out_lod)[level_idx];
     for (size_t i = 0; i < out_lod_level.size() - 1; ++i) {
       Tensor out_t = out->Slice(static_cast<int>(out_lod_level[i]),
                                 static_cast<int>(out_lod_level[i + 1]));
       auto out_stride = framework::stride(out_t.dims());
       size_t offset = 0;
       for (size_t j = 0; j < n; ++j) {
-        auto in_lod_level = ins[j]->lod()[level];
+        auto in_lod_level = framework::ToAbsOffset(ins[j]->lod())[level_idx];
         auto in_stride = framework::stride(ins[j]->dims());
         Tensor in_t = ins[j]->Slice(static_cast<int>(in_lod_level[i]),
                                     static_cast<int>(in_lod_level[i + 1]));
@@ -124,9 +137,12 @@ class SequenceConcatGradOpKernel : public framework::OpKernel<T> {
       x_grads[i]->set_lod(ins[i]->lod());
       x_grads[i]->mutable_data<T>(ctx.GetPlace());
     }
-
-    auto out_lod = concatLoD<LoDTensor>(ins, axis, level);
-    auto out_lod_level = out_lod[level];
+    auto out_lod = ins[0]->lod();
+    if (axis == 0UL) {
+      out_lod = ConcatLoD<LoDTensor>(ins, level);
+    }
+    const size_t level_idx = out_lod.size() - level - 1;
+    auto out_lod_level = framework::ToAbsOffset(out_lod)[level_idx];

     for (size_t i = 0; i < out_lod_level.size() - 1; ++i) {
       Tensor out_grad_t =
@@ -136,7 +152,8 @@ class SequenceConcatGradOpKernel : public framework::OpKernel<T> {
       size_t offset = 0;
       for (size_t j = 0; j < n; ++j) {
-        auto x_grad_lod_level = x_grads[j]->lod()[level];
+        auto x_grad_lod_level =
+            framework::ToAbsOffset(x_grads[j]->lod())[level_idx];
         auto x_grad_stride = framework::stride(x_grads[j]->dims());
         Tensor x_grad_t =
             x_grads[j]->Slice(static_cast<int>(x_grad_lod_level[i]),
...
@@ -107,9 +107,11 @@ class SequencePoolGradOp : public framework::OperatorWithKernel {
   }
  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::ToDataType(ctx.Input<Tensor>("X")->type());
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<Tensor>("X")->type()),
+        ctx.device_context());
   }
 };
......
@@ -12,8 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#define EIGEN_USE_GPU
 #include "paddle/operators/sequence_softmax_op.h"
 namespace ops = paddle::operators;
......
@@ -14,7 +14,6 @@ limitations under the License. */
 #pragma once
-#include "paddle/framework/eigen.h"
 #include "paddle/framework/op_registry.h"
 #include "paddle/operators/math/softmax.h"
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/lod_rank_table.h"
#include "paddle/operators/array_operator.h"
#include "paddle/operators/math/math_function.h"
namespace paddle {
namespace operators {
class ShrinkRNNMemoryOp : public ArrayOp {
public:
ShrinkRNNMemoryOp(const std::string &type,
const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: ArrayOp(type, inputs, outputs, attrs) {}
void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override {
auto *x_var = scope.FindVar(Input("X"));
PADDLE_ENFORCE(x_var != nullptr, "Input X must be set");
auto &x_tensor = x_var->Get<framework::LoDTensor>();
size_t offset = this->GetOffset(scope, dev_ctx);
auto *rank_table_var = scope.FindVar(Input("RankTable"));
PADDLE_ENFORCE(rank_table_var != nullptr, "RankTable must be set");
auto &rank_table = rank_table_var->Get<framework::LoDRankTable>();
auto &rank_items = rank_table.items();
int dst_num_rows =
std::lower_bound(rank_items.begin(), rank_items.end(), offset,
[](const framework::LoDRankTable::TableItem &a,
size_t b) { return a.length > b; }) -
rank_items.begin();
auto *out_var = scope.FindVar(Output("Out"));
PADDLE_ENFORCE(out_var != nullptr, "Output Out must be set");
auto &out_tensor = *out_var->GetMutable<framework::LoDTensor>();
if (dst_num_rows != 0) {
out_tensor.ShareDataWith(x_tensor.Slice(0, dst_num_rows));
}
}
};
class ShrinkRNNMemoryOpProtoMaker : public framework::OpProtoAndCheckerMaker {
public:
ShrinkRNNMemoryOpProtoMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "");
AddInput("RankTable", "");
AddInput("I", "");
AddOutput("Out", "");
AddComment("");
}
};
class ShrinkRNNMemoryInferShape : public framework::InferShapeBase {
public:
void operator()(framework::InferShapeContext *context) const override {
PADDLE_ENFORCE(context->HasInput("X"));
PADDLE_ENFORCE(context->HasInput("I"));
PADDLE_ENFORCE(context->HasInput("RankTable"));
context->SetOutputDim("Out", context->GetInputDim("X"));
}
};
class ShrinkRNNMemoryGradOp : public ArrayOp {
public:
ShrinkRNNMemoryGradOp(const std::string &type,
const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: ArrayOp(type, inputs, outputs, attrs) {}
void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override {
auto *dout_var = scope.FindVar(Input(framework::GradVarName("Out")));
auto *dx_var = scope.FindVar(Output(framework::GradVarName("X")));
PADDLE_ENFORCE(dx_var != nullptr, "Input Gradient should not be nullptr");
auto *x_var = scope.FindVar(Input("X"));
PADDLE_ENFORCE(x_var != nullptr);
auto &x_tensor = x_var->Get<framework::LoDTensor>();
auto &dx_tensor = *dx_var->GetMutable<framework::LoDTensor>();
dx_tensor.Resize(x_tensor.dims());
dx_tensor.mutable_data(x_tensor.place(), x_tensor.type());
if (dout_var == nullptr) { // dx_tensor fill zero
math::set_constant(dev_ctx, &dx_tensor, 0.0f);
} else {
auto &dout_tensor = dout_var->Get<framework::LoDTensor>();
auto height = dout_tensor.dims()[0];
dx_tensor.Slice(0, static_cast<int>(height))
.CopyFrom(dout_tensor, dout_tensor.place(), dev_ctx);
      if (dx_tensor.dims()[0] > height) {
        // Zero the rows that the shrunk gradient does not cover.
        auto rest_tensor = dx_tensor.Slice(
            static_cast<int>(height), static_cast<int>(dx_tensor.dims()[0]));
math::set_constant(dev_ctx, &rest_tensor, 0.0f);
}
}
}
};
class ShrinkRNNMemoryGradInferShape : public framework::InferShapeBase {
public:
void operator()(framework::InferShapeContext *context) const override {
PADDLE_ENFORCE(context->HasInput("X"));
PADDLE_ENFORCE(context->HasOutput(framework::GradVarName("X")));
context->SetOutputDim(framework::GradVarName("X"),
context->GetInputDim("X"));
}
};
class ShrinkRNNGradOpMaker : public framework::SingleGradOpDescMaker {
public:
using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
protected:
std::unique_ptr<framework::OpDescBind> Apply() const override {
auto *op = new framework::OpDescBind();
op->SetType("shrink_rnn_memory_grad");
op->SetInput("X", Input("X"));
op->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
op->SetOutput(framework::GradVarName("X"), InputGrad("X"));
op->SetAttrMap(Attrs());
return std::unique_ptr<framework::OpDescBind>(op);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OPERATOR(shrink_rnn_memory, ops::ShrinkRNNMemoryOp,
ops::ShrinkRNNMemoryInferShape,
ops::ShrinkRNNMemoryOpProtoMaker, ops::ShrinkRNNGradOpMaker);
REGISTER_OPERATOR(shrink_rnn_memory_grad, ops::ShrinkRNNMemoryGradOp,
ops::ShrinkRNNMemoryGradInferShape);
@@ -12,7 +12,6 @@
 See the License for the specific language governing permissions and
 limitations under the License. */
-#define EIGEN_USE_GPU
 #include "paddle/operators/softmax_op.h"
 namespace ops = paddle::operators;
......
@@ -13,7 +13,6 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #pragma once
-#include "paddle/framework/eigen.h"
 #include "paddle/framework/op_registry.h"
 #include "paddle/operators/math/softmax.h"
@@ -21,9 +20,6 @@ namespace paddle {
 namespace operators {
 using Tensor = framework::Tensor;
-template <typename T, int MajorType = Eigen::RowMajor,
-          typename IndexType = Eigen::DenseIndex>
-using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
 template <typename Place, typename T>
 class SoftmaxKernel : public framework::OpKernel<T> {
......
@@ -121,9 +121,11 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel {
   }
  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::ToDataType(ctx.Input<Tensor>("Logits")->type());
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<Tensor>("Logits")->type()),
+        ctx.device_context());
   }
 };
@@ -160,10 +162,12 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel {
   }
  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::ToDataType(
-        ctx.Input<Tensor>(framework::GradVarName("Loss"))->type());
+    return framework::OpKernelType(
+        framework::ToDataType(
+            ctx.Input<Tensor>(framework::GradVarName("Loss"))->type()),
+        ctx.device_context());
   }
 };
......
@@ -47,20 +47,24 @@ class SumOp : public framework::OperatorWithKernel {
   }
  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
     auto x_vars = ctx.MultiInputVar("X");
     if (x_vars[0]->IsType<framework::LoDTensor>()) {
-      return framework::ToDataType(
-          x_vars[0]->Get<framework::LoDTensor>().type());
+      return framework::OpKernelType(
+          framework::ToDataType(x_vars[0]->Get<framework::LoDTensor>().type()),
+          ctx.device_context());
     } else if (x_vars[0]->IsType<framework::SelectedRows>()) {
-      return framework::ToDataType(
-          x_vars[0]->Get<framework::SelectedRows>().value().type());
+      return framework::OpKernelType(
+          framework::ToDataType(
+              x_vars[0]->Get<framework::SelectedRows>().value().type()),
+          ctx.device_context());
     } else if (x_vars[0]->IsType<framework::LoDTensorArray>()) {
       auto& array = x_vars[0]->Get<framework::LoDTensorArray>();
       for (auto& each : array) {
         if (each.numel() != 0) {
-          return framework::ToDataType(each.type());
+          return framework::OpKernelType(framework::ToDataType(each.type()),
+                                         ctx.device_context());
         }
       }
     }
@@ -95,11 +99,12 @@ class SumOpVarTypeInference : public framework::VarTypeInference {
     bool any_input_is_lod_tensor = std::any_of(
         inputs.begin(), inputs.end(), [block](const std::string& name) {
-          return block->Var(name)->GetType() == framework::VarDesc::LOD_TENSOR;
+          return block->FindRecursiveOrCreateVar(name)->GetType() ==
+                 framework::VarDesc::LOD_TENSOR;
         });
     auto is_tensor_array = [block](const std::string& name) {
-      return block->Var(name)->GetType() ==
-             framework::VarDesc::LOD_TENSOR_ARRAY;
+      return block->FindRecursiveOrCreateVar(name)->GetType() ==
+             framework::VarDesc::LOD_TENSOR_ARRAY;
     };
@@ -116,7 +121,7 @@ class SumOpVarTypeInference : public framework::VarTypeInference {
     }
     auto out_var_name = op_desc.Output("Out").front();
-    block->Var(out_var_name)->SetType(var_type);
+    block->FindRecursiveOrCreateVar(out_var_name)->SetType(var_type);
   }
 };
......
@@ -11,48 +11,18 @@
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "paddle/framework/lod_tensor_array.h"
-#include "paddle/framework/op_registry.h"
+#include "paddle/operators/array_operator.h"
 namespace paddle {
 namespace operators {
-class ArrayOpBase : public framework::OperatorBase {
- public:
-  ArrayOpBase(const std::string &type, const framework::VariableNameMap &inputs,
-              const framework::VariableNameMap &outputs,
-              const framework::AttributeMap &attrs)
-      : OperatorBase(type, inputs, outputs, attrs) {}
-  void Run(const framework::Scope &scope,
-           const platform::DeviceContext &dev_ctx) const override {}
-
- protected:
-  size_t GetOffset(const framework::Scope &scope,
-                   const platform::DeviceContext &dev_ctx) const {
-    auto *i = scope.FindVar(Input("I"));
-    PADDLE_ENFORCE(i != nullptr, "I must be set");
-    auto &i_tensor = i->Get<framework::LoDTensor>();
-    PADDLE_ENFORCE_EQ(i_tensor.numel(), 1);
-    size_t offset;
-    if (platform::is_gpu_place(i_tensor.place())) {
-      // FIXME: Avoid copy from GPU to CPU
-      framework::Tensor t;
-      t.CopyFrom(i_tensor, platform::CPUPlace(), dev_ctx);
-      dev_ctx.Wait();
-      offset = static_cast<size_t>(*t.data<int64_t>());
-    } else {
-      offset = static_cast<size_t>(*i_tensor.data<int64_t>());
-    }
-    return offset;
-  }
-};
-
-class WriteToArrayOp : public ArrayOpBase {
+class WriteToArrayOp : public ArrayOp {
  public:
   WriteToArrayOp(const std::string &type,
                  const framework::VariableNameMap &inputs,
                  const framework::VariableNameMap &outputs,
                  const framework::AttributeMap &attrs)
-      : ArrayOpBase(type, inputs, outputs, attrs) {}
+      : ArrayOp(type, inputs, outputs, attrs) {}
   void Run(const framework::Scope &scope,
            const platform::DeviceContext &dev_ctx) const override {
@@ -117,18 +87,19 @@ class WriteToArrayInferVarType : public framework::VarTypeInference {
                   framework::BlockDescBind *block) const override {
     for (auto &out_var : op_desc.OutputArgumentNames()) {
       VLOG(10) << "Set Variable " << out_var << " as LOD_TENSOR_ARRAY";
-      block->Var(out_var)->SetType(framework::VarDesc::LOD_TENSOR_ARRAY);
+      block->FindRecursiveOrCreateVar(out_var)->SetType(
+          framework::VarDesc::LOD_TENSOR_ARRAY);
     }
   }
 };
-class ReadFromArrayOp : public ArrayOpBase {
+class ReadFromArrayOp : public ArrayOp {
  public:
   ReadFromArrayOp(const std::string &type,
                   const framework::VariableNameMap &inputs,
                   const framework::VariableNameMap &outputs,
                   const framework::AttributeMap &attrs)
-      : ArrayOpBase(type, inputs, outputs, attrs) {}
+      : ArrayOp(type, inputs, outputs, attrs) {}
   void Run(const framework::Scope &scope,
            const platform::DeviceContext &dev_ctx) const override {
     auto *x = scope.FindVar(Input("X"));
......
@@ -63,9 +63,11 @@ class UniformRandomOp : public framework::OperatorWithKernel {
   }
  protected:
-  framework::DataType IndicateDataType(
+  framework::OpKernelType GetKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return static_cast<framework::DataType>(ctx.Attr<int>("data_type"));
+    return framework::OpKernelType(
+        static_cast<framework::DataType>(ctx.Attr<int>("data_type")),
+        ctx.device_context());
   }
 };
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "paddle/framework/executor.h"
#include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"
namespace paddle {
namespace operators {
using StepScopeVar = std::vector<framework::Scope *>;
using LoDTensor = framework::LoDTensor;
constexpr char kStepBlock[] = "step_block";
constexpr char kCondition[] = "Condition";
constexpr char kStepScopes[] = "StepScopes";
constexpr char kParamGrads[] = "X@Grad";
constexpr char kParameters[] = "X";
class WhileOp : public framework::OperatorBase {
public:
WhileOp(const std::string &type, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: framework::OperatorBase(type, inputs, outputs, attrs) {}
void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override {
PADDLE_ENFORCE_NOT_NULL(scope.FindVar(Input(kCondition)));
auto &cond = scope.FindVar(Input(kCondition))->Get<LoDTensor>();
PADDLE_ENFORCE_EQ(cond.dims(), paddle::framework::make_ddim({1}));
framework::Executor executor(dev_ctx);
auto *block = Attr<framework::BlockDescBind *>(kStepBlock);
auto *program = block->Program();
auto step_scopes =
scope.FindVar(Output(kStepScopes))->GetMutable<StepScopeVar>();
while (cond.data<bool>()[0]) {
auto &current_scope = scope.NewScope();
step_scopes->push_back(&current_scope);
executor.Run(*program, &current_scope, block->ID(),
false /*create_local_scope*/);
}
}
};
class WhileOpMaker : public framework::OpProtoAndCheckerMaker {
public:
WhileOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput(kParameters,
"A set of variables, which are required by operators inside the "
"block of While Op.")
.AsDuplicable();
AddInput(
kCondition,
"(Bool) An scalar. When it's False, the While Op will be terminated.")
.AsDuplicable();
AddOutput("Out",
"A set of variables, which will be assigned with values "
"generated by perators inside the block of While Op.")
.AsDuplicable();
AddOutput(kStepScopes,
"(StepScopeVar) A vector of local scope, which size equals the "
"step number of While Op. The i'th scope storages temporary "
"variables generated in the i'th step.");
AddAttr<framework::BlockDescBind *>(kStepBlock,
"The step block inside WhileOp");
AddComment(R"DOC(
)DOC");
}
};
class WhileGradOp : public framework::OperatorBase {
public:
WhileGradOp(const std::string &type, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: framework::OperatorBase(type, inputs, outputs, attrs) {}
void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override {
// PADDLE_ENFORCE(...)
framework::Executor executor(dev_ctx);
auto *block = Attr<framework::BlockDescBind *>(kStepBlock);
auto *program = block->Program();
auto *step_scopes =
scope.FindVar(Input(kStepScopes))->GetMutable<StepScopeVar>();
for (auto cur_scope_iter = step_scopes->rbegin();
cur_scope_iter != step_scopes->rend(); ++cur_scope_iter) {
executor.Run(*program, *cur_scope_iter, block->ID(), false);
auto &pg_names = Outputs(kParamGrads);
auto &p_names = Inputs(kParameters);
PADDLE_ENFORCE_EQ(pg_names.size(), p_names.size());
for (size_t prog_id = 0; prog_id < pg_names.size(); ++prog_id) {
auto inside_grad_name = framework::GradVarName(p_names[prog_id]);
        // // TODO(tonyyang-savil): Not sure we need the following
// // If does not compute gradient of that variable inside rnn,
// just
// // continue
// if (local_var_names.find(inside_grad_name) ==
// local_var_names.end()) {
// continue;
// }
// zero gradient variable in step 0
if (cur_scope_iter == step_scopes->rbegin()) {
auto *var = (*cur_scope_iter)->FindVar(inside_grad_name);
PADDLE_ENFORCE_NOT_NULL(var);
if (var->IsType<LoDTensor>()) {
auto &inside_tensor = var->Get<framework::LoDTensor>();
framework::AttributeMap attrs;
attrs["data_type"] = framework::ToDataType(inside_tensor.type());
attrs["shape"] = framework::vectorize2int(inside_tensor.dims());
attrs["value"] = 0.0f;
auto zero_op = framework::OpRegistry::CreateOp(
"fill_constant", {}, {{"Out", {pg_names[prog_id]}}}, attrs);
zero_op->Run(scope, dev_ctx);
}
}
// sum gradient
auto *outside_var = scope.FindVar(pg_names[prog_id]);
PADDLE_ENFORCE_NOT_NULL(outside_var);
auto &outside_tensor = *outside_var->GetMutable<framework::LoDTensor>();
std::string result_var_name;
auto *local_result_var = (*cur_scope_iter)->Var(&result_var_name);
auto &local_result_tensor =
*local_result_var->GetMutable<framework::LoDTensor>();
local_result_tensor.ShareDataWith(outside_tensor);
auto sum_op = framework::OpRegistry::CreateOp(
"sum", {{"X", {result_var_name, inside_grad_name}}},
{{"Out", {result_var_name}}}, {});
sum_op->Run(**cur_scope_iter, dev_ctx);
}
}
}
};
class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker {
public:
using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
protected:
virtual std::unique_ptr<framework::OpDescBind> Apply() const {
auto *grad = new framework::OpDescBind();
grad->SetType("while_grad");
for (auto &input_param : this->InputNames()) {
grad->SetInput(input_param, this->Input(input_param));
grad->SetOutput(framework::GradVarName(input_param),
this->InputGrad(input_param));
}
for (auto &output_param : this->OutputNames()) {
grad->SetInput(output_param, this->Output(output_param));
if (output_param != kStepScopes) {
grad->SetInput(framework::GradVarName(output_param),
this->OutputGrad(output_param));
}
}
grad->SetAttrMap(this->Attrs());
grad->SetBlockAttr(kStepBlock, *grad_block_[0]);
return std::unique_ptr<framework::OpDescBind>(grad);
}
};
} // namespace operators
} // namespace paddle
REGISTER_OPERATOR(while, paddle::operators::WhileOp,
paddle::operators::WhileOpMaker,
paddle::operators::WhileGradOpDescMaker);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <exception>
#include <mutex>
namespace paddle {
namespace platform {
/*
The current implementation of std::call_once has a bug described in
https://stackoverflow.com/questions/41717579/stdcall-once-hangs-on-second-call-after-callable-threw-on-first-call.
This is likely caused by a deeper bug of pthread_once, which is discussed in
https://patchwork.ozlabs.org/patch/482350/
This wrapper is a hack to avoid this bug.
*/
template <class Callable, class... Args>
inline void call_once(std::once_flag& flag, Callable&& f, Args&&... args) {
  std::exception_ptr ex;
  std::call_once(flag, [&]() {
    try {
      f(args...);
    } catch (...) {
      // Capture the exception without slicing it down to std::exception,
      // so the original type is preserved when it is rethrown below.
      ex = std::current_exception();
    }
  });
  // ex is non-null only in the invocation that actually ran the callable
  // and saw it throw; later invocations return normally.
  if (ex) {
    std::rethrow_exception(ex);
  }
}
} // namespace platform
} // namespace paddle
@@ -124,6 +124,11 @@ void CUDADeviceContext::Wait() const {
   PADDLE_ENFORCE(cudaStreamSynchronize(stream_));
 }
+void CUDADeviceContext::Finish() const {
+  Wait();
+  PADDLE_ENFORCE(cudaGetLastError());
+}
+
 Eigen::GpuDevice* CUDADeviceContext::eigen_device() const {
   return eigen_device_.get();
 }
......
@@ -46,6 +46,8 @@ class DeviceContext {
   DeviceType* GetEigenDevice() const;
   virtual void Wait() const {}
+
+  virtual void Finish() const {}
 };
 class CPUDeviceContext : public DeviceContext {
@@ -77,6 +79,9 @@ class CUDADeviceContext : public DeviceContext {
   /*! \brief Wait for all operations completion in the stream. */
   void Wait() const override;
+  /*! \brief Check potential errors for the cuda kernel calls. */
+  void Finish() const override;
+
   /*! \brief Return place in the device context. */
   Place GetPlace() const override;
......
@@ -17,6 +17,7 @@
 #include <dlfcn.h>
 #include <nccl.h>
 #include <mutex>
+#include "paddle/platform/call_once.h"
 #include "paddle/platform/dynload/dynamic_loader.h"
 namespace paddle {
@@ -27,18 +28,18 @@ extern std::once_flag nccl_dso_flag;
 extern void* nccl_dso_handle;
 #ifdef PADDLE_USE_DSO
 #define DECLARE_DYNAMIC_LOAD_NCCL_WRAP(__name)                         \
   struct DynLoad__##__name {                                           \
     template <typename... Args>                                        \
     auto operator()(Args... args) -> decltype(__name(args...)) {       \
       using nccl_func = decltype(__name(args...)) (*)(Args...);        \
-      std::call_once(nccl_dso_flag,                                    \
-                     paddle::platform::dynload::GetNCCLDsoHandle,      \
-                     &nccl_dso_handle);                                \
+      platform::call_once(nccl_dso_flag,                               \
+                          paddle::platform::dynload::GetNCCLDsoHandle, \
+                          &nccl_dso_handle);                           \
       void* p_##__name = dlsym(nccl_dso_handle, #__name);              \
       return reinterpret_cast<nccl_func>(p_##__name)(args...);         \
     }                                                                  \
   };                                                                   \
   extern DynLoad__##__name __name
 #else
 #define DECLARE_DYNAMIC_LOAD_NCCL_WRAP(__name) \
......
@@ -49,8 +49,6 @@ struct Transform<platform::CPUPlace> {
   template <typename InputIter, typename OutputIter, typename UnaryOperation>
   void operator()(const DeviceContext& context, InputIter first, InputIter last,
                   OutputIter result, UnaryOperation op) {
-    auto place = context.GetPlace();
-    PADDLE_ENFORCE(is_cpu_place(place), "It must use CPU place.");
     std::transform(first, last, result, op);
   }
@@ -59,8 +57,6 @@ struct Transform<platform::CPUPlace> {
   void operator()(const DeviceContext& context, InputIter1 first1,
                   InputIter1 last1, InputIter2 first2, OutputIter result,
                   BinaryOperation op) {
-    auto place = context.GetPlace();
-    PADDLE_ENFORCE(is_cpu_place(place), "It must use CPU place.");
     std::transform(first1, last1, first2, result, op);
   }
 };
......
@@ -113,11 +113,13 @@ PYBIND11_PLUGIN(core) {
       .def("set", PyCPUTensorSetFromArray<int>)
       .def("set", PyCPUTensorSetFromArray<double>)
       .def("set", PyCPUTensorSetFromArray<int64_t>)
+      .def("set", PyCPUTensorSetFromArray<bool>)
 #ifdef PADDLE_WITH_CUDA
       .def("set", PyCUDATensorSetFromArray<float>)
       .def("set", PyCUDATensorSetFromArray<int>)
       .def("set", PyCUDATensorSetFromArray<double>)
       .def("set", PyCUDATensorSetFromArray<int64_t>)
+      .def("set", PyCUDATensorSetFromArray<bool>)
 #endif
       .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
       .def("set_float_element", TensorSetElement<float>)
......
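The two pybind changes above and the CastToPyBuffer change below extend tensor set/get to bool arrays. A minimal round-trip sketch, assuming the core module path of this era of the codebase (it may differ between builds):

import numpy as np
import paddle.v2.fluid.core as core  # module path is an assumption

tensor = core.LoDTensor()
tensor.set(np.random.rand(3, 4) > 0.5, core.CPUPlace())  # bool ndarray accepted
arr = np.array(tensor)  # reads back through the now bool-aware buffer cast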
@@ -85,7 +85,7 @@ struct CastToPyBufferImpl<true, I, ARGS...> {
 }  // namespace details
 inline py::buffer_info CastToPyBuffer(framework::Tensor &tensor) {
   auto buffer_info =
-      details::CastToPyBufferImpl<true, 0, float, int, double, int64_t>()(
+      details::CastToPyBufferImpl<true, 0, float, int, double, int64_t, bool>()(
           tensor);
   return buffer_info;
 }
......
@@ -174,8 +174,6 @@ EOF
 EOF
 }
-set +xe
-
 cmake_gen
 run_build
 run_test
......
@@ -321,6 +321,19 @@ message ClipConfig {
   required double max = 2;
 }
+message ROIPoolConfig {
+  required uint32 pooled_width = 1;
+  required uint32 pooled_height = 2;
+  required float spatial_scale = 3;
+  optional uint32 height = 4 [ default = 1 ];
+  optional uint32 width = 5 [ default = 1 ];
+}
+
+message ScaleSubRegionConfig {
+  required ImageConfig image_conf = 1;
+  required float value = 2;
+}
+
 message LayerInputConfig {
   required string input_layer_name = 1;
   optional string input_parameter_name = 2;
@@ -342,6 +355,8 @@ message LayerInputConfig {
   optional MultiBoxLossConfig multibox_loss_conf = 16;
   optional DetectionOutputConfig detection_output_conf = 17;
   optional ClipConfig clip_conf = 18;
+  optional ScaleSubRegionConfig scale_sub_region_conf = 19;
+  optional ROIPoolConfig roi_pool_conf = 20;
 }
 message LayerConfig {
......
@@ -1969,6 +1969,18 @@ class DetectionOutputLayer(LayerBase):
         self.config.size = size
+@config_layer('roi_pool')
+class ROIPoolLayer(LayerBase):
+    def __init__(self, name, inputs, pooled_width, pooled_height,
+                 spatial_scale, num_channels, **xargs):
+        super(ROIPoolLayer, self).__init__(name, 'roi_pool', 0, inputs)
+        config_assert(len(inputs) == 2, 'ROIPoolLayer must have 2 inputs')
+        self.config.inputs[0].roi_pool_conf.pooled_width = pooled_width
+        self.config.inputs[0].roi_pool_conf.pooled_height = pooled_height
+        self.config.inputs[0].roi_pool_conf.spatial_scale = spatial_scale
+        self.set_cnn_layer(name, pooled_height, pooled_width, num_channels)
+
 @config_layer('data')
 class DataLayer(LayerBase):
     def __init__(self,
@@ -3801,6 +3813,25 @@ class SwitchOrderLayer(LayerBase):
         self.config.reshape_conf.width_axis.extend(reshape['width'])
+@config_layer('scale_sub_region')
+class ScaleSubRegionLayer(LayerBase):
+    def __init__(self, name, inputs, value, **xargs):
+        super(ScaleSubRegionLayer, self).__init__(
+            name, 'scale_sub_region', 0, inputs=inputs, **xargs)
+        scale_sub_region_conf = self.config.inputs[0].scale_sub_region_conf
+        scale_sub_region_conf.value = value
+
+        # get channel, width and height from input_0 layer
+        input_layer = self.get_input_layer(0)
+        image_conf = scale_sub_region_conf.image_conf
+        image_conf.img_size = input_layer.width
+        image_conf.img_size_y = input_layer.height
+        image_conf.channels = input_layer.size / (input_layer.width *
+                                                  input_layer.height)
+        self.set_cnn_layer(name, image_conf.img_size_y, image_conf.img_size,
+                           image_conf.channels)
+
 # Deprecated, use a new layer specific class instead
 @config_func
 def Layer(name, type, **xargs):
......
@@ -122,6 +122,7 @@ __all__ = [
     'cross_channel_norm_layer',
     'multibox_loss_layer',
     'detection_output_layer',
+    'roi_pool_layer',
     'spp_layer',
     'pad_layer',
     'eos_layer',
@@ -144,6 +145,7 @@ __all__ = [
     'img_conv3d_layer',
     'resize_layer',
     'sub_seq_layer',
+    'scale_sub_region_layer',
 ]
@@ -220,6 +222,7 @@ class LayerType(object):
     PRIORBOX_LAYER = 'priorbox'
     MULTIBOX_LOSS_LAYER = 'multibox_loss'
     DETECTION_OUTPUT_LAYER = 'detection_output'
+    ROI_POOL_LAYER = 'roi_pool'
     CTC_LAYER = 'ctc'
     WARP_CTC_LAYER = 'warp_ctc'
@@ -255,6 +258,8 @@ class LayerType(object):
     RESIZE = 'resize'
     SUB_SEQ_LAYER = 'subseq'
+    SCALE_SUB_REGION_LAYER = 'scale_sub_region'
+
     @staticmethod
     def is_layer_type(type_name):
         """
@@ -1302,6 +1307,50 @@ def detection_output_layer(input_loc,
         name, LayerType.DETECTION_OUTPUT_LAYER, parents=parents, size=size)
+@wrap_name_default("roi_pool")
+def roi_pool_layer(input,
+                   rois,
+                   pooled_width,
+                   pooled_height,
+                   spatial_scale,
+                   num_channels=None,
+                   name=None):
+    """
+    A layer used by Fast R-CNN to extract feature maps of ROIs from the last
+    feature map.
+
+    :param name: The name of this layer. It is optional.
+    :type name: basestring
+    :param input: The input layer.
+    :type input: LayerOutput.
+    :param rois: The input ROIs' data.
+    :type rois: LayerOutput.
+    :param pooled_width: The width after pooling.
+    :type pooled_width: int
+    :param pooled_height: The height after pooling.
+    :type pooled_height: int
+    :param spatial_scale: The spatial scale between the image and feature map.
+    :type spatial_scale: float
+    :param num_channels: The number of input channels.
+    :type num_channels: int
+    :return: LayerOutput object.
+    :rtype: LayerOutput
+    """
+    if num_channels is None:
+        assert input.num_filters is not None
+        num_channels = input.num_filters
+    size = num_channels * pooled_width * pooled_height
+    Layer(
+        name=name,
+        type=LayerType.ROI_POOL_LAYER,
+        inputs=[input.name, rois.name],
+        pooled_width=pooled_width,
+        pooled_height=pooled_height,
+        spatial_scale=spatial_scale,
+        num_channels=num_channels)
+    return LayerOutput(
+        name, LayerType.ROI_POOL_LAYER, parents=[input, rois], size=size)
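As an aside, a minimal usage sketch of the new layer (names are illustrative; the full test config test_roi_pool_layer appears later in this change):

from paddle.trainer_config_helpers import *

data = data_layer(name='data', size=3 * 14 * 14, height=14, width=14)
rois = data_layer(name='rois', size=10)
conv = img_conv_layer(input=data, filter_size=3, num_channels=3,
                      num_filters=16, padding=1, act=LinearActivation())
feats = roi_pool_layer(input=conv, rois=rois, pooled_width=7,
                       pooled_height=7, spatial_scale=1. / 16)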
@wrap_name_default("cross_channel_norm") @wrap_name_default("cross_channel_norm")
def cross_channel_norm_layer(input, name=None, param_attr=None): def cross_channel_norm_layer(input, name=None, param_attr=None):
""" """
@@ -5518,7 +5567,11 @@ def crf_decoding_layer(input,
     return LayerOutput(name, LayerType.CRF_DECODING_LAYER, parents, size=1)
-@wrap_act_default(act=SigmoidActivation())
+"""
+Following are cost Layers.
+"""
+
 @wrap_bias_attr_default(has_bias=True)
 @wrap_param_attr_default()
 @wrap_name_default()
@@ -5526,7 +5579,6 @@ def crf_decoding_layer(input,
 def nce_layer(input,
               label,
               num_classes=None,
-              act=None,
               param_attr=None,
               weight=None,
               num_neg_samples=10,
@@ -5538,8 +5590,8 @@ def nce_layer(input,
     Noise-contrastive estimation.
     Reference:
-        A fast and simple algorithm for training neural probabilistic language models.
-        http://www.icml.cc/2012/papers/855.pdf
+        A fast and simple algorithm for training neural probabilistic language
+        models. https://www.cs.toronto.edu/~amnih/papers/ncelm.pdf
     The example usage is:
@@ -5555,7 +5607,8 @@ def nce_layer(input,
     :type input: LayerOutput | list | tuple | collections.Sequence
     :param label: The input label.
     :type label: LayerOutput
-    :param weight: The scale of the cost. It is optional.
+    :param weight: The weight layer defines a weight for each sample in the
+                   mini-batch. The default value is None.
     :type weight: LayerOutput
     :param num_classes: The number of classes.
     :type num_classes: int
@@ -5564,15 +5617,20 @@ def nce_layer(input,
     :param param_attr: The parameter attribute. See ParameterAttribute for
                        details.
     :type param_attr: ParameterAttribute
-    :param num_neg_samples: The number of negative samples. 10 is the default.
+    :param num_neg_samples: The number of sampled negative labels. 10 is the default.
     :type num_neg_samples: int
-    :param neg_distribution: The probability distribution for generating the random negative
-                             labels. If this parameter is not set, a uniform distribution will
-                             be used. If not None, its length must be equal to num_classes.
+    :param neg_distribution: The discrete noisy distribution over the output
+                             space from which num_neg_samples negative labels
+                             are sampled. If this parameter is not set, a
+                             uniform distribution will be used. A user-defined
+                             distribution is a list whose length must be equal
+                             to the num_classes. Each member of the list defines
+                             the probability of a class given input x.
     :type neg_distribution: list | tuple | collections.Sequence | None
-    :param bias_attr: The bias attribute. If the parameter is set to False or an object
-                      whose type is not ParameterAttribute, no bias is defined. If the
-                      parameter is set to True, the bias is initialized to zero.
+    :param bias_attr: The attribute for bias. If this parameter is set False or
+                      any object whose type is not ParameterAttribute, no bias
+                      is added. If this parameter is set True, the bias is
+                      initialized to zero.
     :type bias_attr: ParameterAttribute | None | bool | Any
     :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for
                        details.
@@ -5600,8 +5658,6 @@ def nce_layer(input,
         assert isinstance(neg_distribution, collections.Sequence)
         assert len(neg_distribution) == num_classes
         assert abs(sum(neg_distribution) - 1.0) < 1e-5
-    if not isinstance(act, BaseActivation):
-        raise TypeError()
     ipts_for_layer = []
     parents = []
@@ -5623,7 +5679,7 @@ def nce_layer(input,
         type=LayerType.NCE_LAYER,
         num_classes=num_classes,
         neg_sampling_dist=neg_distribution,
-        active_type=act.name,
+        active_type=SigmoidActivation().name,
         num_neg_samples=num_neg_samples,
         inputs=ipts_for_layer,
         bias=ParamAttr.to_bias(bias_attr),
@@ -5633,12 +5689,7 @@ def nce_layer(input,
         LayerType.NCE_LAYER,
         parents=parents,
         size=l.config.size,
-        activation=act)
-"""
-following are cost Layers.
-"""
+        activation=SigmoidActivation())
 @wrap_name_default()
@@ -7084,3 +7135,54 @@ def sub_seq_layer(input, offsets, sizes, act=None, bias_attr=None, name=None):
         LayerType.SUB_SEQ_LAYER,
         parents=[input, offsets, sizes],
         size=input.size)
+
+@wrap_name_default('scale_sub_region')
+def scale_sub_region_layer(input, indices, value, name=None):
+    """
+    Given an image or feature map with CHW information, scale_sub_region_layer
+    can be used to multiply the values of a continuous sub-region by a real
+    value. You can provide the start and end indices of C, H and W for each
+    instance. Please note that all start indices count from 1.
+    The shape of indices should be [batch_size, 6] and the layout of each row
+    is [C_Start, C_End, H_Start, H_End, W_Start, W_End].
+
+    .. code-block:: python
+
+        scale_sub_region = scale_sub_region_layer(input=input,
+                                                  indices=indices,
+                                                  value=value)
+
+    :param name: The name of this layer. It is optional.
+    :type name: basestring
+    :param input: The input of this layer which should contain CHW information.
+    :type input: LayerOutput
+    :param indices: The start and end indices for C, H and W. The input value
+                    should be a 2-D matrix with shape [batch_size, 6].
+    :type indices: LayerOutput.
+    :param value: The value to multiply.
+    :type value: float
+    :return: LayerOutput object.
+    :rtype: LayerOutput
+    """
+
+    assert isinstance(input, LayerOutput), (
+        'The first input of scale_sub_region_layer, '
+        'must be a PaddlePaddle layer.')
+    assert isinstance(indices, LayerOutput), (
+        'The start and end indices for CHW, must be a PaddlePaddle layer.')
+    assert isinstance(value, float), (
+        'The value to multiply, must be a real value.')
+
+    Layer(
+        name=name,
+        type=LayerType.SCALE_SUB_REGION_LAYER,
+        inputs=[input.name, indices.name],
+        value=value)
+
+    return LayerOutput(
+        name,
+        LayerType.SCALE_SUB_REGION_LAYER,
+        parents=[input, indices],
+        num_filters=input.num_filters,
+        size=input.size)
@@ -9,7 +9,7 @@ test_seq_concat_reshape test_pad test_smooth_l1 test_multiplex_layer
 test_prelu_layer test_row_conv test_detection_output_layer test_multibox_loss_layer
 test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer
 test_kmax_seq_socre_layer test_sub_nested_seq_select_layer test_scale_shift_layer
-test_seq_slice_layer test_cross_entropy_over_beam test_pooling3D_layer
-test_conv3d_layer test_deconv3d_layer test_BatchNorm3D test_resize_layer)
+test_seq_slice_layer test_cross_entropy_over_beam test_roi_pool_layer test_pooling3D_layer
+test_conv3d_layer test_deconv3d_layer test_BatchNorm3D test_resize_layer test_scale_sub_region_layer)
 export whole_configs=(test_split_datasource)
type: "nn"
layers {
name: "data"
type: "data"
size: 588
active_type: ""
height: 14
width: 14
}
layers {
name: "rois"
type: "data"
size: 10
active_type: ""
}
layers {
name: "__conv_0__"
type: "exconv"
size: 3136
active_type: ""
inputs {
input_layer_name: "data"
input_parameter_name: "___conv_0__.w0"
conv_conf {
filter_size: 3
channels: 3
stride: 1
padding: 1
groups: 1
filter_channels: 3
output_x: 14
img_size: 14
caffe_mode: true
filter_size_y: 3
padding_y: 1
stride_y: 1
output_y: 14
img_size_y: 14
}
}
bias_parameter_name: "___conv_0__.wbias"
num_filters: 16
shared_biases: true
height: 14
width: 14
}
layers {
name: "__roi_pool_0__"
type: "roi_pool"
size: 784
active_type: ""
inputs {
input_layer_name: "__conv_0__"
roi_pool_conf {
pooled_width: 7
pooled_height: 7
spatial_scale: 0.0625
}
}
inputs {
input_layer_name: "rois"
}
height: 7
width: 7
}
parameters {
name: "___conv_0__.w0"
size: 432
initial_mean: 0.0
initial_std: 0.272165526976
initial_strategy: 0
initial_smart: false
}
parameters {
name: "___conv_0__.wbias"
size: 16
initial_mean: 0.0
initial_std: 0.0
dims: 16
dims: 1
initial_strategy: 0
initial_smart: false
}
input_layer_names: "data"
input_layer_names: "rois"
output_layer_names: "__roi_pool_0__"
sub_models {
name: "root"
layer_names: "data"
layer_names: "rois"
layer_names: "__conv_0__"
layer_names: "__roi_pool_0__"
input_layer_names: "data"
input_layer_names: "rois"
output_layer_names: "__roi_pool_0__"
is_recurrent_layer_group: false
}
type: "nn"
layers {
name: "data"
type: "data"
size: 2016
active_type: ""
height: 48
width: 42
}
layers {
name: "indices"
type: "data"
size: 6
active_type: ""
}
layers {
name: "__scale_sub_region_0__"
type: "scale_sub_region"
size: 2016
active_type: ""
inputs {
input_layer_name: "data"
scale_sub_region_conf {
image_conf {
channels: 1
img_size: 42
img_size_y: 48
}
value: 0.0
}
}
inputs {
input_layer_name: "indices"
}
height: 48
width: 42
}
input_layer_names: "data"
input_layer_names: "indices"
output_layer_names: "__scale_sub_region_0__"
sub_models {
name: "root"
layer_names: "data"
layer_names: "indices"
layer_names: "__scale_sub_region_0__"
input_layer_names: "data"
input_layer_names: "indices"
output_layer_names: "__scale_sub_region_0__"
is_recurrent_layer_group: false
}
from paddle.trainer_config_helpers import *
data = data_layer(name='data', size=3 * 14 * 14, height=14, width=14)
rois = data_layer(name='rois', size=10)
conv = img_conv_layer(
input=data,
filter_size=3,
num_channels=3,
num_filters=16,
padding=1,
act=LinearActivation(),
bias_attr=True)
roi_pool = roi_pool_layer(
input=conv,
rois=rois,
pooled_width=7,
pooled_height=7,
spatial_scale=1. / 16)
outputs(roi_pool)
from paddle.trainer_config_helpers import *
settings(batch_size=1000, learning_rate=1e-5)
data = data_layer(name='data', size=2016, height=48, width=42)
indices = data_layer(name='indices', size=6)
scale_sub_region = scale_sub_region_layer(
input=data, indices=indices, value=0.0)
outputs(scale_sub_region)
@@ -22,6 +22,7 @@ parse training set and test set into paddle reader creators.
 import numpy as np
 import os
 import paddle.v2.dataset.common
+from paddle.v2.parameters import Parameters
 __all__ = ['train', 'test']
@@ -34,7 +35,8 @@ feature_names = [
 UCI_TRAIN_DATA = None
 UCI_TEST_DATA = None
+URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/fit_a_line.tar'
+MD5_MODEL = '52fc3da8ef3937822fcdd87ee05c0c9b'
 def feature_range(maximums, minimums):
     import matplotlib
@@ -111,6 +113,13 @@ def test():
     return reader
+def model():
+    tar_file = paddle.v2.dataset.common.download(URL_MODEL, 'fit_a_line.tar',
+                                                 MD5_MODEL)
+    with open(tar_file, 'r') as f:
+        parameters = Parameters.from_tar(f)
+    return parameters
+
 def fetch():
     paddle.v2.dataset.common.download(URL, 'uci_housing', MD5)
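A hypothetical usage of the new helper, which downloads the pretrained fit_a_line model once and returns its parameters:

import paddle.v2.dataset.uci_housing as uci_housing

parameters = uci_housing.model()  # Parameters loaded from fit_a_line.tar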
......
@@ -285,7 +285,7 @@ class Operator(object):
         self.desc.check_attrs()
         no_kernel_op_set = {
             'feed', 'fetch', 'save', 'load', 'recurrent',
-            'rnn_memory_helper_grad'
+            'rnn_memory_helper_grad', 'while'
         }
         if type not in no_kernel_op_set:
             self.desc.infer_var_type(self.block.desc)
......
@@ -22,12 +22,36 @@ def fc(input,
        num_flatten_dims=1,
        main_program=None,
        startup_program=None):
-    # create helper
+    """
+    Fully Connected Layer.
+
+    Args:
+        input: The input tensor to the function
+        size: The size of the layer
+        param_attr: The parameters/weights to the FC Layer
+        bias_attr: The bias parameter for the FC layer
+        name: Name/alias of the function
+        act: Activation to be applied to the output of FC layer
+        num_flatten_dims: Number of columns in input
+        main_program: Name of the main program that calls this
+        startup_program: Name of the startup program
+
+    This function can take in multiple inputs and performs the Fully Connected
+    function (linear transformation) on top of each of them.
+    So for input x, the output will be: Wx + b, where W is the parameter,
+    b the bias and x is the input.
+
+    The function also applies an activation (non-linearity) on top of the
+    output, if activation is passed in the input.
+
+    All the input variables of this function are passed in as local variables
+    to the LayerHelper constructor.
+    """
     helper = LayerHelper('fc', **locals())
     dtype = helper.input_dtype()
-    # mul
     mul_results = []
     for input_var, param_attr in helper.iter_inputs_and_params():
         input_shape = input_var.shape
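As a plain NumPy sketch (shapes illustrative, names hypothetical) of what the docstring above describes, fc computes an activation applied on top of Wx + b:

import numpy as np

x = np.random.rand(4, 13)       # a batch of 4 inputs, feature size 13
W = np.random.rand(13, 3)       # weight parameter created by the helper
b = np.zeros(3)                 # bias parameter
out = np.tanh(x.dot(W) + b)     # activation on top of the linear transform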
@@ -68,6 +92,26 @@ def embedding(input,
               param_attr=None,
               main_program=None,
               startup_program=None):
+    """
+    Embedding Layer.
+
+    Args:
+        input: The input to the function
+        size: The size of the layer
+        data_type: The type of data: float32, float_16, int etc
+        is_sparse: A flag that declares whether the input is sparse
+        param_attr: Parameters for this layer
+        main_program: Name of the main program that calls this
+        startup_program: Name of the startup program
+
+    This function takes in the input (which is a vector of IDs) and
+    performs a lookup in the lookup_table using these IDs, to produce
+    the embedding of each ID in the input.
+
+    All the input variables of this function are passed in as local variables
+    to the LayerHelper constructor.
+    """
     helper = LayerHelper('embedding', **locals())
     w = helper.create_parameter(
         attr=helper.param_attr, shape=size, dtype=data_type)
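A hypothetical call matching the docstring, assuming integer word IDs and a 10000 x 32 lookup table:

ids = data(name='word_id', shape=[1], data_type='int64')
emb = embedding(input=ids, size=[10000, 32], data_type='float32')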
@@ -87,7 +131,30 @@ def data(name,
          type=core.VarDesc.VarType.LOD_TENSOR,
          append_batch_size=True,
          main_program=None,
-         startup_program=None):
+         startup_program=None,
+         stop_gradient=True):
+    """
+    Data Layer.
+
+    Args:
+        name: The name/alias of the function
+        shape: Tuple declaring the shape.
+        data_type: The type of data: float32, float_16, int etc
+        type: The output type. By default it is LOD_TENSOR.
+        append_batch_size: Whether or not to append the data as a batch.
+        main_program: Name of the main program that calls this
+        startup_program: Name of the startup program
+        stop_gradient: A boolean that states whether gradient should flow.
+
+    This function takes in input and, based on whether the data has to be
+    returned back as a minibatch, creates the global variable using
+    the helper functions. The global variables can be accessed by all the
+    following operators and layers in the graph.
+
+    All the input variables of this function are passed in as local variables
+    to the LayerHelper constructor.
+    """
     helper = LayerHelper('data', **locals())
     shape = list(shape)
     for i in xrange(len(shape)):
@@ -101,15 +168,40 @@ def data(name,
         shape = [-1] + shape  # append batch size as -1
     return helper.create_global_variable(
-        name=name, shape=shape, dtype=data_type, type=type, stop_gradient=True)
+        name=name,
+        shape=shape,
+        dtype=data_type,
+        type=type,
+        stop_gradient=stop_gradient)
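For example, with append_batch_size=True (the default) the batch dimension is prepended as -1, so the following hypothetical call creates a global variable of shape [-1, 13]:

x = data(name='x', shape=[13], data_type='float32')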
 def _convert_(name):
+    """
+    Formatting.
+
+    Args:
+        name: The name/alias
+
+    This function takes in a name and converts it to the standard format of
+    group1_group2, where group1 can contain letters and digits and group2
+    consists of capital letters.
+    """
     s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
     return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
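Concretely, the two substitutions split at the letter-to-word and lower-to-upper boundaries before lower-casing:

assert _convert_('MaxPool') == 'max_pool'
assert _convert_('ROIPool') == 'roi_pool'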
 def _create_op_func_(op_type):
+    """
+    Create an Operator for a Function.
+
+    Args:
+        op_type: The name of the operator to be created
+
+    This function takes in the operator type (sigmoid, mean, average etc) and
+    creates the operator functionality.
+    """
     op_proto = OpProtoHolder.instance().get_op_proto(op_type)
     not_intermediate_outputs = \
         filter(lambda output: not output.intermediate, op_proto.outputs)
@@ -117,26 +209,26 @@ def _create_op_func_(op_type):
         filter(lambda output: output.intermediate, op_proto.outputs)
     if len(not_intermediate_outputs) != 1:
-        raise ValueError(
-            "Only one not intermediate output operator can be automatically generated"
-        )
+        raise ValueError("Only one non intermediate output operator can be",
+                         "automatically generated")
     if not_intermediate_outputs[0].duplicable:
         raise ValueError(
-            "Only not duplicable op can be automatically generated")
+            "Only non duplicable op can be automatically generated")
     for output in intermediate_outputs:
         if output.duplicable:
-            raise ValueError(
-                "Only when all intermediate ops are not duplicable, "
-                "this op can be automatically generated")
+            raise ValueError("The op can be automatically generated only when ",
+                             "all intermediate ops are not duplicable")
     o_name = not_intermediate_outputs[0].name
     intermediate_output_names = [output.name for output in intermediate_outputs]
-    def func(**kwargs):
-        helper = LayerHelper(op_type, **kwargs)
-        inputs = dict()
+    def infer_and_check_data_type(op_proto, **kwargs):
+        """
+        This function performs the sanity check for data_type and
+        instance type.
+        """
         dtype = None
         for ipt in op_proto.inputs:
             name = _convert_(ipt.name)
...@@ -153,6 +245,25 @@ def _create_op_func_(op_type): ...@@ -153,6 +245,25 @@ def _create_op_func_(op_type):
elif dtype != each.data_type: elif dtype != each.data_type:
raise ValueError( raise ValueError(
"operator {0} must input same dtype".format(op_type)) "operator {0} must input same dtype".format(op_type))
return dtype
def func(**kwargs):
"""
This function implements the function for the operator. This process
involves doing the sanity check (using the function above), reading
inputs from protobuf and applying the activations on top.
"""
helper = LayerHelper(op_type, **kwargs)
dtype = infer_and_check_data_type(op_proto, **kwargs)
inputs = dict()
for ipt in op_proto.inputs:
name = _convert_(ipt.name)
val = kwargs.pop(name, [])
if not isinstance(val, list) and not isinstance(val, tuple):
val = [val]
inputs[ipt.name] = val
outputs = dict()
@@ -178,9 +289,32 @@ _create_op_func_('reshape')
_create_op_func_('elementwise_add')
_create_op_func_('sigmoid')
_create_op_func_('scale')
_create_op_func_('reshape')
_create_op_func_('transpose')
def fill_constant(data_type, shape, value=None, program=None):
"""
This function creates a tensor , with shape as mentioned in the input and
specified data_type and fills this up with a constant value that
comes in the input.
"""
helper = LayerHelper('fill_constant', **locals())
out = helper.create_tmp_variable(dtype=data_type)
helper.append_op(
type='fill_constant',
outputs={'Out': [out]},
attrs={'data_type': data_type,
'shape': shape,
'value': value})
return out
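# A minimal usage sketch of this variant (it takes `data_type`; a later
# redefinition below takes `dtype` instead): create a one-element float32
# tensor holding 1.0.
#
#   one = fill_constant(data_type='float32', shape=[1], value=1.0)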
def cast(x, data_type, main_program=None):
"""
This function takes in the input, casts it from its current data type
to the given data_type, and returns the result as the output.
"""
helper = LayerHelper('cast', **locals())
out = helper.create_tmp_variable(dtype=data_type)
helper.append_op(
@@ -193,6 +327,10 @@ def cast(x, data_type, main_program=None):
def concat(input, axis, main_program=None, startup_program=None):
"""
This function concatenates the input along the given axis
and returns the result as the output.
"""
helper = LayerHelper('concat', **locals())
out = helper.create_tmp_variable(dtype=helper.input_dtype())
helper.append_op(
@@ -204,6 +342,10 @@ def concat(input, axis, main_program=None, startup_program=None):
def sums(input, main_program=None, startup_program=None):
"""
This function takes in the list of input variables, sums them
element-wise, and returns the result as the output.
"""
helper = LayerHelper('sum', **locals())
out = helper.create_tmp_variable(dtype=helper.input_dtype())
helper.append_op(type='sum', inputs={'X': input}, outputs={'Out': out})
@@ -211,6 +353,10 @@ def sums(input, main_program=None, startup_program=None):
def cos_sim(X, Y, **kwargs):
"""
This function computes the cosine similarity between the two tensors
X and Y and returns the result as the output.
"""
helper = LayerHelper('cos_sim', **kwargs)
out = helper.create_tmp_variable(dtype=X.data_type)
xnorm = helper.create_tmp_variable(dtype=X.data_type)
@@ -226,6 +372,9 @@ def cos_sim(X, Y, **kwargs):
def cross_entropy(input, label, **kwargs):
"""
This function computes cross_entropy using the input and label.
"""
helper = LayerHelper('cross_entropy', **kwargs)
out = helper.create_tmp_variable(dtype=input.data_type)
helper.append_op(
@@ -238,6 +387,10 @@ def cross_entropy(input, label, **kwargs):
def square_error_cost(input, label, **kwargs):
"""
This function returns the squared error cost of input minus label,
appending the ops that compute it.
"""
helper = LayerHelper('square_error_cost', **kwargs)
minus_out = helper.create_tmp_variable(dtype=input.data_type)
helper.append_op(
@@ -253,6 +406,10 @@ def square_error_cost(input, label, **kwargs):
def accuracy(input, label, k=1, **kwargs):
"""
This function computes the top-k accuracy of the input against the
label, first extracting the top_k values and their indices.
"""
helper = LayerHelper("accuracy", **kwargs)
topk_out = helper.create_tmp_variable(dtype=input.data_type)
topk_indices = helper.create_tmp_variable(dtype="int64")
@@ -285,6 +442,11 @@ def sequence_conv(input,
param_attr=None,
main_program=None,
startup_program=None):
"""
This function creates the op for sequence_conv, using the inputs and
the other convolutional configurations for the filters and stride,
as given in the input parameters.
"""
# FIXME(dzh) : want to unify the argument of python layer
# function. So we ignore some unnecessary attributes.
# such as, padding_trainable, context_start.
@@ -325,6 +487,13 @@ def conv2d(input,
param_attr=None,
main_program=None,
startup_program=None):
"""
This function creates the op for a 2-dimensional convolution, using
the filter parameters (size, dimensionality etc.), stride, and other
configurations of a convolution operation.
This function can also append an activation on top of the
conv2d output, if specified in the input parameters.
"""
helper = LayerHelper('conv2d', **locals())
dtype = helper.input_dtype()
@@ -371,6 +540,11 @@ def conv2d(input,
def sequence_pool(input, pool_type, **kwargs):
"""
This function adds the operator for sequence pooling.
The pooling is applied on top of the input, using the pool_type
given in the parameters.
"""
helper = LayerHelper('sequence_pool', input=input, **kwargs)
dtype = helper.input_dtype()
pool_out = helper.create_tmp_variable(dtype)
@@ -394,6 +568,10 @@ def pool2d(input,
global_pooling=False,
main_program=None,
startup_program=None):
"""
This function adds the operator for pooling in 2 dimensions, using the
pooling configurations given in the input parameters.
"""
if pool_type not in ["max", "avg"]:
raise ValueError(
"Unknown pool_type: '%s'. It can only be 'max' or 'avg'.",
@@ -414,9 +592,9 @@ def pool2d(input,
inputs={"X": input},
outputs={"Out": pool_out},
attrs={
"pooling_type": pool_type,
"ksize": pool_size,
"global_pooling": global_pooling,
"strides": pool_stride,
"paddings": pool_padding
})
@@ -434,6 +612,10 @@ def batch_norm(input,
data_layout='NCHW',
main_program=None,
startup_program=None):
"""
This function helps create an operator that implements
the BatchNorm layer, using the configurations from the input parameters.
"""
helper = LayerHelper('batch_norm', **locals())
dtype = helper.input_dtype()
@@ -505,8 +687,10 @@ def batch_norm(input,
class BlockGuard(object):
"""
BlockGuard class.
BlockGuard class is used to create a sub-block in a program by
using the Python `with` keyword.
"""
def __init__(self, main_program):
@@ -525,9 +709,15 @@ class BlockGuard(object):
class StaticRNNGuard(BlockGuard):
"""
StaticRNNGuard class.
StaticRNNGuard class is used to create a StaticRNN block in a program.
"""
def __init__(self, rnn):
if not isinstance(rnn, StaticRNN):
raise TypeError("StaticRNNGuard takes a StaticRNN")
super(StaticRNNGuard, self).__init__(rnn.helper.main_program)
self.rnn = rnn
@@ -545,12 +735,18 @@ class StaticRNNGuard(BlockGuard):
class StaticRNNMemoryLink(object):
"""
StaticRNNMemoryLink class.
Args:
init (Variable): the initial variable for Memory.
pre_mem (Variable): the memory variable in the previous time step.
mem (Variable): the memory variable in the current time step.
StaticRNNMemoryLink class is used to create a link between two
memory cells of a StaticRNN.
"""
def __init__(self, init, pre_mem, mem=None):
@@ -560,6 +756,12 @@ class StaticRNNMemoryLink(object):
class StaticRNN(object):
"""
StaticRNN class.
StaticRNN class is used to create a StaticRNN. The RNN will have its
own parameters like inputs, outputs, memories, status and length.
"""
BEFORE_RNN_BLOCK = 0
IN_RNN_BLOCK = 1
AFTER_RNN_BLOCK = 2
@@ -588,15 +790,15 @@ class StaticRNN(object):
init_value=0.0,
init_batch_dim_idx=0,
ref_batch_dim_idx=1):
"""
Args:
init: boot memory; if not set, shape and batch_ref must be provided
shape: shape of the boot memory
batch_ref: batch size reference variable
init_value: the init value of boot memory
init_batch_dim_idx: the index of batch size in init's dimension
ref_batch_dim_idx: the index of batch size in batch_ref's dimension
"""
self._assert_in_rnn_block_('memory')
if init is None:
if shape is None or batch_ref is None:
@@ -762,7 +964,131 @@ class StaticRNN(object):
})
class WhileGuard(BlockGuard):
def __init__(self, while_op):
if not isinstance(while_op, While):
raise TypeError("WhileGuard takes a while op")
super(WhileGuard, self).__init__(while_op.helper.main_program)
self.while_op = while_op
def __enter__(self):
self.while_op.status = While.IN_WHILE_BLOCK
return super(WhileGuard, self).__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
return False
self.while_op.status = While.AFTER_WHILE_BLOCK
self.while_op.complete()
return super(WhileGuard, self).__exit__(exc_type, exc_val, exc_tb)
class While(object):
"""
While class.
While is used to build a while-loop control-flow block in a program;
the body ops run as a sub-block while the condition variable holds.
"""
BEFORE_WHILE_BLOCK = 0
IN_WHILE_BLOCK = 1
AFTER_WHILE_BLOCK = 2
def __init__(self, cond, name=None, main_program=None):
self.helper = LayerHelper("while", name=name, main_program=main_program)
self.status = While.BEFORE_WHILE_BLOCK
if not isinstance(cond, Variable):
raise TypeError("condition should be a variable")
if cond.data_type != core.DataType.BOOL:
raise TypeError("condition should be a bool variable")
if reduce(lambda a, b: a * b, cond.shape, 1) != 1:
raise TypeError("condition should be a bool scalar")
self.cond_var = cond
def block(self):
return WhileGuard(self)
def complete(self):
main_program = self.helper.main_program
while_block = main_program.current_block()
parent_block = main_program.block(main_program.current_block()
.parent_idx)
inner_outputs = {self.cond_var.name}
x_name_list = set()
for op in while_block.ops:
for iname in op.input_names:
for in_var_name in op.input(iname):
if in_var_name not in inner_outputs:
x_name_list.add(in_var_name)
for oname in op.output_names:
for out_var_name in op.output(oname):
inner_outputs.add(out_var_name)
out_vars = []
for inner_out_name in inner_outputs:
if inner_out_name in parent_block.vars:
out_vars.append(parent_block.var(inner_out_name))
step_scope = parent_block.create_var(
type=core.VarDesc.VarType.STEP_SCOPES)
parent_block.append_op(
type='while',
inputs={
'X': [parent_block.var(x_name) for x_name in x_name_list],
'Condition': [self.cond_var]
},
outputs={'Out': out_vars,
'StepScopes': [step_scope]},
attrs={'step_block': while_block})
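# A minimal usage sketch of the While block (hypothetical names; assumes
# `i` and `n` are int64 scalar variables and that `less_than` and
# `increment` from this module are in scope):
#
#   cond = less_than(x=i, y=n)
#   while_op = While(cond=cond)
#   with while_op.block():
#       # ... body ops that update the loop state ...
#       i = increment(x=i, in_place=True)
#       less_than(x=i, y=n, cond=cond)  # refresh the condition variable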
def lstm(x,
c_pre_init,
hidden_dim,
forget_bias=None,
main_program=None,
startup_program=None):
"""
This function helps create an operator for the LSTM (Long Short-Term
Memory) cell that can be used inside an RNN.
"""
helper = LayerHelper('lstm_unit', **locals())
rnn = StaticRNN()
with rnn.step():
c_pre = rnn.memory(init=c_pre_init)
x_t = rnn.step_input(x)
before_fc = concat(
input=[x_t, c_pre],
axis=1,
main_program=main_program,
startup_program=startup_program)
after_fc = fc(input=before_fc,
size=hidden_dim * 4,
main_program=main_program,
startup_program=startup_program)
data_type = x.data_type
c = helper.create_tmp_variable(data_type)
h = helper.create_tmp_variable(data_type)
helper.append_op(
type='lstm_unit',
inputs={"X": after_fc,
"C_prev": c_pre},
outputs={"C": c,
"H": h},
attrs={"forget_bias": forget_bias})
rnn.update_memory(c_pre, c)
rnn.output(h)
return rnn()
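# A minimal usage sketch (hypothetical shapes; assumes `x` is a LoD tensor
# of step inputs and `c0` holds the initial cell state with hidden_dim
# columns):
#
#   hidden = lstm(x=x, c_pre_init=c0, hidden_dim=32)
#
# As the step block above shows, each step concatenates [x_t, c_pre],
# projects it to 4 * hidden_dim with a fully-connected layer, and feeds the
# result to the lstm_unit op together with the previous cell state.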
def lod_rank_table(x, level=0, main_program=None):
"""
This function creates an operator that builds a LOD_RANK_TABLE
from the input x at the given LoD level.
"""
helper = LayerHelper("lod_rank_table", **locals())
table = helper.create_variable(
type=core.VarDesc.VarType.LOD_RANK_TABLE,
@@ -775,8 +1101,46 @@ def lod_rank_table(x, level=0, main_program=None):
return table
def lod_tensor_to_array(x, table, main_program=None):
"""
This function creates an operator to convert an LOD_Tensor to
an array.
"""
helper = LayerHelper("lod_tensor_to_array", **locals())
array = helper.create_variable(
name=unique_name("lod_tensor_to_array"),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=x.data_type)
helper.append_op(
type='lod_tensor_to_array',
inputs={'X': x,
'RankTable': table},
outputs={'Out': array})
return array
def array_to_lod_tensor(x, table, main_program=None):
"""
This function creates an operator to convert an array to a
LOD_Tensor.
"""
helper = LayerHelper("array_to_lod_tensor", **locals())
tmp = helper.create_tmp_variable(dtype=x.data_type)
helper.append_op(
type="array_to_lod_tensor",
inputs={'X': x,
'RankTable': table},
outputs={'Out': tmp})
return tmp
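# A minimal round-trip sketch (hypothetical names; `x` is a LoDTensor
# variable), mirroring the unit test further below: split a LoDTensor into
# an array and merge it back.
#
#   table = lod_rank_table(x, level=0)
#   array = lod_tensor_to_array(x, table)
#   y = array_to_lod_tensor(array, table)  # y holds the same data as x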
def fill_constant(shape, dtype, value, main_program=None):
"""
This function creates a tensor with the shape mentioned in the input
and the specified dtype, and fills it with the constant value passed
in the input. It also sets stop_gradient to True.
"""
helper = LayerHelper("fill_constant", **locals())
out = helper.create_tmp_variable(dtype=dtype)
helper.append_op(
type='fill_constant',
@@ -792,25 +1156,45 @@ def fill_constant(shape, dtype, value, main_program=None):
def ones(shape, dtype, main_program=None):
"""
This function performs the same task as fill_constant() declared above,
with the constant value being 1.0.
"""
return fill_constant(value=1.0, **locals())
def zeros(shape, dtype, main_program=None):
"""
This function performs the same task as fill_constant() declared above,
with the constant value being 0.0.
"""
return fill_constant(value=0.0, **locals())
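# A minimal usage sketch (these wrappers forward **locals() to
# fill_constant above):
#
#   i = zeros(shape=[1], dtype='int64')      # one-element int64 tensor of 0
#   o = ones(shape=[2, 3], dtype='float32')  # 2x3 tensor filled with 1.0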
def increment(x, value=1.0, in_place=True, main_program=None):
"""
This function creates an operator to increment each value in the input
`x` by the given `value`. The operation is performed in-place by
default.
"""
helper = LayerHelper("increment", **locals())
if not in_place:
out = helper.create_tmp_variable(dtype=x.data_type)
else:
out = x
helper.append_op(
type='increment',
inputs={'X': [x]},
outputs={'Out': [out]},
attrs={'step': value})
return out
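# A minimal usage sketch: advance an int64 counter in place, as the array
# read/write test further below does.
#
#   i = zeros(shape=[1], dtype='int64')
#   i = increment(x=i)                             # i -> 1, in place
#   j = increment(x=i, value=2.0, in_place=False)  # new variable, i + 2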
def array_write(x, i, array=None, main_program=None):
"""
This function creates an operator to write the data `x` into a
LOD_TENSOR_ARRAY at the position given by `i`, creating the array
if it is not provided.
"""
helper = LayerHelper('array_write', **locals())
if array is None:
array = helper.create_variable(
@@ -825,7 +1209,31 @@ def array_write(x, i, array=None, main_program=None):
return array
def create_array(dtype, main_program=None):
"""
This function creates a LOD_TENSOR_ARRAY variable of the given dtype.
"""
helper = LayerHelper("array", **locals())
return helper.create_variable(
name="{0}.out".format(helper.name),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=dtype)
def less_than(x, y, cond=None, main_program=None):
"""
This function creates an operator that computes the element-wise
comparison x < y, writing the boolean result into `cond` (created
if not given).
"""
helper = LayerHelper("less_than", **locals())
if cond is None:
cond = helper.create_tmp_variable(dtype='bool')
cond.stop_gradient = True
helper.append_op(
type='less_than', inputs={'X': [x],
'Y': [y]}, outputs={'Out': [cond]})
return cond
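# A minimal usage sketch (`i` and `n` being one-element variables of the
# same dtype): build the boolean scalar used as a While condition above.
#
#   cond = less_than(x=i, y=n)   # holds True while i < n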
def array_read(array, i, main_program=None):
"""
This function creates an operator to read the element at position `i`
from a LOD_TENSOR_ARRAY.
"""
helper = LayerHelper('array_read', **locals())
if not isinstance(
array,
@@ -838,3 +1246,33 @@ def array_read(array, i, main_program=None):
'I': [i]},
outputs={'Out': [out]})
return out
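# A minimal write/read round trip (`x0` is a hypothetical input variable),
# mirroring the array read/write test further below:
#
#   i = zeros(shape=[1], dtype='int64')
#   arr = array_write(x=x0, i=i)     # arr[0] = x0
#   a0 = array_read(array=arr, i=i)  # reads x0 back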
def shrink_memory(x, i, table, main_program=None):
"""
This function creates an operator to shrink_rnn_memory using the RankTable
as mentioned in the input parameter.
"""
helper = LayerHelper('shrink_memory', **locals())
out = helper.create_tmp_variable(dtype=x.data_type)
helper.append_op(
type='shrink_rnn_memory',
inputs={'X': [x],
'I': [i],
'RankTable': [table]},
outputs={'Out': [out]},
attrs={})
return out
def array_length(array, main_program=None):
"""
This function creates an operator to find the length of the
LOD_TENSOR_ARRAY.
"""
helper = LayerHelper('array_length', **locals())
tmp = helper.create_tmp_variable(dtype='int64')
tmp.stop_gradient = True
helper.append_op(
type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [tmp]})
return tmp
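# A minimal usage sketch (mirrors the unit test further below, `tmp` being
# any variable to store): writing at position 10 gives an array of length 11.
#
#   i = fill_constant(shape=[1], dtype='int64', value=10)
#   arr = array_write(tmp, i=i)
#   arr_len = array_length(arr)   # evaluates to 11 at runtime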
@@ -35,15 +35,21 @@ class Optimizer(object):
"""
raise NotImplementedError()
def _create_param_lr(self, param_and_grad):
# create learning rate variable for every parameter
param = param_and_grad[0]
param_lr = param.optimize_attr['learning_rate']
param_lr_shape = [1]
param_lr_var = self.helper.create_global_variable(
name=unique_name("learning_rate"),
dtype='float32',
shape=param_lr_shape,
lod_level=1,
persistable=True)
param_lr = param_lr * self._learning_rate
self.helper.set_variable_initializer(
var=param_lr_var, initializer=ConstantInitializer(param_lr))
return param_lr_var
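# Note (added for clarity): the effective learning rate of each parameter
# is its per-parameter scale times the global rate,
#   lr_eff = param.optimize_attr['learning_rate'] * self._learning_rate
# materialized once per parameter as a one-element persistable variable.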
def _create_accumulators(self, block, parameters):
"""Create all accumulators needed by the parameters
@@ -161,8 +167,6 @@ class Optimizer(object):
startup_program=startup_program)
self._create_accumulators(loss.block,
[p[0] for p in parameters_and_grads])
# Create any necessary tensors
self._initialize_tensors(loss.block)
optimize_ops = []
for param_and_grad in parameters_and_grads:
@@ -214,27 +218,16 @@ class SGDOptimizer(Optimizer):
self.type = "sgd"
self._learning_rate = learning_rate
def _initialize_tensors(self, block):
lr_shape = [1]
# create a variable for learning_rate
self._lr = self.helper.create_global_variable(
name=unique_name("learning_rate"),
dtype='float32',
shape=lr_shape,
lod_level=1,
persistable=True)
self.helper.set_variable_initializer(
var=self._lr, initializer=ConstantInitializer(self._learning_rate))
def _append_optimize_op(self, block, param_and_grad):
assert isinstance(block, framework.Block)
# create the optimize op
sgd_op = block.append_op(
type=self.type,
inputs={
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"LearningRate": self._create_param_lr(param_and_grad)
},
outputs={"ParamOut": param_and_grad[0]})
@@ -259,19 +252,6 @@ class MomentumOptimizer(Optimizer):
self._momentum = momentum
self._use_nesterov = bool(use_nesterov)
def _initialize_tensors(self, block):
assert isinstance(block, framework.Block)
lr_shape = [1]
# create a variable for learning_rate
self._lr = self.helper.create_global_variable(
name=unique_name("learning_rate"),
dtype='float32',
shape=lr_shape,
lod_level=1,
persistable=True)
self.helper.set_variable_initializer(
var=self._lr, initializer=ConstantInitializer(self._learning_rate))
def _create_accumulators(self, block, parameters):
assert isinstance(block, framework.Block)
@@ -290,14 +270,14 @@ class MomentumOptimizer(Optimizer):
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"Velocity": velocity_acc,
"LearningRate": self._create_param_lr(param_and_grad)
},
outputs={
"ParamOut": param_and_grad[0],
"VelocityOut": velocity_acc
},
attrs={"mu": self._momentum,
"use_nesterov": self._use_nesterov})
return momentum_op
@@ -315,18 +295,6 @@ class AdagradOptimizer(Optimizer):
self._learning_rate = learning_rate
self._epsilon = epsilon
def _initialize_tensors(self, block):
lr_shape = [1]
# create a variable for learning_rate
self._lr = self.helper.create_global_variable(
name=unique_name("learning_rate"),
dtype='float32',
shape=lr_shape,
lod_level=1,
persistable=True)
self.helper.set_variable_initializer(
var=self._lr, initializer=ConstantInitializer(self._learning_rate))
def _create_accumulators(self, block, parameters):
assert isinstance(block, framework.Block)
@@ -346,7 +314,7 @@ class AdagradOptimizer(Optimizer):
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"Moment": moment_acc,
"LearningRate": self._create_param_lr(param_and_grad)
},
outputs={"ParamOut": param_and_grad[0],
"MomentOut": moment_acc},
@@ -378,18 +346,6 @@ class AdamOptimizer(Optimizer):
self._beta2 = beta2
self._epsilon = epsilon
def _initialize_tensors(self, block):
lr_shape = [1]
# create a variable for learning_rate
self._lr = self.helper.create_global_variable(
name=unique_name("learning_rate"),
dtype='float32',
shape=lr_shape,
lod_level=1,
persistable=True)
self.helper.set_variable_initializer(
var=self._lr, initializer=ConstantInitializer(self._learning_rate))
def _create_accumulators(self, block, parameters):
assert isinstance(block, framework.Block)
@@ -433,7 +389,7 @@ class AdamOptimizer(Optimizer):
inputs={
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"LearningRate": self._create_param_lr(param_and_grad),
"Moment1": moment1,
"Moment2": moment2,
"Beta1Pow": self._beta1_pow_acc,
@@ -495,18 +451,6 @@ class AdamaxOptimizer(Optimizer):
self._beta2 = beta2
self._epsilon = epsilon
def _initialize_tensors(self, block):
lr_shape = [1]
# create a variable for learning_rate
self._lr = self.helper.create_global_variable(
name=unique_name("learning_rate"),
dtype='float32',
shape=lr_shape,
lod_level=1,
persistable=True)
self.helper.set_variable_initializer(
var=self._lr, initializer=ConstantInitializer(self._learning_rate))
def _create_accumulators(self, block, parameters):
# Create beta1 power accumulator tensor
beta_shape = [1]
@@ -536,7 +480,7 @@ class AdamaxOptimizer(Optimizer):
inputs={
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"LearningRate": self._create_param_lr(param_and_grad),
"Moment": moment,
"InfNorm": inf_norm,
"Beta1Pow": self._beta1_pow_acc
...
@@ -215,7 +215,11 @@ class OpTest(unittest.TestCase):
if isinstance(input_vars[var_name], list):
for name, np_value in self.inputs[var_name]:
tensor = core.LoDTensor()
if isinstance(np_value, tuple):
tensor.set(np_value[0], place)
tensor.set_lod(np_value[1])
else:
tensor.set(np_value, place)
feed_map[name] = tensor
else:
tensor = core.LoDTensor()
@@ -236,7 +240,6 @@ class OpTest(unittest.TestCase):
inputs = append_input_output(block, op_proto, self.inputs, True)
outputs = append_input_output(block, op_proto, self.outputs, False)
op = block.append_op(
type=self.op_type,
inputs=inputs,
@@ -397,9 +400,11 @@ class OpTest(unittest.TestCase):
if not isinstance(item[0], basestring):
item = [[param_name] + list(item)]
if len(item) == 2:
if isinstance(item[1], tuple):
var[i] = [item[0], item[1][0], item[1][1]]
else:
# only set var name and value, set lod to None
var[i] = list(item) + [None]
var_descs = [(block.create_var(
name=name, shape=each.shape, dtype=each.dtype), each, lod)
for name, each, lod in var]
...
@@ -20,21 +20,19 @@ class TestArrayReadWrite(unittest.TestCase):
each_x.stop_gradient = False
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = False
arr = layers.array_write(x=x[0], i=i)
i = layers.increment(x=i)
i.stop_gradient = True
arr = layers.array_write(x=x[1], i=i, array=arr)
i = layers.increment(x=i)
i.stop_gradient = True
arr = layers.array_write(x=x[2], i=i, array=arr)
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = False
a0 = layers.array_read(array=arr, i=i)
i = layers.increment(x=i)
i.stop_gradient = True  # index should not calculate gradient
a1 = layers.array_read(array=arr, i=i)
i = layers.increment(x=i)
i.stop_gradient = True
a2 = layers.array_read(array=arr, i=i)
mean_a0 = layers.mean(x=a0)
...
import unittest
import numpy as np
from op_test import OpTest
class Segment(object):
def __init__(self, chunk_type, start_idx, end_idx):
self.chunk_type = chunk_type
self.start_idx = start_idx
self.end_idx = end_idx
def __str__(self):
return '(Segment: %s, %s, %s)' % (self.chunk_type, self.start_idx,
self.end_idx)
__repr__ = __str__
class TestChunkEvalOp(OpTest):
num_sequences = 5
batch_size = 50
def parse_scheme(self):
if self.scheme == 'IOB':
self.num_tag_types = 2
elif self.scheme == 'IOE':
self.num_tag_types = 2
def fill_with_chunks(self, data, chunks):
for chunk in chunks:
if self.scheme == 'IOB':
data[chunk.start_idx] = chunk.chunk_type * self.num_tag_types
data[chunk.start_idx + 1:
chunk.end_idx] = chunk.chunk_type * self.num_tag_types + (
self.num_tag_types - 1)
data[chunk.end_idx] = chunk.chunk_type * self.num_tag_types + (
self.num_tag_types - 1
) if chunk.start_idx < chunk.end_idx else data[chunk.start_idx]
elif self.scheme == 'IOE':
data[chunk.start_idx:
chunk.end_idx] = chunk.chunk_type * self.num_tag_types
data[chunk.end_idx] = chunk.chunk_type * self.num_tag_types + (
self.num_tag_types - 1)
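# (Added note) Tag encoding used above, with num_tag_types == 2: under IOB
# a chunk of type t starts with tag 2*t and continues with 2*t + 1; under
# IOE it runs with tag 2*t and ends with 2*t + 1.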
def rand_chunks(self, starts, num_chunks):
if num_chunks < 0:
num_chunks = np.random.randint(starts[-1])
chunks = []
# generate chunk beginnings
chunk_begins = sorted(
np.random.choice(
range(starts[-1]), num_chunks, replace=False))
seq_chunk_begins = []
begin_idx = 0
# divide chunks into sequences
for i in range(len(starts) - 1):
tmp_chunk_begins = []
while begin_idx < len(chunk_begins) and chunk_begins[
begin_idx] < starts[i + 1]:
tmp_chunk_begins.append(chunk_begins[begin_idx])
begin_idx += 1
seq_chunk_begins.append(tmp_chunk_begins)
# generate chunk ends
chunk_ends = []
for i in range(len(seq_chunk_begins)):
for j in range(len(seq_chunk_begins[i])):
low = seq_chunk_begins[i][j]
high = seq_chunk_begins[i][j + 1] if j < len(seq_chunk_begins[
i]) - 1 else starts[i + 1]
chunk_ends.append(np.random.randint(low, high))
# generate chunks
for chunk_pos in zip(chunk_begins, chunk_ends):
chunk_type = np.random.randint(self.num_chunk_types)
chunks.append(Segment(chunk_type, *chunk_pos))
return chunks
def gen_chunks(self, infer, label, starts):
chunks = self.rand_chunks(starts,
self.num_infer_chunks + self.num_label_chunks
- self.num_correct_chunks)
correct_chunks = np.random.choice(
range(len(chunks)), self.num_correct_chunks, replace=False)
infer_chunks = np.random.choice(
[x for x in range(len(chunks)) if x not in correct_chunks],
self.num_infer_chunks - self.num_correct_chunks,
replace=False)
infer_chunks = sorted(correct_chunks.tolist() + infer_chunks.tolist())
label_chunks = np.random.choice(
[x for x in range(len(chunks)) if x not in infer_chunks],
self.num_label_chunks - self.num_correct_chunks,
replace=False)
label_chunks = sorted(correct_chunks.tolist() + label_chunks.tolist())
self.fill_with_chunks(infer, [chunks[idx] for idx in infer_chunks])
self.fill_with_chunks(label, [chunks[idx] for idx in label_chunks])
# exclude types in excluded_chunk_types
if len(self.excluded_chunk_types) > 0:
for idx in correct_chunks:
if chunks[idx].chunk_type in self.excluded_chunk_types:
self.num_correct_chunks -= 1
for idx in infer_chunks:
if chunks[idx].chunk_type in self.excluded_chunk_types:
self.num_infer_chunks -= 1
for idx in label_chunks:
if chunks[idx].chunk_type in self.excluded_chunk_types:
self.num_label_chunks -= 1
return self.num_correct_chunks, self.num_infer_chunks, self.num_label_chunks
def set_confs(self):
# Use the IOB scheme and labels with 2 chunk types
self.scheme = 'IOB'
self.num_chunk_types = 2
self.excluded_chunk_types = []
self.other_chunk_type = self.num_chunk_types
self.attrs = {
'num_chunk_types': self.num_chunk_types,
'chunk_scheme': self.scheme,
'excluded_chunk_types': self.excluded_chunk_types
}
self.parse_scheme()
self.num_correct_chunks, self.num_infer_chunks, self.num_label_chunks = 4, 5, 9
def set_data(self):
infer = np.zeros((self.batch_size, )).astype('int32')
infer.fill(self.num_chunk_types * self.num_tag_types)
label = np.copy(infer)
starts = np.random.choice(
range(1, self.batch_size), self.num_sequences - 1,
replace=False).tolist()
starts.extend([0, self.batch_size])
starts = sorted(starts)
self.num_correct_chunks, self.num_infer_chunks, self.num_label_chunks = self.gen_chunks(
infer, label, starts)
self.inputs = {
'Inference': (infer, [starts]),
'Label': (label, [starts])
}
precision = float(
self.num_correct_chunks
) / self.num_infer_chunks if self.num_infer_chunks else 0
recall = float(self.num_correct_chunks
) / self.num_label_chunks if self.num_label_chunks else 0
f1 = float(2 * precision * recall) / (
precision + recall) if self.num_correct_chunks else 0
self.outputs = {
'Precision': np.asarray(
[precision], dtype='float32'),
'Recall': np.asarray(
[recall], dtype='float32'),
'F1-Score': np.asarray(
[f1], dtype='float32')
}
def setUp(self):
self.op_type = 'chunk_eval'
self.set_confs()
self.set_data()
def test_check_output(self):
self.check_output()
class TestChunkEvalOpWithExclude(TestChunkEvalOp):
def set_confs(self):
# Use the IOE scheme and labels with 3 chunk types
self.scheme = 'IOE'
self.num_chunk_types = 3
self.excluded_chunk_types = [1]
self.other_chunk_type = self.num_chunk_types
self.attrs = {
'num_chunk_types': self.num_chunk_types,
'chunk_scheme': self.scheme,
'excluded_chunk_types': self.excluded_chunk_types
}
self.parse_scheme()
self.num_correct_chunks, self.num_infer_chunks, self.num_label_chunks = 15, 18, 20
if __name__ == '__main__':
unittest.main()
import unittest
import numpy as np
from op_test import OpTest
class TestClipByNormOp(OpTest):
def setUp(self):
self.max_relative_error = 0.006
self.initTestCase()
input = np.random.random(self.shape).astype("float32")
input[np.abs(input) < self.max_relative_error] = 0.5
self.op_type = "clip_by_norm"
self.inputs = {'X': input, }
self.attrs = {}
self.attrs['max_norm'] = self.max_norm
norm = np.sqrt(np.sum(np.square(input)))
if norm > self.max_norm:
output = self.max_norm * input / norm
else:
output = input
self.outputs = {'Out': output}
def test_check_output(self):
self.check_output()
def initTestCase(self):
self.shape = (100, )
self.max_norm = 1.0
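# (Added note) The expected output computed in setUp follows the
# clip-by-norm rule: with n = sqrt(sum(x**2)), the op returns x unchanged
# when n <= max_norm and x * max_norm / n otherwise, so the result's L2
# norm never exceeds max_norm.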
class TestCase1(TestClipByNormOp):
def initTestCase(self):
self.shape = (100, )
self.max_norm = 1e20
class TestCase2(TestClipByNormOp):
def initTestCase(self):
self.shape = (16, 16)
self.max_norm = 0.1
class TestCase3(TestClipByNormOp):
def initTestCase(self):
self.shape = (4, 8, 16)
self.max_norm = 1.0
if __name__ == '__main__':
unittest.main()
import op_test
import unittest
import numpy
def create_test_class(op_type, typename, callback):
class Cls(op_test.OpTest):
def setUp(self):
a = numpy.random.random(size=(10, 7)).astype(typename)
b = numpy.random.random(size=(10, 7)).astype(typename)
c = callback(a, b)
self.inputs = {'X': a, 'Y': b}
self.outputs = {'Out': c}
self.op_type = op_type
def test_output(self):
self.check_output()
cls_name = "{0}_{1}".format(op_type, typename)
Cls.__name__ = cls_name
globals()[cls_name] = Cls
for _type_name in {'float32', 'float64', 'int32', 'int64'}:
create_test_class('less_than', _type_name, lambda _a, _b: _a < _b)
create_test_class('equal', _type_name, lambda _a, _b: _a == _b)
if __name__ == '__main__':
unittest.main()
import unittest
import numpy as np
from op_test import OpTest
class TestExpandOpRank1(OpTest):
def setUp(self):
self.op_type = "expand"
self.inputs = {'X': np.random.random(12).astype("float32")}
self.attrs = {'expand_times': [2]}
output = np.tile(self.inputs['X'], 2)
self.outputs = {'Out': output}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestExpandOpRank2_Corner(OpTest):
def setUp(self):
self.op_type = "expand"
self.inputs = {'X': np.random.random((12, 14)).astype("float32")}
self.attrs = {'expand_times': [1, 1]}
output = np.tile(self.inputs['X'], (1, 1))
self.outputs = {'Out': output}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestExpandOpRank2(OpTest):
def setUp(self):
self.op_type = "expand"
self.inputs = {'X': np.random.random((12, 14)).astype("float32")}
self.attrs = {'expand_times': [2, 3]}
output = np.tile(self.inputs['X'], (2, 3))
self.outputs = {'Out': output}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestExpandOpRank3_Corner(OpTest):
def setUp(self):
self.op_type = "expand"
self.inputs = {'X': np.random.random((2, 4, 5)).astype("float32")}
self.attrs = {'expand_times': [1, 1, 1]}
output = np.tile(self.inputs['X'], (1, 1, 1))
self.outputs = {'Out': output}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestExpandOpRank3(OpTest):
def setUp(self):
self.op_type = "expand"
self.inputs = {'X': np.random.random((2, 4, 5)).astype("float32")}
self.attrs = {'expand_times': [2, 1, 4]}
output = np.tile(self.inputs['X'], (2, 1, 4))
self.outputs = {'Out': output}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestExpandOpRank4(OpTest):
def setUp(self):
self.op_type = "expand"
self.inputs = {'X': np.random.random((2, 4, 5, 7)).astype("float32")}
self.attrs = {'expand_times': [3, 2, 1, 2]}
output = np.tile(self.inputs['X'], (3, 2, 1, 2))
self.outputs = {'Out': output}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
if __name__ == "__main__":
unittest.main()
@@ -3,7 +3,7 @@ import paddle.v2.framework.layers as layers
import paddle.v2.framework.core as core
import paddle.v2.framework.optimizer as optimizer
from paddle.v2.framework.framework import Program
from paddle.v2.framework.io import save_persistables, load_persistables
from paddle.v2.framework.executor import Executor
...
import unittest
import numpy as np
from op_test import OpTest
class TestIncrementOpPositiveStep(OpTest):
"""Test increment op with positive step
"""
def setUp(self):
self.op_type = "increment"
self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
self.attrs = {'step': 14.8}
self.outputs = {'Out': self.inputs['X'] + self.attrs['step']}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestIncrementOpNegativeStep(OpTest):
"""Test increment op with negative step
"""
def setUp(self):
self.op_type = "increment"
self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
self.attrs = {'step': -3.8}
self.outputs = {'Out': self.inputs['X'] + self.attrs['step']}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
if __name__ == "__main__":
unittest.main()
@@ -3,7 +3,7 @@ import paddle.v2.framework.layers as layers
import paddle.v2.framework.core as core
import paddle.v2.framework.optimizer as optimizer
from paddle.v2.framework.framework import Program
from paddle.v2.framework.io import save_inference_model, load_inference_model
import paddle.v2.framework.executor as executor
import unittest
...
import paddle.v2.framework.layers as layers
import paddle.v2.framework.nets as nets
from paddle.v2.framework.framework import Program
import paddle.v2.framework.core as core
import unittest
...
import unittest
import paddle.v2.framework.layers as layers
from paddle.v2.framework.executor import Executor
import paddle.v2.framework.core as core
import numpy
class TestLoDArrayLength(unittest.TestCase):
def test_array_length(self):
tmp = layers.zeros(shape=[10], dtype='int32')
i = layers.fill_constant(shape=[1], dtype='int64', value=10)
arr = layers.array_write(tmp, i=i)
arr_len = layers.array_length(arr)
cpu = core.CPUPlace()
exe = Executor(cpu)
result = numpy.array(exe.run(fetch_list=[arr_len])[0])
self.assertEqual(11, result[0])
if __name__ == '__main__':
unittest.main()
@@ -18,7 +18,6 @@ class TestLoDRankTable(unittest.TestCase):
tensor = core.LoDTensor()
tensor.set(numpy.random.random(size=(17, 100)), cpu)
tensor.set_lod([[0, 1, 3], [0, 5, 6, 7], [0, 3, 4, 9, 10, 13, 16, 17]])
exe.run(g_main_program, scope=scope, feed={'x': tensor})
var = scope.find_var(rank_table.name)
table = var.get_lod_rank_table()
...
import unittest
import paddle.v2.framework.core as core
import numpy
import paddle.v2.framework.layers as layers
from paddle.v2.framework.framework import Program
from paddle.v2.framework.executor import Executor
from paddle.v2.framework.backward import append_backward_ops
class TestCPULoDTensorArrayOps(unittest.TestCase):
def place(self):
return core.CPUPlace()
def test_lod_tensor_to_array_level_0(self):
tensor = core.LoDTensor()
tensor.set(
numpy.arange(10).reshape(10, 1).astype('int32'), self.place())
tensor.set_lod([[0, 3, 9, 10]])
expect = map(lambda x: numpy.array(x).astype('int32'),
[[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]])
self.main(tensor=tensor, expect_array=expect, expect_lod=[] * 6)
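# (Added note) With lod [[0, 3, 9, 10]] the three sequences have lengths
# 3, 6 and 1; the rank table orders them by length descending, and slot k
# of the array gathers the k-th timestep of every sequence that is long
# enough, which yields exactly the expected arrays above.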
def test_lod_tensor_to_array_level_0_empty_seq(self):
tensor = core.LoDTensor()
tensor.set(
numpy.arange(10).reshape(10, 1).astype('int32'), self.place())
tensor.set_lod([[0, 3, 9, 9, 10]])
expect = map(lambda x: numpy.array(x).astype('int32'),
[[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]])
self.main(tensor=tensor, expect_array=expect, expect_lod=[] * 6)
def test_lod_tensor_to_array_level_1(self):
tensor = core.LoDTensor()
tensor.set(
numpy.arange(20).reshape(20, 1).astype('int32'), self.place())
tensor.set_lod([[0, 2, 5], [0, 3, 9, 11, 17, 20]])
expect = [
numpy.array(
[9, 10, 0, 1, 2], dtype='int32'), numpy.array(
[11, 12, 13, 14, 15, 16, 3, 4, 5, 6, 7, 8], dtype='int32'),
numpy.array(
[17, 18, 19], dtype='int32')
]
lod = [[[0, 2, 5]], [[0, 6, 12]], [[0, 3]]]
self.main(tensor=tensor, expect_array=expect, expect_lod=lod)
def test_lod_tensor_to_array_level_1_empty_seq(self):
tensor = core.LoDTensor()
tensor.set(
numpy.arange(31).reshape(31, 1).astype('int32'), self.place())
tensor.set_lod([[0, 3, 5, 9, 11],
[0, 3, 7, 11, 11, 12, 17, 19, 21, 23, 30, 31]])
expect = [
numpy.array(
item, dtype='int32')
for item in [[
12, 13, 14, 15, 16, 0, 1, 2, 23, 24, 25, 26, 27, 28, 29
], [17, 18, 3, 4, 5, 6, 11, 30], [19, 20, 7, 8, 9, 10], [21, 22]]
]
lod = [[[0, 5, 8, 8, 15]], [[0, 2, 6, 7, 8]], [[0, 2, 6]], [[0, 2]]]
self.main(tensor=tensor, expect_array=expect, expect_lod=lod)
def test_lod_tensor_to_array_level_2(self):
tensor = core.LoDTensor()
tensor.set(
numpy.arange(50).reshape(50, 1).astype('int32'), self.place())
tensor.set_lod([[0, 2, 5, 6], [0, 2, 5, 6, 10, 12, 13],
[0, 3, 7, 11, 17, 21, 22, 23, 27, 31, 39, 45, 46, 50]])
expect = [
numpy.array(
item, dtype='int32')
for item in [[21, 0, 1, 2, 3, 4, 5, 6, 46, 47, 48, 49], range(
22, 39) + range(7, 21), range(39, 46)]
]
lod = [[[0, 1, 3, 4], [0, 1, 4, 8, 12]],
[[0, 4, 7], [0, 1, 5, 9, 17, 21, 27, 31]], [[0, 2], [0, 6, 7]]]
self.main(tensor=tensor, expect_array=expect, expect_lod=lod)
def test_lod_tensor_to_array_level_2_skip_level(self):
tensor = core.LoDTensor()
tensor.set(
numpy.arange(50).reshape(50, 1).astype('int32'), self.place())
tensor.set_lod([[0, 2, 5, 6], [0, 2, 5, 6, 10, 12, 13],
[0, 3, 7, 11, 17, 21, 22, 23, 27, 31, 39, 45, 46, 50]])
self.main(tensor=tensor, expect_array=None, expect_lod=None, level=1)
def main(self, tensor, expect_array, expect_lod, level=0):
place = self.place()
program = Program()
x = layers.data(name='x', shape=[10], main_program=program)
x.persistable = True
table = layers.lod_rank_table(x, level=level, main_program=program)
array = layers.lod_tensor_to_array(x, table, main_program=program)
array.persistable = True
result = layers.array_to_lod_tensor(array, table, main_program=program)
result.persistable = True
exe = Executor(place)
scope = core.Scope()
exe.run(program, feed={'x': tensor}, scope=scope)
var = scope.find_var(array.name)
array = var.get_lod_tensor_array()
if expect_array is not None and expect_lod is not None:
self.check_array_same(array, expect_array, expect_lod)
self.check_tensor_same(scope.find_var(result.name).get_tensor(), tensor)
def check_array_same(self, array, expect_tensor, expect_lod):
self.assertEqual(len(expect_tensor), len(array))
for i, exp in enumerate(zip(expect_tensor, expect_lod)):
exp_tensor, exp_lod = exp
exp_tensor = numpy.expand_dims(exp_tensor, axis=1)
self.assertTrue(numpy.allclose(exp_tensor, numpy.array(array[i])))
self.assertEqual(exp_lod, array[i].lod())
def check_tensor_same(self, actual, expect):
self.assertTrue(
numpy.allclose(numpy.array(actual), numpy.array(expect)))
self.assertEqual(actual.lod(), expect.lod())
class TestCPULoDTensorArrayOpGrad(unittest.TestCase):
def test_grad(self):
place = core.CPUPlace()
program = Program()
x = layers.data(
name='x',
shape=[1],
data_type='float32',
main_program=program,
stop_gradient=False)
table = layers.lod_rank_table(x, level=0, main_program=program)
array = layers.lod_tensor_to_array(x, table, main_program=program)
result = layers.array_to_lod_tensor(array, table, main_program=program)
mean = layers.mean(x=result, main_program=program)
append_backward_ops(mean)
tensor = core.LoDTensor()
tensor.set(numpy.arange(10).reshape(10, 1).astype('float32'), place)
tensor.set_lod([[0, 3, 9, 10]])
g_vars = program.global_block().var(x.name + "@GRAD")
exe = Executor(place)
g_out = [
item.sum()
for item in map(
numpy.array,
exe.run(program, feed={'x': tensor}, fetch_list=[g_vars]))
]
g_out_sum = numpy.array(g_out).sum()
self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
if __name__ == '__main__':
unittest.main()
@@ -117,8 +117,9 @@ class TestLstmOp(OpTest):
self.act_cell = 'tanh'
self.act_cand = 'tanh'
self.has_initial_state = False
self.is_reverse = False
self.use_peepholes = True
def setUp(self):
self.set_argument()
@@ -128,18 +129,28 @@ class TestLstmOp(OpTest):
N = len(self.lod[0]) - 1
x = np.random.normal(size=(T, 4 * self.D)).astype('float64')
if self.has_initial_state:
h0 = np.random.normal(size=(N, self.D)).astype('float64')
c0 = np.random.normal(size=(N, self.D)).astype('float64')
else:
h0 = np.zeros((N, self.D)).astype('float64')
c0 = np.zeros((N, self.D)).astype('float64')
w = np.random.normal(size=(self.D, 4 * self.D)).astype('float64')
if self.use_peepholes:
b = np.random.normal(size=(1, 7 * self.D)).astype('float64')
else:
b = np.random.normal(size=(1, 4 * self.D)).astype('float64')
w_b = b[:, 0:4 * self.D]
w_c = b[:, 4 * self.D:] if self.use_peepholes else None
h, c = lstm(x, self.lod, h0, c0, w, w_b, w_c, self.is_reverse,
ACTVATION[self.act_gate], ACTVATION[self.act_cell],
ACTVATION[self.act_cand])
self.inputs = {'Input': (x, self.lod), 'Weight': w}
self.inputs['Bias'] = b
if self.has_initial_state:
self.inputs['H0'] = h0
self.inputs['C0'] = c0
@@ -149,17 +160,16 @@ class TestLstmOp(OpTest):
'Cell': (c, self.lod),
}
self.attrs = {
'use_peepholes': self.use_peepholes,
'is_reverse': self.is_reverse,
'gate_activation': self.act_gate,
'cell_activation': self.act_cell,
'candidate_activation': self.act_cand
}
def test_check_output(self):
self.check_output(atol=1e-8)
#TODO(qingqing) add more unit testing case
def test_check_grad(self):
# TODO(qingqing) remove following lines after the check_grad is refined.
N = len(self.lod[0]) - 1
@@ -170,7 +180,7 @@ class TestLstmOp(OpTest):
['Input', 'Weight', 'Bias'], ['Hidden'], max_relative_error=5e-4)
class TestLstmOpHasInitial(TestLstmOp):
def set_argument(self):
self.lod = [[0, 2, 5, 7]]
self.D = 16
@@ -179,8 +189,69 @@ class TestLstmOpHasNoInitial(TestLstmOp):
self.act_cell = 'tanh'
self.act_cand = 'tanh'
self.has_initial_state = True
self.is_reverse = True
self.use_peepholes = True
def test_check_grad(self):
# TODO(qingqing) remove following lines after the check_grad is refined.
N = len(self.lod[0]) - 1
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchCellPreAct'] = np.zeros(
(N, self.D)).astype('float64')
self.check_grad(
['Input', 'Weight', 'Bias', 'H0', 'C0'], ['Hidden'],
max_relative_error=5e-4)
def test_check_grad_ingore_bias(self):
N = len(self.lod[0]) - 1
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchCellPreAct'] = np.zeros(
(N, self.D)).astype('float64')
self.check_grad(
['Input', 'Weight'], ['Hidden'],
max_relative_error=5e-4,
no_grad_set=set('Bias'))
def test_check_grad_ingore_weight(self):
N = len(self.lod[0]) - 1
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchCellPreAct'] = np.zeros(
(N, self.D)).astype('float64')
self.check_grad(
['Input', 'Bias'], ['Hidden'],
max_relative_error=5e-4,
no_grad_set=set('Weight'))
def test_check_grad_ingore_input(self):
N = len(self.lod[0]) - 1
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchCellPreAct'] = np.zeros(
(N, self.D)).astype('float64')
self.check_grad(
['Weight', 'Bias'], ['Hidden'],
max_relative_error=5e-4,
no_grad_set=set('Input'))
def test_check_grad_ingore_h0(self):
N = len(self.lod[0]) - 1
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchCellPreAct'] = np.zeros(
(N, self.D)).astype('float64')
self.check_grad(
['Input', 'Weight', 'Bias', 'C0'], ['Hidden'],
max_relative_error=5e-4,
no_grad_set=set('H0'))
def test_check_grad_ingore_c0(self):
N = len(self.lod[0]) - 1
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchCellPreAct'] = np.zeros(
(N, self.D)).astype('float64')
self.check_grad(
['Input', 'Weight', 'Bias', 'H0'], ['Hidden'],
max_relative_error=5e-4,
no_grad_set=set('C0'))
class TestLstmOpRerverse(TestLstmOp): class TestLstmOpRerverse(TestLstmOp):
...@@ -192,8 +263,23 @@ class TestLstmOpRerverse(TestLstmOp): ...@@ -192,8 +263,23 @@ class TestLstmOpRerverse(TestLstmOp):
self.act_cell = 'tanh' self.act_cell = 'tanh'
self.act_cand = 'tanh' self.act_cand = 'tanh'
self.has_initial_state = True self.has_initial_state = False
self.is_reverse = True
self.use_peepholes = True
class TestLstmOpNotUsePeepholes(TestLstmOp):
def set_argument(self):
self.lod = [[0, 2, 5, 7]]
self.D = 16
self.act_gate = 'sigmoid'
self.act_cell = 'tanh'
self.act_cand = 'tanh'
self.has_initial_state = False
self.is_reverse = True self.is_reverse = True
self.use_peepholes = False
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -37,7 +37,7 @@ class TestMomentumOp1(OpTest):


class TestMomentumOp2(OpTest):
    '''Test Momentum with default values for attributes
    '''

    def setUp(self):
...@@ -57,7 +57,7 @@ class TestMomentumOp2(OpTest):
            'LearningRate': learning_rate
        }

        self.attrs = {'mu': mu, 'use_nesterov': use_nesterov}

        velocity_out = mu * velocity + grad
        if use_nesterov:
...
...@@ -98,7 +98,7 @@ class TestMomentumOptimizer(unittest.TestCase):
        self.assertEqual(len(opts), 1)
        sgd_op = opts[0]
        self.assertEqual(sgd_op.type, "momentum")
        self.assertFalse(sgd_op.attr('use_nesterov'))

        # Check accumulators
        accumulators = momentum_optimizer.get_accumulators()
...@@ -143,7 +143,7 @@ class TestMomentumOptimizer(unittest.TestCase):
        self.assertEqual(len(opts), 1)
        sgd_op = opts[0]
        self.assertEqual(sgd_op.type, "momentum")
        self.assertTrue(sgd_op.attr('use_nesterov'))

        # Check accumulators
        accumulators = momentum_optimizer.get_accumulators()
...
...@@ -61,8 +61,8 @@ class TestPool2d_Op(OpTest):
            'strides': self.strides,
            'paddings': self.paddings,
            'ksize': self.ksize,
            'pooling_type': self.pool_type,
            'global_pooling': self.global_pool,
        }

        self.outputs = {'Out': output.astype('float32')}
...
...@@ -67,8 +67,8 @@ class TestPool3d_Op(OpTest):
            'strides': self.strides,
            'paddings': self.paddings,
            'ksize': self.ksize,
            'pooling_type': self.pool_type,
            'global_pooling': self.global_pool,
        }

        self.outputs = {'Out': output.astype('float32')}
...
...@@ -86,7 +86,7 @@ class TestMaxPoolWithIndex_Op(OpTest):
            'strides': self.strides,
            'paddings': self.paddings,
            'ksize': self.ksize,
            'global_pooling': self.global_pool,
        }

        self.inputs = {'X': input}
...
...@@ -4,7 +4,7 @@ import paddle.v2.framework.nets as nets
import paddle.v2.framework.core as core
import paddle.v2.framework.optimizer as optimizer

from paddle.v2.framework.framework import Program
from paddle.v2.framework.executor import Executor

import numpy as np
...
...@@ -4,7 +4,7 @@ import paddle.v2.framework.nets as nets
import paddle.v2.framework.core as core
import paddle.v2.framework.optimizer as optimizer

from paddle.v2.framework.framework import Program
from paddle.v2.framework.executor import Executor

import numpy as np
...
...@@ -2,9 +2,36 @@ import unittest
import numpy as np
import sys
from op_test import OpTest


def to_abs_lod(lod):
    if len(lod) == 0 or len(lod) == 1:
        return lod
    import copy
    new_lod = copy.deepcopy(lod)
    for idx, val in enumerate(lod[0]):
        new_lod[0][idx] = lod[1][val]
    return new_lod
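# Worked example (added for illustration, not part of the original test):
# given a hypothetical two-level LoD
#   lod = [[0, 2, 4], [0, 1, 3, 5, 7]]
# the top level indexes into the lower level, so to_abs_lod maps it through
# that level to absolute row offsets:
#   to_abs_lod(lod) == [[0, 3, 7], [0, 1, 3, 5, 7]]
# because lod[1][0] == 0, lod[1][2] == 3 and lod[1][4] == 7.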
def seq_concat(inputs, level):
    lod0 = inputs['X'][0][1][1]
    lod1 = inputs['X'][1][1][1]
    x0 = inputs['X'][0][1][0]
    x1 = inputs['X'][1][1][0]
    level_idx = len(lod0) - level - 1
    outs = []
    for i in range(len(lod0[level_idx]) - 1):
        sub_x0 = x0[to_abs_lod(lod0)[level_idx][i]:to_abs_lod(lod0)[level_idx][
            i + 1], :]
        sub_x1 = x1[to_abs_lod(lod1)[level_idx][i]:to_abs_lod(lod1)[level_idx][
            i + 1], :]
        outs.append(np.concatenate((sub_x0, sub_x1), axis=0))
    return np.concatenate(outs, axis=0)


class TestSeqConcatOp(OpTest):
    def set_data(self):
        # two level, batch size is 3
        x0 = np.random.random((4, 6, 3)).astype('float32')
...@@ -15,13 +42,7 @@ class TestConcatOp(OpTest):
        level = 1
        self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
        self.attrs = {'axis': axis, 'level': level}
        self.outputs = {'Out': (np.concatenate([x0, x1], axis=1), lod0)}

    def setUp(self):
        self.op_type = "sequence_concat"
...@@ -34,46 +55,50 @@ class TestConcatOp(OpTest):
        self.check_grad(['x0'], 'Out')


class TestSeqConcatOpLevelZeroNestedSequence(TestSeqConcatOp):
    def set_data(self):
        # two level, batch size is 3
        x0 = np.random.random((4, 6, 3)).astype('float32')
        lod0 = [[0, 2, 4], [0, 1, 2, 3, 4]]
        x1 = np.random.random((7, 6, 3)).astype('float32')
        lod1 = [[0, 2, 4], [0, 1, 3, 5, 7]]
        axis = 0
        level = 0
        self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
        self.attrs = {'axis': axis, 'level': level}
        out_lod = [[0, 2, 4], [0, 2, 5, 8, 11]]
        self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)}


class TestSeqConcatOpLevelOneNestedSequence(TestSeqConcatOp):
    def set_data(self):
        # two level, batch size is 3
        x0 = np.random.random((4, 6, 3)).astype('float32')
        lod0 = [[0, 2, 4], [0, 1, 2, 3, 4]]
        x1 = np.random.random((7, 6, 3)).astype('float32')
        lod1 = [[0, 3, 4], [0, 1, 3, 5, 7]]
        axis = 0
        level = 1
        self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
        self.attrs = {'axis': axis, 'level': level}
        out_lod = [[0, 5, 8], [0, 1, 2, 3, 5, 7, 8, 9, 11]]
        self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)}


class TestSeqConcatOpLevelZeroSequence(TestSeqConcatOp):
    def set_data(self):
        # single level, batch size is 4
        x0 = np.random.random((4, 3, 4)).astype('float32')
        lod0 = [[0, 1, 2, 3, 4]]
        x1 = np.random.random((7, 3, 4)).astype('float32')
        lod1 = [[0, 1, 3, 5, 7]]
        axis = 0
        level = 0
        self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
        self.attrs = {'axis': axis, 'level': level}
        out_lod = [[0, 2, 5, 8, 11]]
        self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)}


if __name__ == '__main__':
    sys.exit(0)
    unittest.main()
import unittest
import paddle.v2.framework.core as core
from paddle.v2.framework.executor import Executor
import paddle.v2.framework.layers as layers
from paddle.v2.framework.backward import append_backward_ops
from paddle.v2.framework.framework import g_main_program
import numpy


class TestShrinkRNNMemory(unittest.TestCase):
    def test_shrink_rnn_memory(self):
        x = layers.data('x', shape=[100], data_type='float32')
        x.stop_gradient = False
        table = layers.lod_rank_table(x=x)
        i = layers.zeros(dtype='int64', shape=[1])
        mem1 = layers.shrink_memory(x=x, i=i, table=table)
        i = layers.increment(x=i)
        i.stop_gradient = True
        mem2 = layers.shrink_memory(x=mem1, i=i, table=table)
        i = layers.increment(x=i)
        i.stop_gradient = True
        mem3 = layers.shrink_memory(x=mem2, i=i, table=table)

        cpu = core.CPUPlace()
        tensor = core.LoDTensor()
        tensor.set_lod([[0, 2, 5, 6]])
        tensor_np = numpy.random.random(size=(3, 100)).astype('float32')
        tensor.set(tensor_np, cpu)
        exe = Executor(cpu)
        outs = map(numpy.array,
                   exe.run(feed={'x': tensor}, fetch_list=[mem1, mem2, mem3]))
        self.assertTrue(numpy.allclose(tensor_np[0:3], outs[0]))
        self.assertTrue(numpy.allclose(tensor_np[0:2], outs[1]))
        self.assertTrue(numpy.allclose(tensor_np[0:1], outs[2]))

        mem3_mean = layers.mean(x=mem3)
        append_backward_ops(loss=mem3_mean)
        x_grad = map(numpy.array,
                     exe.run(feed={'x': tensor},
                             fetch_list=[
                                 g_main_program.global_block().var('x@GRAD')
                             ]))[0]
        self.assertAlmostEqual(1.0, x_grad.sum(), delta=0.1)


if __name__ == '__main__':
    unittest.main()
import paddle.v2 as paddle
import paddle.v2.framework.layers as layers
import paddle.v2.framework.core as core
import paddle.v2.framework.optimizer as optimizer
from paddle.v2.framework.framework import g_main_program, g_startup_program
from paddle.v2.framework.executor import Executor

import numpy as np


def lstm_net(dict_dim, class_dim=2, emb_dim=32, seq_len=80, batch_size=50):
    data = layers.data(
        name="words",
        shape=[seq_len * batch_size, 1],
        append_batch_size=False,
        data_type="int64")
    label = layers.data(
        name="label",
        shape=[batch_size, 1],
        append_batch_size=False,
        data_type="int64")

    emb = layers.embedding(input=data, size=[dict_dim, emb_dim])
    emb = layers.reshape(x=emb, shape=[batch_size, seq_len, emb_dim])
    emb = layers.transpose(x=emb, axis=[1, 0, 2])

    c_pre_init = layers.fill_constant(
        dtype=emb.data_type, shape=[batch_size, emb_dim], value=0.0)
    layer_1_out = layers.lstm(emb, c_pre_init=c_pre_init, hidden_dim=emb_dim)
    layer_1_out = layers.transpose(x=layer_1_out, axis=[1, 0, 2])

    prediction = layers.fc(input=layer_1_out, size=class_dim, act="softmax")
    cost = layers.cross_entropy(input=prediction, label=label)

    avg_cost = layers.mean(x=cost)
    adam_optimizer = optimizer.AdamOptimizer(learning_rate=0.002)
    opts = adam_optimizer.minimize(avg_cost)
    acc = layers.accuracy(input=prediction, label=label)

    return avg_cost, acc
def to_lodtensor(data, place):
    seq_lens = [len(seq) for seq in data]
    cur_len = 0
    lod = [cur_len]
    for l in seq_lens:
        cur_len += l
        lod.append(cur_len)

    flattened_data = np.concatenate(data, axis=0).astype("int64")
    flattened_data = flattened_data.reshape([len(flattened_data), 1])
    res = core.LoDTensor()
    res.set(flattened_data, place)
    res.set_lod([lod])
    return res
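# Sanity check (added for illustration, not part of the original script): for
# data = [[1, 2], [3, 4, 5], [6]] the sequence lengths are [2, 3, 1], so the
# LoD built above is [[0, 2, 5, 6]] and flattened_data has shape (6, 1).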
def chop_data(data, chop_len=80, batch_len=50):
    data = [(x[0][:chop_len], x[1]) for x in data if len(x[0]) >= chop_len]
    return data[:batch_len]


def prepare_feed_data(data, place):
    tensor_words = to_lodtensor(map(lambda x: x[0], data), place)

    label = np.array(map(lambda x: x[1], data)).astype("int64")
    label = label.reshape([50, 1])
    tensor_label = core.LoDTensor()
    tensor_label.set(label, place)

    return tensor_words, tensor_label


def main():
    word_dict = paddle.dataset.imdb.word_dict()
    cost, acc = lstm_net(dict_dim=len(word_dict), class_dim=2)

    batch_size = 100
    train_data = paddle.batch(
        paddle.reader.buffered(
            paddle.dataset.imdb.train(word_dict), size=batch_size * 10),
        batch_size=batch_size)

    data = chop_data(next(train_data()))

    place = core.CPUPlace()
    tensor_words, tensor_label = prepare_feed_data(data, place)
    exe = Executor(place)
    exe.run(g_startup_program)

    while True:
        outs = exe.run(g_main_program,
                       feed={"words": tensor_words,
                             "label": tensor_label},
                       fetch_list=[cost, acc])
        cost_val = np.array(outs[0])
        acc_val = np.array(outs[1])

        print("cost=" + str(cost_val) + " acc=" + str(acc_val))
        if acc_val > 0.9:
            break


if __name__ == '__main__':
    main()
import unittest
import paddle.v2.framework.layers as layers
from paddle.v2.framework.executor import Executor
import paddle.v2.framework.core as core
import numpy


class TestWhileOp(unittest.TestCase):
    def test_simple_forward(self):
        d0 = layers.data(
            "d0", shape=[10], append_batch_size=False, data_type='float32')
        d1 = layers.data(
            "d1", shape=[10], append_batch_size=False, data_type='float32')
        d2 = layers.data(
            "d2", shape=[10], append_batch_size=False, data_type='float32')
        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True
        init = layers.zeros(shape=[10], dtype='float32')
        mem_array = layers.array_write(init, i=i)
        data_array = layers.array_write(x=d0, i=i)

        i = layers.increment(i)
        layers.array_write(d1, i, array=data_array)

        i = layers.increment(i)
        layers.array_write(d2, i, array=data_array)

        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True

        array_len = layers.fill_constant(shape=[1], dtype='int64', value=3)
        cond = layers.less_than(x=i, y=array_len)

        while_op = layers.While(cond=cond)
        with while_op.block():
            d = layers.array_read(array=data_array, i=i)
            prev = layers.array_read(array=mem_array, i=i)
            i = layers.increment(x=i, in_place=True)
            result = layers.sums(input=[d, prev])
            layers.array_write(result, i=i, array=mem_array)
            layers.less_than(x=i, y=array_len, cond=cond)

        sum_result = layers.array_read(mem_array, i=array_len)

        cpu = core.CPUPlace()
        exe = Executor(cpu)
        d = []

        for i in xrange(3):
            d.append(numpy.random.random(size=[10]).astype('float32'))

        d_tensor = []
        for item in d:
            t = core.LoDTensor()
            t.set(item, cpu)
            d_tensor.append(t)

        outs = map(numpy.array,
                   exe.run(feed={
                       'd0': d_tensor[0],
                       'd1': d_tensor[1],
                       'd2': d_tensor[2]
                   },
                           fetch_list=[sum_result]))
        self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01)


if __name__ == '__main__':
    unittest.main()
...@@ -3,7 +3,7 @@ import paddle.v2.framework.layers as layers
import paddle.v2.framework.core as core
import paddle.v2.framework.optimizer as optimizer

from paddle.v2.framework.framework import Program
from paddle.v2.framework.executor import Executor

import numpy as np
...@@ -118,6 +118,10 @@ train_reader = paddle.batch(
place = core.CPUPlace()
exe = Executor(place)

# fix https://github.com/PaddlePaddle/Paddle/issues/5434 then remove
# the exit line below.
exit(0)

exe.run(startup_program, feed={}, fetch_list=[])

PASS_NUM = 100
for pass_id in range(PASS_NUM):
...
"""
This file contains some common interfaces for image preprocess.
Many users are confused about the image layout. We introduce
the image layout as follows.

- CHW Layout
  - The abbreviations: C=channel, H=Height, W=Width
  - The default layout of an image opened by cv2 or PIL is HWC.
    PaddlePaddle only supports the CHW layout, and CHW is simply
    a transpose of HWC, so the input image must be transposed.

- Color format: RGB or BGR
  OpenCV uses the BGR color format; PIL uses RGB. Either format
  can be used for training. Note that the format must be kept
  consistent between training and inference.
"""

import numpy as np
try:
    import cv2
except ImportError:
    cv2 = None
import os
import tarfile
import cPickle

__all__ = [
    "load_image_bytes", "load_image", "resize_short", "to_chw", "center_crop",
    "random_crop", "left_right_flip", "simple_transform", "load_and_transform",
    "batch_images_from_tar"
]
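# Illustrative sketch (added for clarity; not part of the original module):
# how the CHW layout relates to the HWC layout produced by cv2. `_chw_demo`
# and 'demo.jpg' are hypothetical names, and cv2 is assumed to be available.
def _chw_demo(path='demo.jpg'):
    im = cv2.imread(path)  # HWC layout, BGR channel order (OpenCV default)
    im = im[:, :, ::-1]  # reverse the channel axis: BGR -> RGB
    return im.transpose((2, 0, 1))  # HWC -> CHW, the layout PaddlePaddle expects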
def batch_images_from_tar(data_file,
...@@ -36,17 +38,18 @@ def batch_images_from_tar(data_file,
                          num_per_batch=1024):
    """
    Read images from tar file and batch them into batch file.

    :param data_file: path of image tar file
    :type data_file: string
    :param dataset_name: 'train', 'test' or 'valid'
    :type dataset_name: string
    :param img2label: a dict with image file name as key
        and image's label as value
    :type img2label: dict
    :param num_per_batch: image number per batch file
    :type num_per_batch: int
    :return: path of list file containing paths of batch file
    :rtype: string
    """
    batch_dir = data_file + "_batch"
    out_path = "%s/%s" % (batch_dir, dataset_name)
...@@ -99,14 +102,16 @@ def load_image_bytes(bytes, is_color=True):
    Example usage:

    .. code-block:: python

        with open('cat.jpg') as f:
            im = load_image_bytes(f.read())

    :param bytes: the input image bytes array.
    :type bytes: str
    :param is_color: If set is_color True, it will load and
                     return a color image. Otherwise, it will
                     load and return a gray image.
    :type is_color: bool
    """
    flag = 1 if is_color else 0
    file_bytes = np.asarray(bytearray(bytes), dtype=np.uint8)
...@@ -121,6 +126,7 @@ def load_image(file, is_color=True):
    Example usage:

    .. code-block:: python

        im = load_image('cat.jpg')

    :param file: the input image path.
...@@ -128,6 +134,7 @@ def load_image(file, is_color=True):
    :param is_color: If set is_color True, it will load and
                     return a color image. Otherwise, it will
                     load and return a gray image.
    :type is_color: bool
    """
    # cv2.IMAGE_COLOR for OpenCV3
    # cv2.CV_LOAD_IMAGE_COLOR for older OpenCV Version
...@@ -147,6 +154,7 @@ def resize_short(im, size):
    Example usage:

    .. code-block:: python

        im = load_image('cat.jpg')
        im = resize_short(im, 256)
...@@ -175,6 +183,7 @@ def to_chw(im, order=(2, 0, 1)):
    Example usage:

    .. code-block:: python

        im = load_image('cat.jpg')
        im = resize_short(im, 256)
        im = to_chw(im)
...@@ -196,6 +205,7 @@ def center_crop(im, size, is_color=True):
    Example usage:

    .. code-block:: python

        im = center_crop(im, 224)

    :param im: the input image with HWC layout.
...@@ -223,6 +233,7 @@ def random_crop(im, size, is_color=True):
    Example usage:

    .. code-block:: python

        im = random_crop(im, 224)

    :param im: the input image with HWC layout.
...@@ -251,6 +262,7 @@ def left_right_flip(im):
    Example usage:

    .. code-block:: python

        im = left_right_flip(im)

    :param im: input image with HWC layout
...@@ -275,6 +287,7 @@ def simple_transform(im,
    Example usage:

    .. code-block:: python

        im = simple_transform(im, 256, 224, True)

    :param im: The input image with HWC layout.
...@@ -285,6 +298,11 @@ def simple_transform(im,
    :type crop_size: int
    :param is_train: Whether it is training or not.
    :type is_train: bool
    :param is_color: whether the image is color or not.
    :type is_color: bool
    :param mean: the mean values, which can be element-wise mean values or
                 mean values per channel.
    :type mean: numpy array | list
    """
    im = resize_short(im, resize_size)
    if is_train:
...@@ -324,6 +342,7 @@ def load_and_transform(filename,
    Example usage:

    .. code-block:: python

        im = load_and_transform('cat.jpg', 256, 224, True)

    :param filename: The file name of input image.
...@@ -334,6 +353,11 @@ def load_and_transform(filename,
    :type crop_size: int
    :param is_train: Whether it is training or not.
    :type is_train: bool
    :param is_color: whether the image is color or not.
    :type is_color: bool
    :param mean: the mean values, which can be element-wise mean values or
                 mean values per channel.
    :type mean: numpy array | list
    """
    im = load_image(filename)
    im = simple_transform(im, resize_size, crop_size, is_train, is_color, mean)
...
...@@ -102,7 +102,7 @@ class Momentum(Optimizer):
    .. math::

        v_{t} &= k * v_{t-1} - \\gamma_t (g_{t} + \\lambda w_{t-1}) \\\\
        w_{t} &= w_{t-1} + v_{t} \\\\

    where, :math:`k` is momentum, :math:`\\lambda` is decay rate,
...
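The update rule above is easy to check numerically. The following minimal
sketch (illustrative only, not part of PaddlePaddle; the values of k, gamma
and lambda are arbitrary assumptions chosen for the example) applies a single
momentum step:

import numpy as np

k, gamma, lam = 0.9, 0.1, 0.0      # momentum, learning rate, decay rate (assumed)
w = np.array([1.0, -2.0])          # parameters w_{t-1}
v = np.zeros_like(w)               # velocity v_{t-1}
g = np.array([0.5, 0.5])           # gradient g_{t}

v = k * v - gamma * (g + lam * w)  # v_{t} = k * v_{t-1} - gamma_t * (g_{t} + lambda * w_{t-1})
w = w + v                          # w_{t} = w_{t-1} + v_{t}
print(w)                           # [ 0.95 -2.05]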