Commit bc6bae8d authored by caoying03

Merge branch 'develop' into add_sequence_slice_layer

#!/bin/bash
set -e
readonly VERSION="3.8"
version=$(clang-format -version)
if ! [[ $version == *"$VERSION"* ]]; then
echo "clang-format version check failed."
echo "a version contains '$VERSION' is needed, but get '$version'"
echo "you can install the right version, and make an soft-link to '\$PATH' env"
exit -1
fi
clang-format "$@"
...@@ -19,10 +19,10 @@ ...@@ -19,10 +19,10 @@
- id: end-of-file-fixer - id: end-of-file-fixer
- repo: local - repo: local
hooks: hooks:
- id: clang-format - id: clang-format-with-version-check
name: clang-format name: clang-format
description: Format files with ClangFormat. description: Format files with ClangFormat.
entry: clang-format -i entry: ./.clang_format.hook -i
language: system language: system
files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|proto)$ files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|proto)$
- repo: https://github.com/PaddlePaddle/pre-commit-golang - repo: https://github.com/PaddlePaddle/pre-commit-golang
......
...@@ -37,8 +37,8 @@ before_install: ...@@ -37,8 +37,8 @@ before_install:
- if [[ "$JOB" == "check_style" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi - if [[ "$JOB" == "check_style" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi
# Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python # Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python
# protobuf version. # protobuf version.
- pip install numpy wheel 'protobuf==3.1' sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit requests==2.9.2 LinkChecker - pip install -r $TRAVIS_BUILD_DIR/python/requirements.txt
- pip install rarfile nltk==3.2.2 scipy==0.19.0 recordio matplotlib Pillow - pip install wheel sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit LinkChecker
- curl https://glide.sh/get | bash - curl https://glide.sh/get | bash
- eval "$(GIMME_GO_VERSION=1.8.3 gimme)" - eval "$(GIMME_GO_VERSION=1.8.3 gimme)"
- go get -u github.com/alecthomas/gometalinter - go get -u github.com/alecthomas/gometalinter
......
...@@ -36,8 +36,8 @@ include(simd) ...@@ -36,8 +36,8 @@ include(simd)
################################ Configurations ####################################### ################################ Configurations #######################################
option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_FOUND}) option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_FOUND})
option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND}) option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND})
option(WITH_MKLDNN "Compile PaddlePaddle with mkl-dnn support." OFF) option(WITH_MKLDNN "Compile PaddlePaddle with mkl-dnn support." ${AVX_FOUND})
option(WITH_MKLML "Compile PaddlePaddle with mklml package." OFF) option(WITH_MKLML "Compile PaddlePaddle with mklml package." ${AVX_FOUND})
option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON) option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON)
option(WITH_TESTING "Compile PaddlePaddle with unit testing" ON) option(WITH_TESTING "Compile PaddlePaddle with unit testing" ON)
option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON) option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON)
...@@ -55,6 +55,7 @@ option(WITH_C_API "Compile PaddlePaddle with C-API(Prediction)" OFF) ...@@ -55,6 +55,7 @@ option(WITH_C_API "Compile PaddlePaddle with C-API(Prediction)" OFF)
option(WITH_GOLANG "Compile PaddlePaddle with GOLANG" OFF) option(WITH_GOLANG "Compile PaddlePaddle with GOLANG" OFF)
option(GLIDE_INSTALL "Download and install go dependencies " ON) option(GLIDE_INSTALL "Download and install go dependencies " ON)
option(USE_NNPACK "Compile PaddlePaddle with NNPACK library" OFF) option(USE_NNPACK "Compile PaddlePaddle with NNPACK library" OFF)
option(USE_EIGEN_FOR_BLAS "Use matrix multiplication in Eigen" OFF)
# CMAKE_BUILD_TYPE # CMAKE_BUILD_TYPE
if(NOT CMAKE_BUILD_TYPE) if(NOT CMAKE_BUILD_TYPE)
...@@ -137,9 +138,9 @@ set(EXTERNAL_LIBS ...@@ -137,9 +138,9 @@ set(EXTERNAL_LIBS
) )
if(WITH_GPU) if(WITH_GPU)
list(APPEND EXTERNAL_LIB ${CUDA_LIBRARIES} ${CUDA_rt_LIBRARY}) list(APPEND EXTERNAL_LIBS ${CUDA_LIBRARIES} ${CUDA_rt_LIBRARY})
if(NOT WITH_DSO) if(NOT WITH_DSO)
list(APPEND EXTERNAL_LIB ${CUDNN_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_curand_LIBRARY}) list(APPEND EXTERNAL_LIBS ${CUDNN_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_curand_LIBRARY})
endif(NOT WITH_DSO) endif(NOT WITH_DSO)
endif(WITH_GPU) endif(WITH_GPU)
......
...@@ -34,9 +34,6 @@ RUN apt-get update && \ ...@@ -34,9 +34,6 @@ RUN apt-get update && \
net-tools && \ net-tools && \
apt-get clean -y apt-get clean -y
# paddle is using numpy.flip, which is introduced since 1.12.0
RUN pip --no-cache-dir install 'numpy>=1.12.0'
# Install Go and glide # Install Go and glide
RUN wget -qO- https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | \ RUN wget -qO- https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | \
tar -xz -C /usr/local && \ tar -xz -C /usr/local && \
...@@ -58,33 +55,22 @@ RUN localedef -i en_US -f UTF-8 en_US.UTF-8 ...@@ -58,33 +55,22 @@ RUN localedef -i en_US -f UTF-8 en_US.UTF-8
# FIXME: due to temporary ipykernel dependency issue, specify ipykernel jupyter # FIXME: due to temporary ipykernel dependency issue, specify ipykernel jupyter
# version until jupyter fixes this issue. # version until jupyter fixes this issue.
RUN pip install --upgrade pip && \ RUN pip install --upgrade pip && \
pip install -U 'protobuf==3.1.0' && \ pip install -U wheel && \
pip install -U wheel pillow BeautifulSoup && \
pip install -U docopt PyYAML sphinx && \ pip install -U docopt PyYAML sphinx && \
pip install -U sphinx-rtd-theme==0.1.9 recommonmark && \ pip install -U sphinx-rtd-theme==0.1.9 recommonmark
pip install pre-commit 'requests==2.9.2' 'ipython==5.3.0' && \
RUN pip install pre-commit 'ipython==5.3.0' && \
pip install 'ipykernel==4.6.0' 'jupyter==1.0.0' && \ pip install 'ipykernel==4.6.0' 'jupyter==1.0.0' && \
pip install opencv-python rarfile 'scipy>=0.19.0' 'nltk>=3.2.2' pip install opencv-python
COPY ./python/requirements.txt /root/
RUN pip install -r /root/requirements.txt
# To fix https://github.com/PaddlePaddle/Paddle/issues/1954, we use # To fix https://github.com/PaddlePaddle/Paddle/issues/1954, we use
# the solution in https://urllib3.readthedocs.io/en/latest/user-guide.html#ssl-py2 # the solution in https://urllib3.readthedocs.io/en/latest/user-guide.html#ssl-py2
RUN apt-get install -y libssl-dev libffi-dev RUN apt-get install -y libssl-dev libffi-dev
RUN pip install certifi urllib3[secure] RUN pip install certifi urllib3[secure]
# TODO(qijun) The template library Eigen doesn't work well with GCC 5
# coming with the default Docker image, so we switch to use GCC 4.8
# by default. And I will check Eigen library later.
RUN ln -sf gcc-4.8 /usr/bin/gcc && \
ln -sf gcc-ar-4.8 /usr/bin/gcc-ar && \
ln -sf gcc-nm-4.8 /usr/bin/gcc-nm && \
ln -sf gcc-ranlib-4.8 /usr/bin/gcc-ranlib && \
ln -sf gcc-4.8 /usr/bin/x86_64-linux-gnu-gcc && \
ln -sf gcc-ar-4.8 /usr/bin/x86_64-linux-gnu-gcc-ar && \
ln -sf gcc-nm-4.8 /usr/bin/x86_64-linux-gnu-gcc-nm && \
ln -sf gcc-ranlib-4.8 /usr/bin/x86_64-linux-gnu-gcc-ranlib && \
ln -sf g++-4.8 /usr/bin/g++ && \
ln -sf g++-4.8 /usr/bin/x86_64-linux-gnu-g++
# Install woboq_codebrowser to /woboq # Install woboq_codebrowser to /woboq
RUN git clone https://github.com/woboq/woboq_codebrowser /woboq && \ RUN git clone https://github.com/woboq/woboq_codebrowser /woboq && \
......
...@@ -28,6 +28,10 @@ if(NOT WITH_TIMER) ...@@ -28,6 +28,10 @@ if(NOT WITH_TIMER)
add_definitions(-DPADDLE_DISABLE_TIMER) add_definitions(-DPADDLE_DISABLE_TIMER)
endif(NOT WITH_TIMER) endif(NOT WITH_TIMER)
if(USE_EIGEN_FOR_BLAS)
add_definitions(-DPADDLE_USE_EIGEN_FOR_BLAS)
endif(USE_EIGEN_FOR_BLAS)
if(NOT WITH_PROFILER) if(NOT WITH_PROFILER)
add_definitions(-DPADDLE_DISABLE_PROFILER) add_definitions(-DPADDLE_DISABLE_PROFILER)
endif(NOT WITH_PROFILER) endif(NOT WITH_PROFILER)
......
...@@ -2,7 +2,7 @@ if(NOT WITH_GPU) ...@@ -2,7 +2,7 @@ if(NOT WITH_GPU)
return() return()
endif() endif()
set(CUDNN_ROOT "" CACHE PATH "CUDNN ROOT") set(CUDNN_ROOT "/usr" CACHE PATH "CUDNN ROOT")
find_path(CUDNN_INCLUDE_DIR cudnn.h find_path(CUDNN_INCLUDE_DIR cudnn.h
PATHS ${CUDNN_ROOT} ${CUDNN_ROOT}/include PATHS ${CUDNN_ROOT} ${CUDNN_ROOT}/include
$ENV{CUDNN_ROOT} $ENV{CUDNN_ROOT}/include ${CUDA_TOOLKIT_INCLUDE} $ENV{CUDNN_ROOT} $ENV{CUDNN_ROOT}/include ${CUDA_TOOLKIT_INCLUDE}
......
...@@ -73,10 +73,18 @@ INCLUDE_DIRECTORIES(${CBLAS_INC_DIR}) ...@@ -73,10 +73,18 @@ INCLUDE_DIRECTORIES(${CBLAS_INC_DIR})
# linear algebra libraries for cc_library(xxx SRCS xxx.c DEPS cblas) # linear algebra libraries for cc_library(xxx SRCS xxx.c DEPS cblas)
SET(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/cblas_dummy.c) SET(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/cblas_dummy.c)
FILE(WRITE ${dummyfile} "const char * dummy = \"${dummyfile}\";") FILE(WRITE ${dummyfile} "const char * dummy = \"${dummyfile}\";")
ADD_LIBRARY(cblas STATIC ${dummyfile}) IF(${CBLAS_PROVIDER} MATCHES MKL)
ADD_LIBRARY(cblas SHARED ${dummyfile})
ELSE()
ADD_LIBRARY(cblas STATIC ${dummyfile})
ENDIF()
TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES}) TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES})
IF(NOT ${CBLAS_FOUND}) IF(NOT ${CBLAS_FOUND})
ADD_DEPENDENCIES(cblas extern_openblas) ADD_DEPENDENCIES(cblas extern_openblas)
LIST(APPEND external_project_dependencies cblas) LIST(APPEND external_project_dependencies cblas)
ELSE()
IF("${CBLAS_PROVIDER}" STREQUAL "MKLML")
ADD_DEPENDENCIES(cblas mklml)
ENDIF()
ENDIF(NOT ${CBLAS_FOUND}) ENDIF(NOT ${CBLAS_FOUND})
...@@ -9,13 +9,6 @@ function(CheckCompilerCXX11Flag) ...@@ -9,13 +9,6 @@ function(CheckCompilerCXX11Flag)
if(${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 4.8) if(${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 4.8)
message(FATAL_ERROR "Unsupported GCC version. GCC >= 4.8 required.") message(FATAL_ERROR "Unsupported GCC version. GCC >= 4.8 required.")
endif() endif()
if(NOT ANDROID)
# TODO(qijun) gcc 4.9 or later versions raise SEGV due to the optimization problem.
# Use Debug mode instead for now.
if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.9)
set(CMAKE_BUILD_TYPE "Debug" CACHE STRING "" FORCE)
endif()
endif()
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang") elseif(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
# cmake >= 3.0 compiler id "AppleClang" on Mac OS X, otherwise "Clang" # cmake >= 3.0 compiler id "AppleClang" on Mac OS X, otherwise "Clang"
# Apple Clang is a different compiler than upstream Clang which has different version numbers. # Apple Clang is a different compiler than upstream Clang which has different version numbers.
...@@ -160,7 +153,7 @@ set(CUDA_PROPAGATE_HOST_FLAGS OFF) ...@@ -160,7 +153,7 @@ set(CUDA_PROPAGATE_HOST_FLAGS OFF)
# Release/Debug flags set by cmake. Such as -O3 -g -DNDEBUG etc. # Release/Debug flags set by cmake. Such as -O3 -g -DNDEBUG etc.
# So, don't set these flags here. # So, don't set these flags here.
LIST(APPEND CUDA_NVCC_FLAGS -std=c++11 --default-stream per-thread) LIST(APPEND CUDA_NVCC_FLAGS -std=c++11)
LIST(APPEND CUDA_NVCC_FLAGS --use_fast_math) LIST(APPEND CUDA_NVCC_FLAGS --use_fast_math)
if(CMAKE_BUILD_TYPE STREQUAL "Debug") if(CMAKE_BUILD_TYPE STREQUAL "Debug")
......
...@@ -367,6 +367,11 @@ trans ...@@ -367,6 +367,11 @@ trans
.. autoclass:: paddle.v2.layer.trans .. autoclass:: paddle.v2.layer.trans
:noindex: :noindex:
scale_shift
-----------
.. autoclass:: paddle.v2.layer.scale_shift
:noindex:
Sampling Layers Sampling Layers
=============== ===============
......
## Auto Gradient Checker Design
## Background:
- Whether an operator's forward computation is right is easy to check, because the forward computation has a clear definition. **But** backpropagation is a notoriously difficult algorithm to debug and get right:
- 1. you have to derive the right backpropagation formula from the forward computation.
- 2. you have to implement it correctly in C++.
- 3. it is difficult to prepare test data.
- Auto gradient checking computes a numeric gradient using only the forward operator and uses it as a reference for the backward operator's result. It has several advantages:
- 1. the numeric gradient checker only needs the forward operator.
- 2. the user only needs to prepare the input data for the forward operator.
## Mathematical Theory
The following two documents from Stanford give a detailed explanation of how to compute the numeric gradient and why it is useful.
- [Gradient checking and advanced optimization(en)](http://deeplearning.stanford.edu/wiki/index.php/Gradient_checking_and_advanced_optimization)
- [Gradient checking and advanced optimization(cn)](http://ufldl.stanford.edu/wiki/index.php/%E6%A2%AF%E5%BA%A6%E6%A3%80%E9%AA%8C%E4%B8%8E%E9%AB%98%E7%BA%A7%E4%BC%98%E5%8C%96)
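In short, both documents describe approximating each partial derivative with a central difference: for a scalar output $y = f(x)$, a perturbation $\delta$, and the unit vector $e_i$ selecting element $i$,

$$
\frac{\partial y}{\partial x_i} \approx \frac{f(x + \delta e_i) - f(x - \delta e_i)}{2\delta}
$$

which is exactly the `(y_pos - y_neg) / delta / 2` expression computed by the implementation below.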
## Numeric Gradient Implementation
### Python Interface
```python
def get_numeric_gradient(op,
                         input_values,
                         output_name,
                         input_to_check,
                         delta=0.005,
                         local_scope=None):
    """
    Get the numeric gradient for an operator's input.

    :param op: C++ operator instance; could be a network.
    :param input_values: the input variables. Should be a dictionary whose
        keys are variable names and whose values are numpy arrays.
    :param output_name: the final output variable name.
    :param input_to_check: the input variable to compute the gradient for.
    :param delta: the perturbation value for the numeric gradient method.
        The smaller delta is, the more accurate the result will be, but a
        delta that is too small can cause numerical stability problems.
    :param local_scope: the local scope used by get_numeric_gradient.
    :return: the gradient array in numpy format.
    """
```
### Explanation:
- Why `output_name` is needed:
  - An operator may have multiple outputs, and an independent gradient can be computed from each output. So the user must specify one output for the calculation.
- Why `input_to_check` is needed:
  - An operator may have multiple inputs. A gradient op can compute the gradients of all its inputs at the same time, but the numeric gradient has to compute them one by one. So `get_numeric_gradient` is designed to compute the gradient for a single input; to check multiple inputs, call `get_numeric_gradient` multiple times, as in the sketch below.
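A minimal usage sketch (the `create_op` helper, the operator name, and the shapes here are hypothetical, for illustration only):

```python
import numpy

# Hypothetical "mul" operator with inputs X and Y and output "Out";
# create_op and get_numeric_gradient are assumed to come from the
# testing utilities described in this document.
op = create_op("mul")
input_values = {
    "X": numpy.random.random((10, 8)).astype("float32"),
    "Y": numpy.random.random((8, 4)).astype("float32"),
}

# get_numeric_gradient handles one input per call, so call it once per
# input whose gradient we want to check.
numeric_grads = {
    name: get_numeric_gradient(op, input_values, "Out", name)
    for name in ("X", "Y")
}
```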
### Core Algorithm Implementation
```python
# We compute the gradient of one element at a time,
# using a for loop to cover every element of the tensor.
for i in xrange(tensor_size):
    # get one input element by its index i.
    origin = tensor_to_check.get_float_element(i)

    # add delta to it, run the op, and get the sum of the result tensor.
    x_pos = origin + delta
    tensor_to_check.set_float_element(i, x_pos)
    y_pos = get_output()

    # subtract delta from this element, run the op, and get the sum of the
    # result tensor.
    x_neg = origin - delta
    tensor_to_check.set_float_element(i, x_neg)
    y_neg = get_output()

    # restore the old value
    tensor_to_check.set_float_element(i, origin)

    # compute the gradient of this element and store it into a numpy array.
    gradient_flat[i] = (y_pos - y_neg) / delta / 2

# reshape the gradient result to the shape of the source tensor.
return gradient_flat.reshape(tensor_to_check.get_dims())
```
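The same algorithm can also be written as a self-contained numpy sketch, independent of the Paddle tensor API, for any function that maps an array to a scalar:

```python
import numpy

def numeric_gradient(f, x, delta=0.005):
    """Central-difference gradient of a scalar-valued f at x."""
    x = x.astype("float64")          # work on a float64 copy
    grad = numpy.zeros_like(x)
    flat_x = x.reshape(-1)           # views onto x and grad, so writes
    flat_g = grad.reshape(-1)        # through them update the originals
    for i in range(flat_x.size):
        origin = flat_x[i]
        flat_x[i] = origin + delta
        y_pos = f(x)
        flat_x[i] = origin - delta
        y_neg = f(x)
        flat_x[i] = origin           # restore the old value
        flat_g[i] = (y_pos - y_neg) / delta / 2
    return grad

# Example: f(x) = sum(x ** 2) has the analytic gradient 2 * x.
x = numpy.random.random((3, 4))
assert numpy.allclose(numeric_gradient(lambda v: (v ** 2).sum(), x),
                      2 * x, atol=1e-4)
```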
## Auto Gradient Checker Framework
Each operator kernel has three kinds of gradient:
- 1. the numeric gradient
- 2. the CPU kernel gradient
- 3. the GPU kernel gradient (if supported)

The numeric gradient only relies on the forward operator, so we use it as the reference value. The check proceeds as follows:
- 1. calculate the numeric gradient;
- 2. calculate the CPU kernel gradient with the backward operator and compare it with the numeric gradient;
- 3. calculate the GPU kernel gradient with the backward operator and compare it with the numeric gradient (if GPU is supported).
#### Python Interface
```python
def check_grad(self,
               forward_op,
               input_vars,
               inputs_to_check,
               output_name,
               no_grad_set=None,
               only_cpu=False,
               max_relative_error=0.005):
    """
    :param forward_op: the forward operator, used to create the backward_op.
    :param input_vars: numpy values of the input variables. The following
        computation will use these variables.
    :param inputs_to_check: the names of the inputs whose gradients should
        be checked.
    :param output_name: the name of the output used to compute the gradient.
    :param max_relative_error: the relative tolerance parameter.
    :param no_grad_set: variable names excluded when creating the backward ops.
    :param only_cpu: only compute and check the gradient on the CPU kernel.
    :return:
    """
```
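A usage sketch in a unit-test style (the `GradientChecker` base class and the `create_op` helper are assumed here, for illustration only):

```python
import numpy

class TestMulGradOp(GradientChecker):
    def test_mul(self):
        # Hypothetical "mul" operator, as in the earlier sketch.
        op = create_op("mul")
        inputs = {
            "X": numpy.random.random((10, 8)).astype("float32"),
            "Y": numpy.random.random((8, 4)).astype("float32"),
        }
        # Compares the CPU (and, if supported, GPU) backward kernels
        # against the numeric gradient.
        self.check_grad(op, inputs,
                        inputs_to_check=["X", "Y"],
                        output_name="Out",
                        max_relative_error=0.005)
```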
### How to check whether two numpy arrays are close enough?
If `abs_numeric_grad` is nearly zero, use the absolute error for `numeric_grad` instead of the relative error:
```python
numeric_grad = ...
operator_grad = numpy.array(scope.find_var(grad_var_name(name)).get_tensor())
abs_numeric_grad = numpy.abs(numeric_grad)
# if abs_numeric_grad is nearly zero, then use abs error for numeric_grad, not relative
# error.
abs_numeric_grad[abs_numeric_grad < 1e-3] = 1
diff_mat = numpy.abs(abs_numeric_grad - operator_grad) / abs_numeric_grad
max_diff = numpy.max(diff_mat)
```
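As a self-contained sketch of the same comparison (written with the difference taken before the absolute value, the usual relative-error form):

```python
import numpy

def max_relative_error(numeric_grad, operator_grad):
    abs_numeric_grad = numpy.abs(numeric_grad)
    # Where the reference gradient is nearly zero, use a denominator of 1,
    # i.e. fall back to absolute error instead of relative error.
    abs_numeric_grad[abs_numeric_grad < 1e-3] = 1
    diff_mat = numpy.abs(numeric_grad - operator_grad) / abs_numeric_grad
    return numpy.max(diff_mat)
```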
#### Notes:
1. The input data for the auto gradient checker should be reasonable, to avoid numerical problems.
#### Refs:
- [Gradient checking and advanced optimization(en)](http://deeplearning.stanford.edu/wiki/index.php/Gradient_checking_and_advanced_optimization)
- [Gradient checking and advanced optimization(cn)](http://ufldl.stanford.edu/wiki/index.php/%E6%A2%AF%E5%BA%A6%E6%A3%80%E9%AA%8C%E4%B8%8E%E9%AB%98%E7%BA%A7%E4%BC%98%E5%8C%96)
# Analysis of large model distributed training in Paddle
***NOTE: These are only notes on how we implemented this scheme in V1, not a new design.***
## What is it
We often encounter cases where the embedding layer parameters (sparse) are so large that we cannot store them in the trainer's memory during training. So we need to place them on several servers and fetch them row by row instead of fetching all of the parameters.
## How to use
Specify command-line arguments like `--loadsave_parameters_in_pserver=true --ports_num_for_sparse=1 --use_old_updater=1` when starting the paddle trainer, and add something like `--ports_num_for_sparse=1 --pserver_num_threads=5` when starting the pserver processes.
Accordingly, configure your embedding layers like this:
```python
SPARSE_REMOTE=True
w1 = data_layer(name="w1", size=dict_size)
emb1 = embedding_layer(input=w1, size=32, param_attr=ParameterAttribute(sparse_update=SPARSE_REMOTE))
w2 = data_layer(name="w2", size=dict_size)
emb2 = embedding_layer(input=w2, size=32, param_attr=ParameterAttribute(sparse_update=SPARSE_REMOTE))
...
```
## Implementation details
```c++
enum MatType {
MAT_NORMAL,
MAT_NORMAL_SHARED,
MAT_VALUE_SHARED,
MAT_SPARSE_ROW_IDS,
MAT_SPARSE_ROW_AUTO_GROW,
MAT_CACHE_ROW,
MAT_SPARSE_ROW,
MAT_SPARSE_ROW_PREFETCH,
MAT_SPARSE_ROW_PREFETCH_FULL_SIZE,
};
```
`MAT_SPARSE_ROW_PREFETCH` is the type we use when configured to fetch only rows of the matrix during training.
In `trainer_internal.cpp:L93 trainOneBatch`:
```c++
if (config_->getOptConfig().use_sparse_remote_updater()) {
REGISTER_TIMER("prefetch");
gradientMachine_->prefetch(inArgs);
parameterUpdater_->getParametersRemote();
}
```
During the actual network forward and backward passes, at the beginning of each batch, the trainer will try to download one row of data from the pserver.
In `trainer/RemoteParameterUpdater.cpp`: `parameterUpdater_->getParametersRemote();`:
```c++
if (fullSize) {
...
} else {
getParams = [&] {
parameterClient_->getParameterSparse(
/* recvParameterType= */ PARAMETER_VALUE, sendBackParameterType);
};
applyL1 = [](Parameter& para, real decayRate) {
para.getMat(PARAMETER_VALUE)->applyL1(/*lr=*/1.0f, decayRate);
};
}
```
Calling `parameterClient_->getParameterSparse` will make a remote call to the pserver's `getParameterSparse`:
```c++
void ParameterServer2::getParameterSparse(const SendParameterRequest& request,
std::vector<Buffer>& inputBuffers,
SendParameterResponse* response,
std::vector<Buffer>* outputBuffers) {
(void)inputBuffers;
auto& buffer = *readWriteBuffer_;
size_t numReals = 0;
for (const auto& block : request.blocks()) {
numReals += getParameterConfig(block).dims(1);
}
buffer.resize(numReals);
VLOG(3) << "pserver: getParameterSparse, numReals=" << numReals;
ReadLockGuard guard(parameterMutex_);
size_t offset = 0;
for (const auto& block : request.blocks()) {
size_t width = getParameterConfig(block).dims(1);
Buffer buf = {buffer.data() + offset, width};
int type = request.send_back_parameter_type();
sendBackParameterSparse(block, type, response, &buf, width, outputBuffers);
offset += width;
}
}
```
`getParameterConfig(block).dims(1)` returns the width of the current "parameter block" (a shard of the parameter object), and then the `getParameterSparse` remote call returns only one row of data to the client.
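To make the whole flow concrete, here is a toy Python model of the scheme (all class and method names are invented for illustration; they are not Paddle APIs): the trainer sends only the row ids that appear in the current batch, and the pserver returns just those rows.

```python
import numpy

class ToyPServer:
    """Holds the full embedding table; serves individual rows on request."""

    def __init__(self, dict_size, width):
        self.table = numpy.random.random((dict_size, width)).astype("float32")

    def get_parameter_sparse(self, row_ids):
        # Analogous to ParameterServer2::getParameterSparse: only the
        # requested rows are copied into the response.
        return {i: self.table[i].copy() for i in row_ids}

class ToyTrainer:
    def __init__(self, pserver):
        self.pserver = pserver
        self.local_rows = {}

    def prefetch(self, batch_word_ids):
        # Analogous to gradientMachine_->prefetch(inArgs) followed by
        # parameterUpdater_->getParametersRemote(): at the beginning of
        # each batch, fetch only the rows this batch needs.
        self.local_rows = self.pserver.get_parameter_sparse(set(batch_word_ids))

pserver = ToyPServer(dict_size=100000, width=32)
trainer = ToyTrainer(pserver)
trainer.prefetch([3, 17, 3, 42])   # downloads 3 distinct rows, not 100000
```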
...@@ -101,6 +101,7 @@ if use_mkldnn ...@@ -101,6 +101,7 @@ if use_mkldnn
5. Add two `MkldnnMatrixPtr` members, named `mkldnnValue` and `mkldnnGrad`, to **Argument** to hold the memory buffers used by `MkldnnLayer`, and add a cvt function (to be renamed to something more suitable) to convert memory between the "CPU device" and the "MKL-DNN device". 5. Add two `MkldnnMatrixPtr` members, named `mkldnnValue` and `mkldnnGrad`, to **Argument** to hold the memory buffers used by `MkldnnLayer`, and add a cvt function (to be renamed to something more suitable) to convert memory between the "CPU device" and the "MKL-DNN device".
6. Add logic to the `getOutput` function of the parent class `Layer` to check the `deviceId` and, when the device differs between MKL-DNN and CPU, do an up-front conversion, i.e. call `Argument`'s cvt function to unify the output onto the required device. 6. Add logic to the `getOutput` function of the parent class `Layer` to check the `deviceId` and, when the device differs between MKL-DNN and CPU, do an up-front conversion, i.e. call `Argument`'s cvt function to unify the output onto the required device.
7. Add a `use_mkldnn` flag to the existing `FLAGS` to choose whether to use the MKL-DNN related functionality. 7. Add a `use_mkldnn` flag to the existing `FLAGS` to choose whether to use the MKL-DNN related functionality.
8. On saving MKLDNN parameters: since the MKLDNN parameter format can differ from PaddlePaddle's original format, the format information must be saved along with the parameters. The current plan is to extend the `int32_t version` field in [Header](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/parameter/Parameter.h#L247). This value has always been 0 in both v1 and v2, so we can take full advantage of it and define an enum covering all the MKLDNN parameter formats; `MKLDNNLayer` can then obtain the format information it needs from the input parameters.
## References ## References
......
...@@ -68,7 +68,7 @@ As a simple example, consider the following: ...@@ -68,7 +68,7 @@ As a simple example, consider the following:
1. **BLAS Dependencies(optional)** 1. **BLAS Dependencies(optional)**
CMake will search BLAS libraries from system. If not found, OpenBLAS will be downloaded, built and installed automatically. CMake will search BLAS libraries from the system. If not found, OpenBLAS will be downloaded, built and installed automatically.
To utilize preinstalled BLAS, you can simply specify MKL, OpenBLAS or ATLAS via `MKL_ROOT`, `OPENBLAS_ROOT` or `ATLAS_ROOT`. To utilize preinstalled BLAS, you can simply specify MKL, OpenBLAS or ATLAS via `MKL_ROOT`, `OPENBLAS_ROOT` or `ATLAS_ROOT`.
```bash ```bash
...@@ -131,9 +131,9 @@ As a simple example, consider the following: ...@@ -131,9 +131,9 @@ As a simple example, consider the following:
To build GPU version, you will need the following installed: To build GPU version, you will need the following installed:
1. a CUDA-capable GPU 1. a CUDA-capable GPU
2. A supported version of Linux with a gcc compiler and toolchain 2. A supported version of Linux with a GCC compiler and toolchain
3. NVIDIA CUDA Toolkit (available at http://developer.nvidia.com/cuda-downloads) 3. NVIDIA CUDA Toolkit (available at http://developer.nvidia.com/cuda-downloads)
4. NVIDIA cuDNN Library (availabel at https://developer.nvidia.com/cudnn) 4. NVIDIA cuDNN Library (available at https://developer.nvidia.com/cudnn)
The CUDA development environment relies on tight integration with the host development environment, The CUDA development environment relies on tight integration with the host development environment,
including the host compiler and C runtime libraries, and is therefore only supported on including the host compiler and C runtime libraries, and is therefore only supported on
...@@ -172,6 +172,7 @@ export PATH=<path to install>/bin:$PATH ...@@ -172,6 +172,7 @@ export PATH=<path to install>/bin:$PATH
# install PaddlePaddle Python modules. # install PaddlePaddle Python modules.
sudo pip install <path to install>/opt/paddle/share/wheels/*.whl sudo pip install <path to install>/opt/paddle/share/wheels/*.whl
``` ```
## <span id="centos">Build on Centos 7</span> ## <span id="centos">Build on Centos 7</span>
### Install Dependencies ### Install Dependencies
...@@ -192,9 +193,9 @@ sudo pip install <path to install>/opt/paddle/share/wheels/*.whl ...@@ -192,9 +193,9 @@ sudo pip install <path to install>/opt/paddle/share/wheels/*.whl
To build GPU version, you will need the following installed: To build GPU version, you will need the following installed:
1. a CUDA-capable GPU 1. a CUDA-capable GPU
2. A supported version of Linux with a gcc compiler and toolchain 2. A supported version of Linux with a GCC compiler and toolchain
3. NVIDIA CUDA Toolkit (available at http://developer.nvidia.com/cuda-downloads) 3. NVIDIA CUDA Toolkit (available at http://developer.nvidia.com/cuda-downloads)
4. NVIDIA cuDNN Library (availabel at https://developer.nvidia.com/cudnn) 4. NVIDIA cuDNN Library (available at https://developer.nvidia.com/cudnn)
The CUDA development environment relies on tight integration with the host development environment, The CUDA development environment relies on tight integration with the host development environment,
including the host compiler and C runtime libraries, and is therefore only supported on including the host compiler and C runtime libraries, and is therefore only supported on
......
...@@ -74,13 +74,13 @@ Whenever PaddlePaddle releases a new version, it also releases the corresponding production image as well as ...@@ -74,13 +74,13 @@ Whenever PaddlePaddle releases a new version, it also releases the corresponding production image as well as
.. code-block:: bash .. code-block:: bash
docker run -it --rm paddlepaddle/paddle:0.10.0-dev /bin/bash docker run -it --rm -v $(pwd):/paddle paddlepaddle/paddle:0.10.0-dev /bin/bash
Alternatively, you can run the container as a background process: Alternatively, you can run the container as a background process:
.. code-block:: bash .. code-block:: bash
docker run -d -p 2202:22 -p 8888:8888 paddledev/paddle:0.10.0-dev docker run -d -p 2202:22 -p 8888:8888 -v $(pwd):/paddle paddlepaddle/paddle:0.10.0-dev /usr/sbin/sshd -D
Then SSH into the container with the password :code:`root`: Then SSH into the container with the password :code:`root`:
......
...@@ -146,3 +146,19 @@ paddle_error paddle_gradient_machine_randomize_param( ...@@ -146,3 +146,19 @@ paddle_error paddle_gradient_machine_randomize_param(
m->machine->randParameters(); m->machine->randParameters();
return kPD_NO_ERROR; return kPD_NO_ERROR;
} }
paddle_error paddle_gradient_machine_get_layer_output(
paddle_gradient_machine machine,
const char* layerName,
paddle_arguments args) {
auto m = cast(machine);
auto out = paddle::capi::cast<paddle::capi::CArguments>(args);
if (m == nullptr || layerName == nullptr || out == nullptr ||
m->machine == nullptr) {
return kPD_NULLPTR;
}
auto layerOutput = m->machine->getLayerOutput(layerName);
out->args.push_back(layerOutput);
return kPD_NO_ERROR;
}
...@@ -39,7 +39,11 @@ PD_API paddle_error paddle_gradient_machine_create_for_inference( ...@@ -39,7 +39,11 @@ PD_API paddle_error paddle_gradient_machine_create_for_inference(
/** /**
* @brief Create a gradient machine used for model inference, using config with * @brief Create a gradient machine used for model inference, using config with
* parameters which is generated by `paddle merge_model`. * parameters which is generated by `paddle merge_model`.
* @param [out] machine that used for model inference. * Example:
* paddle merge_model \
* --model_dir="pass-00000" \
* --model_file="merged_model.paddle"
* @param [out] machine that used for model inference
* @param [in] mergedModel * @param [in] mergedModel
* @param [in] size * @param [in] size
* @return paddle_error * @return paddle_error
...@@ -97,6 +101,18 @@ paddle_gradient_machine_randomize_param(paddle_gradient_machine machine); ...@@ -97,6 +101,18 @@ paddle_gradient_machine_randomize_param(paddle_gradient_machine machine);
PD_API paddle_error PD_API paddle_error
paddle_gradient_machine_destroy(paddle_gradient_machine machine); paddle_gradient_machine_destroy(paddle_gradient_machine machine);
/**
* @brief Get the output of the layer named `layerName`.
* @param [in] machine gradient machine that has run an inference
* @param [in] layerName name of the specified layer
* @param [out] args output of the specified layer
* @return paddle_error
*/
PD_API paddle_error
paddle_gradient_machine_get_layer_output(paddle_gradient_machine machine,
const char* layerName,
paddle_arguments args);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
......
...@@ -7,7 +7,7 @@ cc_library(tensor SRCS tensor.cc DEPS ddim place paddle_memory device_context) ...@@ -7,7 +7,7 @@ cc_library(tensor SRCS tensor.cc DEPS ddim place paddle_memory device_context)
cc_test(tensor_test SRCS tensor_test.cc DEPS tensor) cc_test(tensor_test SRCS tensor_test.cc DEPS tensor)
cc_test(eigen_test SRCS eigen_test.cc DEPS tensor) cc_test(eigen_test SRCS eigen_test.cc DEPS tensor)
cc_library(lod_tensor SRCS lod_tensor.cc details/lod_tensor.cc DEPS ddim place tensor) cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor)
cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor) cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor)
cc_test(variable_test SRCS variable_test.cc) cc_test(variable_test SRCS variable_test.cc)
...@@ -15,42 +15,41 @@ cc_test(variable_test SRCS variable_test.cc) ...@@ -15,42 +15,41 @@ cc_test(variable_test SRCS variable_test.cc)
cc_library(scope SRCS scope.cc) cc_library(scope SRCS scope.cc)
cc_test(scope_test SRCS scope_test.cc DEPS scope) cc_test(scope_test SRCS scope_test.cc DEPS scope)
proto_library(attribute_proto SRCS attribute.proto) proto_library(framework_proto SRCS framework.proto)
proto_library(op_proto SRCS op_proto.proto DEPS attribute_proto)
proto_library(op_desc SRCS op_desc.proto DEPS attribute_proto)
cc_test(op_proto_test SRCS op_proto_test.cc DEPS op_proto protobuf)
cc_test(op_desc_test SRCS op_desc_test.cc DEPS op_desc protobuf)
cc_library(attribute SRCS attribute.cc DEPS op_desc op_proto) cc_library(attribute SRCS attribute.cc DEPS framework_proto)
cc_library(operator SRCS operator.cc DEPS op_desc device_context tensor scope attribute) cc_library(operator SRCS operator.cc DEPS framework_proto device_context tensor scope attribute)
cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry) cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry)
cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS op_proto operator) cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator)
cc_library(op_registry SRCS op_registry.cc DEPS op_desc grad_op_builder) cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder)
cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry) cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry)
cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry add_op) cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry add_op)
py_proto_compile(framework_py_proto SRCS attribute.proto op_proto.proto op_desc.proto) py_proto_compile(framework_py_proto SRCS framework.proto)
# Generate an empty __init__.py to make framework_py_proto as a valid python module. # Generate an empty __init__.py to make framework_py_proto as a valid python module.
add_custom_target(framework_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py) add_custom_target(framework_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
add_dependencies(framework_py_proto framework_py_proto_init) add_dependencies(framework_py_proto framework_py_proto_init)
add_custom_command(TARGET framework_py_proto POST_BUILD add_custom_command(TARGET framework_py_proto POST_BUILD
COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJ_ROOT}/python/paddle/v2/framework/proto COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SOURCE_DIR}/python/paddle/v2/framework/proto
COMMAND cp *.py ${PROJ_ROOT}/python/paddle/v2/framework/proto/ COMMAND cp *.py ${PADDLE_SOURCE_DIR}/python/paddle/v2/framework/proto/
COMMENT "Copy generated python proto into directory paddle/v2/framework/proto." COMMENT "Copy generated python proto into directory paddle/v2/framework/proto."
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
cc_library(backward SRCS backward.cc DEPS net_op) cc_library(backward SRCS backward.cc DEPS net_op)
cc_test(backward_test SRCS backward_test.cc DEPS backward) cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context)
if(WITH_PYTHON) if(WITH_PYTHON)
cc_library(paddle_pybind SHARED cc_library(paddle_pybind SHARED
SRCS pybind.cc SRCS pybind.cc
DEPS pybind python backward DEPS pybind python backward
fc_op
sgd_op sgd_op
add_op add_op
mul_op
rowwise_add_op
sigmoid_op
softmax_op
mean_op mean_op
cross_entropy_op cross_entropy_op
recurrent_op recurrent_op
......
...@@ -44,7 +44,7 @@ AttrType AttrTypeID<std::vector<std::string>>() { ...@@ -44,7 +44,7 @@ AttrType AttrTypeID<std::vector<std::string>>() {
return STRINGS; return STRINGS;
} }
Attribute GetAttrValue(const AttrDesc& attr_desc) { Attribute GetAttrValue(const OpDesc::Attr& attr_desc) {
switch (attr_desc.type()) { switch (attr_desc.type()) {
case paddle::framework::AttrType::INT: { case paddle::framework::AttrType::INT: {
return attr_desc.i(); return attr_desc.i();
......
...@@ -20,8 +20,7 @@ limitations under the License. */ ...@@ -20,8 +20,7 @@ limitations under the License. */
#include <unordered_set> #include <unordered_set>
#include <vector> #include <vector>
#include "paddle/framework/attribute.pb.h" #include "paddle/framework/framework.pb.h"
#include "paddle/framework/op_desc.pb.h"
#include "paddle/platform/enforce.h" #include "paddle/platform/enforce.h"
#include "paddle/platform/variant.h" #include "paddle/platform/variant.h"
...@@ -37,7 +36,7 @@ typedef std::unordered_map<std::string, Attribute> AttributeMap; ...@@ -37,7 +36,7 @@ typedef std::unordered_map<std::string, Attribute> AttributeMap;
template <typename T> template <typename T>
AttrType AttrTypeID(); AttrType AttrTypeID();
Attribute GetAttrValue(const AttrDesc& attr_desc); Attribute GetAttrValue(const OpDesc::Attr& attr_desc);
// check whether a value(attribute) fit a certain limit // check whether a value(attribute) fit a certain limit
template <typename T> template <typename T>
......
...@@ -15,31 +15,44 @@ ...@@ -15,31 +15,44 @@
#include "paddle/framework/backward.h" #include "paddle/framework/backward.h"
#include <list> #include <list>
#include <memory>
#include "paddle/framework/op_registry.h" #include "paddle/framework/op_registry.h"
#include "paddle/operators/net_op.h" #include "paddle/operators/net_op.h"
#include "paddle/operators/recurrent_op.h"
namespace paddle { namespace paddle {
namespace framework { namespace framework {
static bool AllInSet(const std::vector<std::string>& names, template <typename Map, typename T>
const std::string& suffix, static void ForEachVarName(const Map& names, T callback) {
const std::unordered_set<std::string>& set) {
for (auto& name : names) { for (auto& name : names) {
if (set.find(name + suffix) == set.end()) { for (auto& n : name.second) {
return false; if (callback(n)) return;
} }
} }
return true;
} }
static std::shared_ptr<OperatorBase> NOP() { // return whether all the names + suffixes in the set
auto net_op = std::make_shared<operators::NetOp>(); static bool AllInSet(
net_op->type_ = "@NOP@"; const std::map<std::string, std::vector<std::string>>& names,
const std::string& suffix, const std::unordered_set<std::string>& set) {
bool all_in_set = true;
ForEachVarName(names, [&all_in_set, &set, &suffix](const std::string& n) {
all_in_set = set.find(n + suffix) != set.end();
return !all_in_set;
});
return all_in_set;
}
static std::unique_ptr<OperatorBase> NOP() {
auto net_op = new operators::NetOp();
net_op->SetType("@NOP@");
net_op->CompleteAddOp(); net_op->CompleteAddOp();
return net_op; return std::unique_ptr<OperatorBase>(net_op);
} }
// Get backward operator from a forward operator, recursively implementation. // Get backward operator from a forward operator, a recursive implementation.
// //
// no_grad_names the gradient variable names without gradient calculating. // no_grad_names the gradient variable names without gradient calculating.
// //
...@@ -47,122 +60,152 @@ static std::shared_ptr<OperatorBase> NOP() { ...@@ -47,122 +60,152 @@ static std::shared_ptr<OperatorBase> NOP() {
// BackwardRecursive. use `uid = uniq_id++;` to get the unique index, and // BackwardRecursive. use `uid = uniq_id++;` to get the unique index, and
// pass `uniq_id` through recursive calling. // pass `uniq_id` through recursive calling.
// //
// returns The backward operator. For simple situation, it is a simple // returns The backward operator. In a simple situation, it may be a simple
// operator. For complex situation, it is a NetOp. // operator, in a complex situation, it may be a NetOp.
// //
// See Backward.h for details // See Backward.h for details
static std::shared_ptr<OperatorBase> BackwardRecursive( static std::unique_ptr<OperatorBase> BackwardRecursive(
const OperatorBase& forwardOp,
std::unordered_set<std::string>& no_grad_names, size_t& uniq_id);
std::shared_ptr<OperatorBase> BackwardRecursive(
const OperatorBase& forwardOp, const OperatorBase& forwardOp,
std::unordered_set<std::string>& no_grad_names, size_t& uniq_id) { std::unordered_set<std::string>& no_grad_names, size_t& uniq_id) {
// If all input gradients of forwarding operator do not need to calculate, // If all input gradients of forwarding operator do not need to calculate,
// just return an NOP. Not return null ptr because NOP does not take // just return an NOP. Not return null ptr because NOP does not take
// too much time for calculation, but it is useful for simplifying logic. // too much time for calculation, but it is useful for simplifying logic.
if (AllInSet(forwardOp.inputs_, kGradVarSuffix, no_grad_names)) { if (AllInSet(forwardOp.Inputs() /*names*/, kGradVarSuffix /*suffix*/,
no_grad_names /*set*/)) {
return NOP(); return NOP();
} }
// All output gradients of forwarding operator do not need to calculate. // All output gradients of forwarding operator do not need to calculate.
// Then all input gradients cannot be computed at all, and we put them into // Then all input gradients cannot be computed at all, and we put them into
// `no_grad_names` set. Return an NOP. // `no_grad_names` set. Return an NOP.
if (AllInSet(forwardOp.outputs_, kGradVarSuffix, no_grad_names)) { if (AllInSet(forwardOp.Outputs() /*names*/, kGradVarSuffix /*suffix*/,
for (auto& name : forwardOp.inputs_) { no_grad_names /*set*/)) {
// Mark all input is not need ForEachVarName(forwardOp.Inputs(),
no_grad_names.insert(name + kGradVarSuffix); [&no_grad_names](const std::string& name) -> bool {
} no_grad_names.insert(GradVarName(name));
return false;
});
return NOP(); return NOP();
} }
// Returned gradient network // Returned gradient network
auto net = std::make_shared<operators::NetOp>(); auto net = std::unique_ptr<operators::NetOp>(new operators::NetOp());
if (forwardOp.IsNetOp()) { if (forwardOp.IsNetOp()) {
// Because forwardOp is a net op, it can static_cast. // Because forwardOp is a net op, it can static_cast.
auto& forwardNet = static_cast<const operators::NetOp&>(forwardOp); auto& forwardNet = static_cast<const operators::NetOp&>(forwardOp);
// Map from output gradient variable name to operator's indices in // Map from output gradient variable name to operator's indices in
// backward net. That operator generates that variable. // backward net's ops_. That operator generates that variable.
std::unordered_map<std::string, std::vector<size_t>> dup_output_ops; std::unordered_map<std::string, std::vector<size_t>> dup_output_ops;
size_t local_op_id = 0; size_t local_op_id = 0;
// reversely travel forwardNet // reversely travel forwardNet and collect all duplicate outputs.
for (auto it = forwardNet.ops_.rbegin(); it != forwardNet.ops_.rend(); for (auto it = forwardNet.ops_.rbegin(); it != forwardNet.ops_.rend();
++it, ++local_op_id) { ++it, ++local_op_id) {
auto fwd = *it; auto& fwd = *it;
auto bwd = BackwardRecursive(*fwd, no_grad_names, uniq_id); auto bwd = BackwardRecursive(*fwd, no_grad_names, uniq_id);
net->AddOp(bwd); ForEachVarName(bwd->Outputs(),
for (auto& out : bwd->outputs_) { [&dup_output_ops, local_op_id](const std::string& out) {
dup_output_ops[out].emplace_back(local_op_id); dup_output_ops[out].emplace_back(local_op_id);
} return false;
});
net->AppendOp(std::move(bwd));
} }
// Get unique ID for this method. // Get unique ID for this method.
auto uid = uniq_id++; auto uid = uniq_id++;
// TODO(dzh): more comment // TODO(dzh): more comment
using Pos = std::pair<size_t, std::shared_ptr<OperatorBase>>; // multiple operators which have the same output (y for example) may
// overwrite the same y variable during the backward pass; special operations are taken
// to handle this case. For each duplicate output, rename it to an alias
// (the original name with an offset), append an `add` op for its operator,
// and finally sum all the alias variables into the final output variable y.
using Pos = std::pair<size_t, std::unique_ptr<OperatorBase>>;
std::list<Pos> insert_position; std::list<Pos> insert_position;
for (auto& dup_output_op : dup_output_ops) { for (auto& dup_output_op : dup_output_ops) {
const std::string& name = dup_output_op.first; const std::string& name = dup_output_op.first;
auto& dup_op = dup_output_op.second; auto& dup_op = dup_output_op.second;
// no duplicate output
if (dup_op.size() == 1) continue; if (dup_op.size() == 1) continue;
std::vector<std::string> dup_outputs;
// process the duplicate outputs
std::vector<std::string> dup_outputs;
for (size_t i = 0; i < dup_op.size(); ++i) { for (size_t i = 0; i < dup_op.size(); ++i) {
// rename each duplicate output to an alias
auto op_offset = dup_op[i]; auto op_offset = dup_op[i];
dup_outputs.push_back(name + "@RENAME@" + std::to_string(uid) + "@" + dup_outputs.push_back(name + "@RENAME@" + std::to_string(uid) + "@" +
std::to_string(i)); std::to_string(i));
net->ops_[op_offset]->Rename(name, dup_outputs.back()); net->ops_[op_offset]->Rename(name, dup_outputs.back());
} }
// collect all the offset to append `add` op for each alias
insert_position.push_back( insert_position.push_back(
{dup_op.back(), {dup_op.back(), OpRegistry::CreateOp("add", {{"X", {dup_outputs}}},
OpRegistry::CreateOp( {{"Out", {name}}}, {})});
"add", {dup_outputs}, {name},
{{"input_format",
std::vector<int>{0, static_cast<int>(dup_outputs.size())}}})});
} }
// make sure the inserted `add` ops follow the BFS order.
insert_position.sort( insert_position.sort(
[](const Pos& l, const Pos& r) { return l.first > r.first; }); [](const Pos& l, const Pos& r) { return l.first > r.first; });
for (auto& pos : insert_position) { for (auto& pos : insert_position) {
net->InsertOp(pos.first + 1, pos.second); net->InsertOp(pos.first + 1, std::move(pos.second));
} }
} else { } else {
std::shared_ptr<OperatorBase> grad_op = OpRegistry::CreateGradOp(forwardOp); std::unique_ptr<OperatorBase> grad_op(OpRegistry::CreateGradOp(forwardOp));
for (std::string& grad_input : grad_op->inputs_) {
ForEachVarName(grad_op->Inputs(), [&no_grad_names, &net, &grad_op](
const std::string& grad_input) {
if (no_grad_names.count(grad_input)) { if (no_grad_names.count(grad_input)) {
// +1 for \0 // +1 for \0
std::string prefix = grad_input.substr( std::string prefix = grad_input.substr(
0, grad_input.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1); 0, grad_input.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1);
grad_input = prefix + kZeroVarSuffix; grad_op->Rename(grad_input, prefix + kZeroVarSuffix);
// If part of input gradient of that operator is not calculated, fill // If part of input gradient of that operator is not calculated, fill
// zero variables to that input gradient. // zero variables to that input gradient.
net->AddOp(OpRegistry::CreateOp("fill_zeros_like", {prefix}, net->AppendOp(OpRegistry::CreateOp("fill_zeros_like",
{grad_input}, {})); {{"Src", {prefix}}},
} {{"Dst", {grad_input}}}, {}));
} }
return false;
});
for (std::string& grad_output : grad_op->outputs_) { ForEachVarName(grad_op->Outputs(),
[&no_grad_names, &grad_op](const std::string& grad_output) {
if (no_grad_names.count(grad_output)) { if (no_grad_names.count(grad_output)) {
grad_output = kEmptyVarName; grad_op->Rename(grad_output, kEmptyVarName);
} }
return false;
});
// process recurrent gradient op as a special operator.
if (forwardOp.Type() == "recurrent_op") {
// NOTE: clean up the cyclic call somewhere (RNN's stepnet contains itself);
// otherwise this will result in an infinite loop.
const auto& rnnop =
*static_cast<const operators::RecurrentOp*>(&forwardOp);
auto rnn_grad_op =
static_cast<operators::RecurrentGradientOp*>(grad_op.get());
const auto& stepnet_op =
*static_cast<const OperatorBase*>(&rnnop.stepnet());
// create stepnet's gradient op
rnn_grad_op->set_stepnet(
BackwardRecursive(stepnet_op, no_grad_names, uniq_id));
} }
if (net->ops_.empty()) { // Current no aux op is added to network if (net->ops_.empty()) { // Current no aux op is added to network
return grad_op; return grad_op;
} }
net->AddOp(grad_op); net->AppendOp(std::move(grad_op));
} }
net->type_ = "@GENERATED_BACKWARD@"; net->SetType("@GENERATED_BACKWARD@");
net->CompleteAddOp(); net->CompleteAddOp();
return net; return std::unique_ptr<OperatorBase>(
static_cast<OperatorBase*>(net.release()));
} }
// See header for comments // See header for comments
std::shared_ptr<OperatorBase> Backward( std::unique_ptr<OperatorBase> Backward(
const OperatorBase& forwardOp, const OperatorBase& forwardOp,
const std::unordered_set<std::string>& no_grad_vars) { const std::unordered_set<std::string>& no_grad_vars) {
std::unordered_set<std::string> no_grad_names; std::unordered_set<std::string> no_grad_names;
......
...@@ -20,7 +20,7 @@ namespace framework { ...@@ -20,7 +20,7 @@ namespace framework {
// Create the backward operator from a forward operator. // Create the backward operator from a forward operator.
// TODO(yuyang18): Add more API reference comment. // TODO(yuyang18): Add more API reference comment.
extern std::shared_ptr<OperatorBase> Backward( extern std::unique_ptr<OperatorBase> Backward(
const OperatorBase& forwardOp, const OperatorBase& forwardOp,
const std::unordered_set<std::string>& no_grad_vars); const std::unordered_set<std::string>& no_grad_vars);
} // namespace framework } // namespace framework
......
...@@ -28,19 +28,13 @@ using OpAttrChecker = framework::OpAttrChecker; ...@@ -28,19 +28,13 @@ using OpAttrChecker = framework::OpAttrChecker;
using Scope = framework::Scope; using Scope = framework::Scope;
using DeviceContext = platform::DeviceContext; using DeviceContext = platform::DeviceContext;
class EmptyOp : public OperatorBase {
public:
void InferShape(const Scope &scope) const override {}
void Run(const Scope &scope, const DeviceContext &dev_ctx) const override {}
};
class RowWiseAddOpMaker : public OpProtoAndCheckerMaker { class RowWiseAddOpMaker : public OpProtoAndCheckerMaker {
public: public:
RowWiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker) RowWiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input X of Add").IgnoreGradient(); AddInput("X", "Input X of Add").NotInGradient();
AddInput("b", "Bias of Add").IgnoreGradient(); AddInput("b", "Bias of Add").NotInGradient();
AddOutput("Out", "Out of Add").IgnoreGradient(); AddOutput("Out", "Out of Add").NotInGradient();
AddComment("Add Op"); AddComment("Add Op");
} }
}; };
...@@ -49,8 +43,8 @@ class MulOpMaker : public OpProtoAndCheckerMaker { ...@@ -49,8 +43,8 @@ class MulOpMaker : public OpProtoAndCheckerMaker {
public: public:
MulOpMaker(OpProto *proto, OpAttrChecker *op_checker) MulOpMaker(OpProto *proto, OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("A", "A"); AddInput("X", "A");
AddInput("B", "B"); AddInput("Y", "B");
AddOutput("Out", "Out"); AddOutput("Out", "Out");
AddComment("Mul"); AddComment("Mul");
} }
...@@ -61,7 +55,7 @@ class SigmoidOpMaker : public OpProtoAndCheckerMaker { ...@@ -61,7 +55,7 @@ class SigmoidOpMaker : public OpProtoAndCheckerMaker {
SigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker) SigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "X"); AddInput("X", "X");
AddOutput("Y", "Y"); AddOutput("Out", "Y");
AddComment("Sigmoid"); AddComment("Sigmoid");
} }
}; };
...@@ -71,21 +65,25 @@ class NoGradOpMaker : public OpProtoAndCheckerMaker { ...@@ -71,21 +65,25 @@ class NoGradOpMaker : public OpProtoAndCheckerMaker {
NoGradOpMaker(OpProto *proto, OpAttrChecker *op_checker) NoGradOpMaker(OpProto *proto, OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "X input"); AddInput("X", "X input");
AddOutput("Y", "Y output"); AddOutput("Out", "Y output");
AddComment("NoGradOp, same input output. no Grad"); AddComment("NoGradOp, same input output. no Grad");
} }
}; };
class FcOp : public operators::NetOp { class FcOp : public operators::NetOp {
public: public:
void Init() override { FcOp(const std::string &type, const VarNameMap &inputs,
AddOp(OpRegistry::CreateOp("mul", {Input("X"), Input("W")}, const VarNameMap &outputs, const AttributeMap &attrs)
{Output("mul_result")}, {})); : NetOp(type, inputs, outputs, attrs) {
auto b_name = Input("b"); AppendOp(OpRegistry::CreateOp("mul",
{{"X", {Input("X")}}, {"Y", {Input("W")}}},
{{"Out", {Output("mul_result")}}}, {}));
auto input_b = Inputs("b");
std::string before_act = "mul_result"; std::string before_act = "mul_result";
if (b_name != kEmptyVarName) { if (input_b.size() != 0) {
AddOp(OpRegistry::CreateOp("rowwise_add", {Output("mul_result"), b_name}, AppendOp(OpRegistry::CreateOp(
{Output("add_result")}, {})); "rowwise_add", {{"X", {Output("mul_result")}}, {"b", {input_b[0]}}},
{{"Out", {Output("add_result")}}}, {}));
before_act = "add_result"; before_act = "add_result";
} else { } else {
auto out_varname = Output("add_result"); auto out_varname = Output("add_result");
...@@ -94,8 +92,8 @@ class FcOp : public operators::NetOp { ...@@ -94,8 +92,8 @@ class FcOp : public operators::NetOp {
} }
} }
AddOp(OpRegistry::CreateOp("sigmoid", {Output(before_act)}, {Output("Out")}, AppendOp(OpRegistry::CreateOp("sigmoid", {{"X", {Output(before_act)}}},
{})); {{"Out", {Output("Out")}}}, {}));
CompleteAddOp(false); CompleteAddOp(false);
} }
}; };
...@@ -107,8 +105,8 @@ class FcOpMaker : public OpProtoAndCheckerMaker { ...@@ -107,8 +105,8 @@ class FcOpMaker : public OpProtoAndCheckerMaker {
AddInput("X", "x"); AddInput("X", "x");
AddInput("W", "w"); AddInput("W", "w");
AddInput("b", "b"); AddInput("b", "b");
AddOutput("mul_result", "").SetTemporary(); AddOutput("mul_result", "").AsIntermediate();
AddOutput("add_result", "").SetTemporary(); AddOutput("add_result", "").AsIntermediate();
AddOutput("Out", ""); AddOutput("Out", "");
AddComment(""); AddComment("");
} }
...@@ -139,7 +137,7 @@ class AddOpMaker : public OpProtoAndCheckerMaker { ...@@ -139,7 +137,7 @@ class AddOpMaker : public OpProtoAndCheckerMaker {
public: public:
AddOpMaker(OpProto *proto, OpAttrChecker *op_checker) AddOpMaker(OpProto *proto, OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "x").SetMultiple(); AddInput("X", "x").AsDuplicable();
AddOutput("Y", "y"); AddOutput("Y", "y");
AddComment(""); AddComment("");
} }
...@@ -150,51 +148,48 @@ class AddOpMaker : public OpProtoAndCheckerMaker { ...@@ -150,51 +148,48 @@ class AddOpMaker : public OpProtoAndCheckerMaker {
namespace f = paddle::framework; namespace f = paddle::framework;
namespace ops = paddle::operators; namespace ops = paddle::operators;
using EnforceNotMet = paddle::platform::EnforceNotMet; using EnforceNotMet = paddle::platform::EnforceNotMet;
REGISTER_OP(rowwise_add, f::EmptyOp, f::RowWiseAddOpMaker); REGISTER_OP(rowwise_add, f::NOP, f::RowWiseAddOpMaker, rowwise_add_grad,
REGISTER_GRADIENT_OP(rowwise_add, rowwise_add_grad, f::EmptyOp); f::NOP);
REGISTER_OP(mul, f::EmptyOp, f::MulOpMaker); REGISTER_OP(mul, f::NOP, f::MulOpMaker, mul_grad, f::NOP);
REGISTER_GRADIENT_OP(mul, mul_grad, f::EmptyOp); REGISTER_OP(sigmoid, f::NOP, f::SigmoidOpMaker, sigmoid_grad, f::NOP);
REGISTER_OP(sigmoid, f::EmptyOp, f::SigmoidOpMaker); REGISTER_OP_WITHOUT_GRADIENT(nograd, f::NOP, f::NoGradOpMaker);
REGISTER_GRADIENT_OP(sigmoid, sigmoid_grad, f::EmptyOp); REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, f::NOP, f::FillZeroOpMaker);
REGISTER_OP(nograd, f::EmptyOp, f::NoGradOpMaker); REGISTER_OP(add, f::NOP, f::AddOpMaker, add_grad, f::NOP);
REGISTER_OP(fill_zeros_like, f::EmptyOp, f::FillZeroOpMaker); REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker);
REGISTER_OP(add, f::EmptyOp, f::AddOpMaker); REGISTER_OP(many_output_op, f::NOP, f::ManyOutputOpMaker, many_output_op_grad,
REGISTER_GRADIENT_OP(add, add_grad, f::EmptyOp); f::NOP);
REGISTER_OP(fc, f::FcOp, f::FcOpMaker);
REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker);
REGISTER_GRADIENT_OP(many_output_op, many_output_op_grad, f::EmptyOp);
TEST(Backward, simple_op_grad) { TEST(Backward, simple_op_grad) {
auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); auto fwd = f::OpRegistry::CreateOp(
"rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {});
ASSERT_NE(fwd, nullptr); ASSERT_NE(fwd, nullptr);
auto gop = f::OpRegistry::CreateGradOp(*fwd); auto gop = f::OpRegistry::CreateGradOp(*fwd);
ASSERT_EQ(4UL, gop->inputs_.size()); ASSERT_EQ(1UL, gop->Inputs().size());
ASSERT_EQ(f::kEmptyVarName, gop->inputs_[0]); ASSERT_EQ("rowwise_add_grad", gop->Type());
ASSERT_EQ("rowwise_add_grad", gop->type_); ASSERT_EQ(f::GradVarName("x"), gop->Output(f::GradVarName("X")));
ASSERT_EQ(f::GradVarName("X"), gop->outputs_[0]); ASSERT_EQ(f::GradVarName("b"), gop->Output(f::GradVarName("b")));
ASSERT_EQ(f::GradVarName("b"), gop->outputs_[1]);
ASSERT_EQ(f::GradVarName("X"), gop->Output(f::GradVarName("X")));
} }
TEST(Backward, simple_op_not_need_grad) { TEST(Backward, simple_op_not_need_grad) {
auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); auto fwd = f::OpRegistry::CreateOp(
"rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {});
ASSERT_NE(fwd, nullptr); ASSERT_NE(fwd, nullptr);
auto gop = f::Backward(*fwd, {"X"}); auto gop = f::Backward(*fwd, {"x"});
ASSERT_EQ(std::find(gop->outputs_.begin(), gop->outputs_.end(), ASSERT_EQ(gop->Output(f::GradVarName("X")), f::kEmptyVarName);
f::GradVarName("X")),
gop->outputs_.end());
auto no_input_gop = f::Backward(*fwd, {"X", "b"}); auto no_input_gop = f::Backward(*fwd, {"x", "b"});
ASSERT_NE(no_input_gop, nullptr); ASSERT_NE(no_input_gop, nullptr);
ASSERT_TRUE(no_input_gop->IsNetOp()); ASSERT_TRUE(no_input_gop->IsNetOp());
ASSERT_EQ(0UL, ASSERT_EQ(0UL, static_cast<ops::NetOp *>(no_input_gop.get())->ops_.size());
std::static_pointer_cast<ops::NetOp>(no_input_gop)->ops_.size());
} }
 TEST(Backward, net_fc_backward_normal) {
-  std::shared_ptr<f::OperatorBase> fwd = f::OpRegistry::CreateOp(
-      "fc", {"X", "w", "b"}, {"mul_result", "add_result", "out"}, {});
+  std::shared_ptr<f::OperatorBase> fwd =
+      f::OpRegistry::CreateOp("fc", {{"X", {"x"}}, {"W", {"w"}}, {"b", {"b"}}},
+                              {{"mul_result", {"mul_res"}},
+                               {"add_result", {"add_re"}},
+                               {"Out", {"out"}}},
+                              {});
   ASSERT_NE(fwd, nullptr);
   std::shared_ptr<f::OperatorBase> gop = f::Backward(*fwd, {});
   ASSERT_TRUE(gop->IsNetOp());

@@ -205,19 +200,22 @@ TEST(Backward, net_fc_backward_normal) {
   ASSERT_EQ(3UL, net->ops_.size());
   f::OperatorBase &d_sigmoid = *net->ops_[0];
-  ASSERT_EQ("sigmoid_grad", d_sigmoid.type_);
+  ASSERT_EQ("sigmoid_grad", d_sigmoid.Type());
   f::OperatorBase &d_add = *net->ops_[1];
-  ASSERT_EQ("rowwise_add_grad", d_add.type_);
+  ASSERT_EQ("rowwise_add_grad", d_add.Type());
   f::OperatorBase &d_mul = *net->ops_[2];
-  ASSERT_EQ("mul_grad", d_mul.type_);
+  ASSERT_EQ("mul_grad", d_mul.Type());
 }

 TEST(Backward, net_fc_backward_not_have_b) {
   std::shared_ptr<f::OperatorBase> fwd =
-      f::OpRegistry::CreateOp("fc", {"X", "w", f::kEmptyVarName},
-                              {"mul_result", "add_result", "tmp"}, {});
+      f::OpRegistry::CreateOp("fc", {{"X", {"x"}}, {"W", {"w"}}, {"b", {}}},
+                              {{"mul_result", {"mul_res"}},
+                               {"add_result", {"add_res"}},
+                               {"Out", {"tmp"}}},
+                              {});
   ASSERT_NE(fwd, nullptr);
   std::shared_ptr<f::OperatorBase> gop = f::Backward(*fwd, {});
   ASSERT_TRUE(gop->IsNetOp());

@@ -228,96 +226,113 @@ TEST(Backward, net_fc_backward_not_have_b) {
   ASSERT_EQ(2UL, net->ops_.size());
   f::OperatorBase &d_sigmoid = *net->ops_[0];
-  ASSERT_EQ("sigmoid_grad", d_sigmoid.type_);
+  ASSERT_EQ("sigmoid_grad", d_sigmoid.Type());
   f::OperatorBase &d_mul = *net->ops_[1];
-  ASSERT_EQ("mul_grad", d_mul.type_);
+  ASSERT_EQ("mul_grad", d_mul.Type());
 }

 TEST(Backward, net_input_of_network_not_need_grad) {
   ops::NetOp net;
-  net.AddOp(f::OpRegistry::CreateOp("fc", {"X", "W1", "b1"},
-                                    {"mul_tmp_0", "add_tmp_0", "hidden0"}, {}));
-  net.AddOp(f::OpRegistry::CreateOp("fc", {"hidden0", "W2", "b2"},
-                                    {"mul_tmp_1", "add_tmp_1", "hidden1"}, {}));
+  net.AppendOp(f::OpRegistry::CreateOp(
+      "fc", {{"X", {"x"}}, {"W", {"W1"}}, {"b", {"b1"}}},
+      {{"mul_result", {"mul_tmp_0"}},
+       {"add_result", {"add_tmp_0"}},
+       {"Out", {"hidden0"}}},
+      {}));
+  net.AppendOp(f::OpRegistry::CreateOp(
+      "fc", {{"X", {"hidden0"}}, {"W", {"W2"}}, {"b", {"b2"}}},
+      {{"mul_result", {"mul_tmp_1"}},
+       {"add_result", {"add_tmp_1"}},
+       {"Out", {"hidden1"}}},
+      {}));
   net.CompleteAddOp();
-  auto bwd = Backward(net, {"X"});  // X@GRAD is not need.
+  auto bwd = Backward(net, {"x"});  // x@GRAD is not need.
   ASSERT_TRUE(bwd->IsNetOp());
   auto bwd_net = static_cast<ops::NetOp *>(bwd.get());
-  std::unordered_set<std::string> all_output = std::unordered_set<std::string>(
-      bwd_net->outputs_.begin(), bwd_net->outputs_.end());
-  all_output.erase(f::kEmptyVarName);
+  auto output_vars = bwd_net->OutputVars(true);
+  std::unordered_set<std::string> all_outputs =
+      std::unordered_set<std::string>(output_vars.begin(), output_vars.end());
+  all_outputs.erase(f::kEmptyVarName);
   for (auto &out : {"W1", "b1", "hidden0", "W2", "b2"}) {
-    ASSERT_NE(all_output.find(f::GradVarName(out)), all_output.end());
+    ASSERT_NE(all_outputs.find(f::GradVarName(out)), all_outputs.end());
   }
   // Not Generated X
-  ASSERT_EQ(all_output.find(f::GradVarName("X")), all_output.end());
+  ASSERT_EQ(all_outputs.find(f::GradVarName("X")), all_outputs.end());
   ASSERT_EQ(2UL, bwd_net->ops_.size());
   ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp());
   auto first_fc_grad = static_cast<ops::NetOp *>(bwd_net->ops_[1].get());
   ASSERT_EQ(3UL, first_fc_grad->ops_.size());
   ASSERT_EQ(f::kEmptyVarName,
-            first_fc_grad->ops_[2]->Output(f::GradVarName("A")));
+            first_fc_grad->ops_[2]->Output(f::GradVarName("X")));
 }

 TEST(Backward, net_shared_weight) {
   ops::NetOp net;
-  net.AddOp(f::OpRegistry::CreateOp("mul", {"X", "W"}, {"Out"}, {}));
-  net.AddOp(f::OpRegistry::CreateOp("mul", {"Out", "W"}, {"FinalOut"}, {}));
+  net.AppendOp(f::OpRegistry::CreateOp("mul", {{"X", {"x"}}, {"Y", {"w"}}},
+                                       {{"Out", {"out"}}}, {}));
+  net.AppendOp(f::OpRegistry::CreateOp("mul", {{"X", {"out"}}, {"Y", {"w"}}},
+                                       {{"Out", {"FinalOut"}}}, {}));
   net.CompleteAddOp();
   auto bwd = f::Backward(net, {});
   ASSERT_TRUE(bwd->IsNetOp());
   auto bwd_net = static_cast<ops::NetOp *>(bwd.get());
   ASSERT_EQ(3UL, bwd_net->ops_.size());
-  ASSERT_EQ("add", bwd_net->ops_[2]->type_);
+  ASSERT_EQ("add", bwd_net->ops_[2]->Type());
 }

 TEST(Backward, op_register_grad_not_for_network) {
-  auto fwd = f::OpRegistry::CreateOp(
-      "fc", {"X", "W", "b"}, {"mul_out", "add_out", "out1"},
+  auto fwd =
+      f::OpRegistry::CreateOp("fc", {{"X", {"x"}}, {"W", {"w"}}, {"b", {"b"}}},
+                              {{"mul_result", {"mul_out"}},
+                               {"add_result", {"add_out"}},
+                               {"Out", {"out1"}}},
                               {{"temporary_index", std::vector<int>{0, 1}}});
   ASSERT_THROW(f::OpRegistry::CreateGradOp(*fwd), EnforceNotMet);
 }

 TEST(Backward, op_all_input_are_not_need) {
-  auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
-  auto backward = f::Backward(*fwd, {"X", "b"});
+  auto fwd = f::OpRegistry::CreateOp(
+      "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {});
+  auto backward = f::Backward(*fwd, {"x", "b"});
   ASSERT_TRUE(backward->IsNetOp());
   auto net = static_cast<ops::NetOp *>(backward.get());
   ASSERT_TRUE(net->ops_.empty());
 }

 TEST(Backward, op_all_output_are_not_need) {
-  auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
-  auto backward = f::Backward(*fwd, {"Out"});
+  auto fwd = f::OpRegistry::CreateOp(
+      "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {});
+  auto backward = f::Backward(*fwd, {"out"});
   ASSERT_TRUE(backward->IsNetOp());
   auto net = static_cast<ops::NetOp *>(backward.get());
   ASSERT_TRUE(net->ops_.empty());
 }

 TEST(Backward, op_part_of_output_are_not_need) {
-  auto fwd = f::OpRegistry::CreateOp("many_output_op", {"X"}, {"Y", "Z"}, {});
+  auto fwd = f::OpRegistry::CreateOp("many_output_op", {{"x", {"X"}}},
+                                     {{"y", {"Y"}}, {"z", {"Z"}}}, {});
   auto backward = f::Backward(*fwd, {"Z"});
   ASSERT_TRUE(backward->IsNetOp());
   auto net = static_cast<ops::NetOp *>(backward.get());
   ASSERT_EQ(net->ops_.size(), 2UL);
   auto &fill_zero = *net->ops_[0];
-  ASSERT_EQ("fill_zeros_like", fill_zero.type_);
-  ASSERT_EQ(1UL, fill_zero.inputs_.size());
-  ASSERT_EQ("Z", fill_zero.inputs_[0]);
-  ASSERT_EQ(1UL, fill_zero.outputs_.size());
-  ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix, fill_zero.outputs_[0]);
+  ASSERT_EQ("fill_zeros_like", fill_zero.Type());
+  ASSERT_EQ(1UL, fill_zero.Inputs("Src").size());
+  ASSERT_EQ("Z", fill_zero.Input("Src"));
+  ASSERT_EQ(1UL, fill_zero.Outputs("Dst").size());
+  ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix, fill_zero.Output("Dst"));
   auto &d_many_out = *net->ops_[1];
-  ASSERT_EQ("many_output_op_grad", d_many_out.type_);
-  ASSERT_EQ(1UL + 2UL + 2UL, d_many_out.inputs_.size());  // I/O/OG
+  ASSERT_EQ("many_output_op_grad", d_many_out.Type());
+  ASSERT_EQ(1UL + 2UL + 2UL, d_many_out.Inputs().size());  // I/O/OG
   ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix,
             d_many_out.Input(f::GradVarName("z")));
   ASSERT_EQ(f::GradVarName("Y"), d_many_out.Input(f::GradVarName("y")));

@@ -325,44 +340,62 @@ TEST(Backward, op_part_of_output_are_not_need) {
 }
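
When only part of an op's outputs need gradients, Backward plugs the hole with a fill_zeros_like op, and the zero-filled stand-in is again identified purely by name. A sketch of the convention this test exercises; the `@ZERO` literal is an assumption, since only the `kZeroVarSuffix` identifier appears in the diff:

// Hypothetical suffix value -- the test above only uses f::kZeroVarSuffix.
#include <cassert>
#include <string>

const char kZeroVarSuffix[] = "@ZERO";

int main() {
  // The gradient of "Z" is not needed, so fill_zeros_like reads "Z" and
  // writes a zero tensor under the derived name.
  std::string src = "Z";
  std::string dst = src + kZeroVarSuffix;
  assert(dst == "Z@ZERO");
  return 0;
}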
 TEST(Backward, op_part_of_input_are_not_need) {
-  auto fwd = f::OpRegistry::CreateOp("mul", {"a", "b"}, {"out"}, {});
+  auto fwd = f::OpRegistry::CreateOp("mul", {{"X", {"a"}}, {"Y", {"b"}}},
+                                     {{"Out", {"out"}}}, {});
   auto backward = f::Backward(*fwd, {"a"});
   auto &grad_mul = *backward;
-  ASSERT_EQ(grad_mul.type_, "mul_grad");
-  ASSERT_EQ(grad_mul.inputs_.size(), 2UL + 1UL + 1UL);
-  ASSERT_EQ(grad_mul.outputs_.size(), 2UL);
-  ASSERT_EQ(grad_mul.Output(f::GradVarName("A")), f::kEmptyVarName);
-  ASSERT_EQ(grad_mul.Output(f::GradVarName("B")), f::GradVarName("b"));
+  ASSERT_EQ(grad_mul.Type(), "mul_grad");
+  ASSERT_EQ(grad_mul.Inputs().size(), 2UL + 1UL + 1UL);
+  ASSERT_EQ(grad_mul.Outputs().size(), 2UL);
+  ASSERT_EQ(grad_mul.Output(f::GradVarName("X")), f::kEmptyVarName);
+  ASSERT_EQ(grad_mul.Output(f::GradVarName("Y")), f::GradVarName("b"));
   ASSERT_EQ(grad_mul.Input(f::GradVarName("Out")), f::GradVarName("out"));
-  ASSERT_EQ(grad_mul.Input("A"), "a");
-  ASSERT_EQ(grad_mul.Input("B"), "b");
+  ASSERT_EQ(grad_mul.Input("X"), "a");
+  ASSERT_EQ(grad_mul.Input("Y"), "b");
   ASSERT_EQ(grad_mul.Input("Out"), "out");
 }

 TEST(Backward, linear_net_intermediate_variable_has_no_grad) {
   ops::NetOp net;
-  net.AddOp(f::OpRegistry::CreateOp("fc", {"x1", "w1", "b1"},
-                                    {"mul_out1", "add_out1", "out1"}, {}));
-  net.AddOp(f::OpRegistry::CreateOp("fc", {"out1", "w2", "b2"},
-                                    {"mul_out2", "tmp_out2", "out2"}, {}));
-  net.AddOp(f::OpRegistry::CreateOp("fc", {"out2", "w3", "b3"},
-                                    {"mul_out3", "tmp_out3", "out3"}, {}));
+  net.AppendOp(f::OpRegistry::CreateOp(
+      "fc", {{"X", {"x1"}}, {"W", {"w1"}}, {"b", {"b1"}}},
+      {{"mul_result", {"mul_out1"}},
+       {"add_result", {"add_out1"}},
+       {"Out", {"out1"}}},
+      {}));
+  net.AppendOp(f::OpRegistry::CreateOp(
+      "fc", {{"X", {"out1"}}, {"W", {"w2"}}, {"b", {"b2"}}},
+      {{"mul_result", {"mul_out2"}},
+       {"add_result", {"tmp_out2"}},
+       {"Out", {"out2"}}},
+      {}));
+  net.AppendOp(f::OpRegistry::CreateOp(
+      "fc", {{"X", {"out2"}}, {"W", {"w3"}}, {"b", {"b3"}}},
+      {{"mul_result", {"mul_out3"}},
+       {"add_result", {"tmp_out3"}},
+       {"Out", {"out3"}}},
+      {}));
   net.CompleteAddOp();
   auto backward = f::Backward(net, {"mul_out2", "tmp_out2", "out2"});
   ASSERT_TRUE(backward->IsNetOp());
   auto bwd_net = static_cast<ops::NetOp *>(backward.get());
   ASSERT_EQ(bwd_net->ops_.size(), 3UL);
   auto &grad_fc = *bwd_net->ops_[0];
-  EXPECT_EQ(grad_fc.inputs_.size(),
-            3UL /* external input number */
+
+  const char *all = paddle::operators::NetOp::kAll;
+  EXPECT_EQ(grad_fc.Inputs(all).size(),
+            2UL /* external input number */
                 + 1UL /* external output number*/
                 + 1UL /* number of gradient of external output*/
                 + 2U /* internal variable number*/);
-  EXPECT_EQ(grad_fc.outputs_.size(), 2UL /* input number of mul*/
-                                         + 2UL /* input number of rowwise_add */
-                                         + 1UL /* input number of sigmod */);
-  EXPECT_EQ(bwd_net->ops_[1]->inputs_.size(), 0UL);
-  EXPECT_EQ(bwd_net->ops_[1]->outputs_.size(), 0UL);
-  EXPECT_EQ(bwd_net->ops_[2]->inputs_.size(), 0UL);
-  EXPECT_EQ(bwd_net->ops_[2]->outputs_.size(), 0UL);
+  EXPECT_EQ(grad_fc.Outputs(all).size(),
+            2UL /* input number of mul*/
+                + 2UL /* input number of rowwise_add */
+                + 1UL /* input number of sigmod */);
+  EXPECT_EQ(bwd_net->ops_[1]->Inputs(all).size(), 0UL);
+  EXPECT_EQ(bwd_net->ops_[1]->Outputs(all).size(), 0UL);
+  EXPECT_EQ(bwd_net->ops_[2]->Inputs(all).size(), 0UL);
+  EXPECT_EQ(bwd_net->ops_[2]->Outputs(all).size(), 0UL);
 }
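
Every CreateOp call in these tests now passes inputs and outputs as a name map instead of two flat string vectors. The braced initializers read naturally once the map's shape is spelled out; a sketch follows (the exact container type is an assumption, while the parameter-name-to-argument-list structure follows from the OpDesc::Var message later in this diff):

// Illustrative only: VarNameMap binds a parameter name declared by the op's
// proto ("X", "W", "b", ...) to the list of variable names supplied for it.
#include <map>
#include <string>
#include <vector>

using VarNameMap = std::map<std::string, std::vector<std::string>>;

VarNameMap fc_inputs = {
    {"X", {"x"}},  // single-variable parameter
    {"W", {"w"}},
    {"b", {"b"}},
};
VarNameMap multi = {{"In2_mult", {"in2_1", "in2_2", "in2_3"}}};  // duplicable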
@@ -283,6 +283,5 @@ std::ostream& operator<<(std::ostream& os, const DDim& ddim) {
 DDim::DDim(std::initializer_list<int> init_list) {
   *this = make_ddim(init_list);
 }
-
 }  // namespace framework
 }  // namespace paddle

@@ -20,7 +20,6 @@ limitations under the License. */
 #include "paddle/framework/dim.h"
 #include "paddle/platform/enforce.h"
 #include "paddle/platform/variant.h"
-#include "unsupported/Eigen/CXX11/Tensor"

 namespace paddle {
 namespace framework {
...
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/lod_tensor.h"
#include <memory>
namespace paddle {
namespace framework {
namespace details {
using LOD = LODTensor::LOD;
std::shared_ptr<LOD> SliceLOD(const LOD &lod, size_t level_begin,
size_t level_end) {
auto new_lod = std::make_shared<LOD>();
new_lod->reserve(level_end - level_begin);
for (size_t i = level_begin; i < level_end; i++) {
new_lod->emplace_back(lod[i]);
}
return new_lod;
}
std::shared_ptr<LOD> SliceLOD(const LOD &lod, size_t level, size_t elem_begin,
size_t elem_end, bool tensor_shared) {
// slice the lod.
auto new_lod = std::make_shared<LOD>();
new_lod->reserve(lod.size() - level);
auto start = lod.at(level)[elem_begin];
auto end = lod.at(level)[elem_end];
for (auto it = lod.begin() + level; it != lod.end(); it++) {
auto it_begin = std::find(it->begin(), it->end(), start);
auto it_end = std::find(it_begin, it->end(), end);
PADDLE_ENFORCE(it_begin != it->end(), "error in parsing lod info");
PADDLE_ENFORCE(it_end != it->end(), "error in parsing lod info");
new_lod->emplace_back(it_begin, it_end + 1);
if (!tensor_shared) {
// reset offset if tensor is copyed and sliced.
std::transform(new_lod->back().begin(), new_lod->back().end(),
new_lod->back().begin(),
[start](int v) { return v - start; });
PADDLE_ENFORCE(new_lod->back().front() == 0, "error in slice LOD");
}
}
return new_lod;
}
} // namespace details
} // namespace framework
} // namespace paddle
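
The offset renumbering in the element slice above is the subtle part: when the sliced tensor is copied, its data starts at offset zero, so every retained offset must be shifted down by `start`. A self-contained sketch of that step, using the same numbers as the unit tests later in this diff:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  // One LOD level: offsets of 4 elements, as in lod_tensor_test.cc.
  std::vector<size_t> level = {0, 5, 10, 15, 20};
  size_t elem_begin = 1, elem_end = 3;
  size_t start = level[elem_begin];  // 5
  // Keep offsets for elements [1, 3], inclusive of the end offset: {5, 10, 15}.
  std::vector<size_t> sliced(level.begin() + elem_begin,
                             level.begin() + elem_end + 1);
  // Renumber so the copied tensor's data starts at offset 0: {0, 5, 10}.
  std::transform(sliced.begin(), sliced.end(), sliced.begin(),
                 [start](size_t v) { return v - start; });
  assert((sliced == std::vector<size_t>{0, 5, 10}));
  return 0;
}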
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
namespace paddle {
namespace framework {
namespace details {
/*
* Slice levels from LOD.
*
* @lod: LOD to slice.
* @level_begin: level to begin slice.
* @level_end: level to end slice.
*/
std::shared_ptr<LODTensor::LOD> SliceLOD(const LODTensor::LOD &lod,
size_t level_begin, size_t level_end);
/*
* Slice elements from a level of LOD.
*
* @lod: LOD to slice.
* @level: which level to slice.
* @elem_begin: element's index to begin slice.
* @elem_end: element's index to end slice.
*/
std::shared_ptr<LODTensor::LOD> SliceLOD(const LODTensor::LOD &lod,
size_t level, size_t elem_begin,
size_t elem_end, bool tensor_shared);
} // namespace details
} // namespace framework
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
syntax = "proto2";
package paddle.framework;
enum AttrType {
INT = 0;
FLOAT = 1;
STRING = 2;
INTS = 3;
FLOATS = 4;
STRINGS = 5;
}
// OpDesc describes an instance of a C++ framework::OperatorBase
// derived class type.
message OpDesc {
message Attr {
required string name = 1;
required AttrType type = 2;
optional int32 i = 3;
optional float f = 4;
optional string s = 5;
repeated int32 ints = 6;
repeated float floats = 7;
repeated string strings = 8;
};
message Var {
required string parameter = 1;
repeated string arguments = 2;
};
required string type = 3;
repeated Var inputs = 1;
repeated Var outputs = 2;
repeated Attr attrs = 4;
};
// OpProto describes a C++ framework::OperatorBase derived class.
message OpProto {
// VarProto describes the C++ type framework::Variable.
message Var {
required string name = 1;
required string comment = 2;
optional bool duplicable = 3 [ default = false ];
optional bool intermediate = 4 [ default = false ];
optional bool not_in_gradient = 5 [ default = false ];
}
// AttrProto describes the C++ type Attribute.
message Attr {
required string name = 1;
required AttrType type = 2;
required string comment = 3;
// If that attribute is generated, it means the Paddle third
// language binding has responsibility to fill that
// attribute. End-User should not set that attribute.
optional bool generated = 4 [ default = false ];
}
required string type = 1;
repeated Var inputs = 2;
repeated Var outputs = 3;
repeated Attr attrs = 4;
required string comment = 5;
}
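
For a concrete feel of the message layout, here is a sketch (not part of the commit) that fills an OpDesc for the rowwise_add example used by the tests above, through the C++ API protoc generates for this file:

// Hypothetical helper; the message and field names come from the proto above.
#include "paddle/framework/framework.pb.h"

paddle::framework::OpDesc MakeRowwiseAddDesc() {
  paddle::framework::OpDesc desc;
  desc.set_type("rowwise_add");
  auto* x = desc.add_inputs();
  x->set_parameter("X");     // parameter name declared by the op proto
  x->add_arguments("x");     // variable bound to it
  auto* b = desc.add_inputs();
  b->set_parameter("b");
  b->add_arguments("b");
  auto* out = desc.add_outputs();
  out->set_parameter("Out");
  out->add_arguments("out");
  return desc;
}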
@@ -13,90 +13,53 @@ express or implied. See the License for the specific language governing
 permissions and limitations under the License. */

 #include "paddle/framework/grad_op_builder.h"
-#include "paddle/framework/op_proto.pb.h"
 #include "paddle/framework/op_registry.h"

 namespace paddle {
 namespace framework {
-class OpRegistry;
-
-using VarIndexMap = std::unordered_map<std::string, int>;

 enum class OpArgType { IN, OUT };

-static std::vector<int>* GetOpFormat(OperatorBase* op, const OpArgType& type) {
-  std::string key = type == OpArgType::IN ? "input_format" : "output_format";
-  return op->attrs_.count(key)
-             ? &boost::get<std::vector<int>>(op->attrs_.at(key))
-             : nullptr;
-}
-
-static const std::vector<int>* GetOpFormat(const OperatorBase* op,
-                                           const OpArgType& type) {
-  std::string key = type == OpArgType::IN ? "input_format" : "output_format";
-  return op->attrs_.count(key)
-             ? &boost::get<std::vector<int>>(op->attrs_.at(key))
-             : nullptr;
-}
-
-static void TransOpArg(const OperatorBase* src_op, OperatorBase* dst_op,
-                       const OpArgType& src_type, const OpArgType& dst_type,
-                       int& idx, bool is_grad) {
-  const std::vector<std::string>& src_inout =
-      src_type == OpArgType::IN ? src_op->inputs_ : src_op->outputs_;
-  const std::vector<int>* src_format = GetOpFormat(src_op, src_type);
-
-  std::vector<std::string>& dst_inout =
-      dst_type == OpArgType::IN ? dst_op->inputs_ : dst_op->outputs_;
-  std::vector<int>* dst_format = GetOpFormat(dst_op, dst_type);
-  const OpProto& proto = OpRegistry::protos().at(src_op->type_);
+static void TransOpArg(const OperatorBase* src_op, const OpArgType& src_type,
+                       bool is_grad, OperatorBase::VarNameMap* vars) {
+  const auto& src_inout =
+      src_type == OpArgType::IN ? src_op->Inputs() : src_op->Outputs();
+  auto& dst_inout = *vars;
+  const OpProto* proto = OpRegistry::op_info_map().at(src_op->Type()).proto_;
   const auto& src_arg_list =
-      src_type == OpArgType::IN ? proto.inputs() : proto.outputs();
+      src_type == OpArgType::IN ? proto->inputs() : proto->outputs();
   for (const auto& arg : src_arg_list) {
-    std::string src_name = arg.name();
-    std::string dst_name = is_grad ? src_name + kGradVarSuffix : src_name;
-    (*dst_op->in_out_idxs_)[dst_name] = idx++;
-    int src_arg_idx = src_op->in_out_idxs_->at(src_name);
-    int src_begin =
-        src_format == nullptr ? src_arg_idx : src_format->at(src_arg_idx);
-    int src_end = src_format == nullptr ? src_arg_idx + 1
-                                        : src_format->at(src_arg_idx + 1);
-    for (int i = src_begin; i < src_end; ++i) {
-      std::string s =
-          is_grad ? src_inout[i] + kGradVarSuffix
-                  : (arg.ignore_gradient() ? kEmptyVarName : src_inout[i]);
-      dst_inout.emplace_back(s);
-    }
-    if (dst_format != nullptr) {
-      dst_format->push_back(dst_inout.size());
+    if (arg.not_in_gradient() && !is_grad) continue;
+    const std::string src_name = arg.name();
+    std::string dst_name = is_grad ? GradVarName(src_name) : src_name;
+    dst_inout[dst_name].reserve(src_inout.at(src_name).size());
+    for (auto& var_name : src_inout.at(src_name)) {
+      std::string s = is_grad ? GradVarName(var_name) : var_name;
+      dst_inout[dst_name].emplace_back(s);
     }
   }
 }

 OperatorBase* BuildGradOp(const OperatorBase* op) {
-  std::string grad_op_type = OpRegistry::grad_ops().at(op->type_);
-  OperatorBase* grad_op = OpRegistry::op_creators().at(grad_op_type)();
-  grad_op->type_ = grad_op_type;
-  grad_op->attrs_ = op->attrs_;
-  grad_op->attrs_.erase("input_format");
-  grad_op->attrs_.erase("output_format");
-  if (GetOpFormat(op, OpArgType::IN) != nullptr) {
-    grad_op->attrs_["output_format"] = std::vector<int>({0});
-  }
-  if (GetOpFormat(op, OpArgType::IN) != nullptr ||
-      GetOpFormat(op, OpArgType::OUT) != nullptr) {
-    grad_op->attrs_["input_format"] = std::vector<int>({0});
-  }
-  grad_op->in_out_idxs_.reset(new VarIndexMap());
-  int in_idx = 0;
-  int out_idx = 0;
-  TransOpArg(op, grad_op, OpArgType::IN, OpArgType::IN, in_idx, false);   // I
-  TransOpArg(op, grad_op, OpArgType::OUT, OpArgType::IN, in_idx, false);  // G
-  TransOpArg(op, grad_op, OpArgType::OUT, OpArgType::IN, in_idx, true);   // OG
-  TransOpArg(op, grad_op, OpArgType::IN, OpArgType::OUT, out_idx, true);  // IG
-  return grad_op;
+  auto it = OpRegistry::op_info_map().find(op->Type());
+  PADDLE_ENFORCE(it != OpRegistry::op_info_map().end(),
+                 "'%s' has not been registered.", op->Type());
+  PADDLE_ENFORCE(it->second.proto_ != nullptr, "'%s' has no OpProto.",
+                 op->Type());
+  std::string grad_op_type = it->second.grad_op_type_;
+  PADDLE_ENFORCE(!grad_op_type.empty(), "'%s' has no gradient operator.",
+                 op->Type());
+
+  OperatorBase::VarNameMap inputs;
+  OperatorBase::VarNameMap outputs;
+  TransOpArg(op, OpArgType::IN, false, &inputs);   // I
+  TransOpArg(op, OpArgType::OUT, false, &inputs);  // O
+  TransOpArg(op, OpArgType::OUT, true, &inputs);   // OG
+  TransOpArg(op, OpArgType::IN, true, &outputs);   // IG
+
+  it = OpRegistry::op_info_map().find(grad_op_type);
+  PADDLE_ENFORCE(it != OpRegistry::op_info_map().end(),
+                 "'%s' has not been registered.", grad_op_type);
+  return it->second.creator_(grad_op_type, inputs, outputs, op->Attrs());
 }

 }  // namespace framework
...
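
The net effect of the four TransOpArg calls is easiest to see as a pure renaming exercise over the op's variable maps. A standalone sketch with plain containers (the map type and helper names are illustrative, not the framework's own):

#include <cassert>
#include <map>
#include <string>
#include <vector>

using VarNameMap = std::map<std::string, std::vector<std::string>>;

// Append the gradient suffix to every parameter and variable name, the way
// the is_grad branch of TransOpArg does.
VarNameMap ToGrad(const VarNameMap& src) {
  VarNameMap dst;
  for (const auto& kv : src) {
    for (const auto& name : kv.second) {
      dst[kv.first + "@GRAD"].push_back(name + "@GRAD");
    }
  }
  return dst;
}

int main() {
  VarNameMap outputs = {{"Out", {"out"}}};
  VarNameMap og = ToGrad(outputs);  // the gradient op's "OG" inputs
  assert(og.at("Out@GRAD") == std::vector<std::string>({"out@GRAD"}));
  return 0;
}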
@@ -8,22 +8,15 @@ USE_OP(add_two);

 namespace paddle {
 namespace framework {

-class NOP : public OperatorBase {
- public:
-  void InferShape(const Scope &scope) const override {}
-  void Run(const Scope &scope,
-           const platform::DeviceContext &dev_ctx) const override {}
-};
-
 class MutiInOutOpMaker : public OpProtoAndCheckerMaker {
  public:
   MutiInOutOpMaker(OpProto *proto, OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("In1", "a single input");
-    AddInput("In2_mult", "a multiple input").SetMultiple();
+    AddInput("In2_mult", "a multiple input").AsDuplicable();
     AddInput("In3", "another single input");
     AddOutput("Out1", "a single output");
-    AddOutput("Out2_mult", "a multiple output").SetMultiple();
+    AddOutput("Out2_mult", "a multiple output").AsDuplicable();
     AddComment("test op with multiple inputs and outputs");
   }
 };

@@ -33,10 +26,10 @@ class IOIgnoredOpMaker : public OpProtoAndCheckerMaker {
   IOIgnoredOpMaker(OpProto *proto, OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("In1", "a single input");
-    AddInput("In2_mult", "a multiple input").SetMultiple().IgnoreGradient();
-    AddInput("In3_mult", "another multiple input").SetMultiple();
-    AddOutput("Out1_mult", "a multiple output").SetMultiple();
-    AddOutput("Out2", "a single output").IgnoreGradient();
+    AddInput("In2_mult", "a multiple input").AsDuplicable().NotInGradient();
+    AddInput("In3_mult", "another multiple input").AsDuplicable();
+    AddOutput("Out1_mult", "a multiple output").AsDuplicable();
+    AddOutput("Out2", "a single output").NotInGradient();
     AddComment("op with inputs and outputs ignored in gradient calculating");
   }
 };

@@ -47,35 +40,33 @@ class IOIgnoredOpMaker : public OpProtoAndCheckerMaker {
 namespace f = paddle::framework;

 TEST(GradOpBuilder, AddTwo) {
-  std::shared_ptr<f::OperatorBase> add_op(
-      f::OpRegistry::CreateOp("add_two", {"x", "y"}, {"out"}, {}));
+  std::shared_ptr<f::OperatorBase> add_op(f::OpRegistry::CreateOp(
+      "add_two", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {}));
   std::shared_ptr<f::OperatorBase> grad_add_op =
       f::OpRegistry::CreateGradOp(*add_op);
-  EXPECT_EQ(static_cast<int>(grad_add_op->inputs_.size()), 4);
-  EXPECT_EQ(static_cast<int>(grad_add_op->outputs_.size()), 2);
+  EXPECT_EQ(grad_add_op->Inputs().size(), 4UL);
+  EXPECT_EQ(grad_add_op->Outputs().size(), 2UL);
   EXPECT_EQ(grad_add_op->Input("X"), "x");
   EXPECT_EQ(grad_add_op->Input("Y"), "y");
   EXPECT_EQ(grad_add_op->Input("Out"), "out");
-  EXPECT_EQ(grad_add_op->Input("Out@GRAD"), "out@GRAD");
-  EXPECT_EQ(grad_add_op->Output("X@GRAD"), "x@GRAD");
-  EXPECT_EQ(grad_add_op->Output("Y@GRAD"), "y@GRAD");
+  EXPECT_EQ(grad_add_op->Input(f::GradVarName("Out")), f::GradVarName("out"));
+  EXPECT_EQ(grad_add_op->Output(f::GradVarName("X")), f::GradVarName("x"));
+  EXPECT_EQ(grad_add_op->Output(f::GradVarName("Y")), f::GradVarName("y"));
 }

-REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker);
-REGISTER_GRADIENT_OP(mult_io, mult_io_grad, f::NOP);
-REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker);
-REGISTER_GRADIENT_OP(io_ignored, io_ignored_grad, f::NOP);
+REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker, mult_io_grad, f::NOP);
+REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker, io_ignored_grad, f::NOP);

 TEST(GradOpBuilder, MutiInOut) {
-  f::AttributeMap attrs{{"input_format", std::vector<int>{0, 1, 4, 5}},
-                        {"output_format", std::vector<int>{0, 1, 3}}};
   std::shared_ptr<f::OperatorBase> test_op(f::OpRegistry::CreateOp(
-      "mult_io", {"in1", "in2_1", "in2_2", "in2_3", "in3"},
-      {"out1", "out2_1", "out2_2"}, attrs));
+      "mult_io", {{"In1", {"in1"}},
+                  {"In2_mult", {"in2_1", "in2_2", "in2_3"}},
+                  {"In3", {"in3"}}},
+      {{"Out1", {"out1"}}, {"Out2_mult", {"out2_1", "out2_2"}}}, {}));
   std::shared_ptr<f::OperatorBase> grad_test_op =
       f::OpRegistry::CreateGradOp(*test_op);

-  ASSERT_EQ(grad_test_op->inputs_.size(), 5UL + 3UL + 3UL);
+  ASSERT_EQ(grad_test_op->Inputs().size(), 3UL + 2UL + 2UL);
   EXPECT_EQ(grad_test_op->Input("In1"), "in1");
   EXPECT_EQ(grad_test_op->Inputs("In2_mult"),
             std::vector<std::string>({"in2_1", "in2_2", "in2_3"}));

@@ -89,7 +80,7 @@ TEST(GradOpBuilder, MutiInOut) {
             std::vector<std::string>(
                 {f::GradVarName("out2_1"), f::GradVarName("out2_2")}));

-  ASSERT_EQ(grad_test_op->outputs_.size(), 5UL);
+  ASSERT_EQ(grad_test_op->Outputs().size(), 3UL);
   EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1"));
   EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")),
             std::vector<std::string>({f::GradVarName("in2_1"),

@@ -99,31 +90,28 @@ TEST(GradOpBuilder, MutiInOut) {
 }

 TEST(GradOpBuilder, IOIgnoredInGradient) {
-  f::AttributeMap attrs{{"input_format", std::vector<int>{0, 1, 3, 5}},
-                        {"output_format", std::vector<int>{0, 2, 3}}};
   std::shared_ptr<f::OperatorBase> test_op(f::OpRegistry::CreateOp(
-      "io_ignored", {"in1", "in2_1", "in2_2", "in3_1", "in3_2"},
-      {"out1_1", "out1_2", "out2"}, attrs));
+      "io_ignored", {{"In1", {"in1"}},
+                     {"In2_mult", {"in2_1", "in2_2"}},
+                     {"In3_mult", {"in3_1", "in3_2"}}},
+      {{"Out1_mult", {"out1_1", "out1_2"}}, {"Out2", {"out2"}}}, {}));
   std::shared_ptr<f::OperatorBase> grad_test_op =
       f::OpRegistry::CreateGradOp(*test_op);

   // 'In2' and 'Out2' are ignored in gradient calculating
-  ASSERT_EQ(grad_test_op->inputs_.size(), 5UL + 3UL + 3UL);
+  ASSERT_EQ(grad_test_op->Inputs().size(), 2UL + 1UL + 2UL);
   EXPECT_EQ(grad_test_op->Input("In1"), "in1");
-  EXPECT_EQ(grad_test_op->Inputs("In2_mult"),
-            std::vector<std::string>({f::kEmptyVarName, f::kEmptyVarName}));
   EXPECT_EQ(grad_test_op->Inputs("In3_mult"),
             std::vector<std::string>({"in3_1", "in3_2"}));
   EXPECT_EQ(grad_test_op->Inputs("Out1_mult"),
             std::vector<std::string>({"out1_1", "out1_2"}));
-  EXPECT_EQ(grad_test_op->Input("Out2"), f::kEmptyVarName);
   EXPECT_EQ(grad_test_op->Inputs(f::GradVarName("Out1_mult")),
             std::vector<std::string>(
                 {f::GradVarName("out1_1"), f::GradVarName("out1_2")}));
   EXPECT_EQ(grad_test_op->Input(f::GradVarName("Out2")),
             f::GradVarName("out2"));

-  ASSERT_EQ(grad_test_op->outputs_.size(), 5UL);
+  ASSERT_EQ(grad_test_op->Outputs().size(), 3UL);
   EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1"));
   EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")),
             std::vector<std::string>(
...
@@ -19,32 +19,59 @@
 namespace paddle {
 namespace framework {

-LODTensor LODTensor::SliceShared(size_t level_begin, size_t level_end) const {
-  PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced.");
-  auto new_lod = details::SliceLOD(*lod_start_pos_, level_begin, level_end);
-  // slice levels just need to update LOD info, each level will contains the
-  // whole tensor_, so no need to modify tensor_.
-  return LODTensor(tensor_, new_lod);
+LODTensor::LOD LODTensor::LOD::SliceLevels(size_t level_begin,
+                                           size_t level_end) const {
+  LOD new_lod;
+  new_lod.reserve(level_end - level_begin);
+  for (size_t i = level_begin; i < level_end; i++) {
+    new_lod.emplace_back(at(i));
+  }
+  return new_lod;
 }

-LODTensor LODTensor::SliceShared(size_t level, size_t elem_begin,
-                                 size_t elem_end) const {
-  PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced.");
-  PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
-                 NumLevels());
-  PADDLE_ENFORCE(elem_begin < NumElements(level),
-                 "element begin [%d] out of range [%d]", elem_begin,
-                 NumElements(level));
-  PADDLE_ENFORCE(elem_end < NumElements(level) + 1,
-                 "element end [%d] out of range [%d]", elem_end,
-                 NumElements(level));
-
-  auto new_lod = details::SliceLOD(*lod_start_pos_, level, elem_begin, elem_end,
-                                   true /*tensor_shared*/);
-
-  // slice elements just need to update LOD info, because offsets are not
-  // changed, so the original tensor_ can be reused.
-  return LODTensor(tensor_, new_lod);
+LODTensor::LOD LODTensor::LOD::SliceInLevel(size_t level, size_t elem_begin,
+                                            size_t elem_end) const {
+  // slice the lod.
+  LOD new_lod;
+  new_lod.reserve(size() - level);
+  auto start = this->at(level)[elem_begin];
+  auto end = this->at(level)[elem_end];
+
+  for (auto it = this->begin() + level; it != this->end(); it++) {
+    auto it_begin = std::find(it->begin(), it->end(), start);
+    auto it_end = std::find(it_begin, it->end(), end);
+    PADDLE_ENFORCE(it_begin != it->end(), "error in parsing lod info");
+    PADDLE_ENFORCE(it_end != it->end(), "error in parsing lod info");
+    new_lod.emplace_back(it_begin, it_end + 1);
+    // reset offset if tensor is copyed and sliced.
+    std::transform(new_lod.back().begin(), new_lod.back().end(),
+                   new_lod.back().begin(),
+                   [start](int v) { return v - start; });
+    PADDLE_ENFORCE_EQ(new_lod.back().front(), 0, "error in slice LOD");
+  }
+  PADDLE_ENFORCE_LE(new_lod.size(), this->size());
+  return new_lod;
+}
+
+bool operator==(const LODTensor::LOD& a, const LODTensor::LOD& b) {
+  if (a.size() != b.size()) {
+    return false;
+  }
+  for (size_t i = 0; i < a.size(); i++) {
+    const auto& a_level = a[i];
+    const auto& b_level = b[i];
+    if (a_level.size() != b_level.size()) {
+      return false;
+    }
+    for (size_t j = 0; j < a_level.size(); j++) {
+      if (a_level[j] != b_level[j]) {
+        return false;
+      }
+    }
+  }
+  return true;
 }

 }  // namespace framework
...
@@ -15,7 +15,7 @@
 #pragma once
 #include <memory>
-#if (!PADDLE_ONLY_CPU)
+#if !defined(PADDLE_ONLY_CPU)
 #include <thrust/device_vector.h>
 #include <thrust/host_vector.h>
 #endif

@@ -31,30 +31,29 @@ namespace framework {
  * LODTensor (Level of details Tensor)
  * see https://en.wikipedia.org/wiki/Level_of_details for reference.
  */
-class LODTensor {
+class LODTensor : public Tensor {
  public:
 // Level save offsets of each unit.
 #ifdef PADDLE_ONLY_CPU
-  using Level = std::vector<size_t>;
+  template <typename T>
+  using Vector = std::vector<T>;
 #else
-  using Level = thrust::device_vector<size_t>;
+  template <typename T>
+  using Vector = thrust::host_vector<T>;
 #endif
-  // LOD stores offsets of each level of units, the largest units level first,
+  // LoD stores offsets of each level of units, the largest units level first,
   // then the smaller units level. Each Level stores the offsets of units in
   // Tesor.
-  typedef std::vector<Level> LOD;
+  class LOD : public std::vector<Vector<size_t>> {
+   public:
+    LOD SliceLevels(size_t level_begin, size_t level_end) const;
+    LOD SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) const;
+  };

   LODTensor() {}
-  LODTensor(const std::shared_ptr<Tensor> &tensor,
-            const std::shared_ptr<LOD> &lod) {
-    Reset(tensor, lod);
-  }
-
-  void Reset(const std::shared_ptr<Tensor> &tensor,
-             const std::shared_ptr<LOD> &lod) {
-    tensor_ = tensor;
-    lod_start_pos_ = lod;
-  }
+  explicit LODTensor(const LOD &lod) : lod_(lod) {}
+
+  virtual Tensor *Clone() const { return new LODTensor(lod_); }

   /*
    * Get a element from LOD.

@@ -65,16 +64,14 @@ class LODTensor {
     PADDLE_ENFORCE(elem < NumElements(level),
                    "element begin [%d] out of range [%d]", elem,
                    NumElements(level));
-    return (*lod_start_pos_)[level][elem];
+    return (lod_)[level][elem];
   }

   /*
    * Number of LODTensor's levels, each level has units of data, for example,
    * in the sentence's view, article, paragraph, sentence are 3 levels.
    */
-  size_t NumLevels() const {
-    return lod_start_pos_ ? lod_start_pos_->size() : 0UL;
-  }
+  size_t NumLevels() const { return lod_.size(); }

   /*
    * Number of elements in a level.
    */

@@ -82,64 +79,71 @@ class LODTensor {
     PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
                    NumLevels());
     // the last offset is the end of last element
-    return lod_start_pos_->at(level).size() - 1;
+    return lod_[level].size() - 1;
   }

-  /*
-   * Slice of levels[level_begin:level_end], with tensor copied.
-   */
-  template <typename T>
-  LODTensor SliceCopied(size_t level_begin, size_t level_end,
-                        const platform::Place &dst_place) const;
-
   /*
    * Slice of levels[level_begin:level_end], with tensor shared.
    */
-  LODTensor SliceShared(size_t level_begin, size_t level_end) const;
-
-  /*
-   * Slice of elements of a level, [elem_begin: elem_end], with tensor copied.
-   * @note: low performance in slice lod_start_pos_.
-   */
   template <typename T>
-  LODTensor SliceCopied(size_t level, size_t elem_begin, size_t elem_end,
-                        const platform::Place &dst_place) const;
+  LODTensor SliceLevels(size_t level_begin, size_t level_end) const;

   /*
    * Slice of elements of a level, [elem_begin: elem_end], with tensor shared.
-   * @note: low performance in slice lod_start_pos_.
+   * @note: low performance in slice lod_.
    */
-  LODTensor SliceShared(size_t level, size_t elem_begin, size_t elem_end) const;
-
-  /*
-   * Copy other's lod_start_pos_, to share LOD info.
-   * @note: the LOD info should not be changed.
-   */
-  void ShareLOD(const LODTensor &other) {
-    lod_start_pos_ = other.lod_start_pos_;
-  }
+  template <typename T>
+  LODTensor SliceInLevel(size_t level, size_t elem_begin,
+                         size_t elem_end) const;

   /*
-   * Copy other's lod_start_pos_'s content, free to mutate.
+   * Copy other's lod_'s content, free to mutate.
    */
-  void CopyLOD(const LODTensor &other) {
-    lod_start_pos_ = std::make_shared<LOD>(*other.lod_start_pos_);
-  }
+  void CopyLOD(const LODTensor &other) { lod_ = other.lod_; }
+
   /*
    * Determine whether LODTensor has a valid LOD info.
    */
-  bool HasLOD() const { return bool(lod_start_pos_); }
-  LOD *lod() const { return lod_start_pos_.get(); }
+  const LOD &lod() const { return lod_; }
+  LOD *mutable_lod() { return &lod_; }

-  std::shared_ptr<Tensor> &tensor() { return tensor_; }
-  Tensor *raw_tensor() { return tensor_.get(); }
+  virtual ~LODTensor() {}

  private:
-  std::shared_ptr<LOD> lod_start_pos_;
-  std::shared_ptr<Tensor> tensor_;
+  LOD lod_;
 };

+bool operator==(const LODTensor::LOD &a, const LODTensor::LOD &b);
+
+template <typename T>
+LODTensor LODTensor::SliceLevels(size_t level_begin, size_t level_end) const {
+  auto new_lod = lod_.SliceLevels(level_begin, level_end);
+  // slice levels just need to update LOD info, each level will contains the
+  // whole tensor_, so no need to modify tensor_.
+  LODTensor new_tensor(new_lod);
+  new_tensor.ShareDataWith<T>(*this);
+  return new_tensor;
+}
+
+template <typename T>
+LODTensor LODTensor::SliceInLevel(size_t level, size_t elem_begin,
+                                  size_t elem_end) const {
+  PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
+                 NumLevels());
+  PADDLE_ENFORCE(elem_begin < NumElements(level),
+                 "element begin [%d] out of range [%d]", elem_begin,
+                 NumElements(level));
+  PADDLE_ENFORCE(elem_end < NumElements(level) + 1,
+                 "element end [%d] out of range [%d]", elem_end,
+                 NumElements(level));
+
+  auto new_lod = lod_.SliceInLevel(level, elem_begin, elem_end);
+
+  // slice elements just need to update LOD info, because offsets are not
+  // changed, so the original tensor_ can be reused.
+  LODTensor new_tensor(new_lod);
+  new_tensor.ShareDataWith<T>(*this);
+  return new_tensor;
+}
+
 }  // namespace framework
 }  // namespace paddle
-
-#include "paddle/framework/lod_tensor_impl.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/details/lod_tensor.h"
namespace paddle {
namespace framework {
template <typename T>
LODTensor LODTensor::SliceCopied(size_t level_begin, size_t level_end,
const platform::Place &dst_place) const {
PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced.");
auto new_lod = details::SliceLOD(*lod_start_pos_, level_begin, level_end);
auto new_tensor = std::make_shared<Tensor>();
new_tensor->CopyFrom<T>(*tensor_, dst_place);
return LODTensor(new_tensor, new_lod);
}
template <typename T>
LODTensor LODTensor::SliceCopied(size_t level, size_t elem_begin,
size_t elem_end,
const platform::Place &dst_place) const {
PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced.");
PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
NumLevels());
PADDLE_ENFORCE(elem_begin < NumElements(level),
"element begin [%d] out of range [%d]", elem_begin,
NumElements(level));
PADDLE_ENFORCE(elem_end < NumElements(level) + 1,
"element end [%d] out of range [%d]", elem_end,
NumElements(level));
auto new_lod = details::SliceLOD(*lod_start_pos_, level, elem_begin, elem_end,
false /*tensor_shared*/);
auto start_idx = new_lod->front().front();
auto end_idx = new_lod->front().back() - 1 /*the next element's start*/;
auto sliced_tensor = tensor_->Slice<T>(start_idx, end_idx);
auto new_tensor = std::make_shared<Tensor>();
new_tensor->CopyFrom<T>(sliced_tensor, dst_place);
return LODTensor(new_tensor, new_lod);
}
} // namespace framework
} // namespace paddle
@@ -15,6 +15,7 @@
 #include <glog/logging.h>
 #include <gtest/gtest.h>
+#include <algorithm>
 #include <memory>

 namespace paddle {

@@ -29,22 +30,28 @@ class LODTensorTester : public ::testing::Test {
     // 0 10 20
     // 0 5 10 15 20
     // 0 2 5 7 10 12 15 20
-    auto lod = std::make_shared<LODTensor::LOD>();
-    lod->push_back(std::vector<size_t>{0, 10, 20});
-    lod->push_back(std::vector<size_t>{0, 5, 10, 15, 20});
-    lod->push_back(std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20});
+    LODTensor::LOD lod;
+    lod.push_back(std::vector<size_t>{0, 10, 20});
+    lod.push_back(std::vector<size_t>{0, 5, 10, 15, 20});
+    lod.push_back(std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20});

-    auto tensor = std::make_shared<Tensor>();
-    tensor->Resize({20 /*batch size*/, 128 /*dim*/});
+    ASSERT_EQ(lod.size(), 3UL);
+
+    tensor.Resize({20 /*batch size*/, 128 /*dim*/});
     // malloc memory
-    tensor->mutable_data<float>(place);
+    tensor.mutable_data<float>(place);
+
+    lod_tensor.reset(new LODTensor(lod));
+    lod_tensor->Resize({20 /*batch size*/, 128 /*dim*/});

-    lod_tensor->Reset(tensor, lod);
+    lod_tensor->ShareDataWith<float>(tensor);
+    // lod_tensor->ShareDataWith<Tensor>(tensor);
   }

  protected:
   std::unique_ptr<LODTensor> lod_tensor;
   platform::CPUPlace place;
+  Tensor tensor;
 };

 TEST_F(LODTensorTester, NumLevels) { ASSERT_EQ(lod_tensor->NumLevels(), 3UL); }

@@ -55,110 +62,54 @@ TEST_F(LODTensorTester, NumElements) {
   ASSERT_EQ(lod_tensor->NumElements(2), 8UL);
 }

-TEST_F(LODTensorTester, SliceShared_Level) {
-  // slice 1 level
-  for (size_t level = 0; level < 3UL; ++level) {
-    auto new_lod_tensor = lod_tensor->SliceShared(level, level + 1);
-    ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
-    ASSERT_EQ(new_lod_tensor.NumElements(0UL), lod_tensor->NumElements(level));
-    ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor());
-  }
-  // slice 2 level
-  for (size_t level = 0; level < 2UL; ++level) {
-    auto new_lod_tensor = lod_tensor->SliceShared(level, level + 2);
-    ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
-    ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor->NumElements(level));
-    ASSERT_EQ(new_lod_tensor.NumElements(1),
-              lod_tensor->NumElements(level + 1));
-    ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor());
-  }
-}
-
-TEST_F(LODTensorTester, SliceCopied_Level) {
+TEST_F(LODTensorTester, SliceLevels) {
   // slice 1 level
   for (size_t level = 0; level < 3UL; ++level) {
-    auto new_lod_tensor =
-        lod_tensor->SliceCopied<float>(level, level + 1, place);
+    auto new_lod_tensor = lod_tensor->SliceLevels<float>(level, level + 1);
     ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
     ASSERT_EQ(new_lod_tensor.NumElements(0UL), lod_tensor->NumElements(level));
-    // ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor());
-    // TODO(superjom) add tensor comparation here.
+    // ASSERT_EQ(new_lod_tensor, *lod_tensor);
   }
   // slice 2 level
   for (size_t level = 0; level < 2UL; ++level) {
-    auto new_lod_tensor =
-        lod_tensor->SliceCopied<float>(level, level + 2, place);
+    auto new_lod_tensor = lod_tensor->SliceLevels<float>(level, level + 2);
     ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
     ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor->NumElements(level));
     ASSERT_EQ(new_lod_tensor.NumElements(1),
               lod_tensor->NumElements(level + 1));
-    // ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor());
-    // TODO(superjom) add tensor comparation here.
+    ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor->data<float>());
   }
 }

-TEST_F(LODTensorTester, SliceShared_Element) {
-  size_t level = 0;
-  auto new_lod_tensor = lod_tensor->SliceShared(level, 0, 2);
-  ASSERT_EQ(new_lod_tensor.NumLevels(), 3UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(2), 8UL);
-  ASSERT_EQ(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor());
-
-  level = 1;
-  new_lod_tensor = lod_tensor->SliceShared(level, 0, 2);
-  ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
-  ASSERT_EQ(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor());
-}
-
-TEST_F(LODTensorTester, SliceCopied_Element) {
+TEST_F(LODTensorTester, SliceInLevel) {
   size_t level = 0;
-  auto new_lod_tensor = lod_tensor->SliceCopied<float>(level, 0, 2, place);
-  ASSERT_EQ(new_lod_tensor.NumLevels(), 3UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(2), 8UL);
-  ASSERT_NE(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor());
+  auto new_lod_tensor = lod_tensor->SliceInLevel<float>(level, 0, 2);
+  EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL);
+  EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL);
+  EXPECT_EQ(new_lod_tensor.NumElements(1), 4UL);
+  EXPECT_EQ(new_lod_tensor.NumElements(2), 8UL);
+  ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor->data<float>());

   level = 1;
-  new_lod_tensor = lod_tensor->SliceCopied<float>(level, 0, 2, place);
+  new_lod_tensor = lod_tensor->SliceInLevel<float>(level, 0, 2);
   ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
   ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
   ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
-  ASSERT_NE(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor());
-
-  level = 1;
-  // LOD is
-  // 0 5 10
-  // 0 2 5 7 10
-  new_lod_tensor = lod_tensor->SliceCopied<float>(level, 1, 3, place);
-  ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
-  ASSERT_EQ(new_lod_tensor.lod_element(0, 0), 0UL);
-  ASSERT_EQ(new_lod_tensor.lod_element(0, 1), 5UL);
-  ASSERT_EQ(new_lod_tensor.lod_element(1, 0), 0UL);
-  ASSERT_EQ(new_lod_tensor.lod_element(1, 1), 2UL);
-  ASSERT_EQ(new_lod_tensor.lod_element(1, 2), 5UL);
-  ASSERT_EQ(new_lod_tensor.lod_element(1, 3), 7UL);
-  // TODO(superjom) compare the content of these tensors
+  ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor->data<float>());
 }

 TEST_F(LODTensorTester, ShareLOD) {
   LODTensor new_lod_tensor;
-  new_lod_tensor.ShareLOD(*lod_tensor);
+  new_lod_tensor.CopyLOD(*lod_tensor);
   ASSERT_EQ(new_lod_tensor.lod(), lod_tensor->lod());
 }

 TEST_F(LODTensorTester, CopyLOD) {
   LODTensor new_lod_tensor;
   new_lod_tensor.CopyLOD(*lod_tensor);
-  ASSERT_NE(new_lod_tensor.lod(), lod_tensor->lod());
+  bool equals = std::equal(lod_tensor->lod().begin(), lod_tensor->lod().end(),
+                           new_lod_tensor.lod().begin());
+  ASSERT_TRUE(equals);
 }

 }  // namespace framework
...
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// Protocol Message for 3rd-party language binding.
//
// Paddle Python package will use `OpProto` to generate op creation methods.
// The op creation methods take user's input and generate `OpDesc` proto
// message,
// then pass `OpDesc` to C++ side and create Op pointer.
//
syntax = "proto2";
package paddle.framework;
import "attribute.proto";
// Attribute protocol message for 3rd-party language binding.
// It will store the Op support what attribute and what type.
message AttrProto {
// Supported attribute name. e.g. `scale` for cosine op.
required string name = 1;
// Supported attribute type.
required AttrType type = 2;
// Supported attribute comments. It helps 3rd-party language generate
// doc-string.
required string comment = 3;
// If that attribute is generated, it means the Paddle third language
// binding has responsibility to fill that attribute. End-User should
// not set that attribute.
optional bool generated = 4 [ default = false ];
}
// Input or output message for 3rd-party language binding.
// It contains parameter name and its comments.
message VarProto {
// Input or output name in that op creation function.
// e.g. `cos(a, b, output, ...)`, "a", "b", "output" are names.
required string name = 1;
// The comment for that input. It helps 3rd-party language generate
// doc-string.
required string comment = 2;
// Is that input/output could be a list or not.
// If so, that Op should write a attributed named `input_format` or
// `output_format`.
//
// e.g.
// If the op is a fc op, the inputs are `X`, `W`, `b`. The `X` and `W`
// could be multiple, so the multiple of `X` and `W` is True, and OpDesc
// will hold a attribute of them.
//
// The Op desc of same fc could be
// {
// "type": "fc",
// "input": ["X1", "X2", "W1", "W2", "b"],
// "output": "fc.out",
// "attrs" : {
// "input_format": [0, 2, 4, 5]
// }
// }
//
optional bool multiple = 3 [ default = false ];
// It marks that output is a temporary output. That output is not used by
// user, but used by other op internally as input. If other op is not use
// that output, it could be optimized early.
//
// Attribute temporary_index will be set in OpDesc if there is some
// outputs are temporary.
//
// output = [ "xxx.out1", "xxx.tmp", "xxx.out2"],
// attrs = {
// "temporary_index": [1]
// }
optional bool temporary = 4 [ default = false ];
// The gradient of operator can be ignored immediately
// e.g. operator AddOp, y = x1 + x2, the gradient of dy/dx1, dy/dx2
// can be ignored for the future optimized on graph.
optional bool ignore_gradient = 6;
}
// Op protocol message for 3rd-party language binding.
// It contains all information for generating op creation method.
message OpProto {
// The input information to generate op creation method.
repeated VarProto inputs = 1;
// The output information to generate op creation method.
repeated VarProto outputs = 2;
// The attribute information to generate op creation method.
repeated AttrProto attrs = 3;
// The comments for that Op. It helps 3rd-party language generate
// doc-string. The whole documentation of that Op is generated by comment,
// inputs, outputs, attrs together.
required string comment = 4;
// The type of that Op.
required string type = 5;
}
#include <gtest/gtest.h>
#include <paddle/framework/op_proto.pb.h>
TEST(TestOpProto, ALL) {
paddle::framework::OpProto proto;
{
auto ipt = proto.mutable_inputs()->Add();
*ipt->mutable_name() = "a";
*ipt->mutable_comment() = "the one input of cosine op";
}
{
auto ipt = proto.mutable_inputs()->Add();
*ipt->mutable_name() = "b";
*ipt->mutable_comment() = "the other input of cosine op";
}
{
auto opt = proto.mutable_outputs()->Add();
*opt->mutable_name() = "output";
*opt->mutable_comment() = "the output of cosine op";
}
{
auto attr = proto.mutable_attrs()->Add();
*attr->mutable_name() = "scale";
attr->set_type(paddle::framework::AttrType::FLOAT);
*attr->mutable_comment() = "the scale attribute of cosine op";
}
proto.set_type("cos");
*proto.mutable_comment() = "cosine op, output = scale * cos(a, b)";
ASSERT_TRUE(proto.IsInitialized());
}
\ No newline at end of file
...@@ -17,5 +17,48 @@ limitations under the License. */ ...@@ -17,5 +17,48 @@ limitations under the License. */
#include <vector> #include <vector>
namespace paddle { namespace paddle {
namespace framework {} // namespace framework namespace framework {
std::unique_ptr<OperatorBase> OpRegistry::CreateOp(const std::string& type,
const VarNameMap& inputs,
const VarNameMap& outputs,
AttributeMap attrs) {
auto it = op_info_map().find(type);
PADDLE_ENFORCE(it != op_info_map().end(),
"Operator '%s' has not been registered.", type);
it->second.checker_->Check(attrs);
auto op = it->second.creator_(type, inputs, outputs, attrs);
return std::unique_ptr<OperatorBase>(op);
}
std::unique_ptr<OperatorBase> OpRegistry::CreateOp(const OpDesc& op_desc) {
VarNameMap inputs = ConvertOpDescVarsToVarNameMap(op_desc.inputs());
VarNameMap outputs = ConvertOpDescVarsToVarNameMap(op_desc.outputs());
AttributeMap attrs;
for (auto& attr : op_desc.attrs()) {
attrs[attr.name()] = GetAttrValue(attr);
}
return CreateOp(op_desc.type(), inputs, outputs, attrs);
}
OperatorBase::VarNameMap OpRegistry::ConvertOpDescVarsToVarNameMap(
const google::protobuf::RepeatedPtrField<OpDesc::Var>& op_desc_vars) {
VarNameMap ret_val;
for (auto& var : op_desc_vars) {
auto& var_names = ret_val[var.parameter()];
auto& var_names_in_proto = var.arguments();
var_names.reserve(static_cast<size_t>(var_names_in_proto.size()));
std::copy(var_names_in_proto.begin(), var_names_in_proto.end(),
std::back_inserter(var_names));
}
return ret_val;
}
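The conversion above can be pictured with a small illustration (values are made up):

// Illustrative input/output of ConvertOpDescVarsToVarNameMap:
//   OpDesc vars: { parameter: "X", arguments: ["x0", "x1"] }
//                { parameter: "b", arguments: ["b0"] }
//   result     : { "X" -> ["x0", "x1"], "b" -> ["b0"] }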
std::unique_ptr<OperatorBase> OpRegistry::CreateGradOp(const OperatorBase& op) {
PADDLE_ENFORCE(!op.IsNetOp(), "Use framework::Backward to get backward ops");
return std::unique_ptr<OperatorBase>(BuildGradOp(&op));
}
} // namespace framework
} // namespace paddle } // namespace paddle
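Taken together, a caller drives the registry like this (a usage sketch with illustrative variable names, assuming an `add_two` operator with inputs `X`, `Y` and output `Out` has been registered):

#include "paddle/framework/op_registry.h"

void CreateAddTwoOp() {
  using paddle::framework::OpRegistry;
  using paddle::framework::OperatorBase;
  OperatorBase::VarNameMap inputs{{"X", {"x0"}}, {"Y", {"y0"}}};
  OperatorBase::VarNameMap outputs{{"Out", {"out0"}}};
  // CreateOp validates the attributes against the registered checker and
  // returns an owning pointer to the newly constructed operator.
  auto op = OpRegistry::CreateOp("add_two", inputs, outputs, {});
}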
...@@ -17,309 +17,114 @@ limitations under the License. */ ...@@ -17,309 +17,114 @@ limitations under the License. */
#include <algorithm> #include <algorithm>
#include <atomic> #include <atomic>
#include <type_traits> #include <type_traits>
#include <typeinfo>
#include <unordered_map> #include <unordered_map>
#include <unordered_set> #include <unordered_set>
#include "paddle/framework/attribute.h" #include "paddle/framework/attribute.h"
#include "paddle/framework/framework.pb.h"
#include "paddle/framework/grad_op_builder.h" #include "paddle/framework/grad_op_builder.h"
#include "paddle/framework/op_desc.pb.h" #include "paddle/framework/operator.h"
#include "paddle/framework/scope.h" #include "paddle/framework/scope.h"
namespace paddle { namespace paddle {
namespace framework { namespace framework {
// This class not only makes the proto but also initializes the attribute checkers. class OpRegistry {
class OpProtoAndCheckerMaker { using VarNameMap = OperatorBase::VarNameMap;
public: using OpCreator = std::function<OperatorBase*(
OpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) const std::string& /*type*/, const VarNameMap& /*inputs*/,
: proto_(proto), op_checker_(op_checker) {} const VarNameMap& /*outputs*/, const AttributeMap& /*attrs*/)>;
~OpProtoAndCheckerMaker() {
PADDLE_ENFORCE(validated_, "should call Validate after build");
}
void Validate() {
validated_ = true;
CheckNoDuplicatedInOutAttrs();
}
protected:
struct VariableBuilder {
VarProto* var_;
std::function<void()> on_multiple_;
std::function<void()> on_temporary_;
VariableBuilder& SetMultiple() {
var_->set_multiple(true);
on_multiple_();
return *this;
}
VariableBuilder& SetTemporary() {
PADDLE_ENFORCE(bool(on_temporary_), "Cannot set temporary");
var_->set_temporary(true);
on_temporary_();
return *this;
}
VariableBuilder& IgnoreGradient() { public:
var_->set_ignore_gradient(true); struct OpInfo {
return *this; OpCreator creator_;
} std::string grad_op_type_;
OpProto* proto_;
OpAttrChecker* checker_;
}; };
VariableBuilder AddInput(const std::string& name, template <typename OpType, typename ProtoMakerType, typename GradOpType>
const std::string& comment) { static void RegisterOp(const std::string& op_type,
auto input = proto_->mutable_inputs()->Add(); const std::string& grad_op_type) {
*input->mutable_name() = name; PADDLE_ENFORCE(op_info_map().count(op_type) == 0,
*input->mutable_comment() = comment; "'%s' is registered more than once.", op_type);
return VariableBuilder{input, [=] { this->SetHasMultipleInput(); }, OpInfo op_info;
nullptr}; op_info.creator_ = [](const std::string& type, const VarNameMap& inputs,
} const VarNameMap& outputs,
const AttributeMap& attrs) {
VariableBuilder AddOutput(const std::string& name, return new OpType(type, inputs, outputs, attrs);
const std::string& comment) {
auto output = proto_->mutable_outputs()->Add();
*output->mutable_name() = name;
*output->mutable_comment() = comment;
return VariableBuilder{output, [=] { this->SetHasMultipleOutput(); },
[=] { this->SetHasTemporaryOutput(); }};
}
template <typename T>
TypedAttrChecker<T>& AddAttr(const std::string& name,
const std::string& comment,
bool generated = false) {
auto attr = proto_->mutable_attrs()->Add();
*attr->mutable_name() = name;
*attr->mutable_comment() = comment;
attr->set_generated(generated);
attr->set_type(AttrTypeID<T>());
return op_checker_->AddAttrChecker<T>(name);
}
void AddComment(const std::string& comment) {
*(proto_->mutable_comment()) = comment;
}
private:
void SetHasMultiple(const std::string& in_out, bool* flag) {
if (!*flag) {
AddAttr<std::vector<int>>(in_out + "_format",
"The multiple index of " + in_out +
"\n"
R"DOC(
This attribute is used by Paddle core framework. Paddle's Op support each input
or output could be a list of variable. This attribute is used to show how that
list organized.
e.g.
input = ["a", "b", "c", "d", "e", "f"]
input_format = [0, 4, 5, 6]
means
The number of all input variables this op is six, and they are segmented into
three inputs.
The first input is input[0:4], second is input[4:5], third is input[5:6].
)DOC",
/*generated*/ true);
*flag = true;
}
}
void SetHasMultipleInput() { SetHasMultiple("input", &has_multiple_input_); }
void SetHasMultipleOutput() {
SetHasMultiple("output", &has_multiple_output_);
}
void SetHasTemporaryOutput() {
if (!has_temporary_output_) {
AddAttr<std::vector<int>>("temporary_index",
R"DOC(The temporary index of output.
Not all output of Paddle Op is used by user. For faster computation, each op
could output some its internal state to other op, other op could take that
output to make compute faster.
Add a mark to which output is temporary is helpful for future optimization.
)DOC",
/*generated*/ true)
.SetDefault(std::vector<int>());
has_temporary_output_ = true;
}
}
void CheckNoDuplicatedInOutAttrs() {
std::unordered_set<std::string> names;
auto checker = [&](const std::string& name) {
PADDLE_ENFORCE(!names.count(name), "[%s] is duplicated", name);
names.insert(name);
}; };
for (auto& attr : proto_->attrs()) { op_info.grad_op_type_ = grad_op_type;
checker(attr.name()); if (std::type_index(typeid(ProtoMakerType)) !=
} std::type_index(typeid(NOPMaker))) {
for (auto& input : proto_->inputs()) { op_info.proto_ = new OpProto;
checker(input.name()); op_info.checker_ = new OpAttrChecker;
} auto maker = ProtoMakerType(op_info.proto_, op_info.checker_);
for (auto& output : proto_->outputs()) {
checker(output.name());
}
}
OpProto* proto_;
OpAttrChecker* op_checker_;
bool validated_{false};
bool has_multiple_input_{false};
bool has_multiple_output_{false};
bool has_temporary_output_{false};
};
class OpRegistry {
using OpCreator = std::function<OperatorBase*()>;
using VarIndexMap = std::unordered_map<std::string, int>;
using VarNameList = std::vector<std::string>;
public:
template <typename OpType, typename ProtoMakerType>
static void RegisterOp(const std::string& op_type) {
op_creators()[op_type] = [] { return new OpType; };
OpAttrChecker& op_checker = op_checkers()[op_type];
OpProto& op_proto = protos()[op_type];
auto maker = ProtoMakerType(&op_proto, &op_checker);
maker.Validate(); maker.Validate();
*op_proto.mutable_type() = op_type; op_info.proto_->set_type(op_type);
PADDLE_ENFORCE( PADDLE_ENFORCE(
op_proto.IsInitialized(), op_info.proto_->IsInitialized(),
"Fail to initialize %s's OpProto, because %s is not initialized", "Fail to initialize %s's OpProto, because %s is not initialized",
op_type, op_proto.InitializationErrorString()); op_type, op_info.proto_->InitializationErrorString());
} else {
VarIndexMaps()[op_type].reset(new VarIndexMap()); op_info.proto_ = nullptr;
auto& varmap = *VarIndexMaps()[op_type]; op_info.checker_ = nullptr;
int idx = 0;
for (auto& var : op_proto.inputs()) {
varmap[var.name()] = idx++;
}
idx = 0;
for (auto& var : op_proto.outputs()) {
varmap[var.name()] = idx++;
}
}
template <typename GradOpType>
static void RegisterGradOp(const std::string& op_type,
const std::string& grad_op_type) {
op_creators()[grad_op_type] = [] { return new GradOpType; };
grad_ops()[op_type] = grad_op_type;
}
static std::shared_ptr<OperatorBase> CreateOp(const std::string& type,
const VarNameList& inputs,
const VarNameList& outputs,
const AttributeMap& attrs) {
auto op_create_it = op_creators().find(type);
PADDLE_ENFORCE(op_create_it != op_creators().end(),
"Operator %s cannot be found.", type);
auto op = op_create_it->second();
op->type_ = type;
op->inputs_ = inputs;
op->outputs_ = outputs;
op->attrs_ = attrs;
op_checkers().at(type).Check(op->attrs_);
GenerateTempVariableName(op);
{
auto var_index_it = VarIndexMaps().find(type);
if (var_index_it != VarIndexMaps().end()) {
op->in_out_idxs_ = var_index_it->second;
}
} }
op_info_map().insert(std::make_pair(op_type, op_info));
op->Init(); // register gradient op
return std::shared_ptr<OperatorBase>(op); if (!grad_op_type.empty()) {
RegisterOp<GradOpType, NOPMaker, NOP>(grad_op_type, "");
} }
static std::shared_ptr<OperatorBase> CreateOp(const OpDesc& op_desc) {
std::vector<std::string> inputs;
inputs.reserve((size_t)op_desc.inputs_size());
std::copy(op_desc.inputs().begin(), op_desc.inputs().end(),
std::back_inserter(inputs));
std::vector<std::string> outputs;
outputs.reserve((size_t)op_desc.outputs_size());
std::copy(op_desc.outputs().begin(), op_desc.outputs().end(),
std::back_inserter(outputs));
AttributeMap attrs;
for (auto& attr : op_desc.attrs()) {
attrs[attr.name()] = GetAttrValue(attr);
} }
return CreateOp(op_desc.type(), inputs, outputs, attrs); static std::unique_ptr<OperatorBase> CreateOp(const std::string& type,
} const VarNameMap& inputs,
const VarNameMap& outputs,
AttributeMap attrs);
static std::shared_ptr<OperatorBase> CreateGradOp(const OperatorBase& op) { static std::unique_ptr<OperatorBase> CreateOp(const OpDesc& op_desc);
PADDLE_ENFORCE(!op.IsNetOp(),
"Use framework::Backward to get backward ops");
std::shared_ptr<OperatorBase> grad_op(BuildGradOp(&op));
grad_op->Init();
return grad_op;
}
static std::unordered_map<std::string, OpProto>& protos() { static VarNameMap ConvertOpDescVarsToVarNameMap(
static std::unordered_map<std::string, OpProto> protos_; const google::protobuf::RepeatedPtrField<OpDesc::Var>& op_desc_vars);
return protos_;
}
static std::unordered_map<std::string, std::string>& grad_ops() { static std::unique_ptr<OperatorBase> CreateGradOp(const OperatorBase& op);
static std::unordered_map<std::string, std::string> grad_ops_;
return grad_ops_;
}
static std::unordered_map<std::string, std::shared_ptr<VarIndexMap>>& static std::unordered_map<std::string, const OpInfo>& op_info_map() {
VarIndexMaps() { static std::unordered_map<std::string, const OpInfo> op_info_map_;
static std::unordered_map<std::string, std::shared_ptr<VarIndexMap>> maps_; return op_info_map_;
return maps_;
}
static std::unordered_map<std::string, OpCreator>& op_creators() {
static std::unordered_map<std::string, OpCreator> op_creators_;
return op_creators_;
}
private:
static std::unordered_map<std::string, OpAttrChecker>& op_checkers() {
static std::unordered_map<std::string, OpAttrChecker> op_checkers_;
return op_checkers_;
} }
};
static void GenerateTempVariableName(OperatorBase* op) { class Registrar {
static std::atomic<size_t> gUniqId(0UL); public:
for (auto& outname : op->outputs_) { // In our design, various kinds of classes, e.g., operators and kernels,
if (outname == kTempVarName) { // have their corresponding registry and registrar. The action of
outname += op->type_; // registration is in the constructor of a global registrar variable, which,
outname += "@"; // however, are not used in the code that calls package framework, and would
outname += std::to_string(gUniqId.fetch_add(1)); // be removed from the generated binary file by the linker. To avoid such
} // removal, we add Touch to all registrar classes and make the USE_OP macros
} // call this method. So, as long as the caller code calls USE_OP, the global
} // registrar variable won't be removed by the linker.
void Touch() {}
}; };
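The linker trick described in the comment above can be distilled into a standalone sketch (illustrative names, not Paddle code):

// A registrar whose constructor performs the registration side effect.
struct MyRegistrar {
  MyRegistrar() { /* e.g. insert an entry into a global registry map */ }
  void Touch() {}
};
static MyRegistrar g_my_registrar;  // dropped by the linker if unreferenced
int TouchMyRegistrar() {            // defined next to the registrar
  g_my_registrar.Touch();
  return 0;
}

// Any translation unit that needs the registration references the symbol,
// which forces the linker to keep g_my_registrar (and its constructor):
extern int TouchMyRegistrar();
static int use_my_registrar __attribute__((unused)) = TouchMyRegistrar();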
template <typename OpType, typename ProtoMakerType> template <typename OpType, typename ProtoMakerType, typename GradOpType>
class OpRegisterHelper { class OpRegistrar : public Registrar {
public: public:
explicit OpRegisterHelper(const char* op_type) { explicit OpRegistrar(const char* op_type) : OpRegistrar(op_type, "") {}
OpRegistry::RegisterOp<OpType, ProtoMakerType>(op_type); OpRegistrar(const char* op_type, const char* grad_op_type) {
OpRegistry::RegisterOp<OpType, ProtoMakerType, GradOpType>(op_type,
grad_op_type);
} }
}; };
template <typename GradOpType> template <typename PlaceType, typename KernelType>
class GradOpRegisterHelper { class OpKernelRegistrar : public Registrar {
public: public:
GradOpRegisterHelper(const char* op_type, const char* grad_op_type) { explicit OpKernelRegistrar(const char* op_type) {
OpRegistry::RegisterGradOp<GradOpType>(op_type, grad_op_type); OperatorWithKernel::OpKernelKey key;
key.place_ = PlaceType();
OperatorWithKernel::AllOpKernels()[op_type][key].reset(new KernelType);
} }
}; };
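For orientation, the kernel map filled in above is what run-time dispatch consults; conceptually (a simplified, hypothetical sketch):

// Conceptual kernel lookup at run time (simplified):
//   auto& kernels = OperatorWithKernel::AllOpKernels()["my_op"];
//   OperatorWithKernel::OpKernelKey key;
//   key.place_ = platform::CPUPlace();
//   kernels.at(key)->Compute(ctx);  // runs the kernel registered for CPU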
...@@ -333,97 +138,93 @@ class GradOpRegisterHelper { ...@@ -333,97 +138,93 @@ class GradOpRegisterHelper {
msg) msg)
/** /**
* Macro to Register Operator. * Macro to register Operator.
*/
#define REGISTER_OP(__op_type, __op_class, __op_maker_class) \
STATIC_ASSERT_GLOBAL_NAMESPACE(__reg_op__##__op_type, \
"REGISTER_OP must be in global namespace"); \
static ::paddle::framework::OpRegisterHelper<__op_class, __op_maker_class> \
__op_register_##__op_type##__(#__op_type); \
int __op_register_##__op_type##_handle__() { return 0; }
/**
* Macro to Register Gradient Operator.
*/ */
#define REGISTER_GRADIENT_OP(__op_type, __grad_op_type, __grad_op_class) \ #define REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, \
grad_op_class) \
STATIC_ASSERT_GLOBAL_NAMESPACE( \ STATIC_ASSERT_GLOBAL_NAMESPACE( \
__reg_gradient_op__##__op_type##__grad_op_type, \ __reg_op__##op_type, "REGISTER_OP must be called in global namespace"); \
"REGISTER_GRADIENT_OP must be in global namespace"); \ class _OpClass_##op_type##_ : public op_class { \
static ::paddle::framework::GradOpRegisterHelper<__grad_op_class> \ public: \
__op_gradient_register_##__op_type##__grad_op_type##__(#__op_type, \ DEFINE_OP_CLONE_METHOD(_OpClass_##op_type##_); \
#__grad_op_type); \ DEFINE_OP_CONSTRUCTOR(_OpClass_##op_type##_, op_class); \
int __op_gradient_register_##__op_type##__grad_op_type##_handle__() { \ }; \
class _OpGradClass_##op_type##_ : public grad_op_class { \
public: \
DEFINE_OP_CLONE_METHOD(_OpGradClass_##op_type##_); \
DEFINE_OP_CONSTRUCTOR(_OpGradClass_##op_type##_, grad_op_class); \
}; \
static ::paddle::framework::OpRegistrar< \
_OpClass_##op_type##_, op_maker_class, _OpGradClass_##op_type##_> \
__op_registrar_##op_type##__(#op_type, #grad_op_type); \
int TouchOpRegistrar_##op_type() { \
__op_registrar_##op_type##__.Touch(); \
return 0; \ return 0; \
} }
/** #define REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class) \
* Macro to Forbid user register Gradient Operator. REGISTER_OP(op_type, op_class, op_maker_class, , ::paddle::framework::NOP)
*/
#define NO_GRADIENT(__op_type) \
STATIC_ASSERT_GLOBAL_NAMESPACE( \
__reg_gradient_op__##__op_type##__op_type##_grad, \
"NO_GRADIENT must be in global namespace")
/** /**
* Macro to Register OperatorKernel. * Macro to register OperatorKernel.
*/ */
#define REGISTER_OP_KERNEL(type, DEVICE_TYPE, PlaceType, ...) \ #define REGISTER_OP_KERNEL(op_type, DEVICE_TYPE, place_class, ...) \
STATIC_ASSERT_GLOBAL_NAMESPACE( \ STATIC_ASSERT_GLOBAL_NAMESPACE( \
__reg_op_kernel_##type##_##DEVICE_TYPE##__, \ __reg_op_kernel_##op_type##_##DEVICE_TYPE##__, \
"REGISTER_OP_KERNEL must be in global namespace"); \ "REGISTER_OP_KERNEL must be called in global namespace"); \
struct __op_kernel_register__##type##__##DEVICE_TYPE##__ { \ static ::paddle::framework::OpKernelRegistrar<place_class, __VA_ARGS__> \
__op_kernel_register__##type##__##DEVICE_TYPE##__() { \ __op_kernel_registrar_##op_type##_##DEVICE_TYPE##__(#op_type); \
::paddle::framework::OperatorWithKernel::OpKernelKey key; \ int TouchOpKernelRegistrar_##op_type##_##DEVICE_TYPE() { \
key.place_ = PlaceType(); \ __op_kernel_registrar_##op_type##_##DEVICE_TYPE##__.Touch(); \
::paddle::framework::OperatorWithKernel::AllOpKernels()[#type][key] \ return 0; \
.reset(new __VA_ARGS__()); \ }
} \
}; \
static __op_kernel_register__##type##__##DEVICE_TYPE##__ \
__reg_kernel_##type##__##DEVICE_TYPE##__; \
int __op_kernel_register_##type##_handle_##DEVICE_TYPE##__() { return 0; }
// (type, KernelType) #define REGISTER_OP_GPU_KERNEL(op_type, ...) \
#define REGISTER_OP_GPU_KERNEL(type, ...) \ REGISTER_OP_KERNEL(op_type, GPU, ::paddle::platform::GPUPlace, __VA_ARGS__)
REGISTER_OP_KERNEL(type, GPU, ::paddle::platform::GPUPlace, __VA_ARGS__)
// (type, KernelType) #define REGISTER_OP_CPU_KERNEL(op_type, ...) \
#define REGISTER_OP_CPU_KERNEL(type, ...) \ REGISTER_OP_KERNEL(op_type, CPU, ::paddle::platform::CPUPlace, __VA_ARGS__)
REGISTER_OP_KERNEL(type, CPU, ::paddle::platform::CPUPlace, __VA_ARGS__)
/** /**
* Macro to mark what Operator and Kernel we will use and tell the compiler to * Macro to mark what Operator and Kernel
* we will use and tell the compiler to
* link them into target. * link them into target.
*/ */
#define USE_OP_WITHOUT_KERNEL(op_type) \ #define USE_OP_ITSELF(op_type) \
STATIC_ASSERT_GLOBAL_NAMESPACE( \ STATIC_ASSERT_GLOBAL_NAMESPACE( \
__use_op_without_kernel_##op_type, \ __use_op_itself_##op_type, \
"USE_OP_WITHOUT_KERNEL must be in global namespace"); \ "USE_OP_ITSELF must be called in global namespace"); \
extern int __op_register_##op_type##_handle__(); \ extern int TouchOpRegistrar_##op_type(); \
static int __use_op_ptr_##op_type##_without_kernel__ \ static int use_op_itself_##op_type##_ __attribute__((unused)) = \
__attribute__((unused)) = __op_register_##op_type##_handle__() TouchOpRegistrar_##op_type()
#define USE_OP_KERNEL(op_type, DEVICE_TYPE) \ #define USE_OP_DEVICE_KERNEL(op_type, DEVICE_TYPE) \
STATIC_ASSERT_GLOBAL_NAMESPACE( \ STATIC_ASSERT_GLOBAL_NAMESPACE( \
__use_op_kernel_##op_type##_##DEVICE_TYPE##__, \ __use_op_kernel_##op_type##_##DEVICE_TYPE##__, \
"USE_OP_KERNEL must be in global namespace"); \ "USE_OP_DEVICE_KERNEL must be in global namespace"); \
extern int __op_kernel_register_##op_type##_handle_##DEVICE_TYPE##__(); \ extern int TouchOpKernelRegistrar_##op_type##_##DEVICE_TYPE(); \
static int __use_op_ptr_##op_type##_##DEVICE_TYPE##_kernel__ \ static int use_op_kernel_##op_type##_##DEVICE_TYPE##_ \
__attribute__((unused)) = \ __attribute__((unused)) = \
__op_kernel_register_##op_type##_handle_##DEVICE_TYPE##__() TouchOpKernelRegistrar_##op_type##_##DEVICE_TYPE()
// use Operator with only cpu kernel. // TODO(fengjiayi): The following macros
#define USE_OP_CPU(op_type) \ // seem ugly; do we have a better method?
USE_OP_WITHOUT_KERNEL(op_type); \
USE_OP_KERNEL(op_type, CPU)
#ifdef PADDLE_ONLY_CPU #ifdef PADDLE_ONLY_CPU
#define USE_OP(op_type) USE_OP_CPU(op_type) #define USE_OP_KERNEL(op_type) USE_OP_DEVICE_KERNEL(op_type, CPU)
#else #else
#define USE_OP(op_type) \ #define USE_OP_KERNEL(op_type) \
USE_OP_CPU(op_type); \ USE_OP_DEVICE_KERNEL(op_type, CPU); \
USE_OP_KERNEL(op_type, GPU) USE_OP_DEVICE_KERNEL(op_type, GPU)
#endif #endif
#define USE_CPU_ONLY_OP(op_type) \
USE_OP_ITSELF(op_type); \
USE_OP_DEVICE_KERNEL(op_type, CPU);
#define USE_OP(op_type) \
USE_OP_ITSELF(op_type); \
USE_OP_KERNEL(op_type)
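As a usage sketch of the macros above (all names are hypothetical, not real Paddle operators):

// In the file that defines the operator:
REGISTER_OP(my_op, MyOp, MyOpMaker, my_op_grad, MyGradOp);
REGISTER_OP_CPU_KERNEL(my_op, MyOpCPUKernel<float>);

// In the binary that links against the op library:
USE_OP(my_op);  // pulls in the operator itself plus its kernels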
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
...@@ -7,6 +7,7 @@ namespace paddle { ...@@ -7,6 +7,7 @@ namespace paddle {
namespace framework { namespace framework {
class CosineOp : public OperatorBase { class CosineOp : public OperatorBase {
public: public:
using OperatorBase::OperatorBase;
void Run(const Scope& scope, void Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const override {} const platform::DeviceContext& dev_ctx) const override {}
void InferShape(const Scope& scope) const override {} void InferShape(const Scope& scope) const override {}
...@@ -27,6 +28,7 @@ class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { ...@@ -27,6 +28,7 @@ class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
class MyTestOp : public OperatorBase { class MyTestOp : public OperatorBase {
public: public:
using OperatorBase::OperatorBase;
void InferShape(const Scope& scope) const override {} void InferShape(const Scope& scope) const override {}
void Run(const Scope& scope, void Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const override {} const platform::DeviceContext& dev_ctx) const override {}
...@@ -36,8 +38,8 @@ class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { ...@@ -36,8 +38,8 @@ class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
public: public:
MyTestOpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) MyTestOpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("input", "input of cosine op").SetMultiple(); AddInput("input", "input of cosine op").AsDuplicable();
AddOutput("output", "output of cosine op").SetTemporary(); AddOutput("output", "output of cosine op").AsIntermediate();
auto my_checker = [](int i) { auto my_checker = [](int i) {
PADDLE_ENFORCE(i % 2 == 0, "'test_attr' must be even!"); PADDLE_ENFORCE(i % 2 == 0, "'test_attr' must be even!");
}; };
...@@ -49,16 +51,24 @@ class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { ...@@ -49,16 +51,24 @@ class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
REGISTER_OP(cos_sim, paddle::framework::CosineOp, static void BuildVar(const std::string& param_name,
std::initializer_list<const char*> arguments,
paddle::framework::OpDesc::Var* var) {
var->set_parameter(param_name);
for (auto& arg_name : arguments) {
var->add_arguments(arg_name);
}
}
REGISTER_OP_WITHOUT_GRADIENT(cos_sim, paddle::framework::CosineOp,
paddle::framework::CosineOpProtoAndCheckerMaker); paddle::framework::CosineOpProtoAndCheckerMaker);
REGISTER_OP(my_test_op, paddle::framework::MyTestOp, REGISTER_OP_WITHOUT_GRADIENT(my_test_op, paddle::framework::MyTestOp,
paddle::framework::MyTestOpProtoAndCheckerMaker); paddle::framework::MyTestOpProtoAndCheckerMaker);
TEST(OpRegistry, CreateOp) { TEST(OpRegistry, CreateOp) {
paddle::framework::OpDesc op_desc; paddle::framework::OpDesc op_desc;
op_desc.set_type("cos_sim"); op_desc.set_type("cos_sim");
op_desc.add_inputs("aa"); BuildVar("input", {"aa"}, op_desc.add_inputs());
op_desc.add_outputs("bb"); BuildVar("output", {"bb"}, op_desc.add_outputs());
float scale = 3.3; float scale = 3.3;
auto attr = op_desc.mutable_attrs()->Add(); auto attr = op_desc.mutable_attrs()->Add();
...@@ -66,8 +76,7 @@ TEST(OpRegistry, CreateOp) { ...@@ -66,8 +76,7 @@ TEST(OpRegistry, CreateOp) {
attr->set_type(paddle::framework::AttrType::FLOAT); attr->set_type(paddle::framework::AttrType::FLOAT);
attr->set_f(scale); attr->set_f(scale);
std::shared_ptr<paddle::framework::OperatorBase> op = auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
paddle::framework::OpRegistry::CreateOp(op_desc);
paddle::framework::Scope scope; paddle::framework::Scope scope;
paddle::platform::CPUDeviceContext dev_ctx; paddle::platform::CPUDeviceContext dev_ctx;
op->Run(scope, dev_ctx); op->Run(scope, dev_ctx);
...@@ -78,8 +87,8 @@ TEST(OpRegistry, CreateOp) { ...@@ -78,8 +87,8 @@ TEST(OpRegistry, CreateOp) {
TEST(OpRegistry, IllegalAttr) { TEST(OpRegistry, IllegalAttr) {
paddle::framework::OpDesc op_desc; paddle::framework::OpDesc op_desc;
op_desc.set_type("cos_sim"); op_desc.set_type("cos_sim");
op_desc.add_inputs("aa"); BuildVar("input", {"aa"}, op_desc.add_inputs());
op_desc.add_outputs("bb"); BuildVar("output", {"bb"}, op_desc.add_outputs());
auto attr = op_desc.mutable_attrs()->Add(); auto attr = op_desc.mutable_attrs()->Add();
attr->set_name("scale"); attr->set_name("scale");
...@@ -103,33 +112,23 @@ TEST(OpRegistry, IllegalAttr) { ...@@ -103,33 +112,23 @@ TEST(OpRegistry, IllegalAttr) {
TEST(OpRegistry, DefaultValue) { TEST(OpRegistry, DefaultValue) {
paddle::framework::OpDesc op_desc; paddle::framework::OpDesc op_desc;
op_desc.set_type("cos_sim"); op_desc.set_type("cos_sim");
op_desc.add_inputs("aa"); BuildVar("input", {"aa"}, op_desc.add_inputs());
op_desc.add_outputs("bb"); BuildVar("output", {"bb"}, op_desc.add_outputs());
ASSERT_TRUE(op_desc.IsInitialized()); ASSERT_TRUE(op_desc.IsInitialized());
std::shared_ptr<paddle::framework::OperatorBase> op = auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
paddle::framework::OpRegistry::CreateOp(op_desc);
paddle::framework::Scope scope; paddle::framework::Scope scope;
paddle::platform::CPUDeviceContext dev_ctx; paddle::platform::CPUDeviceContext dev_ctx;
op->Run(scope, dev_ctx); op->Run(scope, dev_ctx);
ASSERT_EQ(op->GetAttr<float>("scale"), 1.0); ASSERT_EQ(op->GetAttr<float>("scale"), 1.0);
} }
static void SetInputFormat(paddle::framework::OpDesc* desc) {
auto attr = desc->add_attrs();
attr->set_name("input_format");
attr->set_type(paddle::framework::INTS);
attr->mutable_ints()->Add(0);
attr->mutable_ints()->Add(1);
}
TEST(OpRegistry, CustomChecker) { TEST(OpRegistry, CustomChecker) {
paddle::framework::OpDesc op_desc; paddle::framework::OpDesc op_desc;
op_desc.set_type("my_test_op"); op_desc.set_type("my_test_op");
op_desc.add_inputs("ii"); BuildVar("input", {"ii"}, op_desc.add_inputs());
op_desc.add_outputs("oo"); BuildVar("output", {"oo"}, op_desc.add_outputs());
SetInputFormat(&op_desc);
// attr 'test_attr' is not set // attr 'test_attr' is not set
bool caught = false; bool caught = false;
...@@ -169,7 +168,6 @@ TEST(OpRegistry, CustomChecker) { ...@@ -169,7 +168,6 @@ TEST(OpRegistry, CustomChecker) {
attr->set_name("test_attr"); attr->set_name("test_attr");
attr->set_type(paddle::framework::AttrType::INT); attr->set_type(paddle::framework::AttrType::INT);
attr->set_i(4); attr->set_i(4);
SetInputFormat(&op_desc);
auto op = paddle::framework::OpRegistry::CreateOp(op_desc); auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
paddle::platform::CPUDeviceContext dev_ctx; paddle::platform::CPUDeviceContext dev_ctx;
paddle::framework::Scope scope; paddle::framework::Scope scope;
......
...@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include <algorithm>
#include "paddle/framework/operator.h" #include "paddle/framework/operator.h"
#include <algorithm>
#include "paddle/framework/op_registry.h"
namespace paddle { namespace paddle {
namespace framework { namespace framework {
...@@ -34,83 +34,172 @@ ExecutionContext::GetEigenDevice<platform::GPUPlace, Eigen::GpuDevice>() const { ...@@ -34,83 +34,172 @@ ExecutionContext::GetEigenDevice<platform::GPUPlace, Eigen::GpuDevice>() const {
#endif #endif
const std::string& OperatorBase::Input(const std::string& name) const { const std::string& OperatorBase::Input(const std::string& name) const {
PADDLE_ENFORCE_NOT_NULL(in_out_idxs_, auto& ins = Inputs(name);
"Input Output Indices could not be nullptr"); PADDLE_ENFORCE_EQ(ins.size(), 1UL,
auto it = in_out_idxs_->find(name); "Op %s input %s should contain only one variable", type_,
PADDLE_ENFORCE(it != in_out_idxs_->end(), "no key [%s] in in_out_idxs_",
name); name);
if (attrs_.count("input_format") == 0) { return ins[0];
return inputs_.at((size_t)it->second);
} else {
const auto& input_format = GetAttr<std::vector<int>>("input_format");
int idx = input_format[it->second];
return inputs_.at((size_t)idx);
}
} }
std::vector<std::string> OperatorBase::Inputs(const std::string& name) const { const std::vector<std::string>& OperatorBase::Inputs(
PADDLE_ENFORCE_NOT_NULL(in_out_idxs_, "IO Idx could not be nullptr"); const std::string& name) const {
auto input_format = GetAttr<std::vector<int>>("input_format"); auto it = inputs_.find(name);
auto offset = in_out_idxs_->at(name); PADDLE_ENFORCE(it != inputs_.end(), "Op %s does not have input %s", type_,
PADDLE_ENFORCE(input_format.at(static_cast<size_t>(offset) + 1) <= name);
static_cast<int>(inputs_.size()), return it->second;
"Input Out Of Range");
return std::vector<std::string>{
inputs_.begin() + input_format.at(offset),
inputs_.begin() + input_format.at(offset + 1)};
} }
const std::string& OperatorBase::Output(const std::string& name) const { const std::string& OperatorBase::Output(const std::string& name) const {
PADDLE_ENFORCE_NOT_NULL(in_out_idxs_, "InOut Indice could not be nullptr"); auto& outs = Outputs(name);
auto it = in_out_idxs_->find(name); PADDLE_ENFORCE_EQ(outs.size(), 1UL,
PADDLE_ENFORCE(it != in_out_idxs_->end(), "no key [%s] in in_out_idxs_", "Op %s output %s should contain only one variable", type_,
name); name);
if (attrs_.count("output_format") == 0) { return outs[0];
return outputs_.at((size_t)it->second);
} else {
const auto& output_format = GetAttr<std::vector<int>>("output_format");
int idx = output_format[it->second];
return outputs_.at((size_t)idx);
}
} }
std::vector<std::string> OperatorBase::Outputs(const std::string& name) const { const std::vector<std::string>& OperatorBase::Outputs(
PADDLE_ENFORCE_NOT_NULL(in_out_idxs_, "InOut Indice could not be nullptr"); const std::string& name) const {
auto output_format = GetAttr<std::vector<int>>("output_format"); auto it = outputs_.find(name);
auto offset = in_out_idxs_->at(name); PADDLE_ENFORCE(it != outputs_.end(), "Op %s does not have output %s", type_,
PADDLE_ENFORCE(output_format.at(static_cast<size_t>(offset) + 1) <= name);
static_cast<int>(outputs_.size()), return it->second;
"Output Out of Range");
return std::vector<std::string>{
outputs_.begin() + output_format.at(offset),
outputs_.begin() + output_format.at(offset + 1)};
} }
std::string OperatorBase::DebugString() const { std::string OperatorBase::DebugString() const {
std::stringstream ss; std::stringstream ss;
ss << "Op(" << type_ << "), inputs:("; ss << "Op(" << type_ << "), inputs:{";
for (size_t i = 0; i < inputs_.size(); ++i) { for (auto it = inputs_.begin(); it != inputs_.end();) {
ss << inputs_[i]; auto& input = *it;
if (i != inputs_.size() - 1) { ss << input.first << "[";
for (size_t i = 0; i < input.second.size(); ++i) {
ss << input.second[i];
if (i != input.second.size() - 1) {
ss << ", ";
}
}
ss << "]";
++it;
if (it != inputs_.end()) {
ss << ", "; ss << ", ";
} }
} }
ss << "), outputs:("; ss << "}, outputs:{";
for (size_t i = 0; i < outputs_.size(); ++i) { for (auto it = outputs_.begin(); it != outputs_.end();) {
ss << outputs_[i]; auto& output = *it;
if (i != outputs_.size() - 1) { ss << output.first << "[";
for (size_t i = 0; i < output.second.size(); ++i) {
ss << output.second[i];
if (i != output.second.size() - 1) {
ss << ", "; ss << ", ";
} }
} }
ss << ")."; ss << "]";
++it;
if (it != outputs_.end()) {
ss << ", ";
}
}
ss << "}.";
return ss.str(); return ss.str();
} }
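For reference, the format produced by DebugString() looks like this for a hypothetical op:

// Op(my_op), inputs:{X[x0, x1]}, outputs:{Out[out0]}.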
void OperatorBase::Rename(const std::string& old_name, void OperatorBase::Rename(const std::string& old_name,
const std::string& new_name) { const std::string& new_name) {
std::replace(inputs_.begin(), inputs_.end(), old_name, new_name); for (auto& input : inputs_) {
std::replace(outputs_.begin(), outputs_.end(), old_name, new_name); std::replace(input.second.begin(), input.second.end(), old_name, new_name);
}
for (auto& output : outputs_) {
std::replace(output.second.begin(), output.second.end(), old_name,
new_name);
}
}
OperatorBase::OperatorBase(const std::string& type,
const OperatorBase::VarNameMap& inputs,
const OperatorBase::VarNameMap& outputs,
const AttributeMap& attrs)
: type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
static std::atomic<size_t> gUniqId(0UL);
for (auto& output : outputs_) {
for (auto& output_name : output.second) {
if (output_name == kTempVarName) {
output_name += type_;
output_name += "@";
output_name += std::to_string(gUniqId.fetch_add(1));
}
}
}
}
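The renaming loop in the constructor can be illustrated as follows (kTempVarName's literal value is defined elsewhere in the framework):

// If type_ is "mul" and an output slot holds the kTempVarName placeholder,
// it becomes <kTempVarName> + "mul" + "@" + <unique counter value>, so every
// temporary variable name is unique within the process.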
std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
std::vector<std::string> ret_val;
if (has_intermediate) {
// push all outputs into ret_val
for (auto& o : outputs_) {
ret_val.reserve(ret_val.size() + o.second.size());
ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
}
return ret_val;
}
auto it = OpRegistry::op_info_map().find(type_);
PADDLE_ENFORCE(
it != OpRegistry::op_info_map().end(),
"Operator %s not registered, cannot figure out intermediate outputs",
type_);
PADDLE_ENFORCE(
it->second.proto_ != nullptr,
"Operator %s has no OpProto, cannot figure out intermediate outputs",
type_);
// get all OpProto::Var for outputs
for (auto& o : it->second.proto_->outputs()) {
// ignore all intermediate outputs
if (o.intermediate()) continue;
auto out = outputs_.find(o.name());
if (out != outputs_.end()) {
ret_val.reserve(ret_val.size() + out->second.size());
ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
}
}
return ret_val;
}
void OpProtoAndCheckerMaker::Validate() {
validated_ = true;
CheckNoDuplicatedInOutAttrs();
}
OpProtoAndCheckerMaker::VariableBuilder OpProtoAndCheckerMaker::AddInput(
const std::string& name, const std::string& comment) {
auto* input = proto_->add_inputs();
input->set_name(name);
input->set_comment(comment);
return OpProtoAndCheckerMaker::VariableBuilder{input};
}
OpProtoAndCheckerMaker::VariableBuilder OpProtoAndCheckerMaker::AddOutput(
const std::string& name, const std::string& comment) {
auto* output = proto_->add_outputs();
output->set_name(name);
output->set_comment(comment);
return OpProtoAndCheckerMaker::VariableBuilder{output};
}
void OpProtoAndCheckerMaker::CheckNoDuplicatedInOutAttrs() {
std::unordered_set<std::string> names;
auto checker = [&](const std::string& name) {
PADDLE_ENFORCE(!names.count(name), "[%s] is duplicated", name);
names.insert(name);
};
for (auto& attr : proto_->attrs()) {
checker(attr.name());
}
for (auto& input : proto_->inputs()) {
checker(input.name());
}
for (auto& output : proto_->outputs()) {
checker(output.name());
}
} }
} // namespace framework } // namespace framework
......
...@@ -20,8 +20,7 @@ limitations under the License. */ ...@@ -20,8 +20,7 @@ limitations under the License. */
#include <vector> #include <vector>
#include "paddle/framework/attribute.h" #include "paddle/framework/attribute.h"
#include "paddle/framework/op_desc.pb.h" #include "paddle/framework/framework.pb.h"
#include "paddle/framework/op_proto.pb.h"
#include "paddle/framework/scope.h" #include "paddle/framework/scope.h"
#include "paddle/framework/tensor.h" #include "paddle/framework/tensor.h"
#include "paddle/platform/device_context.h" #include "paddle/platform/device_context.h"
...@@ -63,6 +62,11 @@ class ExecutionContext; ...@@ -63,6 +62,11 @@ class ExecutionContext;
*/ */
class OperatorBase { class OperatorBase {
public: public:
using VarNameMap = std::map<std::string, std::vector<std::string>>;
OperatorBase(const std::string& type, const VarNameMap& inputs,
const VarNameMap& outputs, const AttributeMap& attrs);
virtual ~OperatorBase() {} virtual ~OperatorBase() {}
template <typename T> template <typename T>
...@@ -74,10 +78,6 @@ class OperatorBase { ...@@ -74,10 +78,6 @@ class OperatorBase {
virtual std::string DebugString() const; virtual std::string DebugString() const;
/// Init will be called after CreateOperator, you can put some initialization
/// logic here.
virtual void Init() {}
/// InferShape infer the size of Variables used by this Operator with /// InferShape infer the size of Variables used by this Operator with
/// information inside scope /// information inside scope
virtual void InferShape(const Scope& scope) const = 0; virtual void InferShape(const Scope& scope) const = 0;
...@@ -93,31 +93,134 @@ class OperatorBase { ...@@ -93,31 +93,134 @@ class OperatorBase {
/// rename inputs outputs name /// rename inputs outputs name
void Rename(const std::string& old_name, const std::string& new_name); void Rename(const std::string& old_name, const std::string& new_name);
const VarNameMap& Inputs() const { return inputs_; }
const VarNameMap& Outputs() const { return outputs_; }
//! Get an input with the argument's name described in `op_proto` //! Get an input with the argument's name described in `op_proto`
const std::string& Input(const std::string& name) const; const std::string& Input(const std::string& name) const;
//! Get an input which has multiple variables. //! Get an input which has multiple variables.
//! TODO add a vector_view to prevent memory copy. const std::vector<std::string>& Inputs(const std::string& name) const;
std::vector<std::string> Inputs(const std::string& name) const;
//! Get an output with the argument's name described in `op_proto` //! Get an output with the argument's name described in `op_proto`
const std::string& Output(const std::string& name) const; const std::string& Output(const std::string& name) const;
//! Get an output which has multiple variables. //! Get an output which has multiple variables.
//! TODO add a vector_view to prevent memory copy. //! TODO add a vector_view to prevent memory copy.
std::vector<std::string> Outputs(const std::string& name) const; const std::vector<std::string>& Outputs(const std::string& name) const;
public: virtual std::vector<std::string> OutputVars(bool has_intermediate) const;
const std::string& Type() const { return type_; }
void SetType(const std::string& type) { type_ = type; }
const AttributeMap& Attrs() const { return attrs_; }
// Return a new operator instance, which is the same as this one.
// Use unique_ptr to prevent the caller from forgetting to delete it.
virtual std::unique_ptr<OperatorBase> Clone() const = 0;
protected:
std::string type_; std::string type_;
// NOTE: in case of OpGrad, inputs_ contains: // NOTE: in case of OpGrad, inputs_ contains:
// I (Inputs) // I (Inputs)
// O (Outputs) // O (Outputs)
// OG (Output Gradients) // OG (Output Gradients)
std::vector<std::string> inputs_; VarNameMap inputs_;
// NOTE: in case of OpGrad, outputs_ contains // NOTE: in case of OpGrad, outputs_ contains
// IG (Inputs Gradients) // IG (Inputs Gradients)
std::vector<std::string> outputs_; VarNameMap outputs_;
AttributeMap attrs_; AttributeMap attrs_;
// store the arguments' offset described in op_desc. };
std::shared_ptr<std::unordered_map<std::string, int>> in_out_idxs_;
// Macro to define a clone method.
// If you are writing a kernel operator, `Clone` will be defined when you
// register it, i.e. you do not need to define the `Clone` method yourself.
#define DEFINE_OP_CLONE_METHOD(CLS) \
std::unique_ptr<OperatorBase> Clone() const final { \
return std::unique_ptr<OperatorBase>(new CLS(*this)); \
}
// Macro to define a default constructor for Operator.
// You can also use
//   using PARENT_CLASS::PARENT_CLASS;
// to inherit the parent's constructor.
#define DEFINE_OP_CONSTRUCTOR(CLS, PARENT_CLS) \
CLS(const std::string& type, const VarNameMap& inputs, \
const VarNameMap& outputs, const paddle::framework::AttributeMap& attrs) \
: PARENT_CLS(type, inputs, outputs, attrs) {}
class NOP : public OperatorBase {
public:
using OperatorBase::OperatorBase;
void InferShape(const Scope& scope) const override {}
void Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const override {}
std::unique_ptr<OperatorBase> Clone() const override {
return std::unique_ptr<OperatorBase>(new NOP(*this));
}
};
// This class not only makes the proto but also initializes the attribute checkers.
class OpProtoAndCheckerMaker {
public:
OpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker)
: proto_(proto), op_checker_(op_checker) {}
~OpProtoAndCheckerMaker() {
PADDLE_ENFORCE(validated_, "should call Validate after build");
}
void Validate();
protected:
struct VariableBuilder {
OpProto::Var* var_;
VariableBuilder& AsDuplicable() {
var_->set_duplicable(true);
return *this;
}
VariableBuilder& AsIntermediate() {
var_->set_intermediate(true);
return *this;
}
VariableBuilder& NotInGradient() {
var_->set_not_in_gradient(true);
return *this;
}
};
VariableBuilder AddInput(const std::string& name, const std::string& comment);
VariableBuilder AddOutput(const std::string& name,
const std::string& comment);
template <typename T>
TypedAttrChecker<T>& AddAttr(const std::string& name,
const std::string& comment,
bool generated = false) {
auto* attr = proto_->add_attrs();
attr->set_name(name);
attr->set_comment(comment);
attr->set_generated(generated);
attr->set_type(AttrTypeID<T>());
return op_checker_->AddAttrChecker<T>(name);
}
void AddComment(const std::string& comment) { proto_->set_comment(comment); }
private:
void CheckNoDuplicatedInOutAttrs();
OpProto* proto_;
OpAttrChecker* op_checker_;
bool validated_{false};
};
class NOPMaker : public OpProtoAndCheckerMaker {
public:
NOPMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {}
}; };
class InferShapeContext { class InferShapeContext {
...@@ -125,16 +228,12 @@ class InferShapeContext { ...@@ -125,16 +228,12 @@ class InferShapeContext {
InferShapeContext(const OperatorBase& op, const Scope& scope) InferShapeContext(const OperatorBase& op, const Scope& scope)
: op_(op), scope_(scope) {} : op_(op), scope_(scope) {}
size_t InputSize() const { return op_.inputs_.size(); } size_t InputSize(const std::string& name) const {
return op_.Inputs(name).size();
size_t OutputSize() const { return op_.outputs_.size(); }
const Variable* InputVar(const size_t index) const {
return scope_.FindVar(op_.inputs_.at(index));
} }
Variable* OutputVar(const size_t index) const { size_t OutputSize(const std::string& name) const {
return scope_.FindVar(op_.outputs_.at(index)); return op_.Outputs(name).size();
} }
const Variable* InputVar(const std::string& name) const { const Variable* InputVar(const std::string& name) const {
...@@ -166,27 +265,9 @@ class InferShapeContext { ...@@ -166,27 +265,9 @@ class InferShapeContext {
return res; return res;
} }
template <typename T>
const T* Input(const size_t index) const {
auto var = InputVar(index);
PADDLE_ENFORCE_NOT_NULL(var, "Input(%d) should not be nullptr", index);
return &var->Get<T>();
}
template <typename T>
T* Output(const size_t index) const {
auto var = OutputVar(index);
PADDLE_ENFORCE_NOT_NULL(
var,
"Output(%d) not be nullptr, which means variable [%s] does not "
"exist in scope",
index, op_.outputs_[index]);
return var->GetMutable<T>();
}
template <typename T> template <typename T>
const T* Input(const std::string& name) const { const T* Input(const std::string& name) const {
auto var = InputVar(name); auto* var = InputVar(name);
PADDLE_ENFORCE_NOT_NULL(var, "Input(%s) should not be nullptr", name); PADDLE_ENFORCE_NOT_NULL(var, "Input(%s) should not be nullptr", name);
return &var->Get<T>(); return &var->Get<T>();
} }
...@@ -223,7 +304,7 @@ class InferShapeContext { ...@@ -223,7 +304,7 @@ class InferShapeContext {
[&](const std::string& sub_name) { [&](const std::string& sub_name) {
auto var = scope_.FindVar(sub_name); auto var = scope_.FindVar(sub_name);
PADDLE_ENFORCE_NOT_NULL( PADDLE_ENFORCE_NOT_NULL(
var, "MultiOutput(%s:%s) should not be nullptr", name, var, "MultiOutput(%s:%s) should not be nullptr.", name,
sub_name); sub_name);
return var->GetMutable<T>(); return var->GetMutable<T>();
}); });
...@@ -262,6 +343,10 @@ class ExecutionContext : public InferShapeContext { ...@@ -262,6 +343,10 @@ class ExecutionContext : public InferShapeContext {
platform::Place GetPlace() const { return device_context_->GetPlace(); } platform::Place GetPlace() const { return device_context_->GetPlace(); }
const platform::DeviceContext* device_context() const {
return device_context_;
}
const platform::DeviceContext* device_context_; const platform::DeviceContext* device_context_;
}; };
...@@ -304,6 +389,10 @@ class OperatorWithKernel : public OperatorBase { ...@@ -304,6 +389,10 @@ class OperatorWithKernel : public OperatorBase {
using OpKernelMap = using OpKernelMap =
std::unordered_map<OpKernelKey, std::unique_ptr<OpKernel>, OpKernelHash>; std::unordered_map<OpKernelKey, std::unique_ptr<OpKernel>, OpKernelHash>;
OperatorWithKernel(const std::string& type, const VarNameMap& inputs,
const VarNameMap& outputs, const AttributeMap& attrs)
: OperatorBase(type, inputs, outputs, attrs) {}
void InferShape(const Scope& scope) const override { void InferShape(const Scope& scope) const override {
InferShape(InferShapeContext(*this, scope)); InferShape(InferShapeContext(*this, scope));
} }
......
...@@ -23,20 +23,22 @@ static int op_run_num = 0; ...@@ -23,20 +23,22 @@ static int op_run_num = 0;
class OpWithoutKernelTest : public OperatorBase { class OpWithoutKernelTest : public OperatorBase {
public: public:
void Init() override { x = 1; } OpWithoutKernelTest(const std::string& type, const VarNameMap& inputs,
const VarNameMap& outputs, const AttributeMap& attrs)
: OperatorBase(type, inputs, outputs, attrs), x(1) {}
void InferShape(const Scope& scope) const override {} void InferShape(const Scope& scope) const override {}
void Run(const Scope& scope, void Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const override { const platform::DeviceContext& dev_ctx) const override {
op_run_num++; ++op_run_num;
ASSERT_EQ((int)inputs_.size(), 1); ASSERT_EQ(static_cast<int>(inputs_.size()), 1);
ASSERT_EQ((int)outputs_.size(), 1); ASSERT_EQ(static_cast<int>(outputs_.size()), 1);
ASSERT_EQ(scope.FindVar(inputs_[0]), nullptr); ASSERT_EQ(scope.FindVar(inputs_.at("input")[0]), nullptr);
ASSERT_EQ(x, 1); ASSERT_EQ(x, 1);
ASSERT_NE(scope.FindVar(outputs_[0]), nullptr); ASSERT_NE(scope.FindVar(outputs_.at("output")[0]), nullptr);
} }
public: public:
float x = 0; int x{0};
}; };
class OpeWithoutKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker { class OpeWithoutKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
...@@ -54,14 +56,25 @@ class OpeWithoutKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker { ...@@ -54,14 +56,25 @@ class OpeWithoutKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
REGISTER_OP(test_operator, paddle::framework::OpWithoutKernelTest, static void BuildVar(const std::string& param_name,
std::initializer_list<const char*> arguments,
paddle::framework::OpDesc::Var* var) {
var->set_parameter(param_name);
for (auto& arg_name : arguments) {
*var->mutable_arguments()->Add() = arg_name;
}
}
REGISTER_OP_WITHOUT_GRADIENT(
test_operator, paddle::framework::OpWithoutKernelTest,
paddle::framework::OpeWithoutKernelTestProtoAndCheckerMaker); paddle::framework::OpeWithoutKernelTestProtoAndCheckerMaker);
TEST(OperatorBase, all) { TEST(OperatorBase, all) {
paddle::framework::OpDesc op_desc; paddle::framework::OpDesc op_desc;
op_desc.set_type("test_operator"); op_desc.set_type("test_operator");
*op_desc.mutable_inputs()->Add() = "IN1"; BuildVar("input", {"IN1"}, op_desc.add_inputs());
*op_desc.mutable_outputs()->Add() = "OUT1"; BuildVar("output", {"OUT1"}, op_desc.add_outputs());
auto attr = op_desc.mutable_attrs()->Add(); auto attr = op_desc.mutable_attrs()->Add();
attr->set_name("scale"); attr->set_name("scale");
attr->set_type(paddle::framework::AttrType::FLOAT); attr->set_type(paddle::framework::AttrType::FLOAT);
...@@ -97,6 +110,9 @@ class OpKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker { ...@@ -97,6 +110,9 @@ class OpKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
static int cpu_kernel_run_num = 0; static int cpu_kernel_run_num = 0;
class OpWithKernelTest : public OperatorWithKernel { class OpWithKernelTest : public OperatorWithKernel {
public:
using OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext& ctx) const override {} void InferShape(const framework::InferShapeContext& ctx) const override {}
}; };
...@@ -113,33 +129,15 @@ class CPUKernelTest : public OpKernel { ...@@ -113,33 +129,15 @@ class CPUKernelTest : public OpKernel {
} }
}; };
// multiple inputs test
class OperatorMultiInputsTest : public OperatorBase {
public:
void Init() override { x = 1; }
void InferShape(const Scope& scope) const override {}
void Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const override {
ASSERT_EQ(scope.FindVar(inputs_[0]), nullptr);
ASSERT_EQ(x, 1);
ASSERT_NE(scope.FindVar(outputs_[0]), nullptr);
ASSERT_EQ(Input("x"), "IN1");
ASSERT_EQ(Input("y"), "OUT1");
}
public:
float x = 0;
};
class OpKernelTestMultiInputsProtoAndCheckerMaker class OpKernelTestMultiInputsProtoAndCheckerMaker
: public OpProtoAndCheckerMaker { : public OpProtoAndCheckerMaker {
public: public:
OpKernelTestMultiInputsProtoAndCheckerMaker(OpProto* proto, OpKernelTestMultiInputsProtoAndCheckerMaker(OpProto* proto,
OpAttrChecker* op_checker) OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("xs", "inputs of test op").SetMultiple(); AddInput("xs", "inputs of test op").AsDuplicable();
AddInput("k", "input of test op"); AddInput("k", "input of test op");
AddOutput("ys", "outputs of test op").SetMultiple(); AddOutput("ys", "outputs of test op").AsDuplicable();
AddAttr<float>("scale", "scale of cosine op") AddAttr<float>("scale", "scale of cosine op")
.SetDefault(1.0) .SetDefault(1.0)
.LargerThan(0.0); .LargerThan(0.0);
...@@ -187,7 +185,8 @@ class CPUKernalMultiInputsTest : public OpKernel { ...@@ -187,7 +185,8 @@ class CPUKernalMultiInputsTest : public OpKernel {
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
REGISTER_OP(op_with_kernel, paddle::framework::OpWithKernelTest, REGISTER_OP_WITHOUT_GRADIENT(
op_with_kernel, paddle::framework::OpWithKernelTest,
paddle::framework::OpKernelTestProtoAndCheckerMaker); paddle::framework::OpKernelTestProtoAndCheckerMaker);
REGISTER_OP_CPU_KERNEL(op_with_kernel, REGISTER_OP_CPU_KERNEL(op_with_kernel,
paddle::framework::CPUKernelTest<float, float>); paddle::framework::CPUKernelTest<float, float>);
...@@ -196,8 +195,9 @@ REGISTER_OP_CPU_KERNEL(op_with_kernel, ...@@ -196,8 +195,9 @@ REGISTER_OP_CPU_KERNEL(op_with_kernel,
TEST(OpKernel, all) { TEST(OpKernel, all) {
paddle::framework::OpDesc op_desc; paddle::framework::OpDesc op_desc;
op_desc.set_type("op_with_kernel"); op_desc.set_type("op_with_kernel");
*op_desc.mutable_inputs()->Add() = "IN1"; BuildVar("x", {"IN1"}, op_desc.add_inputs());
*op_desc.mutable_outputs()->Add() = "OUT1"; BuildVar("y", {"OUT1"}, op_desc.add_outputs());
auto attr = op_desc.mutable_attrs()->Add(); auto attr = op_desc.mutable_attrs()->Add();
attr->set_name("scale"); attr->set_name("scale");
attr->set_type(paddle::framework::AttrType::FLOAT); attr->set_type(paddle::framework::AttrType::FLOAT);
...@@ -212,7 +212,8 @@ TEST(OpKernel, all) { ...@@ -212,7 +212,8 @@ TEST(OpKernel, all) {
ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 1); ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 1);
} }
REGISTER_OP(op_multi_inputs_with_kernel, paddle::framework::OpWithKernelTest, REGISTER_OP_WITHOUT_GRADIENT(
op_multi_inputs_with_kernel, paddle::framework::OpWithKernelTest,
paddle::framework::OpKernelTestMultiInputsProtoAndCheckerMaker); paddle::framework::OpKernelTestMultiInputsProtoAndCheckerMaker);
REGISTER_OP_CPU_KERNEL(op_multi_inputs_with_kernel, REGISTER_OP_CPU_KERNEL(op_multi_inputs_with_kernel,
paddle::framework::CPUKernalMultiInputsTest); paddle::framework::CPUKernalMultiInputsTest);
...@@ -223,32 +224,15 @@ TEST(OpKernel, multi_inputs) { ...@@ -223,32 +224,15 @@ TEST(OpKernel, multi_inputs) {
OpDesc op_desc; OpDesc op_desc;
op_desc.set_type("op_multi_inputs_with_kernel"); op_desc.set_type("op_multi_inputs_with_kernel");
*op_desc.mutable_inputs()->Add() = "x0"; BuildVar("xs", {"x0", "x1", "x2"}, op_desc.add_inputs());
*op_desc.mutable_inputs()->Add() = "x1"; BuildVar("k", {"k0"}, op_desc.add_inputs());
*op_desc.mutable_inputs()->Add() = "x2"; BuildVar("ys", {"y0", "y1"}, op_desc.add_outputs());
*op_desc.mutable_inputs()->Add() = "k0";
*op_desc.mutable_outputs()->Add() = "y0";
*op_desc.mutable_outputs()->Add() = "y1";
auto attr = op_desc.mutable_attrs()->Add(); auto attr = op_desc.mutable_attrs()->Add();
attr->set_name("scale"); attr->set_name("scale");
attr->set_type(paddle::framework::AttrType::FLOAT); attr->set_type(paddle::framework::AttrType::FLOAT);
attr->set_f(3.14); attr->set_f(3.14);
auto attr0 = op_desc.mutable_attrs()->Add();
attr0->set_name("input_format");
attr0->set_type(paddle::framework::AttrType::INTS);
auto input_format = attr0->mutable_ints();
input_format->Add(0); // x0
input_format->Add(3); // k
input_format->Add(4); // end
auto attr1 = op_desc.mutable_attrs()->Add();
attr1->set_name("output_format");
attr1->set_type(paddle::framework::AttrType::INTS);
auto output_format = attr1->mutable_ints();
output_format->Add(0); // y0
output_format->Add(2); // y1
paddle::platform::CPUDeviceContext cpu_device_context; paddle::platform::CPUDeviceContext cpu_device_context;
paddle::framework::Scope scope; paddle::framework::Scope scope;
scope.NewVar("x0")->GetMutable<Tensor>(); scope.NewVar("x0")->GetMutable<Tensor>();
...@@ -261,3 +245,21 @@ TEST(OpKernel, multi_inputs) { ...@@ -261,3 +245,21 @@ TEST(OpKernel, multi_inputs) {
auto op = paddle::framework::OpRegistry::CreateOp(op_desc); auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
op->Run(scope, cpu_device_context); op->Run(scope, cpu_device_context);
} }
class OperatorClone : public paddle::framework::OperatorBase {
public:
DEFINE_OP_CLONE_METHOD(OperatorClone);
OperatorClone(const std::string& type, const VarNameMap& inputs,
const VarNameMap& outputs,
const paddle::framework::AttributeMap& attrs)
: OperatorBase(type, inputs, outputs, attrs) {}
void InferShape(const paddle::framework::Scope& scope) const override {}
void Run(const paddle::framework::Scope& scope,
const paddle::platform::DeviceContext& dev_ctx) const override {}
};
TEST(Operator, Clone) {
OperatorClone a("ABC", {}, {}, {});
auto b = a.Clone();
ASSERT_EQ(a.Type(), b->Type());
}
\ No newline at end of file
...@@ -20,6 +20,7 @@ limitations under the License. */ ...@@ -20,6 +20,7 @@ limitations under the License. */
#include "paddle/framework/op_registry.h" #include "paddle/framework/op_registry.h"
#include "paddle/framework/tensor_py.h" #include "paddle/framework/tensor_py.h"
#include "paddle/operators/net_op.h" #include "paddle/operators/net_op.h"
#include "paddle/operators/recurrent_op.h"
#include "paddle/platform/enforce.h" #include "paddle/platform/enforce.h"
#include "paddle/platform/place.h" #include "paddle/platform/place.h"
#include "paddle/string/to_string.h" #include "paddle/string/to_string.h"
...@@ -30,8 +31,7 @@ limitations under the License. */ ...@@ -30,8 +31,7 @@ limitations under the License. */
namespace py = pybind11; namespace py = pybind11;
USE_OP(add_two); USE_OP(add_two);
USE_OP_CPU(onehot_cross_entropy); USE_OP(onehot_cross_entropy);
USE_OP_WITHOUT_KERNEL(fc);
USE_OP(sgd); USE_OP(sgd);
USE_OP(mul); USE_OP(mul);
USE_OP(mean); USE_OP(mean);
...@@ -39,7 +39,7 @@ USE_OP(sigmoid); ...@@ -39,7 +39,7 @@ USE_OP(sigmoid);
USE_OP(softmax); USE_OP(softmax);
USE_OP(rowwise_add); USE_OP(rowwise_add);
USE_OP(fill_zeros_like); USE_OP(fill_zeros_like);
USE_OP_WITHOUT_KERNEL(recurrent_op); USE_OP_ITSELF(recurrent_op);
USE_OP(gaussian_random); USE_OP(gaussian_random);
USE_OP(uniform_random); USE_OP(uniform_random);
...@@ -48,41 +48,6 @@ namespace framework { ...@@ -48,41 +48,6 @@ namespace framework {
using Tensor = framework::Tensor; using Tensor = framework::Tensor;
template <typename ClassType>
void ExposeOperator(ClassType &m) {
m.def("infer_shape", &ClassType::type::InferShape)
.def("run", &ClassType::type::Run)
.def("type",
[](const typename ClassType::type &op) -> std::string {
return op.type_;
})
.def("outputs",
[](const typename ClassType::type &op) -> std::vector<std::string> {
return op.outputs_;
})
.def("inputs",
[](const typename ClassType::type &op) -> std::vector<std::string> {
return op.inputs_;
})
.def("support_gpu", &ClassType::type::SupportGPU)
.def("temp_outputs",
[](const typename ClassType::type &op) -> std::vector<std::string> {
auto iter = op.attrs_.find("temporary_index");
std::vector<std::string> ret;
if (iter == op.attrs_.end()) {
return ret;
} else {
auto tmp_idx = boost::get<std::vector<int>>(iter->second);
ret.reserve(tmp_idx.size());
for (auto &index : tmp_idx) {
ret.push_back(op.outputs_.at(index));
}
return ret;
}
})
.def("__str__", &ClassType::type::DebugString);
}
static size_t UniqueIntegerGenerator() { static size_t UniqueIntegerGenerator() {
static std::atomic<size_t> generator; static std::atomic<size_t> generator;
return generator.fetch_add(1); return generator.fetch_add(1);
...@@ -173,13 +138,16 @@ All parameter, weight, gradient are variables in Paddle. ...@@ -173,13 +138,16 @@ All parameter, weight, gradient are variables in Paddle.
//! @note: Be careful! PyBind will return std::string as a unicode object, not
//! a Python str. If you want a str object, you should cast it in Python.
m.def("get_all_op_protos", []() -> std::vector<py::bytes> { m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
auto &protos = OpRegistry::protos(); auto &op_info_map = OpRegistry::op_info_map();
std::vector<py::bytes> ret_values; std::vector<py::bytes> ret_values;
for (auto it = protos.begin(); it != protos.end(); ++it) { for (auto it = op_info_map.begin(); it != op_info_map.end(); ++it) {
PADDLE_ENFORCE(it->second.IsInitialized(), const OpProto *proto = it->second.proto_;
"OpProto must all be initialized"); if (proto == nullptr) {
continue;
}
PADDLE_ENFORCE(proto->IsInitialized(), "OpProto must all be initialized");
std::string str; std::string str;
PADDLE_ENFORCE(it->second.SerializeToString(&str), PADDLE_ENFORCE(proto->SerializeToString(&str),
"Serialize OpProto Error. This could be a bug of Paddle."); "Serialize OpProto Error. This could be a bug of Paddle.");
ret_values.push_back(py::bytes(str)); ret_values.push_back(py::bytes(str));
} }
...@@ -216,10 +184,9 @@ All parameter, weight, gradient are variables in Paddle. ...@@ -216,10 +184,9 @@ All parameter, weight, gradient are variables in Paddle.
.def(py::init<>()) .def(py::init<>())
.def("__str__", string::to_string<const platform::CPUPlace &>); .def("__str__", string::to_string<const platform::CPUPlace &>);
-py::class_<OperatorBase, std::shared_ptr<OperatorBase>> operator_base(
-    m, "Operator");
-
-operator_base.def_static("create", [](py::bytes protobin) {
-  OpDesc desc;
-  PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
-                 "Cannot parse user input to OpDesc");
-  PADDLE_ENFORCE(desc.IsInitialized(),
-                 "User OpDesc is not initialized, reason %s",
-                 desc.InitializationErrorString());
-  return OpRegistry::CreateOp(desc);
-});
-
-operator_base.def("backward",
-                  [](const OperatorBase &forwardOp,
-                     const std::unordered_set<std::string> &no_grad_vars) {
-                    return Backward(forwardOp, no_grad_vars);
-                  });
-
-ExposeOperator(operator_base);
-
-py::class_<operators::NetOp, std::shared_ptr<operators::NetOp>> net(m, "Net");
-
-net.def_static("create",
-               []() -> std::shared_ptr<operators::NetOp> {
-                 auto retv = std::make_shared<operators::NetOp>();
-                 retv->type_ = "plain_net";
-                 return retv;
-               })
-    .def("add_op", &operators::NetOp::AddOp)
-    .def("add_op",
-         [](operators::NetOp &self,
-            const std::shared_ptr<operators::NetOp> &net) -> void {
-           self.AddOp(std::static_pointer_cast<OperatorBase>(net));
-         })
-    .def("complete_add_op", &operators::NetOp::CompleteAddOp)
-    .def("complete_add_op", [](std::shared_ptr<operators::NetOp> &self) {
-      self->CompleteAddOp();
-    });
-ExposeOperator(net);
+py::class_<OperatorBase>(m, "Operator")
+    .def_static("create",
+                [](py::bytes protobin) {
+                  OpDesc desc;
+                  PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
+                                 "Cannot parse user input to OpDesc");
+                  PADDLE_ENFORCE(desc.IsInitialized(),
+                                 "User OpDesc is not initialized, reason %s",
+                                 desc.InitializationErrorString());
+                  return OpRegistry::CreateOp(desc);
+                })
+    .def("backward",
+         [](const OperatorBase &forwardOp,
+            const std::unordered_set<std::string> &no_grad_vars) {
+           return Backward(forwardOp, no_grad_vars).release();
+         })
+    .def("infer_shape", &OperatorBase::InferShape)
+    .def("run", &OperatorBase::Run)
+    .def("type",
+         [](const OperatorBase &op) -> std::string { return op.Type(); })
+    .def("outputs",
+         [](const OperatorBase &op)
+             -> std::map<std::string, std::vector<std::string>> {
+           return op.Outputs();
+         })
+    .def("inputs", [](const OperatorBase &op) { return op.Inputs(); })
+    .def("__str__", &OperatorBase::DebugString)
+    .def("no_intermediate_outputs",
+         [](const OperatorBase &op) { return op.OutputVars(false); })
+    .def("support_gpu", &OperatorBase::SupportGPU);
+
+py::class_<operators::NetOp, OperatorBase>(m, "Net")
+    .def_static("create",
+                []() -> operators::NetOp * {
+                  auto *retv = new operators::NetOp;
+                  retv->SetType("plain_net");
+                  return retv;
+                })
+    .def("append_op", [](operators::NetOp &self,
+                         const OperatorBase &op) { self.AppendOp(op); })
+    .def("complete_add_op", &operators::NetOp::CompleteAddOp)
+    .def("complete_add_op", [](std::shared_ptr<operators::NetOp> &self) {
+      self->CompleteAddOp();
+    });
+
+// recurrent_op
+py::class_<operators::RecurrentOp, OperatorBase>(m, "RecurrentOp")
+    .def_static(
+        "create",
+        [](py::bytes protobin) -> operators::RecurrentOp * {
+          OpDesc desc;
+          PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
+                         "Cannot parse user input to OpDesc");
+          PADDLE_ENFORCE(desc.IsInitialized(),
+                         "User OpDesc is not initialized, reason %s",
+                         desc.InitializationErrorString());
+          auto rnn_op = OpRegistry::CreateOp(desc);
+          return static_cast<operators::RecurrentOp *>(rnn_op.release());
+        })
+    .def("set_stepnet", [](operators::RecurrentOp &self,
+                           const operators::NetOp &net) -> void {
+      self.set_stepnet(net.Clone());
+    });
m.def("unique_integer", UniqueIntegerGenerator); m.def("unique_integer", UniqueIntegerGenerator);
......
...@@ -105,6 +105,8 @@ class Tensor { ...@@ -105,6 +105,8 @@ class Tensor {
template <typename T> template <typename T>
inline Tensor Slice(const int& begin_idx, const int& end_idx) const; inline Tensor Slice(const int& begin_idx, const int& end_idx) const;
platform::Place place() const { return holder_->place(); }
private: private:
template <typename T> template <typename T>
inline void check_memory_size() const; inline void check_memory_size() const;
......
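The new place() accessor on Tensor simply surfaces where the holder's memory lives. A minimal, hedged usage sketch (assuming the framework's usual make_ddim/mutable_data API):

// Allocate a 2x3 float tensor on the CPU and query its placement.
paddle::framework::Tensor t;
t.mutable_data<float>(paddle::framework::make_ddim({2, 3}),
                      paddle::platform::CPUPlace());
paddle::platform::Place p = t.place();  // reports the CPU place used above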
...@@ -4,6 +4,10 @@ file(GLOB cpp_files . *Op.cpp) ...@@ -4,6 +4,10 @@ file(GLOB cpp_files . *Op.cpp)
list(APPEND h_files Function.h) list(APPEND h_files Function.h)
list(APPEND cpp_files Function.cpp) list(APPEND cpp_files Function.cpp)
list(APPEND cpp_files BufferArg.cpp) list(APPEND cpp_files BufferArg.cpp)
list(APPEND cpp_files GemmFunctor.cpp)
if(USE_EIGEN_FOR_BLAS)
list(APPEND cpp_files EigenGemm.cpp)
endif(USE_EIGEN_FOR_BLAS)
if(WITH_GPU) if(WITH_GPU)
file(GLOB cu_files . *OpGpu.cu) file(GLOB cu_files . *OpGpu.cu)
...@@ -38,10 +42,11 @@ if(WITH_GPU) ...@@ -38,10 +42,11 @@ if(WITH_GPU)
add_simple_unittest(RowConvOpTest) add_simple_unittest(RowConvOpTest)
add_simple_unittest(BlockExpandOpTest) add_simple_unittest(BlockExpandOpTest)
add_simple_unittest(CropOpTest) add_simple_unittest(CropOpTest)
add_simple_unittest(DepthwiseConvOpTest)
endif() endif()
add_simple_unittest(ConvOpTest)
add_simple_unittest(Im2ColTest) add_simple_unittest(Im2ColTest)
add_simple_unittest(GemmConvOpTest)
endif() endif()
add_style_check_target(paddle_function ${h_files}) add_style_check_target(paddle_function ${h_files})
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <memory>
#include "Function.h"
#include "FunctionTest.h"
namespace paddle {
enum TestType {
kForwardTest = 0,
kBackwardInputTest = 1,
kBackwardFilterTest = 2,
};
template <DeviceType DType1, DeviceType DType2>
class ConvolutionTest {
public:
ConvolutionTest(const std::string& conv1,
const std::string& conv2,
TestType type,
bool useGroups = true,
std::string algo = "auto") {
for (size_t batchSize : {1, 32}) {
for (size_t inputSize : {7, 14, 54}) {
for (size_t filterSize : {1, 3, 5}) {
for (size_t inputChannels : {3, 64}) {
for (size_t outputChannels : {3, 64}) {
if (inputChannels > outputChannels) break;
size_t groups;
if (!useGroups) {
groups = 1;
} else {
if (outputChannels % inputChannels != 0) continue;
groups = inputChannels;
}
for (size_t stride : {1, 2}) {
for (size_t padding : {0, 1}) {
if (padding >= filterSize) break;
size_t outputSize =
(inputSize - filterSize + 2 * padding + stride) / stride;
VLOG(3) << " batchSize=" << batchSize
<< " inputChannels=" << inputChannels
<< " inputHeight=" << inputSize
<< " inputWidth=" << inputSize
<< " outputChannels=" << outputChannels
<< " filterHeight=" << filterSize
<< " filterWidth=" << filterSize
<< " outputHeight=" << outputSize
<< " outputWidth=" << outputSize
<< " stride=" << stride << " padding=" << padding;
std::vector<size_t> paddings = {padding, padding};
std::vector<size_t> strides = {stride, stride};
Compare2Function<DType1, DType2> test(
conv1,
conv2,
FuncConfig()
.set("paddings", paddings)
.set("strides", strides)
.set("groups", groups)
.set("algo", algo));
TensorShape input{
batchSize, inputChannels, inputSize, inputSize};
TensorShape filter;
if (groups > 1)
filter = TensorShape({groups,
outputChannels / groups,
inputChannels / groups,
filterSize,
filterSize});
else
filter = TensorShape({outputChannels,
inputChannels,
filterSize,
filterSize});
TensorShape output{
batchSize, outputChannels, outputSize, outputSize};
if (type == kForwardTest) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, output));
test.run();
} else if (type == kBackwardInputTest) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input), ADD_TO);
test.run();
} else if (type == kBackwardFilterTest) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, filter),
ADD_TO);
test.run();
}
}
}
}
}
}
}
}
}
};
// Mainly used to test cases where the height and width (input, filter)
// are not equal.
template <DeviceType DType1, DeviceType DType2>
class ConvolutionTest2 {
public:
ConvolutionTest2(const std::string& conv1,
const std::string& conv2,
TestType type,
bool useGroups = true,
std::string algo = "auto") {
for (size_t batchSize : {16}) {
for (size_t inputHeight : {7, 31}) {
for (size_t inputWidth : {10, 54}) {
for (size_t filterHeight : {1, 5}) {
for (size_t filterWidth : {3, 7}) {
for (size_t inputChannels : {7}) {
for (size_t outputChannels : {7}) {
size_t groups;
if (!useGroups) {
groups = 1;
} else {
if (outputChannels % inputChannels != 0) continue;
groups = inputChannels;
}
size_t stride = 1;
size_t padding = 0;
size_t outputHeight =
(inputHeight - filterHeight + 2 * padding + stride) /
stride;
size_t outputWidth =
(inputWidth - filterWidth + 2 * padding + stride) /
stride;
VLOG(3) << " batchSize=" << batchSize
<< " inputChannels=" << inputChannels
<< " inputHeight=" << inputHeight
<< " inputWidth=" << inputWidth
<< " outputChannels=" << outputChannels
<< " filterHeight=" << filterHeight
<< " filterWidth=" << filterWidth
<< " outputHeight=" << outputHeight
<< " outputWidth=" << outputWidth
<< " stride=" << stride << " padding=" << padding;
std::vector<size_t> paddings = {padding, padding};
std::vector<size_t> strides = {stride, stride};
Compare2Function<DType1, DType2> test(
conv1,
conv2,
FuncConfig()
.set("paddings", paddings)
.set("strides", strides)
.set("groups", groups)
.set("algo", algo));
TensorShape input{
batchSize, inputChannels, inputHeight, inputWidth};
TensorShape filter;
if (groups > 1)
filter = TensorShape({groups,
outputChannels / groups,
inputChannels / groups,
filterHeight,
filterWidth});
else
filter = TensorShape({outputChannels,
inputChannels,
filterHeight,
filterWidth});
TensorShape output{
batchSize, outputChannels, outputHeight, outputWidth};
if (type == kForwardTest) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, output));
test.run();
} else if (type == kBackwardInputTest) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input), ADD_TO);
test.run();
} else if (type == kBackwardFilterTest) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, filter),
ADD_TO);
test.run();
}
}
}
}
}
}
}
}
}
};
// ======Start Convolution TEST======
TEST(Forward, GEMM) {
ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU> test(
"NaiveConv-CPU", "GemmConv-CPU", kForwardTest, false);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU> test2(
"NaiveConv-CPU", "GemmConv-CPU", kForwardTest, false);
}
#ifndef PADDLE_ONLY_CPU
TEST(Forward, GEMM2) {
ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
"GemmConv-CPU", "GemmConv-GPU", kForwardTest, false);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
"GemmConv-CPU", "GemmConv-GPU", kForwardTest, false);
}
TEST(BackwardInput, GEMM) {
ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
"GemmConvGradInput-CPU",
"GemmConvGradInput-GPU",
kBackwardInputTest,
false);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
"GemmConvGradInput-CPU",
"GemmConvGradInput-GPU",
kBackwardInputTest,
false);
}
TEST(BackwardFilter, GEMM) {
ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
"GemmConvGradFilter-CPU",
"GemmConvGradFilter-GPU",
kBackwardFilterTest,
false);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
"GemmConvGradFilter-CPU",
"GemmConvGradFilter-GPU",
kBackwardFilterTest,
false);
}
#endif
// ======End Convolution TEST======
// ======Start DepthwiseConvolution TEST======
// TODO(zhaolong) The depthwise convolution cpu test will be added when the cpu
// version of depthwiseConv is implemented.
#ifndef PADDLE_ONLY_CPU
TEST(DepthwiseConvForward, GEMM2) {
ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
"GemmConv-CPU", "DepthwiseConv-GPU", kForwardTest);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
"GemmConv-CPU", "DepthwiseConv-GPU", kForwardTest);
}
TEST(DepthwiseConvBackwardInput, GEMM) {
ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
"GemmConvGradInput-CPU",
"DepthwiseConvGradInput-GPU",
kBackwardInputTest);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
"GemmConvGradInput-CPU",
"DepthwiseConvGradInput-GPU",
kBackwardInputTest);
}
TEST(DepthwiseConvBackwardFilter, GEMM) {
ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
"GemmConvGradFilter-CPU",
"DepthwiseConvGradFilter-GPU",
kBackwardFilterTest);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
"GemmConvGradFilter-CPU",
"DepthwiseConvGradFilter-GPU",
kBackwardFilterTest);
}
#endif
// ======End DepthwiseConvolution TEST======
} // namespace paddle
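A note on the outputSize expression used in all of these loops: it is the standard convolution output-size formula written in integer arithmetic. For input size n, filter size k, padding p, and stride s,

\[ \text{outputSize} = \frac{n - k + 2p + s}{s} = \left\lfloor \frac{n - k + 2p}{s} \right\rfloor + 1 \]

For example, inputSize = 14, filterSize = 3, padding = 1, stride = 2 gives (14 - 3 + 2 + 2) / 2 = 7.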
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "FunctionTest.h"
namespace paddle {
template <DeviceType DType1, DeviceType DType2>
void forward(Compare2Function<DType1, DType2>& test,
const TensorShape& input,
const TensorShape& filter,
const TensorShape& output) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, output));
test.run();
}
template <DeviceType DType1, DeviceType DType2>
void backward_input(Compare2Function<DType1, DType2>& test,
const TensorShape& input,
const TensorShape& filter,
const TensorShape& output) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input), ADD_TO);
test.run();
}
template <DeviceType DType1, DeviceType DType2>
void backward_filter(Compare2Function<DType1, DType2>& test,
const TensorShape& input,
const TensorShape& filter,
const TensorShape& output) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, filter), ADD_TO);
test.run();
}
template <DeviceType DType1, DeviceType DType2>
using Function = void (*)(Compare2Function<DType1, DType2>& test,
const TensorShape& input,
const TensorShape& filter,
const TensorShape& output);
/**
* \brief A basic convolution function test interface.
*
* \param conv1 type name of convolution function 1.
* \param conv2 type name of convolution function 2.
 * \param function  test function; can be one of the forward, backward_input,
 *                  and backward_filter functions above.
* Example:
* 1. Compare GemmConv's CPU and GPU implementation:
* Convolution<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU>(
* "GemmConv-CPU", "GemmConv-GPU", forward);
*/
template <DeviceType DType1, DeviceType DType2>
void Convolution(const std::string& conv1,
const std::string& conv2,
Function<DType1, DType2> function) {
for (size_t batchSize : {1, 5}) {
for (size_t inputSize : {7, 14, 31}) {
for (size_t filterSize : {1, 3, 5}) {
for (size_t inputChannels : {3, 16}) {
for (size_t outputChannels : {3, 16}) {
if (outputChannels < inputChannels) continue;
for (size_t stride : {1, 2}) {
for (size_t padding : {0, 1}) {
if (padding >= filterSize) break;
// NNPACK only supports stride = 1 if batchSize > 1
if ((conv1 == "NNPACKConv-CPU" || conv2 == "NNPACKConv-CPU") &&
batchSize > 1 && stride > 1)
break;
size_t outputSize =
(inputSize - filterSize + 2 * padding + stride) / stride;
VLOG(3) << " batchSize=" << batchSize
<< " inputChannels=" << inputChannels
<< " inputHeight=" << inputSize
<< " inputWidth=" << inputSize
<< " outputChannels=" << outputChannels
<< " filterHeight=" << filterSize
<< " filterWidth=" << filterSize
<< " outputHeight=" << outputSize
<< " outputWidth=" << outputSize << " stride=" << stride
<< " padding=" << padding;
std::vector<size_t> paddings = {padding, padding};
std::vector<size_t> strides = {stride, stride};
Compare2Function<DType1, DType2> test(
conv1,
conv2,
FuncConfig()
.set("paddings", paddings)
.set("strides", strides)
.set("groups", (size_t)1)
.set("algo", (std::string) "auto"));
TensorShape input{
batchSize, inputChannels, inputSize, inputSize};
TensorShape filter{
outputChannels, inputChannels, filterSize, filterSize};
TensorShape output{
batchSize, outputChannels, outputSize, outputSize};
function(test, input, filter, output);
}
}
}
}
}
}
}
}
/**
 * \brief A convolution function test interface for the case where the
 * image height is not equal to the image width.
*/
template <DeviceType DType1, DeviceType DType2>
void Convolution2(const std::string& conv1,
const std::string& conv2,
Function<DType1, DType2> function) {
for (size_t batchSize : {4}) {
for (size_t inputHeight : {7, 31}) {
for (size_t inputWidth : {10, 54}) {
for (size_t filterHeight : {1, 5}) {
for (size_t filterWidth : {3, 7}) {
for (size_t inputChannels : {7}) {
for (size_t outputChannels : {7}) {
size_t stride = 1;
size_t padding = 0;
size_t outputHeight =
(inputHeight - filterHeight + 2 * padding + stride) /
stride;
size_t outputWidth =
(inputWidth - filterWidth + 2 * padding + stride) / stride;
VLOG(3) << " batchSize=" << batchSize
<< " inputChannels=" << inputChannels
<< " inputHeight=" << inputHeight
<< " inputWidth=" << inputWidth
<< " outputChannels=" << outputChannels
<< " filterHeight=" << filterHeight
<< " filterWidth=" << filterWidth
<< " outputHeight=" << outputHeight
<< " outputWidth=" << outputWidth
<< " stride=" << stride << " padding=" << padding;
std::vector<size_t> paddings = {padding, padding};
std::vector<size_t> strides = {stride, stride};
Compare2Function<DType1, DType2> test(
conv1,
conv2,
FuncConfig()
.set("paddings", paddings)
.set("strides", strides)
.set("groups", (size_t)1)
.set("algo", (std::string) "auto"));
TensorShape input{
batchSize, inputChannels, inputHeight, inputWidth};
TensorShape filter{
outputChannels, inputChannels, filterHeight, filterWidth};
TensorShape output{
batchSize, outputChannels, outputHeight, outputWidth};
function(test, input, filter, output);
}
}
}
}
}
}
}
}
/**
* \brief A convolution function test interface for depthwise convolution.
*/
template <DeviceType DType1, DeviceType DType2>
void DepthwiseConvolution(const std::string& conv1,
const std::string& conv2,
Function<DType1, DType2> function) {
for (size_t batchSize : {1, 32}) {
for (size_t inputSize : {7, 14, 54}) {
for (size_t filterSize : {3, 4}) {
for (size_t inputChannels : {32}) {
for (size_t outputChannels : {32, 64}) {
for (size_t stride : {1, 2}) {
for (size_t padding : {0, 1}) {
// NNPACK only supports stride = 1 if batchSize > 1,
// and there is a bug when batchSize > 1 and groups != 1
if ((conv1 == "NNPACKConv-CPU" || conv2 == "NNPACKConv-CPU") &&
batchSize > 1)
break;
size_t outputSize =
(inputSize - filterSize + 2 * padding + stride) / stride;
VLOG(3) << " batchSize=" << batchSize
<< " inputChannels=" << inputChannels
<< " inputHeight=" << inputSize
<< " inputWidth=" << inputSize
<< " outputChannels=" << outputChannels
<< " filterHeight=" << filterSize
<< " filterWidth=" << filterSize
<< " outputHeight=" << outputSize
<< " outputWidth=" << outputSize << " stride=" << stride
<< " padding=" << padding;
std::vector<size_t> paddings = {padding, padding};
std::vector<size_t> strides = {stride, stride};
size_t groups = inputChannels;
Compare2Function<DType1, DType2> test(
conv1,
conv2,
FuncConfig()
.set("paddings", paddings)
.set("strides", strides)
.set("groups", groups)
.set("algo", (std::string) "auto"));
TensorShape input{
batchSize, inputChannels, inputSize, inputSize};
TensorShape filter{groups,
outputChannels / groups,
inputChannels / groups,
filterSize,
filterSize};
TensorShape output{
batchSize, outputChannels, outputSize, outputSize};
function(test, input, filter, output);
}
}
}
}
}
}
}
}
} // namespace paddle
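A quick worked example of the shapes DepthwiseConvolution sets up: with inputChannels = 32, outputChannels = 64, and groups = inputChannels = 32, each group sees exactly one input channel, so

\[ \text{filter} = \{groups,\ outputChannels/groups,\ inputChannels/groups,\ k,\ k\} = \{32,\ 2,\ 1,\ k,\ k\} \]

that is, two output channels are produced per input channel, which is exactly the depthwise ("channel multiplier") convolution pattern.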
...@@ -14,7 +14,6 @@ limitations under the License. */ ...@@ -14,7 +14,6 @@ limitations under the License. */
#include "DepthwiseConvOp.h" #include "DepthwiseConvOp.h"
#include "ConvOp.h" #include "ConvOp.h"
#include "GemmFunctor.h"
namespace paddle { namespace paddle {
......
...@@ -13,7 +13,6 @@ See the License for the specific language governing permissions and ...@@ -13,7 +13,6 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "DepthwiseConvOp.h" #include "DepthwiseConvOp.h"
#include "GemmFunctor.h"
#include "paddle/math/BaseMatrix.h" #include "paddle/math/BaseMatrix.h"
namespace paddle { namespace paddle {
......
@@ -13,23 +13,25 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include <gtest/gtest.h>
-#include <paddle/framework/op_desc.pb.h>
+#include "ConvOpTest.h"

-TEST(OpDesc, Create) {
-  paddle::framework::OpDesc op_desc;
-  op_desc.set_type("add");
-  op_desc.add_inputs("X");
-  op_desc.add_inputs("Y");
-  op_desc.add_outputs("Z");
-
-  auto attr = op_desc.mutable_attrs()->Add();
-  attr->set_type(paddle::framework::AttrType::FLOAT);
-  attr->set_f(3.14);
-
-  // required field name is not set, so IsInitialized should be false.
-  ASSERT_FALSE(op_desc.IsInitialized());
-
-  attr->set_name("add");
-  // after all required fields are set, IsInitialized should be true now.
-  ASSERT_TRUE(op_desc.IsInitialized());
-}
+namespace paddle {
+
+#ifndef PADDLE_ONLY_CPU
+TEST(DepthwiseConv, Forward) {
+  DepthwiseConvolution<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU>(
+      "GemmConv-CPU", "DepthwiseConv-GPU", forward);
+}
+
+TEST(DepthwiseConv, BackwardInput) {
+  DepthwiseConvolution<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU>(
+      "GemmConvGradInput-CPU", "DepthwiseConvGradInput-GPU", backward_input);
+}
+
+TEST(DepthwiseConv, BackwardFilter) {
+  DepthwiseConvolution<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU>(
+      "GemmConvGradFilter-CPU", "DepthwiseConvGradFilter-GPU", backward_filter);
+}
+#endif
+
+}  // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <glog/logging.h>
#include "unsupported/Eigen/CXX11/Tensor"
namespace paddle {
template <class T>
struct EigenBlasGemm {
typedef Eigen::TensorMap<Eigen::Tensor<T, 2, Eigen::RowMajor, int>,
Eigen::Aligned>
Matrix;
static void compute(const bool transA,
const bool transB,
const int M,
const int N,
const int K,
const T alpha,
const T* A,
const int lda,
const T* B,
const int ldb,
const T beta,
T* C,
const int ldc) {
Eigen::array<int, 2> sizeA;
if (transA) {
sizeA[0] = K;
sizeA[1] = M;
CHECK_EQ(M, lda);
} else {
sizeA[0] = M;
sizeA[1] = K;
CHECK_EQ(K, lda);
}
Eigen::array<int, 2> sizeB;
if (transB) {
sizeB[0] = N;
sizeB[1] = K;
CHECK_EQ(K, ldb);
} else {
sizeB[0] = K;
sizeB[1] = N;
CHECK_EQ(N, ldb);
}
Eigen::array<int, 2> sizeC;
sizeC[0] = M;
sizeC[1] = N;
CHECK_EQ(N, ldc);
const Matrix a(const_cast<T*>(A), sizeA);
const Matrix b(const_cast<T*>(B), sizeB);
Matrix c(C, sizeC);
typedef typename Eigen::Tensor<T, 2>::DimensionPair DimPair;
Eigen::array<DimPair, 1> dims;
dims[0] = DimPair(1, 0);
dims[0].first = transA ? 0 : 1;
dims[0].second = transB ? 1 : 0;
Eigen::DefaultDevice device;
if (alpha == T(1) && beta == T(0)) {
c.device(device) = a.contract(b, dims);
} else if (alpha == T(1) && beta == T(1)) {
c.device(device) += a.contract(b, dims);
} else {
c.device(device) = alpha * a.contract(b, dims) + beta * c;
}
}
};
#ifdef PADDLE_TYPE_DOUBLE
template class EigenBlasGemm<double>;
#else
template class EigenBlasGemm<float>;
#endif
} // namespace paddle
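Because EigenBlasGemm only depends on Eigen, it can be exercised in isolation. A minimal sketch (row-major data, alpha = 1, beta = 0; the values are made up for illustration):

// C(2x2) = A(2x3) * B(3x2); leading dimensions equal the column counts,
// matching the CHECK_EQ constraints in compute() above.
float A[6] = {1, 2, 3, 4, 5, 6};     // 2x3, lda = 3
float B[6] = {7, 8, 9, 10, 11, 12};  // 3x2, ldb = 2
float C[4] = {0, 0, 0, 0};           // 2x2, ldc = 2
paddle::EigenBlasGemm<float>::compute(
    /*transA=*/false, /*transB=*/false, /*M=*/2, /*N=*/2, /*K=*/3,
    /*alpha=*/1.0f, A, /*lda=*/3, B, /*ldb=*/2, /*beta=*/0.0f, C, /*ldc=*/2);
// C now holds {58, 64, 139, 154}.

Note this links only when the template is instantiated for float, i.e. when PADDLE_TYPE_DOUBLE is not defined (see the explicit instantiation above).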
...@@ -85,7 +85,6 @@ public: ...@@ -85,7 +85,6 @@ public:
} }
Im2ColFunctor<kCFO, Device, real> im2col; Im2ColFunctor<kCFO, Device, real> im2col;
GemmFunctor<Device, real> gemm;
size_t inputOffset = imShape.getElements(); size_t inputOffset = imShape.getElements();
size_t outputOffset = size_t outputOffset =
(outputChannels / groups_) * outputHeight * outputWidth; (outputChannels / groups_) * outputHeight * outputWidth;
...@@ -108,8 +107,8 @@ public: ...@@ -108,8 +107,8 @@ public:
int M = outputChannels / groups_; int M = outputChannels / groups_;
int N = outputHeight * outputWidth; int N = outputHeight * outputWidth;
int K = inputChannels / groups_ * filterHeight * filterWidth; int K = inputChannels / groups_ * filterHeight * filterWidth;
-    gemm(CblasNoTrans,
-         CblasNoTrans,
+    BlasGemm<Device, real>::compute(false,
+                                    false,
     M,
     N,
     K,
...@@ -188,8 +187,6 @@ public: ...@@ -188,8 +187,6 @@ public:
} }
Col2ImFunctor<kCFO, Device, real> col2im; Col2ImFunctor<kCFO, Device, real> col2im;
GemmFunctor<Device, real> gemm;
size_t inputOffset = imShape.getElements(); size_t inputOffset = imShape.getElements();
size_t outputOffset = size_t outputOffset =
(outputChannels / groups_) * outputHeight * outputWidth; (outputChannels / groups_) * outputHeight * outputWidth;
...@@ -205,8 +202,8 @@ public: ...@@ -205,8 +202,8 @@ public:
colData = inputGrad + g * inputOffset; colData = inputGrad + g * inputOffset;
scale = 1.0f; scale = 1.0f;
} }
-    gemm(CblasTrans,
-         CblasNoTrans,
+    BlasGemm<Device, real>::compute(true,
+                                    false,
     M,
     N,
     K,
...@@ -299,7 +296,6 @@ public: ...@@ -299,7 +296,6 @@ public:
} }
Im2ColFunctor<kCFO, Device, real> im2col; Im2ColFunctor<kCFO, Device, real> im2col;
GemmFunctor<Device, real> gemm;
size_t inputOffset = imShape.getElements(); size_t inputOffset = imShape.getElements();
size_t outputOffset = size_t outputOffset =
(outputChannels / groups_) * outputHeight * outputWidth; (outputChannels / groups_) * outputHeight * outputWidth;
...@@ -321,8 +317,8 @@ public: ...@@ -321,8 +317,8 @@ public:
int M = outputChannels / groups_; int M = outputChannels / groups_;
int K = outputHeight * outputWidth; int K = outputHeight * outputWidth;
int N = inputChannels / groups_ * filterHeight * filterWidth; int N = inputChannels / groups_ * filterHeight * filterWidth;
-    gemm(CblasNoTrans,
-         CblasTrans,
+    BlasGemm<Device, real>::compute(false,
+                                    true,
     M,
     N,
     K,
......
@@ -12,17 +12,39 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-syntax = "proto2";
-package paddle.framework;
-
-// Attribute Type for paddle's Op.
-// Op contains many attributes. Each type of attributes could be different.
-// The AttrType will be shared between AttrDesc and AttrProto.
-enum AttrType {
-  INT = 0;
-  FLOAT = 1;
-  STRING = 2;
-  INTS = 3;
-  FLOATS = 4;
-  STRINGS = 5;
-}
+#include <gtest/gtest.h>
+#include "ConvOpTest.h"
+
+namespace paddle {
+
+TEST(GemmConv, NaiveConv) {
+  Convolution<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU>(
+      "NaiveConv-CPU", "GemmConv-CPU", forward);
+  Convolution2<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU>(
+      "NaiveConv-CPU", "GemmConv-CPU", forward);
+}
+
+#ifndef PADDLE_ONLY_CPU
+TEST(GemmConv, Forward) {
+  Convolution<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU>(
+      "GemmConv-CPU", "GemmConv-GPU", forward);
+  Convolution2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU>(
+      "GemmConv-CPU", "GemmConv-GPU", forward);
+}
+
+TEST(GemmConv, BackwardInput) {
+  Convolution<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU>(
+      "GemmConvGradInput-CPU", "GemmConvGradInput-GPU", backward_input);
+  Convolution2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU>(
+      "GemmConvGradInput-CPU", "GemmConvGradInput-GPU", backward_input);
+}
+
+TEST(GemmConv, BackwardFilter) {
+  Convolution<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU>(
+      "GemmConvGradFilter-CPU", "GemmConvGradFilter-GPU", backward_filter);
+  Convolution2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU>(
+      "GemmConvGradFilter-CPU", "GemmConvGradFilter-GPU", backward_filter);
+}
+#endif
+
+}  // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "GemmFunctor.h"
#include "paddle/math/MathFunctions.h"
namespace paddle {
template <class T>
struct BlasGemm<DEVICE_TYPE_CPU, T> {
static void compute(const bool transA,
const bool transB,
const int M,
const int N,
const int K,
const T alpha,
const T* A,
const int lda,
const T* B,
const int ldb,
const T beta,
T* C,
const int ldc) {
#ifdef PADDLE_USE_EIGEN_FOR_BLAS
EigenBlasGemm<T>::compute(
transA, transB, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc);
#else
gemm<T>(transA == false ? CblasNoTrans : CblasTrans,
transB == false ? CblasNoTrans : CblasTrans,
M,
N,
K,
alpha,
A,
lda,
B,
ldb,
beta,
C,
ldc);
#endif
}
};
template <class T>
struct BlasGemm<DEVICE_TYPE_GPU, T> {
static void compute(const bool transA,
const bool transB,
const int M,
const int N,
const int K,
const T alpha,
const T* A,
const int lda,
const T* B,
const int ldb,
const T beta,
T* C,
const int ldc) {
hl_matrix_mul((T*)A,
transA == false ? HPPL_OP_N : HPPL_OP_T,
(T*)B,
transB == false ? HPPL_OP_N : HPPL_OP_T,
C,
M,
N,
K,
alpha,
beta,
lda,
ldb,
ldc);
}
};
template struct BlasGemm<DEVICE_TYPE_CPU, real>;
template struct BlasGemm<DEVICE_TYPE_GPU, real>;
} // namespace paddle
@@ -14,7 +14,7 @@ limitations under the License. */

 #pragma once

-#include "paddle/math/MathFunctions.h"
+#include "TensorType.h"

 namespace paddle {

@@ -24,10 +24,9 @@ namespace paddle {
 // of MatMulFunction, we need to consider the reconstruction of hl_matrix_mul
 // interface.
 template <DeviceType Device, class T>
-class GemmFunctor {
-public:
-  void operator()(const CBLAS_TRANSPOSE transA,
-                  const CBLAS_TRANSPOSE TransB,
+struct BlasGemm {
+  static void compute(const bool transA,
+                      const bool transB,
   const int M,
   const int N,
   const int K,
@@ -41,11 +40,15 @@
   const int ldc);
 };

+// TODO(hedaoyuan): Since the definition of the real type in Paddle conflicts
+// with the Eigen library, code that compiles the Eigen parts cannot include
+// the Paddle header files. Hence we need an EigenBlasGemm template class
+// that does not take the DeviceType parameter.
+// I will fix this problem and merge BlasGemm and EigenBlasGemm into one.
 template <class T>
-class GemmFunctor<DEVICE_TYPE_CPU, T> {
-public:
-  void operator()(const CBLAS_TRANSPOSE transA,
-                  const CBLAS_TRANSPOSE TransB,
+struct EigenBlasGemm {
+  static void compute(const bool transA,
+                      const bool transB,
   const int M,
   const int N,
   const int K,
@@ -56,41 +59,7 @@
   const int ldb,
   const T beta,
   T* C,
-  const int ldc) {
-    gemm<T>(transA, TransB, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc);
-  }
-};
-
-template <class T>
-class GemmFunctor<DEVICE_TYPE_GPU, T> {
-public:
-  void operator()(const CBLAS_TRANSPOSE transA,
-                  const CBLAS_TRANSPOSE TransB,
-                  const int M,
-                  const int N,
-                  const int K,
-                  const T alpha,
-                  const T* A,
-                  const int lda,
-                  const T* B,
-                  const int ldb,
-                  const T beta,
-                  T* C,
-                  const int ldc) {
-    hl_matrix_mul((T*)A,
-                  transA == CblasNoTrans ? HPPL_OP_N : HPPL_OP_T,
-                  (T*)B,
-                  TransB == CblasNoTrans ? HPPL_OP_N : HPPL_OP_T,
-                  C,
-                  M,
-                  N,
-                  K,
-                  alpha,
-                  beta,
-                  lda,
-                  ldb,
-                  ldc);
-  }
+  const int ldc);
 };

 }  // namespace paddle
...@@ -196,22 +196,23 @@ public: ...@@ -196,22 +196,23 @@ public:
CHECK_EQ(status, nnp_status_success); CHECK_EQ(status, nnp_status_success);
} }
   } else {
-    for (size_t g = 0; g < groups_; g++) {
-      // only supports stride = 1
-      CHECK_EQ(strideH(), 1);
-      CHECK_EQ(strideW(), 1);
-      nnp_status status =
-          nnp_convolution_output(algorithm_,
-                                 batchSize,
-                                 inputChannels / groups_,
-                                 outputChannels / groups_,
-                                 inputSize,
-                                 padding,
-                                 kernelSize,
-                                 inputData + inputOffset * g,
-                                 filterData + filterOffset * g,
-                                 nullptr, /* bias */
-                                 outputData + outputOffset * g,
-                                 bufferPtr,
-                                 sizePtr,
-                                 nnp_activation_identity,
+    // only supports stride = 1
+    CHECK_EQ(strideH(), 1);
+    CHECK_EQ(strideW(), 1);
+
+    // TODO(hedaoyuan): There is a bug when batchSize > 1 and groups_ > 1.
+    CHECK_EQ(groups_, static_cast<size_t>(1));
+    nnp_status status = nnp_convolution_output(algorithm_,
+                                               batchSize,
+                                               inputChannels,
+                                               outputChannels,
+                                               inputSize,
+                                               padding,
+                                               kernelSize,
+                                               inputData,
+                                               filterData,
+                                               nullptr, /* bias */
+                                               outputData,
+                                               bufferPtr,
+                                               sizePtr,
+                                               nnp_activation_identity,
@@ -221,7 +222,6 @@
       CHECK_EQ(status, nnp_status_success);
     }
   }
-}
static void create_nnpack_threadpool() { static void create_nnpack_threadpool() {
if (FLAGS_nnpack_num_threads && threadpool_ == nullptr) { if (FLAGS_nnpack_num_threads && threadpool_ == nullptr) {
......
@@ -13,87 +13,18 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include <gtest/gtest.h>
-#include "paddle/function/Function.h"
-#include "paddle/function/FunctionTest.h"
-
-DEFINE_string(algo,
-              "auto",
-              "The algorithm (auto, ft8x8, ft16x16, wt8x8, "
-              "implicit-gemm, or direct) for computing convolution of NNPACK.");
+#include "paddle/function/ConvOpTest.h"

 namespace paddle {

-#define IS_NNPACK_SUPPORT(algo, filterSize, stride)           \
-  if (algo == "direct" && filterSize != 1) continue;          \
-  if (algo == "direct" && batchSize != 1) continue;           \
-  if (algo == "wt8x8" && filterSize != 3) continue;           \
-  if (algo == "implicit-gemm" && batchSize != 1) continue;    \
-  if (algo != "auto" && algo != "implicit-gemm" && stride > 1) continue;
-
-class ConvolutionTest {
-public:
-  ConvolutionTest(const std::string& conv1,
-                  const std::string& conv2,
-                  std::string algo = "auto") {
-    for (size_t batchSize : {1, 32}) {
-      for (size_t inputSize : {7, 14, 54}) {
-        for (size_t filterSize : {1, 3, 5}) {
-          for (size_t inputChannels : {3, 64}) {
-            for (size_t outputChannels : {3, 64, 128}) {
-              if (inputChannels < outputChannels) break;
-              for (size_t stride : {1, 2}) {
-                // if batchSize > 1 NNPACKConv only supports stride = 1
-                if (batchSize > 1 && stride > 1) break;
-                for (size_t padding : {0, 1}) {
-                  if (padding >= filterSize) break;
-                  size_t outputSize =
-                      (inputSize - filterSize + 2 * padding + stride) / stride;
-                  IS_NNPACK_SUPPORT(algo, filterSize, stride);
-                  LOG(INFO) << " batchSize=" << batchSize
-                            << " inputChannels=" << inputChannels
-                            << " inputHeight=" << inputSize
-                            << " inputWidth=" << inputSize
-                            << " outputChannels=" << outputChannels
-                            << " filterHeight=" << filterSize
-                            << " filterWidth=" << filterSize
-                            << " outputHeight=" << outputSize
-                            << " outputWidth=" << outputSize
-                            << " stride=" << stride << " padding=" << padding;
-                  std::vector<size_t> paddings = {padding, padding};
-                  std::vector<size_t> strides = {stride, stride};
-                  Compare2Function<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU> test(
-                      conv1,
-                      conv2,
-                      FuncConfig()
-                          .set("paddings", paddings)
-                          .set("strides", strides)
-                          .set("groups", (size_t)1)
-                          .set("algo", algo));
-                  TensorShape shape0{
-                      batchSize, inputChannels, inputSize, inputSize};
-                  TensorShape shape1{
-                      outputChannels, inputChannels, filterSize, filterSize};
-                  TensorShape shape2{
-                      batchSize, outputChannels, outputSize, outputSize};
-                  test.addInputs(BufferArg(VALUE_TYPE_FLOAT, shape0));
-                  test.addInputs(BufferArg(VALUE_TYPE_FLOAT, shape1));
-                  test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, shape2));
-                  test.run();
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-};
-
-TEST(Convolution, NNPACK) {
-  // NNPACK only supports stride = 1
-  ConvolutionTest test("GemmConv-CPU", "NNPACKConv-CPU", FLAGS_algo);
-}
+TEST(NNPACK, Forward) {
+  Convolution<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU>(
+      "GemmConv-CPU", "NNPACKConv-CPU", forward);
+}
+
+TEST(NNPACK, Depthwise) {
+  DepthwiseConvolution<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU>(
+      "GemmConv-CPU", "NNPACKConv-CPU", forward);
+}

 }  // namespace paddle
...@@ -112,7 +112,6 @@ BEGIN_DEFINE_ACTIVATION(softmax) ...@@ -112,7 +112,6 @@ BEGIN_DEFINE_ACTIVATION(softmax)
private: private:
MatrixPtr sftMaxSum_; MatrixPtr sftMaxSum_;
MatrixPtr sftMaxDot_; MatrixPtr sftMaxDot_;
MatrixPtr one_;
public: public:
Error __must_check forward(Argument& act) { Error __must_check forward(Argument& act) {
...@@ -138,14 +137,6 @@ Error __must_check backward(Argument& act) { ...@@ -138,14 +137,6 @@ Error __must_check backward(Argument& act) {
1, 1,
/* trans */ false, /* trans */ false,
useGpu(act.deviceId)); useGpu(act.deviceId));
if (!one_ || one_->getWidth() != outputG->getWidth()) {
Matrix::resizeOrCreate(one_,
1,
outputG->getWidth(),
/* trans */ false,
useGpu(act.deviceId));
one_->one();
}
sftMaxDot_->dotMul(*outputG, *outputV); sftMaxDot_->dotMul(*outputG, *outputV);
sftMaxSum_->colMerge(*sftMaxDot_); sftMaxSum_->colMerge(*sftMaxDot_);
......
...@@ -202,7 +202,7 @@ void NeuralNetwork::prefetch(const std::vector<Argument>& inArgs) { ...@@ -202,7 +202,7 @@ void NeuralNetwork::prefetch(const std::vector<Argument>& inArgs) {
auto mat = dynamic_cast<SparsePrefetchRowCpuMatrix*>( auto mat = dynamic_cast<SparsePrefetchRowCpuMatrix*>(
para->getMat(PARAMETER_VALUE).get()); para->getMat(PARAMETER_VALUE).get());
para->clearGradient(); para->clearGradient();
-      mat->clearIndices();
+      if (mat) mat->clearIndices();
} }
} }
} }
......
...@@ -184,7 +184,7 @@ public: ...@@ -184,7 +184,7 @@ public:
} }
void backward(const UpdateCallback& callback) override { void backward(const UpdateCallback& callback) override {
-  if (biases_) {
+  if (biases_ && biases_->getWGrad()) {
backwardActivation(); backwardActivation();
biases_->getWGrad()->collectBias(*getOutputGrad(), 1); biases_->getWGrad()->collectBias(*getOutputGrad(), 1);
biases_->getParameterPtr()->incUpdate(callback); biases_->getParameterPtr()->incUpdate(callback);
......
...@@ -57,11 +57,14 @@ bool MKLDNNFcLayer::init(const LayerMap& layerMap, ...@@ -57,11 +57,14 @@ bool MKLDNNFcLayer::init(const LayerMap& layerMap,
} }
 void MKLDNNFcLayer::convertWeightsFromPaddle() {
-  if (FLAGS_use_mkldnn_wgt) {
-    return;
-  }
-
   if (hasInitedWgt_) {
     return;
   }
+
+  // TODO(TJ): dst format should get from wgtVal_
+  int dstFmt = PARAM_FORMAT_MKLDNN_OI;
+  int srcFmt = weight_->getParameterPtr()->getHeaderFormat();
+  if (srcFmt == dstFmt) {
+    return;
+  }
...@@ -78,6 +81,7 @@ void MKLDNNFcLayer::convertWeightsFromPaddle() { ...@@ -78,6 +81,7 @@ void MKLDNNFcLayer::convertWeightsFromPaddle() {
MatrixPtr paddleWgtT; MatrixPtr paddleWgtT;
paddleWgt->transpose(paddleWgtT, true); paddleWgt->transpose(paddleWgtT, true);
weight_->getW()->copyFrom(*paddleWgtT); weight_->getW()->copyFrom(*paddleWgtT);
weight_->getParameterPtr()->setHeaderFormat(dstFmt);
hasInitedWgt_ = true; hasInitedWgt_ = true;
} }
......
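The transpose above converts Paddle's stored weight layout into the output-major ("oi") layout that PARAM_FORMAT_MKLDNN_OI advertises. As a plain illustration of the data movement only (a hypothetical standalone helper, not the layer's actual code):

// io[i][o] (inputDim x outputDim) -> oi[o][i] (outputDim x inputDim).
void ConvertIoToOi(const float* io, float* oi, int inputDim, int outputDim) {
  for (int o = 0; o < outputDim; ++o) {
    for (int i = 0; i < inputDim; ++i) {
      oi[o * inputDim + i] = io[i * outputDim + o];
    }
  }
}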
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "Layer.h"
namespace paddle {
/**
 * A layer that applies a linear transformation to each element in each row
 * of the input matrix. For each element, the layer first re-scales it and
 * then adds a bias to it.
*
* \f[
* y = wx + b
* \f]
*
* Here, w is the scale and b is the bias. Both w and b are trainable scalars.
*
*/
class ScaleShiftLayer : public Layer {
protected:
std::unique_ptr<Weight> scale_;
std::unique_ptr<Weight> offset_;
public:
explicit ScaleShiftLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
};
REGISTER_LAYER(scale_shift, ScaleShiftLayer);
bool ScaleShiftLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
Layer::init(layerMap, parameterMap);
CHECK_EQ(inputLayers_.size(), 1U);
scale_.reset(new Weight(1, 1, parameters_[0]));
if (biasParameter_.get() != NULL) {
offset_ = std::unique_ptr<Weight>(new Weight(1, 1, biasParameter_));
}
return true;
}
void ScaleShiftLayer::forward(PassType passType) {
Layer::forward(passType);
MatrixPtr inV = getInputValue(0);
resetOutput(inV->getHeight(), inV->getWidth());
MatrixPtr outV = getOutputValue();
real scaleValue = scale_->getW()->getElement(0, 0);
outV->mulScalar(*inV, scaleValue);
if (offset_) {
real offsetValue = offset_->getW()->getElement(0, 0);
outV->add(offsetValue);
}
}
void ScaleShiftLayer::backward(const UpdateCallback& callback) {
MatrixPtr inV = getInputValue(0);
MatrixPtr inG = getInputGrad(0);
MatrixPtr outV = getOutputValue();
MatrixPtr outG = getOutputGrad();
/* Calculate the parameter gradient for the current layer */
if (scale_->getWGrad()) {
MatrixPtr rowSumMtx;
Matrix::resizeOrCreate(rowSumMtx, outG->getHeight(), 1, false, useGpu_);
// this_i = scaleDest * this_i + scaleSum * \sum_j b_{ij} * c_{ij}
rowSumMtx->sumOfProducts(
/* b= */ *inV, /* c= */ *outG, /* scaleSum= */ 1, /* scaleDest= */ 0.);
// this_i = scaleDest * this_i + scaleSum * \sum_j b_{ji}
scale_->getWGrad()->sumCols(
/* b= */ *rowSumMtx, /* scaleSum= */ 1., /* scaleDest= */ 1.);
scale_->getParameterPtr()->incUpdate(callback);
}
if (offset_ && offset_->getWGrad()) {
MatrixPtr rowSumMtx;
Matrix::resizeOrCreate(rowSumMtx, outG->getHeight(), 1, false, useGpu_);
rowSumMtx->sumRows(*outG, 1., 0.);
offset_->getWGrad()->sumCols(*rowSumMtx, 1., 1.);
offset_->getParameterPtr()->incUpdate(callback);
}
/* Calculate the input layers error */
if (inG) {
real scaleValue = scale_->getW()->getElement(0, 0);
inG->add(*outG, scaleValue);
}
}
} // namespace paddle
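The backward pass above implements the gradients of y_{ij} = w x_{ij} + b with respect to the two scalar parameters and the input; writing g_{ij} for the output gradient,

\[ \frac{\partial L}{\partial w} = \sum_{i,j} g_{ij}\, x_{ij}, \qquad \frac{\partial L}{\partial b} = \sum_{i,j} g_{ij}, \qquad \frac{\partial L}{\partial x_{ij}} = w\, g_{ij} \]

which matches the rowSumMtx reductions followed by sumCols into the 1x1 parameter gradients, and the inG->add(*outG, scaleValue) call for the input gradient.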
@@ -388,6 +388,11 @@ void initDataLayer(TestConfig testConf,
       data.grad->zeroMem();
       break;
     case INPUT_SELF_DEFINE_DATA: {
+      if (testConf.inputDefs[i].ids.size()) {
+        data.ids = IVector::create(testConf.inputDefs[i].ids.size(), useGpu);
+        data.ids->copyFrom(testConf.inputDefs[i].ids.data(),
+                           testConf.inputDefs[i].ids.size());
+      } else if (testConf.inputDefs[i].selfDefinedData) {
       size_t height = testConf.inputDefs[i].selfDefinedData->getHeight();
       size_t width = testConf.inputDefs[i].selfDefinedData->getWidth();
       CHECK_GT(static_cast<int>(height), 0);
@@ -396,6 +401,10 @@ void initDataLayer(TestConfig testConf,
       data.grad = Matrix::create(height, width, false, useGpu);
       data.value->copyFrom(*testConf.inputDefs[i].selfDefinedData);
       data.grad->zeroMem();
+      } else {
+        LOG(FATAL) << "No self-defined data is given.";
+        return;
+      }

       const std::vector<int>& labelSeqStartPositions =
           testConf.inputDefs[i].labelSeqStartPositions;
...@@ -68,6 +68,7 @@ struct InputDef { ...@@ -68,6 +68,7 @@ struct InputDef {
std::vector<int> labelInitValue; std::vector<int> labelInitValue;
std::vector<int> labelSeqStartPositions; std::vector<int> labelSeqStartPositions;
std::vector<int> labelSubSeqStartPositions; std::vector<int> labelSubSeqStartPositions;
std::vector<int> ids;
MatrixPtr selfDefinedData; MatrixPtr selfDefinedData;
InputDef(InputType type, string nameIn, size_t dimIn, size_t sizeIn) { InputDef(InputType type, string nameIn, size_t dimIn, size_t sizeIn) {
...@@ -95,6 +96,23 @@ struct InputDef { ...@@ -95,6 +96,23 @@ struct InputDef {
isStatic = false; isStatic = false;
} }
InputDef(InputType type,
string nameIn,
const std::vector<int>& ids,
const std::vector<int>& selfDefinedSeqStartPos = {},
const std::vector<int>& selfDefinedSubSeqStartPos = {})
: labelSeqStartPositions(selfDefinedSeqStartPos),
labelSubSeqStartPositions(selfDefinedSubSeqStartPos),
ids(ids) {
selfDefinedData = nullptr;
inputType = type;
name = nameIn;
dim = 0;
sparse = {""};
paraSize = 0;
isStatic = false;
}
InputDef(InputType type, InputDef(InputType type,
string nameIn, string nameIn,
size_t dimIn, size_t dimIn,
......
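A hedged sketch of how the new ids-based constructor is presumably meant to be used from a layer test (the input name here is made up):

// Feed integer ids with explicit sequence boundaries into a test layer.
std::vector<int> ids = {1, 4, 2, 7};
std::vector<int> seqStartPos = {0, 2, 4};  // two sequences of length 2
config.inputDefs.push_back(
    InputDef(INPUT_SELF_DEFINE_DATA, "ids_input", ids, seqStartPos));

With INPUT_SELF_DEFINE_DATA and a non-empty ids vector, initDataLayer takes the new branch above and builds data.ids instead of a dense matrix.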
@@ -330,9 +330,7 @@ void MKLDNNTester::run(const TestConfig& dnn,
   log_ = log;
   lvl_ = level;

-  // Firstly test FLAGS_use_mkldnn_wgt = false
-  FLAGS_use_mkldnn_wgt = false;
-  // reset and run once
+  // Firstly test mkldnn init from PARAM_FORMAT_ORIGINAL weight
   reset(dnn, ref, batchSize);
   randomWgtDatas();
   clearWgtDiffs();
@@ -342,17 +340,32 @@ void MKLDNNTester::run(const TestConfig& dnn,
     runOnce();
   }

-  // Then test FLAGS_use_mkldnn_wgt = true
-  FLAGS_use_mkldnn_wgt = true;
-  // after run once the mkldnn weight has been stored in dnnlayer
+  if (parameters_[DNN].empty()) {
+    // has no parameters
+    return;
+  }
+
+  // After running some iterations, the mkldnn weight has been stored in
+  // dnnLayer, and we can also get the mkldnn weight parameter header format.
+  // The weight parameter should always be at index 0 (and bias at index 1).
+  // TODO(TJ): should also consider mean and var format when batchnorm ready
+  int dnnWgtFmt = parameters_[DNN][0]->getHeaderFormat();
+  int refWgtFmt = parameters_[REF][0]->getHeaderFormat();
+  if (dnnWgtFmt == refWgtFmt) {
+    // weight formats are equal, so no need to check more
+    return;
+  }
+
   // then save the weights and restart again
   vector<VectorPtr> dnnWgts, refWgts;
   CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size());
   saveWgt(parameters_[DNN], dnnWgts);
   saveWgt(parameters_[REF], refWgts);

-  // restart again with flag true
+  // restart again with dnn weight format
   reset(dnn, ref, batchSize);
+  // TODO(TJ): should also consider mean and var format when batchnorm ready
+  parameters_[DNN][0]->setHeaderFormat(dnnWgtFmt);

   // restore wgt
   restoreWgt(dnnWgts, parameters_[DNN]);
......
@@ -108,7 +108,7 @@ private:
   * if many(>failRate) wrong(abs(dnn-ref)/abs(ref)>thres) points return the
   * max(diff/ref)
   * else return sum(abs(a-b)) / sum(abs(b))
-  * The return value should smaller than eps when passing.
+  * The return value should be smaller than eps when passing.
   */
double getDelta(const real* d1, double getDelta(const real* d1,
const real* d2, const real* d2,
......
...@@ -2007,6 +2007,21 @@ TEST(Layer, RowL2NormLayer) { ...@@ -2007,6 +2007,21 @@ TEST(Layer, RowL2NormLayer) {
} }
} }
TEST(Layer, ScaleShiftLayer) {
const size_t batchSize = 16;
const size_t size = 32;
TestConfig config;
config.layerConfig.set_type("scale_shift");
config.layerConfig.set_size(size);
config.biasSize = 1;
config.inputDefs.push_back(
{INPUT_DATA, "input", /* dim= */ size, /* paraSize= */ 1});
config.layerConfig.add_inputs();
for (auto useGpu : {false, true}) {
testLayerGrad(config, "scale_shift", batchSize, false, useGpu, false);
}
}
int main(int argc, char** argv) { int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv); testing::InitGoogleTest(&argc, argv);
initMain(argc, argv); initMain(argc, argv);
......
...@@ -269,7 +269,8 @@ TEST(Compare, img_conv2) { ...@@ -269,7 +269,8 @@ TEST(Compare, img_conv2) {
bool useGpu = FLAGS_use_gpu; bool useGpu = FLAGS_use_gpu;
double eps = FLAGS_checkgrad_eps; double eps = FLAGS_checkgrad_eps;
FLAGS_use_gpu = true; FLAGS_use_gpu = true;
-  FLAGS_checkgrad_eps = 1e-2;
+  // Sometimes, this unit test will fail with 1e-2
+  FLAGS_checkgrad_eps = 4e-2;
compareNetwork(config_file_a, config_file_b); compareNetwork(config_file_a, config_file_b);
FLAGS_use_gpu = useGpu; FLAGS_use_gpu = useGpu;
FLAGS_checkgrad_eps = eps; FLAGS_checkgrad_eps = eps;
......
add_subdirectory(detail) add_subdirectory(detail)
cc_library(memory SRCS memory.cc) cc_library(memory SRCS memory.cc)
-cc_library(memcpy SRCS memcpy.cc DEPS device_context)
+cc_library(memcpy SRCS memcpy.cc)
cc_library(paddle_memory cc_library(paddle_memory
DEPS DEPS
......
@@ -27,7 +27,7 @@ limitations under the License. */
 // between host and device. Allocating too much would reduce the amount
 // of memory available to the system for paging. So, by default, we
 // should set use_pinned_memory to false.
-DEFINE_bool(use_pinned_memory, false, "If set, allocate cpu pinned memory.");
+DEFINE_bool(use_pinned_memory, true, "If set, allocate cpu pinned memory.");
namespace paddle { namespace paddle {
namespace memory { namespace memory {
......
...@@ -16,8 +16,6 @@ limitations under the License. */ ...@@ -16,8 +16,6 @@ limitations under the License. */
#include <cstring> // for memcpy #include <cstring> // for memcpy
#include "paddle/platform/device_context.h"
namespace paddle { namespace paddle {
namespace memory { namespace memory {
......
@@ -13,22 +13,38 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/memory/memory.h"
+
+#include <algorithm>  // for transform
+#include <cstring>    // for memcpy
+#include <memory>     // for unique_ptr
+#include <mutex>      // for call_once
+
+#include "glog/logging.h"
+
 #include "paddle/memory/detail/buddy_allocator.h"
 #include "paddle/memory/detail/system_allocator.h"
+#include "paddle/platform/gpu_info.h"

-#include <cstring>  // for memcpy
+DECLARE_double(fraction_of_gpu_memory_to_use);

 namespace paddle {
 namespace memory {

-detail::BuddyAllocator* GetCPUBuddyAllocator() {
-  static detail::BuddyAllocator* a = nullptr;
-  if (a == nullptr) {
-    a = new detail::BuddyAllocator(new detail::CPUAllocator,
-                                   platform::CpuMinChunkSize(),
-                                   platform::CpuMaxChunkSize());
-  }
-  return a;
+using BuddyAllocator = detail::BuddyAllocator;
+
+std::once_flag cpu_allocator_flag;
+std::once_flag gpu_allocator_flag;
+
+BuddyAllocator* GetCPUBuddyAllocator() {
+  static std::unique_ptr<BuddyAllocator> a{nullptr};
+  std::call_once(cpu_allocator_flag, [&]() {
+    a.reset(new BuddyAllocator(new detail::CPUAllocator,
+                               platform::CpuMinChunkSize(),
+                               platform::CpuMaxChunkSize()));
+  });
+  return a.get();
 }

 template <>
@@ -48,20 +64,36 @@ size_t Used<platform::CPUPlace>(platform::CPUPlace place) {

 #ifndef PADDLE_ONLY_CPU

-detail::BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) {
-  static detail::BuddyAllocator** as = NULL;
-  if (as == NULL) {
-    int gpu_num = platform::GetDeviceCount();
-    as = new detail::BuddyAllocator*[gpu_num];
-    for (int gpu = 0; gpu < gpu_num; gpu++) {
-      platform::SetDeviceId(gpu);
-      as[gpu] = new detail::BuddyAllocator(new detail::GPUAllocator,
-                                           platform::GpuMinChunkSize(),
-                                           platform::GpuMaxChunkSize());
-    }
-  }
+BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) {
+  using BuddyAllocVec = std::vector<BuddyAllocator*>;
+  static std::unique_ptr<BuddyAllocVec, void (*)(BuddyAllocVec * p)> as{
+      new BuddyAllocVec, [](BuddyAllocVec* p) {
+        std::for_each(p->begin(), p->end(),
+                      [](BuddyAllocator* p) { delete p; });
+      }};
+
+  // GPU buddy allocators
+  auto& allocators = *as.get();
+
+  // GPU buddy allocator initialization
+  std::call_once(gpu_allocator_flag, [&]() {
+    int gpu_num = platform::GetDeviceCount();
+    allocators.reserve(gpu_num);
+    for (int gpu = 0; gpu < gpu_num; gpu++) {
+      platform::SetDeviceId(gpu);
+      allocators.emplace_back(new BuddyAllocator(new detail::GPUAllocator,
+                                                 platform::GpuMinChunkSize(),
+                                                 platform::GpuMaxChunkSize()));
+    }
+    VLOG(3) << "\n\nNOTE: each GPU device use "
+            << FLAGS_fraction_of_gpu_memory_to_use * 100
+            << "% of GPU memory.\n"
+            << "You can set environment variable '"
+            << platform::kEnvFractionGpuMemoryToUse
+            << "' to change the fraction of GPU usage.\n\n";
+  });
+
   platform::SetDeviceId(gpu_id);
-  return as[gpu_id];
+  return allocators[gpu_id];
 }
template <> template <>
......
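The refactor above replaces the racy lazily-initialized statics with std::call_once. The pattern in isolation (a generic sketch, not Paddle code):

#include <memory>
#include <mutex>

struct Allocator { /* ... */ };

Allocator* GetAllocator() {
  static std::once_flag flag;
  static std::unique_ptr<Allocator> instance;
  // call_once runs the lambda exactly once even under concurrent first
  // calls, unlike the previous unsynchronized "if (ptr == nullptr)" check.
  std::call_once(flag, [] { instance.reset(new Allocator); });
  return instance.get();
}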
...@@ -14,7 +14,6 @@ limitations under the License. */ ...@@ -14,7 +14,6 @@ limitations under the License. */
#pragma once #pragma once
#include "paddle/platform/gpu_info.h"
#include "paddle/platform/place.h" #include "paddle/platform/place.h"
namespace paddle { namespace paddle {
......
...@@ -41,8 +41,11 @@ function(op_library TARGET) ...@@ -41,8 +41,11 @@ function(op_library TARGET)
endif() endif()
endfunction() endfunction()
add_subdirectory(math)
cc_test(gather_test SRCS gather_test.cc DEPS tensor) cc_test(gather_test SRCS gather_test.cc DEPS tensor)
cc_test(scatter_test SRCS scatter_test.cc DEPS tensor)
cc_library(net_op SRCS net_op.cc DEPS op_registry) cc_library(net_op SRCS net_op.cc DEPS op_registry)
cc_test(net_op_test SRCS net_op_test.cc DEPS net_op) cc_test(net_op_test SRCS net_op_test.cc DEPS net_op)
...@@ -50,7 +53,7 @@ op_library(add_op SRCS add_op.cc add_op.cu) ...@@ -50,7 +53,7 @@ op_library(add_op SRCS add_op.cc add_op.cu)
op_library(mean_op SRCS mean_op.cc mean_op.cu) op_library(mean_op SRCS mean_op.cc mean_op.cu)
op_library(mul_op SRCS mul_op.cc mul_op.cu) op_library(mul_op SRCS mul_op.cc mul_op.cu DEPS math_function)
op_library(rowwise_add_op SRCS rowwise_add_op.cu rowwise_add_op.cc) op_library(rowwise_add_op SRCS rowwise_add_op.cu rowwise_add_op.cc)
op_library(sigmoid_op SRCS sigmoid_op.cc sigmoid_op.cu) op_library(sigmoid_op SRCS sigmoid_op.cc sigmoid_op.cu)
...@@ -61,11 +64,7 @@ op_library(fill_zeros_like_op SRCS fill_zeros_like_op.cc fill_zeros_like_op.cu) ...@@ -61,11 +64,7 @@ op_library(fill_zeros_like_op SRCS fill_zeros_like_op.cc fill_zeros_like_op.cu)
op_library(sgd_op SRCS sgd_op.cc sgd_op.cu) op_library(sgd_op SRCS sgd_op.cc sgd_op.cu)
op_library(fc_op
SRCS fc_op.cc
DEPS mul_op rowwise_add_op sigmoid_op softmax_op net_op)
op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc
DEPS op_desc tensor op_registry operator net_op) DEPS framework_proto tensor op_registry operator net_op)
cc_test(recurrent_op_test SRCS recurrent_op_test.cc DEPS recurrent_op gtest mul_op add_op)
op_library(uniform_random_op op_library(uniform_random_op
SRCS uniform_random_op.cc uniform_random_op.cu) SRCS uniform_random_op.cc uniform_random_op.cu)
...@@ -18,16 +18,15 @@ namespace paddle { ...@@ -18,16 +18,15 @@ namespace paddle {
namespace operators { namespace operators {
class AddOp : public framework::OperatorWithKernel { class AddOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_EQ(ctx.InputSize(), 2); PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(),
PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1); ctx.Input<Tensor>("Y")->dims(),
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), "Inputs of AddOp must all be set");
PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr,
"Outputs of AddOp must all be set");
PADDLE_ENFORCE(ctx.Input<Tensor>(0)->dims() == ctx.Input<Tensor>(1)->dims(),
"Two input of Add Op's dimension must be same."); "Two input of Add Op's dimension must be same.");
ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims()); ctx.Output<Tensor>("Out")->Resize(ctx.Input<Tensor>("X")->dims());
} }
}; };
...@@ -47,6 +46,9 @@ The equation is: Out = X + Y ...@@ -47,6 +46,9 @@ The equation is: Out = X + Y
}; };
class AddOpGrad : public framework::OperatorWithKernel { class AddOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override {} void InferShape(const framework::InferShapeContext &ctx) const override {}
}; };
...@@ -55,8 +57,7 @@ class AddOpGrad : public framework::OperatorWithKernel { ...@@ -55,8 +57,7 @@ class AddOpGrad : public framework::OperatorWithKernel {
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker); REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker, add_two_grad, ops::AddOpGrad);
REGISTER_GRADIENT_OP(add_two, add_two_grad, ops::AddOpGrad);
REGISTER_OP_CPU_KERNEL(add_two, REGISTER_OP_CPU_KERNEL(add_two,
ops::AddKernel<paddle::platform::CPUPlace, float>); ops::AddKernel<paddle::platform::CPUPlace, float>);
...@@ -28,9 +28,9 @@ template <typename Place, typename T> ...@@ -28,9 +28,9 @@ template <typename Place, typename T>
class AddKernel : public framework::OpKernel { class AddKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto input0 = context.Input<Tensor>(0); auto* input0 = context.Input<Tensor>("X");
auto input1 = context.Input<Tensor>(1); auto* input1 = context.Input<Tensor>("Y");
auto output = context.Output<Tensor>(0); auto* output = context.Output<Tensor>("Out");
output->mutable_data<T>(context.GetPlace()); output->mutable_data<T>(context.GetPlace());
......
...@@ -18,33 +18,31 @@ namespace paddle { ...@@ -18,33 +18,31 @@ namespace paddle {
namespace operators { namespace operators {
class OnehotCrossEntropyOp : public framework::OperatorWithKernel { class OnehotCrossEntropyOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_EQ(ctx.InputSize(), 2, auto *X = ctx.Input<Tensor>("X");
"Input size of OnehotCrossEntropyOp must be two"); auto *label = ctx.Input<Tensor>("label");
PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1,
"Output size of OnehotCrossEntropyOp must be one"); PADDLE_ENFORCE_EQ(X->dims().size(), 2, "X's dimension must be 2.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), PADDLE_ENFORCE_EQ(label->dims().size(), 1, "label's dimension must be 1.");
"0-th input of OnehotCrossEntropyOp should be set"); PADDLE_ENFORCE_EQ(X->dims()[0], label->dims()[0]);
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(1), ctx.Output<Tensor>("Y")->Resize({X->dims()[0]});
"1-th input of OnehotCrossEntropyOp should be set");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar(0),
"Outputs of OnehotCrossEntropyOp must all be set");
PADDLE_ENFORCE_EQ(ctx.Input<Tensor>(0)->dims().size(), 2);
PADDLE_ENFORCE_EQ(ctx.Output<Tensor>(0)->dims().size(), 1,
"label's dimension must be 1.");
ctx.Output<Tensor>(0)->Resize({ctx.Input<Tensor>(0)->dims()[0]});
} }
}; };
class OnehotCrossEntropyGradientOp : public framework::OperatorWithKernel { class OnehotCrossEntropyGradientOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
auto X_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto dX = ctx.Output<Tensor>(framework::GradVarName("X"));
auto X = ctx.Input<Tensor>("X"); auto X = ctx.Input<Tensor>("X");
// TODO(superjom) add enforce here after helper functions ready dX->Resize(X->dims());
X_grad->Resize(X->dims());
} }
}; };
...@@ -69,12 +67,9 @@ OnehotCrossEntropy Operator. ...@@ -69,12 +67,9 @@ OnehotCrossEntropy Operator.
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP(onehot_cross_entropy, ops::OnehotCrossEntropyOp, REGISTER_OP(onehot_cross_entropy, ops::OnehotCrossEntropyOp,
ops::OnehotCrossEntropyOpMaker); ops::OnehotCrossEntropyOpMaker, onehot_cross_entropy_grad,
REGISTER_OP_CPU_KERNEL(
onehot_cross_entropy,
ops::OnehotCrossEntropyOpKernel<paddle::platform::CPUPlace, float>);
REGISTER_GRADIENT_OP(onehot_cross_entropy, onehot_cross_entropy_grad,
ops::OnehotCrossEntropyGradientOp); ops::OnehotCrossEntropyGradientOp);
REGISTER_OP_CPU_KERNEL( REGISTER_OP_CPU_KERNEL(onehot_cross_entropy,
onehot_cross_entropy_grad, ops::OnehotCrossEntropyOpKernel<float>);
ops::OnehotCrossEntropyGradientOpKernel<paddle::platform::CPUPlace, float>); REGISTER_OP_CPU_KERNEL(onehot_cross_entropy_grad,
ops::OnehotCrossEntropyGradientOpKernel<float>);
...@@ -12,10 +12,122 @@ ...@@ -12,10 +12,122 @@
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#define EIGEN_USE_GPU #include "paddle/framework/op_registry.h"
#include "paddle/operators/cross_entropy_op.h" #include "paddle/platform/assert.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
__host__ __device__ T clipping_log(const T x) {
PADDLE_ASSERT(std::is_floating_point<T>::value);
const T kApproInf = 1e20;
T v = log(x);
if (v == INFINITY) {
return kApproInf;
}
if (v == -INFINITY) {
return -kApproInf;
}
return v;
}
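Editor's note: clipping_log keeps the loss finite when a predicted probability underflows. For example, clipping_log(0.0f) would otherwise be -INFINITY; clamping it to -kApproInf (-1e20) yields a huge but finite cross-entropy term instead of poisoning later reductions with inf/NaN.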
template <typename T>
__global__ void CrossEntropyKernel(T* Y, const T* X, const int* label,
const int N, const int D) {
// TODO(qingqing) define CUDA_1D_KERNEL_LOOP macro in a common file.
// CUDA_1D_KERNEL_LOOP(i, N) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
i += blockDim.x * gridDim.x) {
PADDLE_ASSERT(label[i] >= 0 && label[i] < D);
Y[i] = -clipping_log(X[i * D + label[i]]);
}
}
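Editor's note: the loop above is the standard grid-stride idiom: each thread starts at its global index and strides by the total number of launched threads, so any grid size covers all N elements. A sketch of the CUDA_1D_KERNEL_LOOP macro the TODO refers to (this is the conventional form; only the name comes from the comment):

#define CUDA_1D_KERNEL_LOOP(i, n)                              \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
       i += blockDim.x * gridDim.x)

// With block = 512 and N = 1000, grid = (N + block - 1) / block = 2,
// i.e. 1024 threads; indices 1000..1023 fail the loop test and do nothing.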
// TODO(qingqing): make zero-setting a common function.
template <typename T>
__global__ void zero(T* X, const int N) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
i += blockDim.x * gridDim.x) {
X[i] = 0.0;
}
}
template <typename T>
__global__ void CrossEntropyGradientKernel(T* dX, const T* dY, const T* X,
const int* label, const int N,
const int D) {
// TODO(qingqing) define CUDA_1D_KERNEL_LOOP macro in a common file.
// CUDA_1D_KERNEL_LOOP(i, N) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
i += blockDim.x * gridDim.x) {
int idx = i * D + label[i];
dX[idx] = -dY[i] / X[idx];
}
}
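Editor's note: the two kernels implement one-hot cross entropy and its gradient. With row i's true class $\ell_i$,

$$Y_i = -\log X_{i,\ell_i}, \qquad dX_{i,\ell_i} = -\frac{dY_i}{X_{i,\ell_i}},$$

and every other entry of dX stays zero, which is why the gradient path first zero-fills dX and then writes only one element per row.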
template <typename T>
class OnehotCrossEntropyOpCUDAKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use GPUPlace.");
auto X = ctx.Input<Tensor>("X");
const T* Xdata = X->data<T>();
const int* label_data = ctx.Input<Tensor>("label")->data<int>();
auto Y = ctx.Output<Tensor>("Y");
Y->mutable_data<T>(ctx.GetPlace());
T* Ydata = Y->data<T>();
int N = X->dims()[0];
int D = X->dims()[1];
int block = 512;
int grid = (N + block - 1) / block;
// TODO(qingqing) launch kernel on specified stream
// based on ExecutionContext.
CrossEntropyKernel<T><<<grid, block>>>(Ydata, Xdata, label_data, N, D);
}
};
template <typename T>
class OnehotCrossEntropyGradientOpCUDAKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use GPUPlace.");
auto X = ctx.Input<Tensor>("X");
auto dX = ctx.Output<Tensor>(framework::GradVarName("X"));
auto dY = ctx.Input<Tensor>(framework::GradVarName("Y"));
auto label = ctx.Input<Tensor>("label");
auto* dXdata = dX->template mutable_data<T>(ctx.GetPlace());
auto* dYdata = dY->template data<T>();
auto* Xdata = X->template data<T>();
auto* label_data = label->data<int>();
int N = X->dims()[0];
int D = X->dims()[1];
int block = 512;
int grid = (N * D + block - 1) / block;
zero<T><<<grid, block>>>(dXdata, N * D);
grid = (N + block - 1) / block;
// TODO(qingqing): launch kernel on specified stream
// based on ExecutionContext.
CrossEntropyGradientKernel<T><<<grid, block>>>(dXdata, dYdata, Xdata,
label_data, N, D);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL( REGISTER_OP_GPU_KERNEL(onehot_cross_entropy,
onehot_cross_entropy, ops::OnehotCrossEntropyOpCUDAKernel<float>);
ops::OnehotCrossEntropyOpKernel<paddle::platform::GPUPlace, float>); REGISTER_OP_GPU_KERNEL(onehot_cross_entropy_grad,
ops::OnehotCrossEntropyGradientOpCUDAKernel<float>);
...@@ -21,7 +21,7 @@ namespace operators { ...@@ -21,7 +21,7 @@ namespace operators {
using Tensor = framework::Tensor; using Tensor = framework::Tensor;
template <typename T> template <typename T>
T tolerable_value(T x) { inline T tolerable_value(const T x) {
static_assert(std::is_floating_point<T>::value, static_assert(std::is_floating_point<T>::value,
"tolerable_value works only on float, " "tolerable_value works only on float, "
"double and double double."); "double and double double.");
...@@ -39,13 +39,16 @@ T tolerable_value(T x) { ...@@ -39,13 +39,16 @@ T tolerable_value(T x) {
return x; return x;
} }
template <typename Place, typename T> template <typename T>
class OnehotCrossEntropyOpKernel : public framework::OpKernel { class OnehotCrossEntropyOpKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
"It must use CPUPlace.");
auto X = ctx.Input<Tensor>("X"); auto X = ctx.Input<Tensor>("X");
const T* Xdata = X->data<T>(); const T* Xdata = X->data<T>();
const int* label_data = ctx.Input<Tensor>(1)->data<int>(); const int* label_data = ctx.Input<Tensor>("label")->data<int>();
auto Y = ctx.Output<Tensor>("Y"); auto Y = ctx.Output<Tensor>("Y");
Y->mutable_data<T>(ctx.GetPlace()); Y->mutable_data<T>(ctx.GetPlace());
...@@ -62,10 +65,13 @@ class OnehotCrossEntropyOpKernel : public framework::OpKernel { ...@@ -62,10 +65,13 @@ class OnehotCrossEntropyOpKernel : public framework::OpKernel {
} }
}; };
template <typename Place, typename T> template <typename T>
class OnehotCrossEntropyGradientOpKernel : public framework::OpKernel { class OnehotCrossEntropyGradientOpKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
"It must use CPUPlace.");
auto X = ctx.Input<Tensor>("X"); auto X = ctx.Input<Tensor>("X");
auto dX = ctx.Output<Tensor>(framework::GradVarName("X")); auto dX = ctx.Output<Tensor>(framework::GradVarName("X"));
auto dY = ctx.Input<Tensor>(framework::GradVarName("Y")); auto dY = ctx.Input<Tensor>(framework::GradVarName("Y"));
...@@ -79,6 +85,8 @@ class OnehotCrossEntropyGradientOpKernel : public framework::OpKernel { ...@@ -79,6 +85,8 @@ class OnehotCrossEntropyGradientOpKernel : public framework::OpKernel {
const int batch_size = X->dims()[0]; const int batch_size = X->dims()[0];
const int class_num = X->dims()[1]; const int class_num = X->dims()[1];
// TODO(qingqing): make zero-setting a common function.
memset(dXdata, 0, sizeof(T) * batch_size * class_num);
for (int i = 0; i < batch_size; ++i) { for (int i = 0; i < batch_size; ++i) {
int index = i * class_num + label_data[i]; int index = i * class_num + label_data[i];
dXdata[index] = -tolerable_value(dYdata[i] / Xdata[index]); dXdata[index] = -tolerable_value(dYdata[i] / Xdata[index]);
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/net_op.h"
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
using OpRegistry = framework::OpRegistry;
class FullyConnectedOp : public NetOp {
public:
void Init() override {
AddOp(OpRegistry::CreateOp("mul",
{
Input("X"), Input("W"),
},
{Output("before_act")}, {}));
auto b = Input("b");
if (b != framework::kEmptyVarName) {
AddOp(OpRegistry::CreateOp("rowwise_add",
{Output("before_act"), Input("b")},
{Output("before_act")}, {}));
}
auto activation = GetAttr<std::string>("activation");
AddOp(OpRegistry::CreateOp(activation, {Output("before_act")},
{Output("Y")}, {}));
CompleteAddOp(false);
}
};
class FullyConnectedOpMaker : public framework::OpProtoAndCheckerMaker {
public:
FullyConnectedOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "the input of fc operator");
AddInput("W", "the weight of fc operator");
AddInput("b", "the bias of fc operator");
AddOutput("Y", "the output of fc operator");
AddOutput("before_act", "the before activation output of fc operator")
.SetTemporary();
AddAttr<std::string>("activation", "The activation key for fc layer")
.SetDefault("sigmoid")
.InEnum({"sigmoid", "softmax"});
//! TODO(yuyang18): Complete comment;
AddComment("FullyConnected Operator");
}
};
} // namespace operators
} // namespace paddle
USE_OP(mul);
USE_OP(rowwise_add);
USE_OP(sigmoid);
USE_OP(softmax);
namespace ops = paddle::operators;
REGISTER_OP(fc, ops::FullyConnectedOp, ops::FullyConnectedOpMaker);
...@@ -18,18 +18,13 @@ namespace paddle { ...@@ -18,18 +18,13 @@ namespace paddle {
namespace operators { namespace operators {
class FillZerosLikeOp : public framework::OperatorWithKernel { class FillZerosLikeOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_EQ(ctx.InputSize(), 1UL, ctx.Output<framework::Tensor>("Dst")->Resize(
"Input size of FillZerosLikeOp must be one."); ctx.Input<framework::Tensor>("Src")->dims());
PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1UL,
"Output size of AddOp must be one.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0),
"Input of FillZerosLikeOp must be set.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar(0),
"Output of FillZerosLikeOp must be set.");
ctx.Output<framework::Tensor>(0)->Resize(
ctx.Input<framework::Tensor>(0)->dims());
} }
}; };
...@@ -51,7 +46,8 @@ The output will have the same size with input. ...@@ -51,7 +46,8 @@ The output will have the same size with input.
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP(fill_zeros_like, ops::FillZerosLikeOp, ops::FillZerosLikeOpMaker); REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, ops::FillZerosLikeOp,
ops::FillZerosLikeOpMaker);
REGISTER_OP_CPU_KERNEL( REGISTER_OP_CPU_KERNEL(
fill_zeros_like, fill_zeros_like,
ops::FillZerosLikeKernel<paddle::platform::CPUPlace, float>); ops::FillZerosLikeKernel<paddle::platform::CPUPlace, float>);
...@@ -23,7 +23,7 @@ template <typename Place, typename T> ...@@ -23,7 +23,7 @@ template <typename Place, typename T>
class FillZerosLikeKernel : public framework::OpKernel { class FillZerosLikeKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto* output = context.Output<framework::Tensor>(0); auto* output = context.Output<framework::Tensor>("Dst");
output->mutable_data<T>(context.GetPlace()); output->mutable_data<T>(context.GetPlace());
auto t = framework::EigenVector<T>::Flatten(*output); auto t = framework::EigenVector<T>::Flatten(*output);
t.device(context.GetEigenDevice<Place>()) = t.constant(T(0)); t.device(context.GetEigenDevice<Place>()) = t.constant(T(0));
......
...@@ -29,7 +29,7 @@ void CPUGather(const T* params, const int* indices, const int slice_size, ...@@ -29,7 +29,7 @@ void CPUGather(const T* params, const int* indices, const int slice_size,
const int index_size, T* output) { const int index_size, T* output) {
const size_t slice_bytes = slice_size * sizeof(T); const size_t slice_bytes = slice_size * sizeof(T);
for (size_t i = 0; i < index_size; ++i) { for (int i = 0; i < index_size; ++i) {
int index_ = indices[i]; int index_ = indices[i];
memcpy(output + i * slice_size, params + index_ * slice_size, slice_bytes); memcpy(output + i * slice_size, params + index_ * slice_size, slice_bytes);
} }
...@@ -60,7 +60,7 @@ void Gather(const platform::Place& place, const paddle::framework::Tensor* src, ...@@ -60,7 +60,7 @@ void Gather(const platform::Place& place, const paddle::framework::Tensor* src,
// slice size // slice size
int slice_size = 1; int slice_size = 1;
for (size_t i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i]; for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i];
// Gathering // Gathering
if (platform::is_cpu_place(place)) { if (platform::is_cpu_place(place)) {
......
...@@ -35,7 +35,7 @@ TEST(Gather, GatherData) { ...@@ -35,7 +35,7 @@ TEST(Gather, GatherData) {
p_src = src->mutable_data<int>(make_ddim({3, 4}), CPUPlace()); p_src = src->mutable_data<int>(make_ddim({3, 4}), CPUPlace());
p_index = index->mutable_data<int>(make_ddim({2}), CPUPlace()); p_index = index->mutable_data<int>(make_ddim({2}), CPUPlace());
for (size_t i = 0; i < 12; ++i) p_src[i] = i; for (int i = 0; i < 12; ++i) p_src[i] = i;
p_index[0] = 1; p_index[0] = 1;
p_index[1] = 0; p_index[1] = 0;
...@@ -43,6 +43,10 @@ TEST(Gather, GatherData) { ...@@ -43,6 +43,10 @@ TEST(Gather, GatherData) {
Gather<int>(CPUPlace(), src, index, output); Gather<int>(CPUPlace(), src, index, output);
for (size_t i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], i + 4); for (int i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], i + 4);
for (size_t i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], i - 4); for (int i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], i - 4);
delete src;
delete index;
delete output;
} }
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
...@@ -19,33 +16,36 @@ namespace paddle { ...@@ -19,33 +16,36 @@ namespace paddle {
namespace operators { namespace operators {
template <typename T> template <typename T>
class GaussianRandomKernel : public framework::OpKernel { class CPUGaussianRandomKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
float mean = context.op_.GetAttr<float>("mean"); float mean = context.op_.GetAttr<float>("mean");
float std = context.op_.GetAttr<float>("std"); float std = context.op_.GetAttr<float>("std");
auto* tensor = context.Output<framework::Tensor>(0); auto* tensor = context.Output<framework::Tensor>("Out");
T* data = tensor->mutable_data<T>(context.GetPlace()); T* data = tensor->mutable_data<T>(context.GetPlace());
// TODO(dzh): attribute does not support unsigned int. unsigned int seed =
// And we need a global random seed configuration. static_cast<unsigned int>(context.op_.GetAttr<int>("seed"));
int seed = context.op_.GetAttr<int>("seed"); std::minstd_rand engine;
if (seed == 0) { if (seed == 0) {
seed = std::random_device()(); seed = std::random_device()();
} }
std::mt19937 g(seed); engine.seed(seed);
std::normal_distribution<T> distribution(mean, std); std::normal_distribution<T> dist(mean, std);
ssize_t size = framework::product(tensor->dims()); ssize_t size = framework::product(tensor->dims());
for (int i = 0; i < size; ++i) { for (ssize_t i = 0; i < size; ++i) {
data[i] = distribution(g); data[i] = dist(engine);
} }
} }
}; };
class GaussianRandomOp : public framework::OperatorWithKernel { class GaussianRandomOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext& context) const override { void InferShape(const framework::InferShapeContext& context) const override {
auto* tensor = context.Output<framework::Tensor>(0); auto* tensor = context.Output<framework::Tensor>("Out");
auto dims = GetAttr<std::vector<int>>("dims"); auto dims = GetAttr<std::vector<int>>("dims");
PADDLE_ENFORCE(dims.size() > 0UL, PADDLE_ENFORCE(dims.size() > 0UL,
"dims can be one int or array. dims must be set."); "dims can be one int or array. dims must be set.");
...@@ -65,8 +65,8 @@ Use to initialize tensor with gaussian random generator. ...@@ -65,8 +65,8 @@ Use to initialize tensor with gaussian random generator.
)DOC"); )DOC");
AddAttr<std::vector<int>>("dims", "The dimension of random tensor."); AddAttr<std::vector<int>>("dims", "The dimension of random tensor.");
AddAttr<float>("mean", "mean value of random.").SetDefault(.0f); AddAttr<float>("mean", "mean of random tensor.").SetDefault(.0f);
AddAttr<float>("std", "minimum value of random value.").SetDefault(1.0f); AddAttr<float>("std", "std of random tensor.").SetDefault(1.0f);
AddAttr<int>("seed", AddAttr<int>("seed",
"Random seed of generator." "Random seed of generator."
"0 means use system wide seed") "0 means use system wide seed")
...@@ -78,5 +78,6 @@ Use to initialize tensor with gaussian random generator. ...@@ -78,5 +78,6 @@ Use to initialize tensor with gaussian random generator.
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP(gaussian_random, ops::GaussianRandomOp, ops::GaussianRandomOpMaker); REGISTER_OP_WITHOUT_GRADIENT(gaussian_random, ops::GaussianRandomOp,
REGISTER_OP_CPU_KERNEL(gaussian_random, ops::GaussianRandomKernel<float>); ops::GaussianRandomOpMaker);
REGISTER_OP_CPU_KERNEL(gaussian_random, ops::CPUGaussianRandomKernel<float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include <memory> #include <thrust/device_ptr.h>
#include <random> #include <thrust/iterator/counting_iterator.h>
#include "paddle/platform/dynload/curand.h" #include <thrust/random.h>
#include "paddle/platform/gpu_info.h" #include <thrust/transform.h>
#include "paddle/framework/op_registry.h" #include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
template <typename T> template <typename T>
class GaussianRandomKernel : public framework::OpKernel { struct GaussianGenerator {
T mean_, std_;
unsigned int seed_;
__host__ __device__ GaussianGenerator(T mean, T std, int seed)
: mean_(mean), std_(std), seed_(seed) {}
__host__ __device__ T operator()(const unsigned int n) const {
thrust::minstd_rand rng;
rng.seed(seed_);
thrust::normal_distribution<T> dist(mean_, std_);
rng.discard(n);
return dist(rng);
}
};
template <typename T>
class GPUGaussianRandomKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
float mean = context.op_.GetAttr<float>("mean"); auto* tensor = context.Output<framework::Tensor>("Out");
float std = context.op_.GetAttr<float>("std");
auto* tensor = context.Output<framework::Tensor>(0);
T* data = tensor->mutable_data<T>(context.GetPlace()); T* data = tensor->mutable_data<T>(context.GetPlace());
unsigned int seed =
int seed = context.op_.GetAttr<int>("seed"); static_cast<unsigned int>(context.op_.GetAttr<int>("seed"));
if (seed == 0) { if (seed == 0) {
std::random_device rd; std::random_device rd;
seed = rd(); seed = rd();
} }
curandGenerator_t g; T mean = static_cast<T>(context.op_.GetAttr<float>("mean"));
PADDLE_ENFORCE(platform::dynload::curandCreateGenerator( T std = static_cast<T>(context.op_.GetAttr<float>("std"));
&g, CURAND_RNG_PSEUDO_DEFAULT)); thrust::counting_iterator<unsigned int> index_sequence_begin(0);
PADDLE_ENFORCE( ssize_t N = framework::product(tensor->dims());
platform::dynload::curandSetPseudoRandomGeneratorSeed(g, seed)); thrust::transform(index_sequence_begin, index_sequence_begin + N,
platform::dynload::curandGenerateNormal( thrust::device_ptr<T>(data),
g, data, framework::product(tensor->dims()), mean, std); GaussianGenerator<T>(mean, std, seed));
} }
}; };
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; REGISTER_OP_GPU_KERNEL(gaussian_random,
REGISTER_OP_GPU_KERNEL(gaussian_random, ops::GaussianRandomKernel<float>); paddle::operators::GPUGaussianRandomKernel<float>);
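Editor's note: GaussianGenerator makes GPU random generation deterministic and scheduling-independent: every output index n reseeds the same engine and discards n draws, so element n's value depends only on n and the seed. A self-contained sketch of the counting-iterator pattern (hypothetical names; assumes a CUDA toolchain with Thrust):

#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random.h>
#include <thrust/transform.h>

struct Gauss {
  float mean, std;
  unsigned int seed;
  __host__ __device__ float operator()(unsigned int n) const {
    thrust::minstd_rand rng(seed);
    rng.discard(n);  // jump ahead: the result depends only on n and seed
    thrust::normal_distribution<float> dist(mean, std);
    return dist(rng);
  }
};

int main() {
  thrust::device_vector<float> out(8);
  thrust::counting_iterator<unsigned int> first(0);
  thrust::transform(first, first + out.size(), out.begin(),
                    Gauss{0.0f, 1.0f, 42u});
  return 0;
}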
if(WITH_GPU)
nv_library(math_function SRCS math_function.cc math_function.cu DEPS cblas device_context)
else()
cc_library(math_function SRCS math_function.cc DEPS cblas device_context)
endif()
nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/math_function.h"
namespace paddle {
namespace operators {
namespace math {
template <>
void gemm<platform::CPUPlace, float>(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const float alpha, const float* A,
const float* B, const float beta, float* C,
platform::DeviceContext* context) {
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
cblas_sgemm(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb,
beta, C, ldc);
}
template <>
void gemm<platform::CPUPlace, double>(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const double alpha, const double* A,
const double* B, const double beta,
double* C,
platform::DeviceContext* context) {
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
cblas_dgemm(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb,
beta, C, ldc);
}
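Editor's note: the wrappers above fix the leading dimensions for dense row-major storage: with no transposes, lda = K, ldb = N, ldc = N. A standalone call showing the same convention on a worked 2x3 by 3x2 product (assumes an OpenBLAS-style CBLAS is linked):

#include <cblas.h>
#include <cstdio>

int main() {
  const int M = 2, K = 3, N = 2;
  float A[M * K] = {0, 1, 2, 3, 4, 5};  // 2x3, row major
  float B[K * N] = {1, 0, 0, 1, 1, 1};  // 3x2, row major
  float C[M * N] = {0};
  // C = 1.0 * A * B + 0.0 * C
  cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, M, N, K,
              1.0f, A, K, B, N, 0.0f, C, N);
  printf("%g %g %g %g\n", C[0], C[1], C[2], C[3]);  // 2 3 8 9
}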
template <>
void matmul<platform::CPUPlace, float>(const framework::Tensor& matrix_a,
bool trans_a,
const framework::Tensor& matrix_b,
bool trans_b, float alpha,
framework::Tensor* matrix_out,
float beta,
platform::DeviceContext* context) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_cpu_place(matrix_a.place()) &&
platform::is_cpu_place(matrix_b.place()) &&
platform::is_cpu_place(matrix_out->place()),
"Matrix must all be in CPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::CPUPlace, float>(
transA, transB, M, N, K, alpha, matrix_a.data<float>(),
matrix_b.data<float>(), beta, matrix_out->data<float>(), context);
}
template <>
void matmul<platform::CPUPlace, double>(const framework::Tensor& matrix_a,
bool trans_a,
const framework::Tensor& matrix_b,
bool trans_b, double alpha,
framework::Tensor* matrix_out,
double beta,
platform::DeviceContext* context) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_cpu_place(matrix_a.place()) &&
platform::is_cpu_place(matrix_b.place()) &&
platform::is_cpu_place(matrix_out->place()),
"Matrix must all be in CPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::CPUPlace, double>(
transA, transB, M, N, K, alpha, matrix_a.data<double>(),
matrix_b.data<double>(), beta, matrix_out->data<double>(), context);
}
} // namespace math
} // namespace operators
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/math_function.h"
namespace paddle {
namespace operators {
namespace math {
template <>
void gemm<platform::GPUPlace, float>(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const float alpha, const float* A,
const float* B, const float beta, float* C,
platform::DeviceContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasSgemm(
reinterpret_cast<platform::CUDADeviceContext*>(context)->cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void gemm<platform::GPUPlace, double>(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const double alpha, const double* A,
const double* B, const double beta,
double* C,
platform::DeviceContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasDgemm(
reinterpret_cast<platform::CUDADeviceContext*>(context)->cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
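Editor's note: both GPU wrappers pass B before A and swap the transpose flags because cuBLAS is column-major. A row-major M x N buffer is bit-identical to a column-major N x M buffer holding the transpose, so the call exploits

$$C = A B \iff C^{T} = B^{T} A^{T}:$$

asking cuBLAS for the N x M column-major product of B^T and A^T writes exactly the row-major C the caller expects, with no copies or explicit transposition.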
template <>
void matmul<platform::GPUPlace, float>(const framework::Tensor& matrix_a,
bool trans_a,
const framework::Tensor& matrix_b,
bool trans_b, float alpha,
framework::Tensor* matrix_out,
float beta,
platform::DeviceContext* context) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in GPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, float>(
transA, transB, M, N, K, alpha, matrix_a.data<float>(),
matrix_b.data<float>(), beta, matrix_out->data<float>(), context);
}
template <>
void matmul<platform::GPUPlace, double>(const framework::Tensor& matrix_a,
bool trans_a,
const framework::Tensor& matrix_b,
bool trans_b, double alpha,
framework::Tensor* matrix_out,
double beta,
platform::DeviceContext* context) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
"The input and output of matmul be matrix");
PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) &&
platform::is_gpu_place(matrix_b.place()) &&
platform::is_gpu_place(matrix_out->place()),
"Matrix must all be in GPUPlace");
int M = dim_out[0];
int N = dim_out[1];
int K = (trans_a == false) ? dim_a[1] : dim_a[0];
CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, double>(
transA, transB, M, N, K, alpha, matrix_a.data<double>(),
matrix_b.data<double>(), beta, matrix_out->data<double>(), context);
}
} // namespace math
} // namespace operators
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#ifdef PADDLE_USE_MKLML
#include <mkl_cblas.h>
#include <mkl_lapacke.h>
#include <mkl_vml_functions.h>
#endif
#ifdef PADDLE_USE_MKL
#include <mkl.h>
#include <mkl_lapacke.h>
#endif
#ifdef PADDLE_USE_ATLAS
extern "C" {
#include <cblas.h>
#include <clapack.h>
}
#endif
#ifdef PADDLE_USE_OPENBLAS
#include <cblas.h>
#include <lapacke.h>
#endif
#ifndef LAPACK_FOUND
extern "C" {
#include <cblas.h>
int LAPACKE_sgetrf(int matrix_layout, int m, int n, float* a, int lda,
int* ipiv);
int LAPACKE_dgetrf(int matrix_layout, int m, int n, double* a, int lda,
int* ipiv);
int LAPACKE_sgetri(int matrix_layout, int n, float* a, int lda,
const int* ipiv);
int LAPACKE_dgetri(int matrix_layout, int n, double* a, int lda,
const int* ipiv);
}
#endif
#include <cmath>
#include "paddle/framework/tensor.h"
#include "paddle/platform/device_context.h"
#include "paddle/platform/enforce.h"
namespace paddle {
namespace operators {
namespace math {
// Supports contiguous memory only for now.
// If transA = N and transB = N, then
// matrixA: M x K, matrixB: K x N, matrixC: M x N.
// For more detailed info, please refer to
// http://www.netlib.org/lapack/explore-html/d4/de2/sgemm_8f.html
template <typename Place, typename T>
void gemm(const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB,
const int M, const int N, const int K, const T alpha, const T* A,
const T* B, const T beta, T* C, platform::DeviceContext* context);
// Matrix multiply over contiguous memory.
template <typename Place, typename T>
void matmul(const framework::Tensor& matrix_a, bool trans_a,
const framework::Tensor& matrix_b, bool trans_b, T alpha,
framework::Tensor* matrix_out, T beta,
platform::DeviceContext* context);
} // namespace math
} // namespace operators
} // namespace paddle
#include "paddle/operators/math/math_function.h"
#include "gtest/gtest.h"
#ifndef PADDLE_ONLY_CPU
TEST(math_function, notrans_mul_trans) {
paddle::framework::Tensor input1;
paddle::framework::Tensor input1_gpu;
paddle::framework::Tensor input2_gpu;
paddle::framework::Tensor out_gpu;
paddle::framework::Tensor out;
auto* cpu_place = new paddle::platform::CPUPlace();
float* input1_ptr = input1.mutable_data<float>({2, 3}, *cpu_place);
float arr[6] = {0, 1, 2, 3, 4, 5};
memcpy(input1_ptr, arr, 6 * sizeof(float));
auto* gpu_place = new paddle::platform::GPUPlace(0);
paddle::platform::DeviceContext* context =
new paddle::platform::CUDADeviceContext(*gpu_place);
input1_gpu.CopyFrom<float>(input1, *gpu_place);
input2_gpu.CopyFrom<float>(input1, *gpu_place);
out_gpu.mutable_data<float>({2, 2}, *gpu_place);
paddle::operators::math::matmul<paddle::platform::GPUPlace, float>(
input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0, context);
out.CopyFrom<float>(out_gpu, *cpu_place);
float* out_ptr = out.data<float>();
EXPECT_EQ(out_ptr[0], 5);
EXPECT_EQ(out_ptr[1], 14);
EXPECT_EQ(out_ptr[2], 14);
EXPECT_EQ(out_ptr[3], 50);
}
TEST(math_function, trans_mul_notrans) {
paddle::framework::Tensor input1;
paddle::framework::Tensor input1_gpu;
paddle::framework::Tensor input2_gpu;
paddle::framework::Tensor out_gpu;
paddle::framework::Tensor out;
auto* cpu_place = new paddle::platform::CPUPlace();
float* input1_ptr = input1.mutable_data<float>({2, 3}, *cpu_place);
float arr[6] = {0, 1, 2, 3, 4, 5};
memcpy(input1_ptr, arr, 6 * sizeof(float));
auto* gpu_place = new paddle::platform::GPUPlace(0);
paddle::platform::DeviceContext* context =
new paddle::platform::CUDADeviceContext(*gpu_place);
input1_gpu.CopyFrom<float>(input1, *gpu_place);
input2_gpu.CopyFrom<float>(input1, *gpu_place);
out_gpu.mutable_data<float>({3, 3}, *gpu_place);
paddle::operators::math::matmul<paddle::platform::GPUPlace, float>(
input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0, context);
out.CopyFrom<float>(out_gpu, *cpu_place);
float* out_ptr = out.data<float>();
EXPECT_EQ(out_ptr[0], 9);
EXPECT_EQ(out_ptr[1], 12);
EXPECT_EQ(out_ptr[2], 15);
EXPECT_EQ(out_ptr[3], 12);
EXPECT_EQ(out_ptr[4], 17);
EXPECT_EQ(out_ptr[5], 22);
EXPECT_EQ(out_ptr[6], 15);
EXPECT_EQ(out_ptr[7], 22);
EXPECT_EQ(out_ptr[8], 29);
}
#endif
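Editor's note: both tests use the same 2x3 input A = [[0, 1, 2], [3, 4, 5]], so the expected values are easy to verify by hand:

$$A A^{T} = \begin{pmatrix} 5 & 14 \\ 14 & 50 \end{pmatrix}, \qquad A^{T} A = \begin{pmatrix} 9 & 12 & 15 \\ 12 & 17 & 22 \\ 15 & 22 & 29 \end{pmatrix},$$

matching the EXPECT_EQ values above (e.g. the (0,0) entry of A A^T is 0^2 + 1^2 + 2^2 = 5).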
...@@ -18,13 +18,14 @@ namespace paddle { ...@@ -18,13 +18,14 @@ namespace paddle {
namespace operators { namespace operators {
class MeanOp : public framework::OperatorWithKernel { class MeanOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_EQ(ctx.InputSize(), 1, "Input size of AddOp must be one"); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, "Output size of AddOp must be one"); "Input of MeanOp must be initialized.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), "input should be set"); ctx.Output<Tensor>("Out")->Resize({1});
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar(0), "output should be set");
ctx.Output<Tensor>(0)->Resize(framework::make_ddim({1}));
} }
}; };
...@@ -33,12 +34,15 @@ class MeanOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -33,12 +34,15 @@ class MeanOpMaker : public framework::OpProtoAndCheckerMaker {
MeanOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) MeanOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The input of mean op"); AddInput("X", "The input of mean op");
AddOutput("Out", "The output of mean op").IgnoreGradient(); AddOutput("Out", "The output of mean op").NotInGradient();
AddComment("Mean Operator"); AddComment("Mean Operator");
} }
}; };
class MeanGradOp : public framework::OperatorWithKernel { class MeanGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
ctx.Output<Tensor>(framework::GradVarName("X")) ctx.Output<Tensor>(framework::GradVarName("X"))
...@@ -50,9 +54,8 @@ class MeanGradOp : public framework::OperatorWithKernel { ...@@ -50,9 +54,8 @@ class MeanGradOp : public framework::OperatorWithKernel {
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP(mean, ops::MeanOp, ops::MeanOpMaker); REGISTER_OP(mean, ops::MeanOp, ops::MeanOpMaker, mean_grad, ops::MeanGradOp);
REGISTER_OP_CPU_KERNEL(mean, REGISTER_OP_CPU_KERNEL(mean,
ops::MeanKernel<paddle::platform::CPUPlace, float>); ops::MeanKernel<paddle::platform::CPUPlace, float>);
REGISTER_GRADIENT_OP(mean, mean_grad, ops::MeanGradOp);
REGISTER_OP_CPU_KERNEL(mean_grad, REGISTER_OP_CPU_KERNEL(mean_grad,
ops::MeanGradKernel<paddle::platform::CPUPlace, float>); ops::MeanGradKernel<paddle::platform::CPUPlace, float>);
...@@ -31,14 +31,14 @@ template <typename Place, typename T> ...@@ -31,14 +31,14 @@ template <typename Place, typename T>
class MeanKernel : public framework::OpKernel { class MeanKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto input = context.Input<Tensor>(0); auto* input = context.Input<Tensor>("X");
auto output = context.Output<Tensor>(0); auto* output = context.Output<Tensor>("Out");
output->mutable_data<T>(context.GetPlace()); output->mutable_data<T>(context.GetPlace());
auto X = EigenVector<T>::Flatten(*input); auto X = EigenVector<T>::Flatten(*input);
auto y = EigenScalar<T>::From(*output); auto y = EigenScalar<T>::From(*output);
auto place = context.GetEigenDevice<Place>(); auto& place = context.GetEigenDevice<Place>();
y.device(place) = X.mean(); y.device(place) = X.mean();
} }
...@@ -55,9 +55,10 @@ class MeanGradKernel : public framework::OpKernel { ...@@ -55,9 +55,10 @@ class MeanGradKernel : public framework::OpKernel {
IG->mutable_data<T>(context.GetPlace()); IG->mutable_data<T>(context.GetPlace());
T ig_size = (T)framework::product(IG->dims()); T ig_size = (T)framework::product(IG->dims());
Eigen::DSizes<int, 1> bcast(ig_size);
EigenVector<T>::Flatten(*IG).device(context.GetEigenDevice<Place>()) = EigenVector<T>::Flatten(*IG).device(context.GetEigenDevice<Place>()) =
EigenScalar<T>::From(*OG) / ig_size; (EigenVector<T>::From(*OG) / ig_size).broadcast(bcast);
} }
}; };
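Editor's note: the broadcast above is the whole gradient of the mean. For y = (1/N) * sum_i x_i, every input receives the same share of the upstream gradient,

$$\frac{\partial L}{\partial x_i} = \frac{1}{N}\,\frac{\partial L}{\partial y},$$

which the kernel realizes by dividing the scalar OG by ig_size and broadcasting the result to IG's shape.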
......
...@@ -17,12 +17,16 @@ ...@@ -17,12 +17,16 @@
namespace paddle { namespace paddle {
namespace operators { namespace operators {
using framework::Tensor;
class MulOp : public framework::OperatorWithKernel { class MulOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE(ctx.InputSize() == 2, "The mul op must take two inputs"); auto dim0 = ctx.Input<Tensor>("X")->dims();
auto dim0 = ctx.Input<Tensor>(0)->dims(); auto dim1 = ctx.Input<Tensor>("Y")->dims();
auto dim1 = ctx.Input<Tensor>(1)->dims();
PADDLE_ENFORCE_EQ(dim0.size(), 2, PADDLE_ENFORCE_EQ(dim0.size(), 2,
"input X(%s) should be a tensor with 2 dims, a matrix", "input X(%s) should be a tensor with 2 dims, a matrix",
ctx.op_.Input("X")); ctx.op_.Input("X"));
...@@ -32,8 +36,7 @@ class MulOp : public framework::OperatorWithKernel { ...@@ -32,8 +36,7 @@ class MulOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
dim0[1], dim1[0], dim0[1], dim1[0],
"First matrix's width must be equal with second matrix's height."); "First matrix's width must be equal with second matrix's height.");
PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, "The mul op takes only one output"); ctx.Output<Tensor>("Out")->Resize({dim0[0], dim1[1]});
ctx.Output<Tensor>(0)->Resize({dim0[0], dim1[1]});
} }
}; };
...@@ -53,11 +56,27 @@ The equation is: Out = X * Y ...@@ -53,11 +56,27 @@ The equation is: Out = X * Y
}; };
class MulOpGrad : public framework::OperatorWithKernel { class MulOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override {} void InferShape(const framework::InferShapeContext &ctx) const override {
std::string DebugString() const override { PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null");
LOG(INFO) << "MulGrad"; PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null");
return ""; PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
"Input(Out@GRAD) should not be null");
auto x_dims = ctx.Input<Tensor>("X")->dims();
auto y_dims = ctx.Input<Tensor>("Y")->dims();
auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *y_grad = ctx.Output<Tensor>(framework::GradVarName("Y"));
PADDLE_ENFORCE(x_dims[0] == out_dims[0],
"Out@GRAD M X N must equal to X dims 0, M ");
PADDLE_ENFORCE(y_dims[1] == out_dims[1],
"Out@GRAD M X N must equal to Y dims 1, N ");
x_grad->Resize(x_dims);
y_grad->Resize(y_dims);
} }
}; };
...@@ -65,7 +84,7 @@ class MulOpGrad : public framework::OperatorWithKernel { ...@@ -65,7 +84,7 @@ class MulOpGrad : public framework::OperatorWithKernel {
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker); REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, mul_grad, ops::MulOpGrad);
REGISTER_GRADIENT_OP(mul, mul_grad, ops::MulOpGrad);
REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel<paddle::platform::CPUPlace, float>); REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(mul_grad,
ops::MulGradKernel<paddle::platform::CPUPlace, float>);
...@@ -16,5 +16,6 @@ ...@@ -16,5 +16,6 @@
#include "paddle/operators/mul_op.h" #include "paddle/operators/mul_op.h"
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel<paddle::platform::GPUPlace, float>); REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(mul_grad,
ops::MulGradKernel<paddle::platform::GPUPlace, float>);
...@@ -13,6 +13,9 @@ ...@@ -13,6 +13,9 @@
limitations under the License. */ limitations under the License. */
#pragma once #pragma once
#include "paddle/operators/math/math_function.h"
#include "paddle/framework/eigen.h" #include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h" #include "paddle/framework/op_registry.h"
...@@ -28,21 +31,34 @@ template <typename Place, typename T> ...@@ -28,21 +31,34 @@ template <typename Place, typename T>
class MulKernel : public framework::OpKernel { class MulKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair = { auto* X = context.Input<Tensor>("X");
{Eigen::IndexPair<Eigen::DenseIndex>(1, 0)}}; auto* Y = context.Input<Tensor>("Y");
auto* Z = context.Output<Tensor>("Out");
auto input0 = context.Input<Tensor>("X"); Z->mutable_data<T>(context.GetPlace());
auto input1 = context.Input<Tensor>("Y"); auto* device_context =
auto output = context.Output<Tensor>(0); const_cast<platform::DeviceContext*>(context.device_context_);
math::matmul<Place, T>(*X, false, *Y, false, 1, Z, 0, device_context);
output->mutable_data<T>(context.GetPlace()); }
};
auto X = EigenMatrix<T>::From(*input0); template <typename Place, typename T>
auto Y = EigenMatrix<T>::From(*input1); class MulGradKernel : public framework::OpKernel {
auto Z = EigenMatrix<T>::From(*output); public:
auto place = context.GetEigenDevice<Place>(); void Compute(const framework::ExecutionContext& ctx) const override {
auto* X = ctx.Input<Tensor>("X");
auto* Y = ctx.Input<Tensor>("Y");
auto* dOut = ctx.Input<Tensor>(framework::GradVarName("Out"));
Z.device(place) = X.contract(Y, dim_pair); auto* dX = ctx.Output<Tensor>(framework::GradVarName("X"));
auto* dY = ctx.Output<Tensor>(framework::GradVarName("Y"));
dX->mutable_data<T>(ctx.GetPlace());
dY->mutable_data<T>(ctx.GetPlace());
auto* device_context =
const_cast<platform::DeviceContext*>(ctx.device_context_);
// dX = dOut * Y'. dX: M x K, dOut : M x N, Y : K x N
math::matmul<Place, T>(*dOut, false, *Y, true, 1, dX, 0, device_context);
// dY = X' * dOut. dY: K x N, dOut : M x N, X : M x K
math::matmul<Place, T>(*X, true, *dOut, false, 1, dY, 0, device_context);
} }
}; };
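Editor's note: the two matmul calls are the standard matrix-product gradients. For Z = X Y with X: M x K and Y: K x N,

$$dX = dZ\,Y^{T} \in \mathbb{R}^{M \times K}, \qquad dY = X^{T}\,dZ \in \mathbb{R}^{K \times N},$$

which is exactly what the transpose-flag pairs (false, true) and (true, false) select in the code above.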
......
...@@ -15,48 +15,42 @@ ...@@ -15,48 +15,42 @@
*/ */
#include "paddle/operators/net_op.h" #include "paddle/operators/net_op.h"
#include <set>
#include "paddle/framework/op_registry.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
const char NetOp::kAll[] = "all";
void NetOp::CompleteAddOp(bool calc) { void NetOp::CompleteAddOp(bool calc) {
add_op_done_ = true; add_op_done_ = true;
if (!calc) return; if (!calc) return;
std::unordered_set<std::string> input_set; std::set<std::string> input_set;
std::unordered_set<std::string> output_set; std::set<std::string> output_set;
std::unordered_set<std::string> temp_output;
for (auto& op : ops_) { for (auto& op : ops_) {
for (auto& ipt : op->inputs_) { for (auto& ipt : op->Inputs()) {
if (!Contains(output_set, ipt)) { // Not other op's output for (auto& var_name : ipt.second) {
input_set.insert(ipt); if (!Contains(output_set, var_name)) { // Not other op's output
input_set.insert(var_name);
} else { } else {
temp_output.insert(ipt); intermediate_outputs_.insert(var_name);
}
} }
for (auto& opt : op->outputs_) {
output_set.insert(opt);
} }
} }
inputs_.reserve(input_set.size()); for (auto& opt : op->Outputs()) {
std::copy(input_set.begin(), input_set.end(), std::back_inserter(inputs_)); for (auto& var_name : opt.second) {
std::sort(inputs_.begin(), inputs_.end()); output_set.insert(var_name);
outputs_.reserve(output_set.size());
std::copy(output_set.begin(), output_set.end(), std::back_inserter(outputs_));
std::sort(outputs_.begin(), outputs_.end());
std::vector<int> tmp_index;
tmp_index.reserve(temp_output.size());
int output_len = static_cast<int>(outputs_.size());
for (int i = 0; i < output_len; ++i) {
if (Contains(temp_output, outputs_[i])) {
tmp_index.push_back(i);
} }
} }
}
attrs_["temporary_index"] = tmp_index; auto& inputs = inputs_[kAll];
inputs.reserve(input_set.size());
std::copy(input_set.begin(), input_set.end(), std::back_inserter(inputs));
auto& outputs = outputs_[kAll];
outputs.reserve(output_set.size());
std::copy(output_set.begin(), output_set.end(), std::back_inserter(outputs));
} }
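Editor's note: a concrete example of the bookkeeping above: for a net containing mul(X, W) -> tmp followed by rowwise_add(tmp, b) -> Out, the pass collects inputs {X, W, b} (tmp is excluded because an earlier op produced it), outputs {tmp, Out}, and records tmp as an intermediate output, which OutputVars(false) below filters out.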
std::string NetOp::DebugString() const { std::string NetOp::DebugString() const {
...@@ -73,5 +67,32 @@ std::string NetOp::DebugString() const { ...@@ -73,5 +67,32 @@ std::string NetOp::DebugString() const {
bool NetOp::IsNetOp() const { return true; } bool NetOp::IsNetOp() const { return true; }
std::vector<std::string> NetOp::OutputVars(bool has_intermediate) const {
if (has_intermediate) {
return this->outputs_.at(kAll);
}
auto& all = this->outputs_.at(kAll);
std::vector<std::string> ret_val;
for (auto& each : all) {
if (!Contains(intermediate_outputs_, each)) {
ret_val.push_back(each);
}
}
return ret_val;
}
NetOp::NetOp(const std::string& type,
const framework::OperatorBase::VarNameMap& inputs,
const framework::OperatorBase::VarNameMap& outputs,
const framework::AttributeMap& attrs)
: framework::OperatorBase(type, inputs, outputs, attrs) {}
std::unique_ptr<framework::OperatorBase> NetOp::Clone() const {
PADDLE_ENFORCE(
add_op_done_,
"Must clone a sealed NetOp, invoke Net::CompleteAddOp before clone");
return std::unique_ptr<OperatorBase>(new NetOp(*this));
}
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
...@@ -14,6 +14,7 @@ limitations under the License. */ ...@@ -14,6 +14,7 @@ limitations under the License. */
#pragma once #pragma once
#include "paddle/framework/framework.pb.h"
#include "paddle/framework/op_registry.h" #include "paddle/framework/op_registry.h"
namespace paddle { namespace paddle {
...@@ -35,6 +36,21 @@ namespace operators { ...@@ -35,6 +36,21 @@ namespace operators {
*/ */
class NetOp : public framework::OperatorBase { class NetOp : public framework::OperatorBase {
public: public:
static const char kAll[];
NetOp() : framework::OperatorBase("plain_net", {}, {}, {}) {}
NetOp(const std::string& type, const VarNameMap& inputs,
const VarNameMap& outputs, const framework::AttributeMap& attrs);
NetOp(const NetOp& o) : framework::OperatorBase(o.type_, {}, {}, o.attrs_) {
this->ops_.reserve(o.ops_.size());
std::transform(
o.ops_.begin(), o.ops_.end(), std::back_inserter(this->ops_),
[](const std::unique_ptr<framework::OperatorBase>& op) {
return std::unique_ptr<framework::OperatorBase>(op->Clone());
});
this->CompleteAddOp();
}
/** /**
* Infer all the operators' input and output variables' shapes, will be called * Infer all the operators' input and output variables' shapes, will be called
* before every mini-batch * before every mini-batch
...@@ -68,21 +84,28 @@ class NetOp : public framework::OperatorBase { ...@@ -68,21 +84,28 @@ class NetOp : public framework::OperatorBase {
return true; return true;
} }
void AppendOp(const framework::OperatorBase& op) { AppendOp(op.Clone()); }
/** /**
* @brief Add an operator by ptr * @brief Add an operator by ptr
*/ */
void AddOp(const std::shared_ptr<OperatorBase>& op) { void AppendOp(std::unique_ptr<framework::OperatorBase> op) {
PADDLE_ENFORCE(!add_op_done_, "Cannot AddOp when this network is sealed"); PADDLE_ENFORCE(!add_op_done_,
"Cannot AppendOp when this network is sealed");
PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op"); PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op");
ops_.push_back(op); ops_.push_back(std::move(op));
} }
void InsertOp(size_t pos, const std::shared_ptr<OperatorBase>& op) { void InsertOp(size_t pos, std::unique_ptr<framework::OperatorBase> op) {
PADDLE_ENFORCE(!add_op_done_, PADDLE_ENFORCE(!add_op_done_,
"Cannot InsertOp when this network is sealed"); "Cannot InsertOp when this network is sealed");
PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op"); PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op");
PADDLE_ENFORCE_LE(pos, ops_.size(), "Out of range"); PADDLE_ENFORCE_LE(pos, ops_.size(), "Out of range");
ops_.insert(ops_.begin() + pos, op); ops_.insert(ops_.begin() + pos, std::move(op));
}
void InsertOp(size_t pos, const framework::OperatorBase& op) {
InsertOp(pos, op.Clone());
} }
void CompleteAddOp(bool calculate = true); void CompleteAddOp(bool calculate = true);
...@@ -90,11 +113,15 @@ class NetOp : public framework::OperatorBase { ...@@ -90,11 +113,15 @@ class NetOp : public framework::OperatorBase {
std::string DebugString() const override; std::string DebugString() const override;
bool IsNetOp() const override; bool IsNetOp() const override;
std::vector<std::string> OutputVars(bool has_intermediate) const override;
std::unique_ptr<framework::OperatorBase> Clone() const override;
std::vector<std::shared_ptr<OperatorBase>> ops_; std::vector<std::unique_ptr<framework::OperatorBase>> ops_;
private: private:
bool add_op_done_{false}; bool add_op_done_{false};
std::set<std::string> intermediate_outputs_;
template <typename T, typename KeyType> template <typename T, typename KeyType>
static bool Contains(T container, KeyType key) { static bool Contains(T container, KeyType key) {
......
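
Switching ops_ from shared_ptr to unique_ptr makes the NetOp the sole owner of its sub-ops, so the header offers two ways in: hand ownership over with std::move, or pass a const reference and let the overload store a deep copy via Clone. A condensed sketch of that ownership pattern (toy types, not the Paddle classes):

    #include <memory>
    #include <utility>
    #include <vector>

    struct Op {
      virtual ~Op() = default;
      virtual std::unique_ptr<Op> Clone() const = 0;
    };

    struct Net {
      std::vector<std::unique_ptr<Op>> ops_;
      // Take ownership: the caller's unique_ptr is left empty.
      void Append(std::unique_ptr<Op> op) { ops_.push_back(std::move(op)); }
      // Keep the caller's object untouched: store a deep copy instead.
      void Append(const Op& op) { ops_.push_back(op.Clone()); }
    };
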
...@@ -12,6 +12,8 @@ static int run_cnt = 0; ...@@ -12,6 +12,8 @@ static int run_cnt = 0;
class TestOp : public framework::OperatorBase { class TestOp : public framework::OperatorBase {
public: public:
using framework::OperatorBase::OperatorBase;
DEFINE_OP_CLONE_METHOD(TestOp);
void InferShape(const Scope& scope) const override { ++infer_shape_cnt; } void InferShape(const Scope& scope) const override { ++infer_shape_cnt; }
void Run(const Scope& scope, void Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const override { const platform::DeviceContext& dev_ctx) const override {
...@@ -19,12 +21,6 @@ class TestOp : public framework::OperatorBase { ...@@ -19,12 +21,6 @@ class TestOp : public framework::OperatorBase {
} }
}; };
class EmptyOp : public framework::OperatorBase {
public:
void InferShape(const Scope& scope) const override {}
void Run(const Scope& scope, const DeviceContext& dev_ctx) const override {}
};
template <typename T> template <typename T>
void AssertSameVectorWithoutOrder(const std::vector<T>& expected, void AssertSameVectorWithoutOrder(const std::vector<T>& expected,
const std::vector<T>& actual) { const std::vector<T>& actual) {
...@@ -42,46 +38,51 @@ TEST(OpKernel, all) { ...@@ -42,46 +38,51 @@ TEST(OpKernel, all) {
auto net = std::make_shared<NetOp>(); auto net = std::make_shared<NetOp>();
ASSERT_NE(net, nullptr); ASSERT_NE(net, nullptr);
auto op1 = std::make_shared<TestOp>(); net->AppendOp(std::unique_ptr<TestOp>(
op1->inputs_ = {"x", "w1", "b1"}; new TestOp("test", {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}},
op1->outputs_ = {"y"}; {{"Out", {"y"}}}, {})));
net->AddOp(op1); net->AppendOp(std::unique_ptr<TestOp>(
new TestOp("test", {{"X", {"y"}}, {"W", {"w2"}}, {"b", {"b2"}}},
auto op2 = std::make_shared<TestOp>(); {{"Out", {"z"}}}, {})));
op2->inputs_ = {"y", "w2", "b2"};
op2->outputs_ = {"z"};
net->AddOp(op2);
net->CompleteAddOp(); net->CompleteAddOp();
AssertSameVectorWithoutOrder({"x", "w1", "b1", "w2", "b2"}, net->inputs_); AssertSameVectorWithoutOrder({"x", "w1", "b1", "w2", "b2"},
AssertSameVectorWithoutOrder({"y", "z"}, net->outputs_); net->Inputs(NetOp::kAll));
auto tmp_idx_iter = net->attrs_.find("temporary_index"); AssertSameVectorWithoutOrder({"y", "z"}, net->Outputs(NetOp::kAll));
ASSERT_NE(net->attrs_.end(), tmp_idx_iter);
auto& tmp_idx = boost::get<std::vector<int>>(tmp_idx_iter->second);
ASSERT_EQ(1UL, tmp_idx.size());
ASSERT_EQ("y", net->outputs_[tmp_idx[0]]);
Scope scope; auto final_outs = net->OutputVars(false);
platform::CPUDeviceContext dev_ctx;
net->InferShape(scope); ASSERT_EQ(final_outs.size(), 1UL);
net->Run(scope, dev_ctx); ASSERT_EQ(final_outs[0], "z");
ASSERT_EQ(2, infer_shape_cnt);
ASSERT_EQ(2, run_cnt);
ASSERT_THROW(net->AddOp(op2), platform::EnforceNotMet);
} }
TEST(NetOp, insert_op) { TEST(NetOp, insert_op) {
NetOp net; NetOp net;
auto op1 = std::make_shared<EmptyOp>(); auto op1 = std::unique_ptr<framework::NOP>(
op1->inputs_ = {"x", "w1", "b1"}; new framework::NOP("empty", {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}},
op1->outputs_ = {"y"}; {{"Out", {"y"}}}, {}));
net.AddOp(op1); net.AppendOp(*op1);
net.InsertOp(0, op1); net.InsertOp(0, *op1);
ASSERT_EQ(2UL, net.ops_.size()); ASSERT_EQ(2UL, net.ops_.size());
net.InsertOp(2, op1); net.InsertOp(2, std::move(op1));
ASSERT_EQ(3UL, net.ops_.size()); ASSERT_EQ(3UL, net.ops_.size());
} }
TEST(NetOp, Clone) {
NetOp net;
net.AppendOp(
std::unique_ptr<framework::NOP>(new framework::NOP{"empty", {}, {}, {}}));
net.AppendOp(std::unique_ptr<framework::NOP>(
new framework::NOP{"empty2", {}, {}, {}}));
net.CompleteAddOp(true);
auto new_net_op = net.Clone();
ASSERT_NE(new_net_op, nullptr);
ASSERT_TRUE(new_net_op->IsNetOp());
auto* new_net = static_cast<NetOp*>(new_net_op.get());
ASSERT_EQ(2, new_net->ops_.size());
ASSERT_EQ(new_net->ops_[0]->Type(), "empty");
ASSERT_EQ(new_net->ops_[1]->Type(), "empty2");
}
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
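
DEFINE_OP_CLONE_METHOD(TestOp) presumably generates the Clone() override from the class's copy constructor; a plausible expansion (an assumption about the macro, not its verbatim body) would be:

    // Hypothetical expansion of DEFINE_OP_CLONE_METHOD(TestOp):
    // std::unique_ptr<framework::OperatorBase> Clone() const override {
    //   return std::unique_ptr<framework::OperatorBase>(new TestOp(*this));
    // }
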
...@@ -36,15 +36,13 @@ void RecurrentAlgorithm::InferShape(const Scope& scope) const { ...@@ -36,15 +36,13 @@ void RecurrentAlgorithm::InferShape(const Scope& scope) const {
rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_, rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
true /*infer_shape_mode*/); true /*infer_shape_mode*/);
InitMemories(step_scopes[0], true /*infer_shape_mode*/); InitMemories(step_scopes[0], true /*infer_shape_mode*/);
Variable* net = scope.FindVar(arg_->step_net);
PADDLE_ENFORCE(net != nullptr, "failed to get step net");
for (size_t i = 0; i < seq_len_; i++) { for (size_t i = 0; i < seq_len_; i++) {
if (i > 0) { if (i > 0) {
rnn::LinkMemories(step_scopes, arg_->memories, i, -1, rnn::LinkMemories(step_scopes, arg_->memories, i, -1,
true /*infer_shape_mode*/); true /*infer_shape_mode*/);
} }
net->GetMutable<NetOp>()->InferShape(*step_scopes[i]); (*stepnet_)->InferShape(*step_scopes[i]);
} }
rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_,
true /*infer_shape_mode*/); true /*infer_shape_mode*/);
...@@ -56,7 +54,6 @@ void RecurrentAlgorithm::Run(const Scope& scope, ...@@ -56,7 +54,6 @@ void RecurrentAlgorithm::Run(const Scope& scope,
rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_, rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
false /*infer_shape_mode*/); false /*infer_shape_mode*/);
InitMemories(step_scopes[0], false /*infer_shape_mode*/); InitMemories(step_scopes[0], false /*infer_shape_mode*/);
Variable* net = scope.FindVar(arg_->step_net);
for (size_t step_id = 0; step_id < seq_len_; step_id++) { for (size_t step_id = 0; step_id < seq_len_; step_id++) {
// create output alias variables // create output alias variables
...@@ -64,7 +61,7 @@ void RecurrentAlgorithm::Run(const Scope& scope, ...@@ -64,7 +61,7 @@ void RecurrentAlgorithm::Run(const Scope& scope,
rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1, rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1,
false /*infer_shape_mode*/); false /*infer_shape_mode*/);
} }
net->GetMutable<NetOp>()->Run(*step_scopes[step_id], dev_ctx); (*stepnet_)->Run(*step_scopes[step_id], dev_ctx);
} }
rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_,
false /*infer_shape_mode*/); false /*infer_shape_mode*/);
...@@ -78,25 +75,28 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { ...@@ -78,25 +75,28 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const {
auto step_scopes = step_scopes_var->GetMutable<std::vector<Scope*>>(); auto step_scopes = step_scopes_var->GetMutable<std::vector<Scope*>>();
// Now all variables in scope must be created outside of op. // Now all variables in scope must be created outside of op.
auto net_var = scope.FindVar(arg_->step_net); PADDLE_ENFORCE_NOT_NULL(stepnet_);
PADDLE_ENFORCE(net_var != nullptr, "no stepnet called %s in scope", PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(), "stepnet_ op has no outputs");
arg_->step_net); PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(), "net_op has no outputs");
auto net_op = net_var->GetMutable<NetOp>();
PADDLE_ENFORCE(!net_op->outputs_.empty(), "net_op has no outputs");
if (seq_len_ > step_scopes->size()) { if (seq_len_ > step_scopes->size()) {
for (size_t i = step_scopes->size(); i < seq_len_; ++i) { for (size_t i = step_scopes->size(); i < seq_len_; ++i) {
auto& step_scope = scope.NewScope(); auto& step_scope = scope.NewScope();
// create step net's temp inputs // create step net's temp inputs
for (auto& input : net_op->inputs_) { for (auto& input : (*stepnet_)->Inputs()) {
// the weight are located in parent scope // the weight are located in parent scope
if (!step_scope.FindVar(input)) for (auto& var_name : input.second) {
step_scope.NewVar(input)->GetMutable<Tensor>(); if (!step_scope.FindVar(var_name)) {
step_scope.NewVar(var_name)->GetMutable<Tensor>();
}
}
} }
// create stepnet's outputs // create stepnet's outputs
for (const auto& output : net_op->outputs_) { for (const auto& output : (*stepnet_)->Outputs()) {
step_scope.NewVar(output); for (auto& var_name : output.second) {
step_scope.NewVar(var_name);
}
} }
step_scopes->emplace_back(&step_scope); step_scopes->emplace_back(&step_scope);
} }
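
CreateScopes grows one child scope per time step, lazily: shared weights stay in the parent scope and are reached through upward lookup, so each step scope only creates the per-step variables that the lookup does not find. A standalone sketch of that lookup rule (toy Scope, not the Paddle one):

    #include <map>
    #include <string>

    struct Scope {
      explicit Scope(const Scope* parent = nullptr) : parent_(parent) {}
      // Search this scope, then its ancestors (where the weights live).
      bool Has(const std::string& name) const {
        return vars_.count(name) != 0 || (parent_ && parent_->Has(name));
      }
      void NewVar(const std::string& name) { vars_[name] = 0; }
      const Scope* parent_;
      std::map<std::string, int> vars_;  // payload elided
    };

    // Mirror of the loop above: create a step variable only if no
    // enclosing scope already provides it.
    void PrepareStepVar(Scope* step, const std::string& var) {
      if (!step->Has(var)) step->NewVar(var);
    }
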
...@@ -130,11 +130,13 @@ const rnn::ArgumentName RecurrentGradientOp::kArgName{ ...@@ -130,11 +130,13 @@ const rnn::ArgumentName RecurrentGradientOp::kArgName{
"inlink@grad", "inlink_alias", "outlink_alias", "inlink@grad", "inlink_alias", "outlink_alias",
"memories", "pre_memories", "boot_memories@grad"}; "memories", "pre_memories", "boot_memories@grad"};
void RecurrentOp::Init() { RecurrentOp::RecurrentOp(const std::string& type,
OperatorBase::Init(); const framework::OperatorBase::VarNameMap& inputs,
std::unique_ptr<rnn::Argument> arg(new rnn::Argument()); const framework::OperatorBase::VarNameMap& outputs,
rnn::InitArgument(kArgName, arg.get(), *this); const framework::AttributeMap& attrs)
alg_.Init(std::move(arg)); : OperatorBase(type, inputs, outputs, attrs) {
rnn::InitArgument(kArgName, &arg_, *this);
alg_.Init(&arg_, &stepnet_);
} }
class RecurrentAlgorithmProtoAndCheckerMaker class RecurrentAlgorithmProtoAndCheckerMaker
...@@ -147,13 +149,12 @@ class RecurrentAlgorithmProtoAndCheckerMaker ...@@ -147,13 +149,12 @@ class RecurrentAlgorithmProtoAndCheckerMaker
// inputs and outputs stored in proto // inputs and outputs stored in proto
AddInput(name.inlinks, AddInput(name.inlinks,
"the inputs that need to be segmented for each step.") "the inputs that need to be segmented for each step.")
.SetMultiple(); .AsDuplicable();
AddInput(name.boot_memories, "variables to initialize memories.") AddInput(name.boot_memories, "variables to initialize memories.")
.SetMultiple(); .AsDuplicable();
AddInput(name.step_net, "network shared by all steps.");
AddOutput(name.outlinks, "the outputs that need to be concatenated for all steps.") AddOutput(name.outlinks, "the outputs that need to be concatenated for all steps.")
.SetMultiple(); .AsDuplicable();
AddOutput(name.step_scopes, "step scopes"); AddOutput(name.step_scopes, "step scopes");
// Attributes stored in AttributeMap // Attributes stored in AttributeMap
...@@ -172,14 +173,12 @@ void RecurrentGradientAlgorithm::Run( ...@@ -172,14 +173,12 @@ void RecurrentGradientAlgorithm::Run(
auto step_scopes = GetStepScopes(scope); auto step_scopes = GetStepScopes(scope);
rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_, rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
false /*infer_shape_mode*/); false /*infer_shape_mode*/);
Variable* net = scope.FindVar(arg_->step_net);
PADDLE_ENFORCE(net != nullptr, "failed to get step net");
for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) { for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) {
if (static_cast<size_t>(step_id) != seq_len_ - 1) { if (static_cast<size_t>(step_id) != seq_len_ - 1) {
rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1, rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1,
false /*infer_shape_mode*/); false /*infer_shape_mode*/);
} }
net->GetMutable<NetOp>()->Run(*step_scopes[step_id], dev_ctx); (*stepnet_)->Run(*step_scopes[step_id], dev_ctx);
} }
LinkBootMemoryGradients(step_scopes[0], false); LinkBootMemoryGradients(step_scopes[0], false);
rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_,
...@@ -211,29 +210,30 @@ void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const { ...@@ -211,29 +210,30 @@ void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const {
auto step_scopes = GetStepScopes(scope); auto step_scopes = GetStepScopes(scope);
rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_, rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
true /*infer_shape_mode*/); true /*infer_shape_mode*/);
Variable* net = scope.FindVar(arg_->step_net);
PADDLE_ENFORCE(net != nullptr, "failed to get step net");
for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) { for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) {
if (static_cast<size_t>(step_id) != seq_len_ - 1) { if (static_cast<size_t>(step_id) != seq_len_ - 1) {
rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1, rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1,
true /*infer_shape_mode*/); true /*infer_shape_mode*/);
} }
net->GetMutable<NetOp>()->InferShape(*step_scopes[step_id]); (*stepnet_)->InferShape(*step_scopes[step_id]);
} }
rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_,
true /*infer_shape_mode*/); true /*infer_shape_mode*/);
LinkBootMemoryGradients(step_scopes[0], true /*infer_shape_mode*/); LinkBootMemoryGradients(step_scopes[0], true /*infer_shape_mode*/);
} }
void RecurrentGradientOp::Init() { RecurrentGradientOp::RecurrentGradientOp(
OperatorBase::Init(); const std::string& type, const framework::OperatorBase::VarNameMap& inputs,
std::unique_ptr<rnn::Argument> arg(new rnn::Argument()); const framework::OperatorBase::VarNameMap& outputs,
rnn::InitArgument(kArgName, arg.get(), *this); const framework::AttributeMap& attrs)
alg_.Init(std::move(arg)); : OperatorBase(type, inputs, outputs, attrs) {
rnn::InitArgument(kArgName, &arg_, *this);
alg_.Init(&arg_, &stepnet_);
} }
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
REGISTER_OP(recurrent_op, paddle::operators::RecurrentOp, REGISTER_OP_WITHOUT_GRADIENT(
recurrent_op, paddle::operators::RecurrentOp,
paddle::operators::RecurrentAlgorithmProtoAndCheckerMaker); paddle::operators::RecurrentAlgorithmProtoAndCheckerMaker);
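
The step net moves from a scope variable to an owned member. Init takes a pointer to the unique_ptr rather than the net itself because the algorithm is wired up in the constructor, before any step net exists, and must observe whatever set_stepnet installs later. A minimal sketch of that indirection (illustrative names only):

    #include <memory>
    #include <utility>

    struct StepNet { /* per-step computation, elided */ };

    struct Algorithm {
      // Observe the owner's slot; sees nets attached after Init.
      void Init(std::unique_ptr<StepNet>* stepnet) { stepnet_ = stepnet; }
      bool Ready() const { return stepnet_ != nullptr && *stepnet_ != nullptr; }
      std::unique_ptr<StepNet>* stepnet_ = nullptr;
    };

    struct RnnOp {
      RnnOp() { alg_.Init(&stepnet_); }  // wired before the net exists
      void set_stepnet(std::unique_ptr<StepNet> net) { stepnet_ = std::move(net); }
      Algorithm alg_;
      std::unique_ptr<StepNet> stepnet_;
    };
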
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#pragma once #pragma once
#include "paddle/framework/operator.h" #include "paddle/framework/operator.h"
#include "paddle/operators/net_op.h"
#include "paddle/operators/rnn/recurrent_op_utils.h" #include "paddle/operators/rnn/recurrent_op_utils.h"
namespace paddle { namespace paddle {
...@@ -33,7 +34,12 @@ class RecurrentAlgorithm { ...@@ -33,7 +34,12 @@ class RecurrentAlgorithm {
void Run(const framework::Scope& scope, void Run(const framework::Scope& scope,
const platform::DeviceContext& dev_ctx) const; const platform::DeviceContext& dev_ctx) const;
void Init(std::unique_ptr<rnn::Argument> arg) { arg_ = std::move(arg); } void Init(rnn::Argument* arg,
std::unique_ptr<framework::OperatorBase>* stepnet) {
PADDLE_ENFORCE_NOT_NULL(stepnet, "stepnet should be set before.");
arg_ = arg;
stepnet_ = stepnet;
}
/** /**
* InferShape must be called before Run. * InferShape must be called before Run.
...@@ -58,7 +64,8 @@ class RecurrentAlgorithm { ...@@ -58,7 +64,8 @@ class RecurrentAlgorithm {
void InitMemories(framework::Scope* step_scopes, bool infer_shape_mode) const; void InitMemories(framework::Scope* step_scopes, bool infer_shape_mode) const;
private: private:
std::unique_ptr<rnn::Argument> arg_; std::unique_ptr<framework::OperatorBase>* stepnet_;
rnn::Argument* arg_;
mutable size_t seq_len_; mutable size_t seq_len_;
}; };
...@@ -74,7 +81,12 @@ class RecurrentGradientAlgorithm { ...@@ -74,7 +81,12 @@ class RecurrentGradientAlgorithm {
* operator. * operator.
*/ */
public: public:
void Init(std::unique_ptr<rnn::Argument> arg) { arg_ = std::move(arg); } void Init(rnn::Argument* arg,
std::unique_ptr<framework::OperatorBase>* stepnet) {
PADDLE_ENFORCE_NOT_NULL(stepnet, "stepnet should be set before.");
arg_ = std::move(arg);
stepnet_ = stepnet;
}
void Run(const framework::Scope& scope, void Run(const framework::Scope& scope,
const platform::DeviceContext& dev_ctx) const; const platform::DeviceContext& dev_ctx) const;
...@@ -95,14 +107,22 @@ class RecurrentGradientAlgorithm { ...@@ -95,14 +107,22 @@ class RecurrentGradientAlgorithm {
} }
private: private:
std::unique_ptr<rnn::Argument> arg_; rnn::Argument* arg_;
mutable size_t seq_len_; mutable size_t seq_len_;
std::unique_ptr<framework::OperatorBase>* stepnet_;
}; };
class RecurrentOp final : public framework::OperatorBase { class RecurrentOp : public framework::OperatorBase {
public: public:
void Init() override; RecurrentOp(const std::string& type, const VarNameMap& inputs,
const VarNameMap& outputs, const framework::AttributeMap& attrs);
RecurrentOp(const RecurrentOp& o)
: framework::OperatorBase(
static_cast<const framework::OperatorBase&>(o)) {
// TODO(yuyang18): Implement copy ctor well.
PADDLE_THROW("Not implemented");
}
/** /**
* InferShape must be called before Run. * InferShape must be called before Run.
*/ */
...@@ -115,15 +135,31 @@ class RecurrentOp final : public framework::OperatorBase { ...@@ -115,15 +135,31 @@ class RecurrentOp final : public framework::OperatorBase {
alg_.Run(scope, dev_ctx); alg_.Run(scope, dev_ctx);
} }
void set_stepnet(std::unique_ptr<OperatorBase> net) {
stepnet_ = std::move(net);
}
const OperatorBase& stepnet() const { return *stepnet_; }
static const rnn::ArgumentName kArgName; static const rnn::ArgumentName kArgName;
private: private:
RecurrentAlgorithm alg_; RecurrentAlgorithm alg_;
rnn::Argument arg_;
std::unique_ptr<OperatorBase> stepnet_;
}; };
class RecurrentGradientOp final : public framework::OperatorBase { class RecurrentGradientOp : public framework::OperatorBase {
public: public:
void Init() override; RecurrentGradientOp(const std::string& type, const VarNameMap& inputs,
const VarNameMap& outputs,
const framework::AttributeMap& attrs);
RecurrentGradientOp(const RecurrentGradientOp& o)
: framework::OperatorBase(
static_cast<const framework::OperatorBase&>(o)) {
// TODO(yuyang18): Implement Copy ctor.
PADDLE_THROW("Not Implemented");
}
/** /**
* InferShape must be called before Run. * InferShape must be called before Run.
...@@ -139,8 +175,15 @@ class RecurrentGradientOp final : public framework::OperatorBase { ...@@ -139,8 +175,15 @@ class RecurrentGradientOp final : public framework::OperatorBase {
static const rnn::ArgumentName kArgName; static const rnn::ArgumentName kArgName;
void set_stepnet(std::unique_ptr<OperatorBase> net) {
stepnet_ = std::move(net);
}
const OperatorBase& stepnet() const { return *stepnet_; }
private: private:
RecurrentGradientAlgorithm alg_; RecurrentGradientAlgorithm alg_;
std::unique_ptr<OperatorBase> stepnet_;
rnn::Argument arg_;
}; };
} // namespace operators } // namespace operators
......
/*
Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "paddle/operators/recurrent_op.h"
#include <glog/logging.h>
#include <gtest/gtest.h>
#include "paddle/framework/ddim.h"
#include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"
#include "paddle/framework/tensor.h"
#include "paddle/operators/net_op.h"
namespace paddle {
namespace operators {
using framework::make_ddim;
using framework::DDim;
using framework::Tensor;
using framework::Variable;
using framework::Scope;
using framework::OpRegistry;
class RecurrentOpTest : public ::testing::Test {
protected:
virtual void SetUp() override {
CreateGlobalVariables();
CreateStepNet();
CreateRNNOp();
}
virtual void TearDown() override {}
void CreateGlobalVariables() {
// create input, and init content
LOG(INFO) << "create global variable x";
for (auto inlink : std::vector<std::string>{"x", "x0", "x1", "h"}) {
Variable* x = scope_.NewVar(inlink);
DDim dims = make_ddim(std::vector<int>{
10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
}
// create output alias just for test
for (auto inlink : std::vector<std::string>{"h@alias"}) {
Variable* x = scope_.NewVar(inlink);
DDim dims =
make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/});
x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
}
LOG(INFO) << "create global variable w";
Variable* w = scope_.NewVar("rnn/w");
w->GetMutable<Tensor>()->mutable_data<float>(
make_ddim(std::vector<int>{30, 30}), platform::CPUPlace());
for (auto boot : std::vector<std::string>{"h_boot"}) {
LOG(INFO) << "create global variable " << boot;
Variable* h_boot = scope_.NewVar(boot);
h_boot->GetMutable<Tensor>()->mutable_data<float>(
make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/}),
platform::CPUPlace());
}
LOG(INFO) << "create variable step_scopes";
scope_.NewVar("step_scopes");
LOG(INFO) << "create variable h";
scope_.NewVar("h");
}
void CreateRNNOp() {
framework::OpDesc op_desc;
op_desc.set_type("recurrent_op");
// inlinks 0
op_desc.add_inputs("x");
op_desc.add_inputs("x0");
op_desc.add_inputs("x1");
// boot_memories 3
op_desc.add_inputs("h_boot");
// step net 5
op_desc.add_inputs("step_net");
// outlinks 6
op_desc.add_outputs("h");
// step scopes 7
op_desc.add_outputs("step_scopes");
auto _input_format = std::vector<int>{
0, // in_link
3, // memories
4 // step_net
};
auto input_format = op_desc.add_attrs();
input_format->set_name("input_format");
input_format->set_type(paddle::framework::AttrType::INTS);
for (auto i : _input_format) {
input_format->add_ints(i);
}
auto output_format = op_desc.add_attrs();
output_format->set_name("output_format");
output_format->set_type(paddle::framework::AttrType::INTS);
for (auto i : std::vector<int>{0, 1, 2}) {
output_format->add_ints(i);
}
auto inlink_alias = op_desc.add_attrs();
inlink_alias->set_name("inlink_alias");
inlink_alias->set_type(paddle::framework::AttrType::STRINGS);
auto outlink_alias = op_desc.add_attrs();
outlink_alias->set_name("outlink_alias");
outlink_alias->set_type(paddle::framework::AttrType::STRINGS);
auto pre_memories = op_desc.add_attrs();
pre_memories->set_name("pre_memories");
pre_memories->set_type(paddle::framework::AttrType::STRINGS);
auto memories = op_desc.add_attrs();
memories->set_name("memories");
memories->set_type(paddle::framework::AttrType::STRINGS);
// create inlink_alias
for (const auto& item :
std::vector<std::string>{"x@alias", "x0@alias", "x1@alias"}) {
inlink_alias->add_strings(item);
}
// pre memories
for (const auto& item : std::vector<std::string>{"rnn/h@pre"}) {
pre_memories->add_strings(item);
}
// memories
for (const auto& item : std::vector<std::string>{"rnn/h"}) {
memories->add_strings(item);
}
// output alias
for (const auto& item : std::vector<std::string>{"h@alias"}) {
outlink_alias->add_strings(item);
}
rnn_op_ = OpRegistry::CreateOp(op_desc);
LOG(INFO) << "rnn_op finish init";
}
void CreateStepNet() {
LOG(INFO) << "create variable step_net";
Variable* var = scope_.NewVar("step_net");
auto net = var->GetMutable<NetOp>();
net->AddOp(
OpRegistry::CreateOp("mul", {"rnn/h@pre", "rnn/w"}, {"rnn/s"}, {}));
net->AddOp(
OpRegistry::CreateOp("add_two", {"x@alias", "rnn/s"}, {"rnn/h"}, {}));
net->CompleteAddOp();
}
// father scope
Scope scope_;
std::shared_ptr<framework::OperatorBase> rnn_op_;
};
TEST_F(RecurrentOpTest, Run) {
platform::CPUDeviceContext ctx;
rnn_op_->InferShape(scope_);
rnn_op_->Run(scope_, ctx);
}
class RecurrentGradientAlgorithmTest : public ::testing::Test {
protected:
virtual void SetUp() override {
CreateGlobalVariables();
CreateStepScopes();
CreateStepNet();
CreateRNNGradientAlgorithm();
// segment inputs
SegmentInputs();
// link forward memories
LinkMemories();
}
virtual void TearDown() override {}
void CreateGlobalVariables() {
// inputs: x
LOG(INFO) << "create global variable x";
Variable* x = scope_.NewVar("x");
DDim dims =
make_ddim({10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
// inputs: h_boot
LOG(INFO) << "create global variable h_boot";
Variable* h_boot = scope_.NewVar("h_boot");
h_boot->GetMutable<Tensor>()->mutable_data<float>(
make_ddim({20 /*batch size*/, 30 /*input dim*/}), platform::CPUPlace());
// inputs: w
LOG(INFO) << "create global variable w";
Variable* w = scope_.NewVar("rnn/w");
w->GetMutable<Tensor>()->mutable_data<float>(make_ddim({30, 30}),
platform::CPUPlace());
// inputs: h_grad
LOG(INFO) << "create variable h_grad";
Variable* dh = scope_.NewVar("h_grad");
dh->GetMutable<Tensor>()->mutable_data<float>(make_ddim({10, 20, 30}),
platform::CPUPlace());
// inputs: step_scopes
LOG(INFO) << "create variable step_scopes";
scope_.NewVar("step_scopes");
// inputs: step_net
LOG(INFO) << "create variable step_net";
scope_.NewVar("step_net");
// outputs: w_grad
LOG(INFO) << "create global variable w_grad";
scope_.NewVar("rnn/w_grad");
// outputs: x_grad
LOG(INFO) << "create global variable x_grad";
scope_.NewVar("x_grad");
// outputs: h_boot_grad
LOG(INFO) << "create global variable h_boot_grad";
scope_.NewVar("h_boot_grad");
}
void CreateStepScopes() {
auto step_scopes =
scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
for (int i = 0; i < 10; ++i) {
auto& scope = scope_.NewScope();
auto pre_t = scope.NewVar("rnn/pre_h")->GetMutable<Tensor>();
pre_t->mutable_data<float>({20, 30}, platform::CPUPlace());
auto tensor = scope.NewVar("rnn/h")->GetMutable<Tensor>();
tensor->mutable_data<float>({20, 30}, platform::CPUPlace());
// for unit test of ConcatOutputs
auto xg = scope.NewVar("rnn/x_grad")->GetMutable<Tensor>();
xg->mutable_data<float>({20, 30}, platform::CPUPlace());
step_scopes->emplace_back(&scope);
}
// last time step
auto g = (*step_scopes)[9]->NewVar("rnn/h_pre_grad")->GetMutable<Tensor>();
g->mutable_data<float>({20, 30}, platform::CPUPlace());
}
void CreateRNNGradientAlgorithm() {
std::unique_ptr<rnn::Argument> arg(new rnn::Argument());
arg->step_net = "step_net";
arg->step_scopes = "step_scopes";
rnn::Link inlink;
inlink.external = "h_grad";
inlink.internal = "rnn/h_grad";
arg->inlinks = std::vector<rnn::Link>{inlink};
rnn::Link outlink;
outlink.external = "x_grad";
outlink.internal = "rnn/x_grad";
arg->outlinks = std::vector<rnn::Link>{outlink};
rnn::MemoryAttr mem_attr;
mem_attr.pre_var = "rnn/h_pre_grad";
mem_attr.var = "rnn/h_grad";
mem_attr.boot_var = "h_boot_grad";
arg->memories = std::vector<rnn::MemoryAttr>{mem_attr};
rnn_grad_algo_.Init(std::move(arg));
}
void CreateStepNet() {
LOG(INFO) << "create variable step_net";
Variable* var = scope_.NewVar("step_net");
auto net = var->GetMutable<NetOp>();
net->AddOp(OpRegistry::CreateOp("mul", {"rnn/h_pre", "rnn/w", "rnn/s_grad"},
{"rnn/h_pre_grad", "rnn/w_grad"}, {}));
net->AddOp(OpRegistry::CreateOp("add_two", {"rnn/h_grad"},
{"rnn/x_grad", "rnn/s_grad"}, {}));
net->CompleteAddOp();
}
void SegmentInputs() {
LOG(INFO) << "segment inputs";
std::vector<std::string> inlinks = {"x"};
std::vector<std::string> inlinks_alias = {"rnn/x"};
rnn::Link inlink;
inlink.external = "x";
inlink.internal = "rnn/x";
auto step_scopes =
scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
rnn::SegmentInputs(*step_scopes, std::vector<rnn::Link>{inlink}, 10,
true /*infer_shape_mode*/);
}
void LinkMemories() {
LOG(INFO) << "link memories";
rnn::MemoryAttr mem_attr;
mem_attr.pre_var = "rnn/h_pre";
mem_attr.var = "rnn/h";
mem_attr.boot_var = "boot_h";
std::vector<rnn::MemoryAttr> memories;
memories.push_back(mem_attr);
auto step_scopes =
scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
for (int i = 1; i < 10; ++i) {
rnn::LinkMemories(*step_scopes, memories, i, -1,
true /*infer_shape_mode*/);
}
}
Scope scope_;
RecurrentGradientAlgorithm rnn_grad_algo_;
};
// TEST_F(RecurrentGradientAlgorithmTest, Run) {
// platform::CPUDeviceContext ctx;
// rnn_grad_algo_.Run(scope_, ctx);
// }
} // namespace operators
} // namespace paddle
TEST(RecurrentOp, LinkMemories) {
using namespace paddle::framework;
using namespace paddle::platform;
using namespace paddle::operators;
// create and init step scopes
size_t len = 10;
std::vector<Scope*> step_scopes;
for (size_t i = 0; i < len; ++i) {
auto scope = new Scope();
scope->NewVar("pre_h");
auto tensor = scope->NewVar("h")->GetMutable<Tensor>();
float* data = tensor->mutable_data<float>({15, 20}, CPUPlace());
for (size_t j = 0; j < 15 * 20; ++j) {
data[j] = rand() * (1. / (double)RAND_MAX);
}
step_scopes.push_back(scope);
}
// create MemoryAttr
rnn::MemoryAttr mem_attr;
mem_attr.pre_var = "pre_h";
mem_attr.var = "h";
mem_attr.boot_var = "boot_h";
std::vector<rnn::MemoryAttr> memories;
memories.push_back(mem_attr);
for (size_t i = 1; i < len; ++i) {
rnn::LinkMemories(step_scopes, memories, i, -1, false /*infer_shape_mode*/);
}
// check
for (size_t i = 0; i < len - 1; ++i) {
const float* a =
step_scopes[i]->FindVar("h")->GetMutable<Tensor>()->data<float>();
const float* b = step_scopes[i + 1]
->FindVar("pre_h")
->GetMutable<Tensor>()
->data<float>();
for (size_t j = 0; j < 15 * 20; ++j) {
ASSERT_FLOAT_EQ(a[j], b[j]);
}
}
for (int i = len - 2; i >= 0; --i) {
rnn::LinkMemories(step_scopes, memories, i, 1, false /*infer_shape_mode*/);
}
// check
for (int i = len - 2; i >= 0; --i) {
const float* a =
step_scopes[i]->FindVar("pre_h")->GetMutable<Tensor>()->data<float>();
const float* b =
step_scopes[i + 1]->FindVar("h")->GetMutable<Tensor>()->data<float>();
for (size_t j = 0; j < 15 * 20; ++j) {
ASSERT_FLOAT_EQ(a[j], b[j]);
}
}
for (auto s : step_scopes) {
delete s;
}
}
USE_OP(add_two);
USE_OP(mul);
USE_OP_WITHOUT_KERNEL(recurrent_op);
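
The trailing USE_OP lines are there for linking, not semantics: operator registration happens in static objects in other translation units, and referencing a symbol from those units keeps the linker from dropping them. A generic sketch of the static-registration idiom this relies on (not the Paddle macros themselves):

    #include <map>
    #include <string>

    struct Registrar {
      explicit Registrar(const std::string& name) { Registry()[name] = true; }
      static std::map<std::string, bool>& Registry() {
        static std::map<std::string, bool> r;
        return r;
      }
    };

    // In the operator's .cc:  static Registrar reg_mul("mul");
    // A USE_OP-style macro then references a symbol exported by that file,
    // forcing the linker to keep the object file so the registrar runs.
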
...@@ -106,7 +106,6 @@ void LinkMemories(const std::vector<Scope*>& scopes, ...@@ -106,7 +106,6 @@ void LinkMemories(const std::vector<Scope*>& scopes,
void InitArgument(const ArgumentName& name, Argument* arg, void InitArgument(const ArgumentName& name, Argument* arg,
const framework::OperatorBase& op) { const framework::OperatorBase& op) {
arg->step_net = op.Input(name.step_net);
arg->step_scopes = op.Output(name.step_scopes); arg->step_scopes = op.Output(name.step_scopes);
auto inlinks = op.Inputs(name.inlinks); auto inlinks = op.Inputs(name.inlinks);
......
...@@ -17,25 +17,28 @@ ...@@ -17,25 +17,28 @@
namespace paddle { namespace paddle {
namespace operators { namespace operators {
class RowWiseAddOp : public framework::OperatorWithKernel { using framework::Tensor;
class RowwiseAddOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE(ctx.InputSize() == 2UL, auto dim0 = ctx.Input<Tensor>("X")->dims();
"Two inputs is needed by rowwise add"); auto dim1 = ctx.Input<Tensor>("b")->dims();
auto dim0 = ctx.Input<Tensor>(0)->dims();
auto dim1 = ctx.Input<Tensor>(1)->dims();
PADDLE_ENFORCE(dim0.size() == 2, "Input 0 must be matrix"); PADDLE_ENFORCE(dim0.size() == 2, "Input 0 must be matrix");
PADDLE_ENFORCE(dim1.size() == 1, "The second input must be vector"); PADDLE_ENFORCE(dim1.size() == 1, "The second input must be vector");
PADDLE_ENFORCE(dim0[1] == dim1[0], "The width of two input must be same"); PADDLE_ENFORCE(dim0[1] == dim1[0], "The width of two input must be same");
PADDLE_ENFORCE(ctx.OutputSize() == 1, "The output size must be 1"); PADDLE_ENFORCE(ctx.OutputSize("Out") == 1, "The output size must be 1");
ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims()); ctx.Output<Tensor>("Out")->Resize(ctx.Input<Tensor>("X")->dims());
} }
}; };
class RowWiseAddOpMaker : public framework::OpProtoAndCheckerMaker { class RowwiseAddOpMaker : public framework::OpProtoAndCheckerMaker {
public: public:
RowWiseAddOpMaker(framework::OpProto *proto, RowwiseAddOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker) framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The left input of row-wise add op, must be matrix"); AddInput("X", "The left input of row-wise add op, must be matrix");
...@@ -48,11 +51,32 @@ for i in xrange(X.shape[0]): ...@@ -48,11 +51,32 @@ for i in xrange(X.shape[0]):
)DOC"); )DOC");
} }
}; };
class RowwiseAddGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "X should not be null");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("b"), "b should not be null");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
"Input(Out@GRAD) should not be null");
auto dims0 = ctx.Input<Tensor>("X")->dims();
auto dims1 = ctx.Input<Tensor>("b")->dims();
PADDLE_ENFORCE_EQ(1, dims1.size(), "b dims should be 1");
ctx.Output<Tensor>(framework::GradVarName("X"))->Resize(dims0);
ctx.Output<Tensor>(framework::GradVarName("b"))->Resize(dims1);
}
};
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP(rowwise_add, ops::RowWiseAddOp, ops::RowWiseAddOpMaker); REGISTER_OP(rowwise_add, ops::RowwiseAddOp, ops::RowwiseAddOpMaker,
rowwise_add_grad, ops::RowwiseAddGradOp);
REGISTER_OP_CPU_KERNEL(
rowwise_add, ops::RowwiseAddKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL( REGISTER_OP_CPU_KERNEL(
rowwise_add, ops::RowWiseAddKernel<paddle::platform::CPUPlace, float>); rowwise_add_grad,
ops::RowwiseAddGradKernel<paddle::platform::CPUPlace, float>);
...@@ -17,4 +17,4 @@ ...@@ -17,4 +17,4 @@
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL( REGISTER_OP_GPU_KERNEL(
rowwise_add, ops::RowWiseAddKernel<paddle::platform::GPUPlace, float>); rowwise_add, ops::RowwiseAddKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#pragma once #pragma once
#include "paddle/framework/eigen.h" #include "paddle/framework/eigen.h"
...@@ -28,14 +28,14 @@ template <typename T, int MajorType = Eigen::RowMajor, ...@@ -28,14 +28,14 @@ template <typename T, int MajorType = Eigen::RowMajor,
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>; using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
template <typename Place, typename T> template <typename Place, typename T>
class RowWiseAddKernel : public framework::OpKernel { class RowwiseAddKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto out = context.Output<Tensor>(0); auto out = context.Output<Tensor>("Out");
out->mutable_data<T>(context.GetPlace()); out->mutable_data<T>(context.GetPlace());
auto input = EigenMatrix<T>::From(*context.Input<Tensor>(0)); auto input = EigenMatrix<T>::From(*context.Input<Tensor>("X"));
auto bias = EigenVector<T>::From(*context.Input<Tensor>(1)); auto bias = EigenVector<T>::From(*context.Input<Tensor>("b"));
auto output = EigenMatrix<T>::From(*out); auto output = EigenMatrix<T>::From(*out);
const int bias_size = bias.dimension(0); const int bias_size = bias.dimension(0);
...@@ -47,5 +47,25 @@ class RowWiseAddKernel : public framework::OpKernel { ...@@ -47,5 +47,25 @@ class RowWiseAddKernel : public framework::OpKernel {
} }
}; };
template <typename Place, typename T>
class RowwiseAddGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* dOut = context.Input<Tensor>(framework::GradVarName("Out"));
auto* dX = context.Output<Tensor>(framework::GradVarName("X"));
auto* db = context.Output<Tensor>(framework::GradVarName("b"));
dX->mutable_data<T>(context.GetPlace());
db->mutable_data<T>(context.GetPlace());
auto OutGrad = EigenMatrix<T>::From(*dOut);
auto place = context.GetEigenDevice<Place>();
EigenMatrix<T>::From(*dX).device(place) = OutGrad;
// https://eigen.tuxfamily.org/dox/unsupported/TensorBase_8h_source.html
// colwise add
Eigen::array<int, 1> dims{{0}}; /* dimension to reduce */
EigenVector<T>::Flatten(*db).device(place) = OutGrad.sum(dims);
}
};
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
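
Because Out[i][j] = X[i][j] + b[j], the backward pass is mechanical: dX is dOut passed through unchanged, and db[j] accumulates dOut down each column, which is exactly the OutGrad.sum over dimension 0 in RowwiseAddGradKernel. A plain-loop reference (hypothetical helper, not Paddle API):

    #include <vector>

    // dX = dOut;  db[j] = sum_i dOut[i][j]
    void RowwiseAddGradRef(int rows, int cols, const std::vector<float>& dOut,
                           std::vector<float>* dX, std::vector<float>* db) {
      *dX = dOut;  // identity pass-through
      db->assign(cols, 0.f);
      for (int i = 0; i < rows; ++i)
        for (int j = 0; j < cols; ++j)
          (*db)[j] += dOut[i * cols + j];  // column-wise reduction
    }
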
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <cstring>
#include "paddle/framework/ddim.h"
#include "paddle/framework/eigen.h"
#include "paddle/framework/tensor.h"
#include "paddle/platform/place.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
// Implementation of CPU copy
template <typename T>
void CPUScatterUpdate(const paddle::framework::Tensor* src, const int* index,
const size_t index_size,
paddle::framework::Tensor* output) {
paddle::framework::DDim output_dims = output->dims();
for (size_t i = 0; i < index_size; ++i) {
int index_ = index[i];
paddle::framework::Tensor src_ = *src;
paddle::framework::Tensor output_ = *output;
if (index_size > 1) src_ = src->Slice<T>(i, i + 1);
if (output_dims[0] > 1) output_ = output->Slice<T>(index_, index_ + 1);
auto X = EigenVector<T>::Flatten(src_);
auto Y = EigenVector<T>::Flatten(output_);
Y = X + Y;
}
}
// Implementation of GPU scatter:
template <typename T>
void GPUScatterUpdate(const T* src, const int* index, const int slice_size,
const int index_size, T* output);
/**
* Return an updated tensor from source tensor, scattered according to index:
* dst[index[i]] += src[i]
* input[src]: type-T source Tensor
* input[index]: type-int index Tensor (1-D)
* return: output tensor
*/
template <typename T>
void ScatterUpdate(const platform::Place& place,
const paddle::framework::Tensor* src,
const paddle::framework::Tensor* index,
paddle::framework::Tensor* output) {
// check index of shape 1-D
PADDLE_ENFORCE(index->dims().size() == 1);
int index_size = index->dims()[0];
auto src_dims = src->dims();
auto dst_dims = output->dims();
// check src shape and dst shape should match
for (int i = 1; i < src_dims.size(); i++)
PADDLE_ENFORCE(src_dims[i] == dst_dims[i]);
// slice size: elements per row, i.e. the product of all dims after the
// first (the first dim is the one being indexed)
size_t slice_size = 1;
for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i];
if (platform::is_cpu_place(place)) {
CPUScatterUpdate<T>(src, index->data<int>(), index_size, output);
} else {
}
}
} // namespace operators
} // namespace paddle
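
Spelled out without tensor slices: row i of src is added into row index[i] of output. A plain-array equivalent of the CPU path (slice_size = elements per row; illustrative only):

    // output[index[i]][k] += src[i][k] for every row i and element k.
    void ScatterAddRef(const float* src, const int* index, int index_size,
                       int slice_size, float* output) {
      for (int i = 0; i < index_size; ++i)
        for (int k = 0; k < slice_size; ++k)
          output[index[i] * slice_size + k] += src[i * slice_size + k];
    }
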
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/scatter.h"
#include "paddle/framework/ddim.h"
#include "paddle/framework/tensor.h"
#include "paddle/platform/place.h"
#include <gtest/gtest.h>
#include <iostream>
#include <string>
TEST(scatter, ScatterUpdate) {
using namespace paddle::framework;
using namespace paddle::platform;
using namespace paddle::operators;
Tensor* src = new Tensor();
Tensor* index = new Tensor();
Tensor* output = new Tensor();
float* p_src = nullptr;
int* p_index = nullptr;
p_src = src->mutable_data<float>(make_ddim({1, 4}), CPUPlace());
p_index = index->mutable_data<int>(make_ddim({1}), CPUPlace());
for (size_t i = 0; i < 4; ++i) p_src[i] = float(i);
p_index[0] = 1;
float* p_output = output->mutable_data<float>(make_ddim({4, 4}), CPUPlace());
ScatterUpdate<float>(CPUPlace(), src, index, output);
for (size_t i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], float(0));
for (size_t i = 0; i < 4; ++i) EXPECT_EQ(output->data<float>()[i], float(0));
for (size_t i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], float(i - 4));
for (size_t i = 4; i < 8; ++i)
EXPECT_EQ(output->data<float>()[i], float(i - 4));
for (size_t i = 8; i < 16; ++i) EXPECT_EQ(p_output[i], float(0));
for (size_t i = 8; i < 16; ++i) EXPECT_EQ(output->data<float>()[i], float(0));
delete src;
delete index;
delete output;
}
...@@ -18,16 +18,15 @@ namespace paddle { ...@@ -18,16 +18,15 @@ namespace paddle {
namespace operators { namespace operators {
class SGDOp : public framework::OperatorWithKernel { class SGDOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_EQ(ctx.InputSize(), 2, "Input size of SGDOp must be two"); PADDLE_ENFORCE(
PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, "Output size of SGDOp must be one"); ctx.Input<Tensor>("param")->dims() == ctx.Input<Tensor>("grad")->dims(),
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), "inputs[0] mast be set");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(1), "inputs[1] mast be set");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar(0), "outputs[0] mast be set");
PADDLE_ENFORCE(ctx.Input<Tensor>(0)->dims() == ctx.Input<Tensor>(1)->dims(),
"Two input of SGD Op's dimension must be same."); "Two input of SGD Op's dimension must be same.");
ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims()); ctx.Output<Tensor>("param_out")->Resize(ctx.Input<Tensor>("param")->dims());
} }
}; };
...@@ -52,6 +51,6 @@ param_out = param - learning_rate * grad; ...@@ -52,6 +51,6 @@ param_out = param - learning_rate * grad;
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP(sgd, ops::SGDOp, ops::SGDOpMaker); REGISTER_OP_WITHOUT_GRADIENT(sgd, ops::SGDOp, ops::SGDOpMaker);
REGISTER_OP_CPU_KERNEL(sgd, REGISTER_OP_CPU_KERNEL(sgd,
ops::SGDOpKernel<paddle::platform::CPUPlace, float>); ops::SGDOpKernel<paddle::platform::CPUPlace, float>);
...@@ -30,7 +30,7 @@ class SGDOpKernel : public framework::OpKernel { ...@@ -30,7 +30,7 @@ class SGDOpKernel : public framework::OpKernel {
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
auto param = ctx.Input<Tensor>("param"); auto param = ctx.Input<Tensor>("param");
auto grad = ctx.Input<Tensor>("grad"); auto grad = ctx.Input<Tensor>("grad");
auto param_out = ctx.Output<Tensor>(0); auto param_out = ctx.Output<Tensor>("param_out");
float lr = ctx.op_.GetAttr<float>("learning_rate"); float lr = ctx.op_.GetAttr<float>("learning_rate");
param_out->mutable_data<T>(ctx.GetPlace()); param_out->mutable_data<T>(ctx.GetPlace());
......
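
The update itself is the one-liner param_out = param - learning_rate * grad. A worked value check (learning_rate = 0.1 is an assumed value):

    #include <cassert>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    int main() {
      const float lr = 0.1f;
      std::vector<float> param = {1.f, 2.f}, grad = {0.5f, 0.5f}, out(2);
      for (std::size_t i = 0; i < param.size(); ++i)
        out[i] = param[i] - lr * grad[i];          // elementwise SGD step
      assert(std::fabs(out[0] - 0.95f) < 1e-6f);   // 1 - 0.1 * 0.5
      assert(std::fabs(out[1] - 1.95f) < 1e-6f);   // 2 - 0.1 * 0.5
      return 0;
    }
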
...@@ -18,11 +18,12 @@ namespace paddle { ...@@ -18,11 +18,12 @@ namespace paddle {
namespace operators { namespace operators {
class SigmoidOp : public framework::OperatorWithKernel { class SigmoidOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE(ctx.InputSize() == 1, "Sigmoid Op only have one input"); ctx.Output<Tensor>("Y")->Resize(ctx.Input<Tensor>("X")->dims());
PADDLE_ENFORCE(ctx.OutputSize() == 1, "Sigmoid Op only have one output");
ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims());
} }
}; };
...@@ -38,9 +39,13 @@ class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -38,9 +39,13 @@ class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
}; };
class SigmoidOpGrad : public framework::OperatorWithKernel { class SigmoidOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims()); ctx.Output<Tensor>(framework::GradVarName("X"))
->Resize(ctx.Input<Tensor>("Y")->dims());
} }
}; };
...@@ -48,9 +53,8 @@ class SigmoidOpGrad : public framework::OperatorWithKernel { ...@@ -48,9 +53,8 @@ class SigmoidOpGrad : public framework::OperatorWithKernel {
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker); REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker, sigmoid_grad,
REGISTER_GRADIENT_OP(sigmoid, sigmoid_grad, ops::SigmoidOpGrad); ops::SigmoidOpGrad);
REGISTER_OP_CPU_KERNEL(sigmoid, REGISTER_OP_CPU_KERNEL(sigmoid,
ops::SigmoidKernel<paddle::platform::CPUPlace, float>); ops::SigmoidKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL( REGISTER_OP_CPU_KERNEL(
......
...@@ -28,8 +28,8 @@ template <typename Place, typename T> ...@@ -28,8 +28,8 @@ template <typename Place, typename T>
class SigmoidKernel : public framework::OpKernel { class SigmoidKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto input = context.Input<Tensor>(0); auto input = context.Input<Tensor>("X");
auto output = context.Output<Tensor>(0); auto output = context.Output<Tensor>("Y");
output->mutable_data<T>(context.GetPlace()); output->mutable_data<T>(context.GetPlace());
// The clipping is used in Paddle's raw implementation // The clipping is used in Paddle's raw implementation
...@@ -37,7 +37,7 @@ class SigmoidKernel : public framework::OpKernel { ...@@ -37,7 +37,7 @@ class SigmoidKernel : public framework::OpKernel {
auto Y = EigenVector<T>::Flatten(*output); auto Y = EigenVector<T>::Flatten(*output);
auto place = context.GetEigenDevice<Place>(); auto place = context.GetEigenDevice<Place>();
Y.device(place) = 1.0 / (1.0 + (-1.0 * X).exp()); Y.device(place) = 1. / (1. + (-X).exp());
} }
}; };
......
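
The sigmoid gradient kernel is elided above, but for y = 1 / (1 + e^-x) the derivative has the closed form dy/dx = y * (1 - y), so the backward pass needs only Y and dY — which is why SigmoidOpGrad::InferShape reads its shape from "Y". A plain-loop sketch of that elementwise rule (illustrative, not the actual kernel):

    #include <cstddef>

    // dx[i] = dy[i] * y[i] * (1 - y[i])
    void SigmoidGradRef(std::size_t n, const float* y, const float* dy,
                        float* dx) {
      for (std::size_t i = 0; i < n; ++i) dx[i] = dy[i] * y[i] * (1.f - y[i]);
    }
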
...@@ -18,14 +18,13 @@ namespace paddle { ...@@ -18,14 +18,13 @@ namespace paddle {
namespace operators { namespace operators {
class SoftmaxOp : public framework::OperatorWithKernel { class SoftmaxOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_EQ(ctx.InputSize(), 1UL, PADDLE_ENFORCE(ctx.Input<Tensor>("X")->dims().size() == 2UL,
"Only one input is need for softmax");
PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims().size(), 2UL,
"The input of softmax op must be matrix"); "The input of softmax op must be matrix");
PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1UL,
"Only one output is need for softmax");
ctx.Output<Tensor>("Y")->Resize(ctx.Input<Tensor>("X")->dims()); ctx.Output<Tensor>("Y")->Resize(ctx.Input<Tensor>("X")->dims());
} }
}; };
...@@ -42,13 +41,12 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -42,13 +41,12 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
}; };
class SoftmaxOpGrad : public framework::OperatorWithKernel { class SoftmaxOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_EQ(ctx.InputSize(), 3UL, PADDLE_ENFORCE(ctx.InputVar("Y") != nullptr, "Input(Y) should not be null");
"Input of SoftmaxOpGrad should be 3, X, Y, YG");
PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1UL,
"Output of SoftmaxOpGrad should be 1");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Y")), PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Y")),
"Input(Y@GRAD) should not be null"); "Input(Y@GRAD) should not be null");
PADDLE_ENFORCE(ctx.Input<Tensor>("Y")->dims() == PADDLE_ENFORCE(ctx.Input<Tensor>("Y")->dims() ==
...@@ -64,9 +62,9 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel { ...@@ -64,9 +62,9 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker); REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker, softmax_grad,
ops::SoftmaxOpGrad);
REGISTER_OP_CPU_KERNEL(softmax, REGISTER_OP_CPU_KERNEL(softmax,
ops::SoftmaxKernel<paddle::platform::CPUPlace, float>); ops::SoftmaxKernel<paddle::platform::CPUPlace, float>);
REGISTER_GRADIENT_OP(softmax, softmax_grad, ops::SoftmaxOpGrad);
REGISTER_OP_CPU_KERNEL( REGISTER_OP_CPU_KERNEL(
softmax_grad, ops::SoftmaxGradKernel<paddle::platform::CPUPlace, float>); softmax_grad, ops::SoftmaxGradKernel<paddle::platform::CPUPlace, float>);
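
For reference, the forward pass these shape checks protect computes, per row, exp(x - max(x)) normalized by the row sum; subtracting the row max first keeps exp from overflowing. A plain-loop sketch (illustrative, not the Paddle kernel):

    #include <algorithm>
    #include <cmath>

    // Row-wise softmax over a rows x cols matrix stored row-major.
    void SoftmaxRef(int rows, int cols, const float* x, float* y) {
      for (int i = 0; i < rows; ++i) {
        const float* row = x + i * cols;
        float m = *std::max_element(row, row + cols);  // stability shift
        float sum = 0.f;
        for (int j = 0; j < cols; ++j) {
          y[i * cols + j] = std::exp(row[j] - m);
          sum += y[i * cols + j];
        }
        for (int j = 0; j < cols; ++j) y[i * cols + j] /= sum;
      }
    }
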
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
...@@ -27,7 +24,7 @@ template <typename T> ...@@ -27,7 +24,7 @@ template <typename T>
class CPUUniformRandomKernel : public framework::OpKernel { class CPUUniformRandomKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto* tensor = context.Output<framework::Tensor>(0); auto* tensor = context.Output<framework::Tensor>("Out");
T* data = tensor->mutable_data<T>(context.GetPlace()); T* data = tensor->mutable_data<T>(context.GetPlace());
unsigned int seed = unsigned int seed =
static_cast<unsigned int>(context.op_.GetAttr<int>("seed")); static_cast<unsigned int>(context.op_.GetAttr<int>("seed"));
...@@ -39,18 +36,22 @@ class CPUUniformRandomKernel : public framework::OpKernel { ...@@ -39,18 +36,22 @@ class CPUUniformRandomKernel : public framework::OpKernel {
std::uniform_real_distribution<T> dist( std::uniform_real_distribution<T> dist(
static_cast<T>(context.op_.GetAttr<float>("min")), static_cast<T>(context.op_.GetAttr<float>("min")),
static_cast<T>(context.op_.GetAttr<float>("max"))); static_cast<T>(context.op_.GetAttr<float>("max")));
for (ssize_t i = 0; i < framework::product(tensor->dims()); ++i) { ssize_t size = framework::product(tensor->dims());
for (ssize_t i = 0; i < size; ++i) {
data[i] = dist(engine); data[i] = dist(engine);
} }
} }
}; };
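The kernel above fills the output buffer from std::uniform_real_distribution using the op's "seed", "min", and "max" attributes. A standalone sketch of that sampling logic, assuming an std::minstd_rand engine and a std::random_device fall-back when the seed is 0 (both are elided from the hunk, so treat them as assumptions):

#include <iostream>
#include <random>
#include <vector>

int main() {
  unsigned int seed = 0;    // stand-in for the "seed" attribute
  const float min = -1.0f;  // stand-in for the "min" attribute
  const float max = 1.0f;   // stand-in for the "max" attribute

  std::minstd_rand engine;  // assumed engine type; not shown in the hunk
  if (seed == 0) {
    seed = std::random_device()();  // assumed fall-back for seed == 0
  }
  engine.seed(seed);

  std::uniform_real_distribution<float> dist(min, max);
  std::vector<float> data(8);  // stand-in for the output tensor buffer
  for (auto& v : data) {
    v = dist(engine);
  }
  for (auto v : data) std::cout << v << ' ';
  std::cout << std::endl;
  return 0;
}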
class UniformRandomOp : public framework::OperatorWithKernel { class UniformRandomOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext& ctx) const override { void InferShape(const framework::InferShapeContext& ctx) const override {
PADDLE_ENFORCE(GetAttr<float>("min") < GetAttr<float>("max"), PADDLE_ENFORCE(GetAttr<float>("min") < GetAttr<float>("max"),
"uniform_random's min must less then max"); "uniform_random's min must less then max");
auto* tensor = ctx.Output<framework::Tensor>(0); auto* tensor = ctx.Output<framework::Tensor>("Out");
auto dims = GetAttr<std::vector<int>>("dims"); auto dims = GetAttr<std::vector<int>>("dims");
tensor->Resize(framework::make_ddim(dims)); tensor->Resize(framework::make_ddim(dims));
} }
...@@ -63,7 +64,6 @@ class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -63,7 +64,6 @@ class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker {
: framework::OpProtoAndCheckerMaker(proto, op_checker) { : framework::OpProtoAndCheckerMaker(proto, op_checker) {
AddOutput("Out", "The output tensor of uniform random op"); AddOutput("Out", "The output tensor of uniform random op");
AddComment(R"DOC(Uniform random operator. AddComment(R"DOC(Uniform random operator.
Used to initialize a tensor with a uniform random generator. Used to initialize a tensor with a uniform random generator.
)DOC"); )DOC");
AddAttr<std::vector<int>>("dims", "the dimension of random tensor"); AddAttr<std::vector<int>>("dims", "the dimension of random tensor");
...@@ -78,7 +78,7 @@ Used to initialize tensor with uniform random generator. ...@@ -78,7 +78,7 @@ Used to initialize tensor with uniform random generator.
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
REGISTER_OP(uniform_random, paddle::operators::UniformRandomOp, REGISTER_OP_WITHOUT_GRADIENT(uniform_random, paddle::operators::UniformRandomOp,
paddle::operators::UniformRandomOpMaker); paddle::operators::UniformRandomOpMaker);
REGISTER_OP_CPU_KERNEL(uniform_random, REGISTER_OP_CPU_KERNEL(uniform_random,
paddle::operators::CPUUniformRandomKernel<float>); paddle::operators::CPUUniformRandomKernel<float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
...@@ -46,7 +43,7 @@ template <typename T> ...@@ -46,7 +43,7 @@ template <typename T>
class GPUUniformRandomKernel : public framework::OpKernel { class GPUUniformRandomKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto* tensor = context.Output<framework::Tensor>(0); auto* tensor = context.Output<framework::Tensor>("Out");
T* data = tensor->mutable_data<T>(context.GetPlace()); T* data = tensor->mutable_data<T>(context.GetPlace());
unsigned int seed = unsigned int seed =
static_cast<unsigned int>(context.op_.GetAttr<int>("seed")); static_cast<unsigned int>(context.op_.GetAttr<int>("seed"));
......
...@@ -48,7 +48,8 @@ Parameter::Parameter(const ParameterConfig& config, bool useGpu, bool doInit) ...@@ -48,7 +48,8 @@ Parameter::Parameter(const ParameterConfig& config, bool useGpu, bool doInit)
deviceId_(-1), deviceId_(-1),
sharedCount_(0), sharedCount_(0),
updateCounter_(0), updateCounter_(0),
updated_(false) { updated_(false),
headerFormat_(PARAM_FORMAT_ORIGINAL) {
setID(-1); /* capture uninitialized id */ setID(-1); /* capture uninitialized id */
if (useGpu_ && FLAGS_parallel_nn) { if (useGpu_ && FLAGS_parallel_nn) {
/* gpu environment is specified by device property */ /* gpu environment is specified by device property */
...@@ -285,7 +286,7 @@ bool Parameter::save(const std::string& filename) const { ...@@ -285,7 +286,7 @@ bool Parameter::save(const std::string& filename) const {
bool Parameter::save(std::ostream& s) const { bool Parameter::save(std::ostream& s) const {
CpuVector vec(*bufs_[PARAMETER_VALUE].get()); CpuVector vec(*bufs_[PARAMETER_VALUE].get());
Header header; Header header;
header.version = kFormatVersion; header.format = headerFormat_;
header.valueSize = sizeof(real); header.valueSize = sizeof(real);
header.size = getSize(); header.size = getSize();
...@@ -344,8 +345,9 @@ bool Parameter::load(std::istream& s) { ...@@ -344,8 +345,9 @@ bool Parameter::load(std::istream& s) {
Header header; Header header;
CHECK(s.read(reinterpret_cast<char*>(&header), sizeof(header))) CHECK(s.read(reinterpret_cast<char*>(&header), sizeof(header)))
<< "Fail to read parameter " << getName(); << "Fail to read parameter " << getName();
CHECK_EQ(header.version, kFormatVersion) << "Incorrect format version: " CHECK(isHeaderFormatSupported(header.format)) << "Incorrect format version: "
<< header.version; << header.format;
headerFormat_ = header.format;
CHECK_EQ(header.size, getSize()) CHECK_EQ(header.size, getSize())
<< "The size (" << header.size << ") in the file does not match the size " << "The size (" << header.size << ") in the file does not match the size "
<< "(" << getSize() << ") of the parameter: " << getName(); << "(" << getSize() << ") of the parameter: " << getName();
......
...@@ -34,6 +34,20 @@ limitations under the License. */ ...@@ -34,6 +34,20 @@ limitations under the License. */
namespace paddle { namespace paddle {
typedef enum {
/// The paddle original basic format
PARAM_FORMAT_ORIGINAL = 0,
/// See mkldnn_memory_format_t in
/// https://github.com/01org/mkl-dnn/blob/master/include/mkldnn_types.h
/// for a detailed description.
/// 2D weights tensor in the format (output channels, input channels).
PARAM_FORMAT_MKLDNN_OI,
/// The total number of format items
PARAM_FORMAT_ITEMS,
} PARAM_FORMAT;
class SparsePrefetchRowCpuMatrix; class SparsePrefetchRowCpuMatrix;
class Parameter; class Parameter;
...@@ -51,8 +65,11 @@ public: ...@@ -51,8 +65,11 @@ public:
size_t getSize() const { return config_.size(); } size_t getSize() const { return config_.size(); }
bool isFullSize() const { bool isFullSize() const {
if (bufs_[PARAMETER_VALUE]) {
return this->getSize() == bufs_[PARAMETER_VALUE]->getSize(); return this->getSize() == bufs_[PARAMETER_VALUE]->getSize();
} }
return false;
}
inline bool useGpu() const { return useGpu_; } inline bool useGpu() const { return useGpu_; }
...@@ -242,14 +259,30 @@ public: ...@@ -242,14 +259,30 @@ public:
/// Initialize the value to 0 /// Initialize the value to 0
void zeroMem(); void zeroMem();
static const int kFormatVersion = 0;
/// file header structure /// file header structure
struct Header { struct Header {
int32_t version; // = 0, file format version int32_t format; // = PARAM_FORMAT
uint32_t valueSize; // = sizeof(real) uint32_t valueSize; // = sizeof(real)
uint64_t size; // = getSize() uint64_t size; // = getSize()
}; };
/**
* @brief Whether the given header format is supported.
*/
static bool isHeaderFormatSupported(int32_t fmt) {
return fmt < PARAM_FORMAT_ITEMS;
}
/**
* @brief Get the format in header.
*/
int getHeaderFormat() { return headerFormat_; }
/**
* @brief Set the format in header.
*/
void setHeaderFormat(int32_t fmt) { headerFormat_ = fmt; }
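Parameter::save and Parameter::load above write and read this header as a raw 16-byte struct: an int32 format tag, a uint32 value size, and a uint64 element count. A self-contained sketch of the round trip, with illustrative stand-ins for the Paddle types:

#include <cstdint>
#include <iostream>
#include <sstream>

// Illustrative stand-ins for PARAM_FORMAT and Parameter::Header.
enum : int32_t { kFormatOriginal = 0, kFormatMkldnnOi = 1, kFormatItems = 2 };

struct Header {
  int32_t format;      // one of the PARAM_FORMAT values
  uint32_t valueSize;  // sizeof(real), e.g. 4 for float
  uint64_t size;       // number of parameter values
};

bool IsHeaderFormatSupported(int32_t fmt) { return fmt < kFormatItems; }

int main() {
  Header out{kFormatOriginal, sizeof(float), 1024};
  std::stringstream s;
  s.write(reinterpret_cast<const char*>(&out), sizeof(out));  // save path

  Header in;                                                  // load path
  s.read(reinterpret_cast<char*>(&in), sizeof(in));
  std::cout << "format supported: " << IsHeaderFormatSupported(in.format)
            << ", values: " << in.size << std::endl;
  return 0;
}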
/** /**
* @brief Parameter Update Hook. * @brief Parameter Update Hook.
* *
...@@ -321,6 +354,9 @@ protected: ...@@ -321,6 +354,9 @@ protected:
bool updated_; bool updated_;
SparseFormat format_; SparseFormat format_;
/// The header format for saving or loading param
int32_t headerFormat_;
std::vector<std::shared_ptr<IParameterUpdaterHook>> updaterHooks_; std::vector<std::shared_ptr<IParameterUpdaterHook>> updaterHooks_;
public: public:
......
cc_library(cpu_info SRCS cpu_info.cc DEPS gflags glog) cc_library(cpu_info SRCS cpu_info.cc DEPS gflags glog)
cc_test(cpu_info_test SRCS cpu_info_test.cc DEPS cpu_info) cc_test(cpu_info_test SRCS cpu_info_test.cc DEPS cpu_info)
nv_library(gpu_info SRCS gpu_info.cc DEPS gflags) nv_library(gpu_info SRCS gpu_info.cc DEPS gflags glog)
cc_library(place SRCS place.cc) cc_library(place SRCS place.cc)
cc_test(place_test SRCS place_test.cc DEPS place glog gflags) cc_test(place_test SRCS place_test.cc DEPS place glog gflags)
...@@ -9,6 +9,7 @@ cc_test(place_test SRCS place_test.cc DEPS place glog gflags) ...@@ -9,6 +9,7 @@ cc_test(place_test SRCS place_test.cc DEPS place glog gflags)
add_subdirectory(dynload) add_subdirectory(dynload)
cc_test(enforce_test SRCS enforce_test.cc DEPS stringpiece) cc_test(enforce_test SRCS enforce_test.cc DEPS stringpiece)
cc_test(environment_test SRCS environment_test.cc DEPS stringpiece)
IF(WITH_GPU) IF(WITH_GPU)
set(GPU_CTX_DEPS dynload_cuda dynamic_loader) set(GPU_CTX_DEPS dynload_cuda dynamic_loader)
...@@ -16,5 +17,8 @@ ELSE() ...@@ -16,5 +17,8 @@ ELSE()
set(GPU_CTX_DEPS) set(GPU_CTX_DEPS)
ENDIF() ENDIF()
cc_library(device_context SRCS device_context.cc DEPS place eigen3 ${GPU_CTX_DEPS}) # memcpy depends on device_context; add the deps individually here
# to avoid cyclic dependencies
cc_library(device_context SRCS device_context.cc DEPS memory buddy_allocator
system_allocator memory_block meta_data meta_cache place eigen3 ${GPU_CTX_DEPS})
nv_test(device_context_test SRCS device_context_test.cc DEPS device_context gpu_info) nv_test(device_context_test SRCS device_context_test.cc DEPS device_context gpu_info)
...@@ -10,6 +10,7 @@ See the License for the specific language governing permissions and ...@@ -10,6 +10,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/platform/device_context.h" #include "paddle/platform/device_context.h"
#include "paddle/memory/memory.h"
namespace paddle { namespace paddle {
namespace platform { namespace platform {
...@@ -36,6 +37,59 @@ Place CPUDeviceContext::GetPlace() const { return CPUPlace(); } ...@@ -36,6 +37,59 @@ Place CPUDeviceContext::GetPlace() const { return CPUPlace(); }
#ifndef PADDLE_ONLY_CPU #ifndef PADDLE_ONLY_CPU
class EigenCudaStreamDevice : public Eigen::StreamInterface {
public:
EigenCudaStreamDevice() : scratch_(nullptr), semaphore_(nullptr) {
Eigen::initializeDeviceProp();
}
~EigenCudaStreamDevice() override {}
void Reinitialize(const cudaStream_t* cuda_stream, GPUPlace place) {
stream_ = cuda_stream;
place_ = place;
device_prop_ = &Eigen::m_deviceProperties[place.device];
}
const cudaStream_t& stream() const override { return *stream_; }
const cudaDeviceProp& deviceProperties() const override {
return *device_prop_;
}
void* allocate(size_t num_bytes) const override {
return paddle::memory::Alloc(place_, num_bytes);
}
void deallocate(void* buffer) const override {
paddle::memory::Free(place_, buffer);
}
void* scratchpad() const override {
if (scratch_ == NULL) {
scratch_ = allocate(Eigen::kCudaScratchSize + sizeof(unsigned int));
}
return scratch_;
}
unsigned int* semaphore() const override {
if (semaphore_ == NULL) {
char* scratch =
static_cast<char*>(scratchpad()) + Eigen::kCudaScratchSize;
semaphore_ = reinterpret_cast<unsigned int*>(scratch);
PADDLE_ENFORCE(
cudaMemsetAsync(semaphore_, 0, sizeof(unsigned int), *stream_));
}
return semaphore_;
}
private:
GPUPlace place_;
const cudaStream_t* stream_; // not owned;
const cudaDeviceProp* device_prop_; // not owned;
mutable void* scratch_;
mutable unsigned int* semaphore_;
};
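scratchpad() above lazily allocates Eigen::kCudaScratchSize bytes plus one trailing unsigned int, and semaphore() points at that tail. A host-only sketch of the layout arithmetic, with plain malloc standing in for paddle::memory::Alloc and an assumed scratch size:

#include <cstddef>
#include <cstdlib>
#include <iostream>

int main() {
  // Stand-in for Eigen::kCudaScratchSize; the real value comes from Eigen.
  const std::size_t kScratchSize = 1024;

  // One allocation: scratch area plus one trailing unsigned int.
  char* scratch =
      static_cast<char*>(std::malloc(kScratchSize + sizeof(unsigned int)));

  // The semaphore lives at the tail of the scratchpad, exactly as in
  // EigenCudaStreamDevice::semaphore() above.
  unsigned int* semaphore =
      reinterpret_cast<unsigned int*>(scratch + kScratchSize);
  *semaphore = 0;  // the real code zeroes it with cudaMemsetAsync

  std::cout << "semaphore offset: "
            << reinterpret_cast<char*>(semaphore) - scratch << " bytes"
            << std::endl;
  std::free(scratch);
  return 0;
}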
template <> template <>
Eigen::GpuDevice* DeviceContext::get_eigen_device<Eigen::GpuDevice>() const { Eigen::GpuDevice* DeviceContext::get_eigen_device<Eigen::GpuDevice>() const {
return reinterpret_cast<const CUDADeviceContext*>(this)->eigen_device(); return reinterpret_cast<const CUDADeviceContext*>(this)->eigen_device();
...@@ -43,19 +97,9 @@ Eigen::GpuDevice* DeviceContext::get_eigen_device<Eigen::GpuDevice>() const { ...@@ -43,19 +97,9 @@ Eigen::GpuDevice* DeviceContext::get_eigen_device<Eigen::GpuDevice>() const {
CUDADeviceContext::CUDADeviceContext(GPUPlace place) : place_(place) { CUDADeviceContext::CUDADeviceContext(GPUPlace place) : place_(place) {
SetDeviceId(place_.device); SetDeviceId(place_.device);
// TODO(qijun) Pass a created cuda stream to Eigen::CudaStreamDevice directly PADDLE_ENFORCE(cudaStreamCreate(&stream_));
// here will cause segment fault. We must implement a class derived from eigen_stream_.reset(new EigenCudaStreamDevice());
// Eigen::StreamInterface, and reinitialize it with a cuda stream and a gpu id eigen_stream_->Reinitialize(&stream_, place);
// later. Please refer to the implementation of class EigenCudaStreamDevice
// in TensorFlow.
//
// We find that CUDA 7 introduces a new option, the per-thread default stream,
// that has two effects. Please refer to https://devblogs.nvidia.com/
// parallelforall/gpu-pro-tip-cuda-7-streams-simplify-concurrency/
//
// So, we decide to use the default stream and add the --default-stream per-thread
// nvcc flag. Then, two threads with two CUDADeviceContexts will run in parallel.
eigen_stream_.reset(new Eigen::CudaStreamDevice());
eigen_device_.reset(new Eigen::GpuDevice(eigen_stream_.get())); eigen_device_.reset(new Eigen::GpuDevice(eigen_stream_.get()));
} }
...@@ -70,17 +114,15 @@ CUDADeviceContext::~CUDADeviceContext() { ...@@ -70,17 +114,15 @@ CUDADeviceContext::~CUDADeviceContext() {
PADDLE_ENFORCE(dynload::cudnnDestroy(cudnn_handle_)); PADDLE_ENFORCE(dynload::cudnnDestroy(cudnn_handle_));
} }
if (curand_generator_) {
PADDLE_ENFORCE(dynload::curandDestroyGenerator(curand_generator_));
}
eigen_stream_.reset(); eigen_stream_.reset();
eigen_device_.reset(); eigen_device_.reset();
PADDLE_ENFORCE(cudaStreamDestroy(stream_));
} }
Place CUDADeviceContext::GetPlace() const { return place_; } Place CUDADeviceContext::GetPlace() const { return place_; }
void CUDADeviceContext::Wait() const { void CUDADeviceContext::Wait() const {
PADDLE_ENFORCE(cudaStreamSynchronize(0)); PADDLE_ENFORCE(cudaStreamSynchronize(stream_));
} }
Eigen::GpuDevice* CUDADeviceContext::eigen_device() const { Eigen::GpuDevice* CUDADeviceContext::eigen_device() const {
...@@ -91,6 +133,7 @@ cublasHandle_t CUDADeviceContext::cublas_handle() { ...@@ -91,6 +133,7 @@ cublasHandle_t CUDADeviceContext::cublas_handle() {
if (!cublas_handle_) { if (!cublas_handle_) {
SetDeviceId(place_.device); SetDeviceId(place_.device);
PADDLE_ENFORCE(dynload::cublasCreate(&cublas_handle_)); PADDLE_ENFORCE(dynload::cublasCreate(&cublas_handle_));
PADDLE_ENFORCE(dynload::cublasSetStream(cublas_handle_, stream_));
} }
return cublas_handle_; return cublas_handle_;
} }
...@@ -99,20 +142,12 @@ cudnnHandle_t CUDADeviceContext::cudnn_handle() { ...@@ -99,20 +142,12 @@ cudnnHandle_t CUDADeviceContext::cudnn_handle() {
if (!cudnn_handle_) { if (!cudnn_handle_) {
SetDeviceId(place_.device); SetDeviceId(place_.device);
PADDLE_ENFORCE(dynload::cudnnCreate(&cudnn_handle_)); PADDLE_ENFORCE(dynload::cudnnCreate(&cudnn_handle_));
PADDLE_ENFORCE(dynload::cudnnSetStream(cudnn_handle_, stream_));
} }
return cudnn_handle_; return cudnn_handle_;
} }
curandGenerator_t CUDADeviceContext::curand_generator() { cudaStream_t CUDADeviceContext::stream() { return stream_; }
if (!curand_generator_) {
SetDeviceId(place_.device);
PADDLE_ENFORCE(dynload::curandCreateGenerator(&curand_generator_,
CURAND_RNG_PSEUDO_DEFAULT));
PADDLE_ENFORCE(
dynload::curandSetPseudoRandomGeneratorSeed(curand_generator_, seed_));
}
return curand_generator_;
}
#endif // PADDLE_ONLY_CPU #endif // PADDLE_ONLY_CPU
......
...@@ -17,7 +17,6 @@ limitations under the License. */ ...@@ -17,7 +17,6 @@ limitations under the License. */
#ifndef PADDLE_ONLY_CPU #ifndef PADDLE_ONLY_CPU
#include "paddle/platform/dynload/cublas.h" #include "paddle/platform/dynload/cublas.h"
#include "paddle/platform/dynload/cudnn.h" #include "paddle/platform/dynload/cudnn.h"
#include "paddle/platform/dynload/curand.h"
#include "paddle/platform/gpu_info.h" #include "paddle/platform/gpu_info.h"
#define EIGEN_USE_GPU #define EIGEN_USE_GPU
#endif #endif
...@@ -40,7 +39,7 @@ class DeviceContext { ...@@ -40,7 +39,7 @@ class DeviceContext {
class CPUDeviceContext : public DeviceContext { class CPUDeviceContext : public DeviceContext {
public: public:
CPUDeviceContext(); CPUDeviceContext();
explicit CPUDeviceContext(CPUPlace); explicit CPUDeviceContext(CPUPlace place);
virtual ~CPUDeviceContext() {} virtual ~CPUDeviceContext() {}
Eigen::DefaultDevice* eigen_device() const; Eigen::DefaultDevice* eigen_device() const;
...@@ -52,10 +51,11 @@ class CPUDeviceContext : public DeviceContext { ...@@ -52,10 +51,11 @@ class CPUDeviceContext : public DeviceContext {
}; };
#ifndef PADDLE_ONLY_CPU #ifndef PADDLE_ONLY_CPU
class EigenCudaStreamDevice;
class CUDADeviceContext : public DeviceContext { class CUDADeviceContext : public DeviceContext {
public: public:
explicit CUDADeviceContext(GPUPlace); explicit CUDADeviceContext(GPUPlace place);
virtual ~CUDADeviceContext(); virtual ~CUDADeviceContext();
/*! \brief Wait for all operations completion in the stream. */ /*! \brief Wait for all operations completion in the stream. */
...@@ -74,24 +74,20 @@ class CUDADeviceContext : public DeviceContext { ...@@ -74,24 +74,20 @@ class CUDADeviceContext : public DeviceContext {
/*! \brief Return cudnn handle in the device context. */ /*! \brief Return cudnn handle in the device context. */
cudnnHandle_t cudnn_handle(); cudnnHandle_t cudnn_handle();
/*! \brief Return curand handle in the device context. */ /*! \brief Return cuda stream in the device context. */
curandGenerator_t curand_generator(); cudaStream_t stream();
// clang-format on // clang-format on
private: private:
GPUPlace place_; GPUPlace place_;
private:
std::unique_ptr<Eigen::GpuDevice> eigen_device_; std::unique_ptr<Eigen::GpuDevice> eigen_device_;
std::unique_ptr<Eigen::CudaStreamDevice> eigen_stream_; std::unique_ptr<EigenCudaStreamDevice> eigen_stream_;
private:
uint64_t seed_;
// clang-format off // clang-format off
cudnnHandle_t cudnn_handle_ = nullptr; cudaStream_t stream_{nullptr};
cublasHandle_t cublas_handle_ = nullptr; cudnnHandle_t cudnn_handle_{nullptr};
curandGenerator_t curand_generator_ = nullptr; cublasHandle_t cublas_handle_{nullptr};
// clang-format on // clang-format on
}; };
......
...@@ -43,8 +43,7 @@ TEST(Device, CUDADeviceContext) { ...@@ -43,8 +43,7 @@ TEST(Device, CUDADeviceContext) {
ASSERT_NE(nullptr, cudnn_handle); ASSERT_NE(nullptr, cudnn_handle);
cublasHandle_t cublas_handle = device_context->cublas_handle(); cublasHandle_t cublas_handle = device_context->cublas_handle();
ASSERT_NE(nullptr, cublas_handle); ASSERT_NE(nullptr, cublas_handle);
curandGenerator_t curand_handle = device_context->curand_generator(); ASSERT_NE(nullptr, device_context->stream());
ASSERT_NE(nullptr, curand_handle);
delete device_context; delete device_context;
} }
} }
...@@ -62,12 +62,12 @@ extern void *cublas_dso_handle; ...@@ -62,12 +62,12 @@ extern void *cublas_dso_handle;
DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP(__name) DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP(__name)
#define CUBLAS_BLAS_ROUTINE_EACH(__macro) \ #define CUBLAS_BLAS_ROUTINE_EACH(__macro) \
__macro(cublasSgemv); \ __macro(cublasSgemv_v2); \
__macro(cublasDgemv); \ __macro(cublasDgemv_v2); \
__macro(cublasSgemm); \ __macro(cublasSgemm_v2); \
__macro(cublasDgemm); \ __macro(cublasDgemm_v2); \
__macro(cublasSgeam); \ __macro(cublasSgeam_v2); \
__macro(cublasDgeam); \ __macro(cublasDgeam_v2); \
__macro(cublasCreate_v2); \ __macro(cublasCreate_v2); \
__macro(cublasDestroy_v2); \ __macro(cublasDestroy_v2); \
__macro(cublasSetStream_v2); \ __macro(cublasSetStream_v2); \
......
...@@ -14,14 +14,21 @@ limitations under the License. */ ...@@ -14,14 +14,21 @@ limitations under the License. */
#pragma once #pragma once
#include <execinfo.h> #include <dlfcn.h> // for dladdr
#include <execinfo.h> // for backtrace
#include <iomanip> #include <iomanip>
#include <memory>
#include <sstream> #include <sstream>
#include <stdexcept> #include <stdexcept>
#include <string> #include <string>
#include "paddle/string/printf.h" #include "paddle/string/printf.h"
#include "paddle/string/to_string.h" #include "paddle/string/to_string.h"
#ifdef __GNUC__
#include <cxxabi.h> // for __cxa_demangle
#endif
#ifndef PADDLE_ONLY_CPU #ifndef PADDLE_ONLY_CPU
#include "paddle/platform/dynload/cublas.h" #include "paddle/platform/dynload/cublas.h"
...@@ -39,6 +46,19 @@ limitations under the License. */ ...@@ -39,6 +46,19 @@ limitations under the License. */
namespace paddle { namespace paddle {
namespace platform { namespace platform {
namespace {
#ifdef __GNUC__
inline std::string demangle(std::string name) {
int status = -4; // some arbitrary value to eliminate the compiler warning
std::unique_ptr<char, void (*)(void*)> res{
abi::__cxa_demangle(name.c_str(), NULL, NULL, &status), std::free};
return (status == 0) ? res.get() : name;
}
#else
inline std::string demangle(std::string name) { return name; }
#endif
}
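A short usage sketch for the demangle() helper above, turning a mangled typeid name into a readable one (GCC/Clang only; the helper is repeated so the sketch is self-contained):

#include <cxxabi.h>  // for __cxa_demangle, GCC/Clang only

#include <cstdlib>
#include <iostream>
#include <memory>
#include <string>
#include <typeinfo>
#include <vector>

inline std::string demangle(std::string name) {
  int status = -4;
  std::unique_ptr<char, void (*)(void*)> res{
      abi::__cxa_demangle(name.c_str(), NULL, NULL, &status), std::free};
  return (status == 0) ? res.get() : name;
}

int main() {
  std::vector<int> v;
  // Prints something like "std::vector<int, std::allocator<int> >"
  // instead of the mangled "St6vectorIiSaIiEE".
  std::cout << demangle(typeid(v).name()) << std::endl;
  return 0;
}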
struct EnforceNotMet : public std::exception { struct EnforceNotMet : public std::exception {
std::exception_ptr exp_; std::exception_ptr exp_;
std::string err_str_; std::string err_str_;
...@@ -48,15 +68,29 @@ struct EnforceNotMet : public std::exception { ...@@ -48,15 +68,29 @@ struct EnforceNotMet : public std::exception {
std::rethrow_exception(exp_); std::rethrow_exception(exp_);
} catch (const std::exception& exp) { } catch (const std::exception& exp) {
std::ostringstream sout; std::ostringstream sout;
sout << string::Sprintf("%s at [%s:%d]", exp.what(), f, l) << std::endl; sout << string::Sprintf("%s at [%s:%d]", exp.what(), f, l) << std::endl;
sout << "Call Stacks: " << std::endl; sout << "PaddlePaddle Call Stacks: " << std::endl;
void* call_stack[TRACE_STACK_LIMIT]; void* call_stack[TRACE_STACK_LIMIT];
int sz = backtrace(call_stack, TRACE_STACK_LIMIT); auto size = backtrace(call_stack, TRACE_STACK_LIMIT);
auto line = backtrace_symbols(call_stack, sz); auto symbols = backtrace_symbols(call_stack, size);
for (int i = 0; i < sz; ++i) {
sout << line[i] << std::endl; Dl_info info;
for (int i = 0; i < size; ++i) {
if (dladdr(call_stack[i], &info)) {
auto demangled = demangle(info.dli_sname);
auto addr_offset = static_cast<char*>(call_stack[i]) -
static_cast<char*>(info.dli_saddr);
sout << string::Sprintf("%-3d %*0p %s + %zd\n", i,
2 + sizeof(void*) * 2, call_stack[i],
demangled, addr_offset);
} else {
sout << string::Sprintf("%-3d %*0p\n", i, 2 + sizeof(void*) * 2,
call_stack[i]);
}
} }
free(line); free(symbols);
err_str_ = sout.str(); err_str_ = sout.str();
} }
} }
...@@ -170,7 +204,7 @@ inline void throw_on_error(T e) { ...@@ -170,7 +204,7 @@ inline void throw_on_error(T e) {
* PADDLE_ENFORCE_EQ(a, b); * PADDLE_ENFORCE_EQ(a, b);
* *
* will raise an expression described as follows: * will raise an expression described as follows:
* "enforce a == b failed, 1 != 2" with detailed stack infomation. * "enforce a == b failed, 1 != 2" with detailed stack information.
* *
* extra messages is also supported, for example: * extra messages is also supported, for example:
* PADDLE_ENFORCE(a, b, "some simple enforce failed between %d numbers", 2) * PADDLE_ENFORCE(a, b, "some simple enforce failed between %d numbers", 2)
......
...@@ -12,45 +12,49 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,45 +12,49 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
syntax = "proto2"; #pragma once
package paddle.framework;
#include <stdlib.h>
import "attribute.proto"; #include <unistd.h>
#include <vector>
// AttrDesc is used to describe Attributes of an Operator. It contains
// name, type, and value of Attribute. #include "paddle/platform/enforce.h"
// #include "paddle/string/piece.h"
// e.g., for scale=3.0: name=scale, type=AttrType.FLOAT, value=3.0
message AttrDesc { extern char** environ; // for environment variables
required string name = 1;
required AttrType type = 2; namespace paddle {
optional int32 i = 3; namespace platform {
optional float f = 4;
optional string s = 5; inline void SetEnvVariable(const std::string& name, const std::string& value) {
repeated int32 ints = 6; PADDLE_ENFORCE_NE(setenv(name.c_str(), value.c_str(), 1), -1,
repeated float floats = 7; "Failed to set environment variable %s=%s", name, value);
repeated string strings = 8; }
};
inline void UnsetEnvVariable(const std::string& name) {
// Protocol Message to describe an Operator. PADDLE_ENFORCE_NE(unsetenv(name.c_str()), -1,
// "Failed to unset environment variable %s", name);
// In PaddlePaddle, Operator is used to do a certain computation such }
// as "add", "sub", "cosine", etc.
// (1) Operator needs to know the input and output variable names. inline bool IsEnvVarDefined(const std::string& name) {
// (2) Some ops may have special attributes such as "scale" in "CosineOp". return std::getenv(name.c_str()) != nullptr;
// }
// 3rd-party language can build this proto message and call
// AddOp(const OpDesc& op_desc) of Paddle core to create an Operator. inline std::string GetEnvValue(const std::string& name) {
message OpDesc { PADDLE_ENFORCE(IsEnvVarDefined(name),
// input names of this Operator. "Tried to access undefined environment variable %s", name);
repeated string inputs = 1; return std::getenv(name.c_str());
}
// output names of this Operator.
repeated string outputs = 2; inline std::vector<std::string> GetAllEnvVariables() {
std::vector<std::string> vars;
// type of this Operator, such as "add", "sub", "fc". for (auto var = environ; *var != nullptr; ++var) {
required string type = 3; auto tail = string::Index(*var, "=");
auto name = string::SubStr(*var, 0, tail).ToString();
// Attributes of this Operator. e.g., scale=3.0 in cosine op. vars.push_back(name);
repeated AttrDesc attrs = 4; }
}; return vars;
\ No newline at end of file }
} // namespace platform
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/platform/environment.h"
#include "glog/logging.h"
#include "gtest/gtest.h"
TEST(ENVIRONMENT, ACCESS) {
namespace platform = paddle::platform;
namespace string = paddle::string;
platform::SetEnvVariable("PADDLE_USE_ENV", "TRUE");
EXPECT_TRUE(platform::IsEnvVarDefined("PADDLE_USE_ENV"));
EXPECT_EQ(platform::GetEnvValue("PADDLE_USE_ENV"), "TRUE");
platform::UnsetEnvVariable("PADDLE_USE_ENV");
EXPECT_FALSE(platform::IsEnvVarDefined("PADDLE_USE_ENV"));
platform::SetEnvVariable("PADDLE_USE_ENV1", "Hello ");
platform::SetEnvVariable("PADDLE_USE_ENV2", "World, ");
platform::SetEnvVariable("PADDLE_USE_ENV3", "PaddlePaddle!");
std::string env_info;
auto vars = platform::GetAllEnvVariables();
for_each(vars.begin(), vars.end(), [&](const std::string& var) {
env_info += platform::GetEnvValue(var);
});
EXPECT_TRUE(string::Contains(env_info, "Hello World, PaddlePaddle!"));
platform::UnsetEnvVariable("PADDLE_USE_ENV1");
platform::UnsetEnvVariable("PADDLE_USE_ENV2");
platform::UnsetEnvVariable("PADDLE_USE_ENV3");
env_info.clear();
vars = platform::GetAllEnvVariables();
for_each(vars.begin(), vars.end(), [&](const std::string& var) {
env_info += platform::GetEnvValue(var);
});
EXPECT_FALSE(string::Contains(env_info, "Hello World, PaddlePaddle!"));
EXPECT_FALSE(platform::IsEnvVarDefined("PADDLE_USE_ENV1"));
EXPECT_FALSE(platform::IsEnvVarDefined("PADDLE_USE_ENV2"));
EXPECT_FALSE(platform::IsEnvVarDefined("PADDLE_USE_ENV3"));
}
...@@ -13,8 +13,11 @@ See the License for the specific language governing permissions and ...@@ -13,8 +13,11 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/platform/gpu_info.h" #include "paddle/platform/gpu_info.h"
#include "gflags/gflags.h" #include "gflags/gflags.h"
#include "paddle/platform/enforce.h" #include "paddle/platform/enforce.h"
#include "paddle/platform/environment.h"
DEFINE_double(fraction_of_gpu_memory_to_use, 0.95, DEFINE_double(fraction_of_gpu_memory_to_use, 0.95,
"Default use 95% of GPU memory for PaddlePaddle," "Default use 95% of GPU memory for PaddlePaddle,"
...@@ -70,6 +73,13 @@ size_t GpuMaxChunkSize() { ...@@ -70,6 +73,13 @@ size_t GpuMaxChunkSize() {
GpuMemoryUsage(available, total); GpuMemoryUsage(available, total);
if (IsEnvVarDefined(kEnvFractionGpuMemoryToUse)) {
auto val = std::stod(GetEnvValue(kEnvFractionGpuMemoryToUse));
PADDLE_ENFORCE_GT(val, 0.0);
PADDLE_ENFORCE_LE(val, 1.0);
FLAGS_fraction_of_gpu_memory_to_use = val;
}
// Reserving the rest memory for page tables, etc. // Reserving the rest memory for page tables, etc.
size_t reserving = (1 - FLAGS_fraction_of_gpu_memory_to_use) * total; size_t reserving = (1 - FLAGS_fraction_of_gpu_memory_to_use) * total;
......
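The hunk above lets the PADDLE_FRACTION_GPU_MEMORY_TO_USE environment variable override FLAGS_fraction_of_gpu_memory_to_use before the chunk size is computed. A standalone sketch of that behavior, with POSIX setenv/getenv and std::stod standing in for the platform helpers:

#include <cstdlib>
#include <iostream>
#include <string>

int main() {
  // Stand-in for FLAGS_fraction_of_gpu_memory_to_use (default 0.95).
  double fraction = 0.95;

  // Simulate a user export of the variable declared in gpu_info.h.
  setenv("PADDLE_FRACTION_GPU_MEMORY_TO_USE", "0.5", /*overwrite=*/1);

  if (const char* env = std::getenv("PADDLE_FRACTION_GPU_MEMORY_TO_USE")) {
    double val = std::stod(env);
    if (val > 0.0 && val <= 1.0) {  // the real code enforces (0, 1]
      fraction = val;
    }
  }
  std::cout << "using " << fraction * 100 << "% of GPU memory" << std::endl;
  return 0;
}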
...@@ -18,10 +18,15 @@ limitations under the License. */ ...@@ -18,10 +18,15 @@ limitations under the License. */
#include <cuda_runtime.h> #include <cuda_runtime.h>
#include <stddef.h> #include <stddef.h>
#include <string>
namespace paddle { namespace paddle {
namespace platform { namespace platform {
//! Environment variable: fraction of GPU memory to use on each device.
const std::string kEnvFractionGpuMemoryToUse =
"PADDLE_FRACTION_GPU_MEMORY_TO_USE";
//! Get the total number of GPU devices in system. //! Get the total number of GPU devices in system.
int GetDeviceCount(); int GetDeviceCount();
......
...@@ -65,7 +65,6 @@ void ParameterClient2::initThreads() { ...@@ -65,7 +65,6 @@ void ParameterClient2::initThreads() {
LOG(INFO) << "parallel_thread_num dosent need to set"; LOG(INFO) << "parallel_thread_num dosent need to set";
} }
syncThreadPool_.reset(new SyncThreadPool(threadNum_)); syncThreadPool_.reset(new SyncThreadPool(threadNum_));
startThreads(); startThreads();
} }
...@@ -224,6 +223,14 @@ void ParameterClient2::prepareSendData( ...@@ -224,6 +223,14 @@ void ParameterClient2::prepareSendData(
request.set_cost(cost); request.set_cost(cost);
request.set_batch_status(batchStatus); request.set_batch_status(batchStatus);
CHECK_EQ(request.blocks_size(), 0); CHECK_EQ(request.blocks_size(), 0);
VLOG(10) << "request: trainer_id: " << request.trainer_id()
<< " update_mode" << request.update_mode()
<< " send_back_parameter: " << request.send_back_parameter()
<< " send_back_parameter_type: "
<< request.send_back_parameter_type()
<< " num_samples: " << request.num_samples()
<< " cost: " << request.cost()
<< " batch_status: " << request.batch_status();
} }
for (const auto& segments : parameterSegments) { for (const auto& segments : parameterSegments) {
const auto it = parameterMap_.find(segments.id); const auto it = parameterMap_.find(segments.id);
...@@ -251,11 +258,17 @@ void ParameterClient2::prepareSendData( ...@@ -251,11 +258,17 @@ void ParameterClient2::prepareSendData(
CHECK(sendMat != nullptr) << "sendMat is nullptr"; CHECK(sendMat != nullptr) << "sendMat is nullptr";
syncThreadPool_->exec([&](int tid, size_t numThreads) { syncThreadPool_->exec([&](int tid, size_t numThreads) {
std::lock_guard<std::mutex> guard(sparseAutoGrowthMutex_);
const auto& localIndices = prefetchMat->getLocalIndices(); const auto& localIndices = prefetchMat->getLocalIndices();
/// num of sparse rows /// num of sparse rows
size_t nLocalBlocks = localIndices.size(); size_t nLocalBlocks = localIndices.size();
uint64_t beginDim = 0; uint64_t beginDim = 0;
uint64_t endDim = 0; uint64_t endDim = 0;
// FIXME(typhoonzero): let it resize first
prefetchMat->getLocalRow(nLocalBlocks + 1);
sendMat->getLocalRow(nLocalBlocks + 1);
for (size_t row = 0; row < nLocalBlocks; ++row) { for (size_t row = 0; row < nLocalBlocks; ++row) {
int64_t blockId = localIndices[row]; // local row -> sparse row int64_t blockId = localIndices[row]; // local row -> sparse row
int serverId = std::abs((blockId + nameHash) % serviceNum_); int serverId = std::abs((blockId + nameHash) % serviceNum_);
...@@ -275,7 +288,6 @@ void ParameterClient2::prepareSendData( ...@@ -275,7 +288,6 @@ void ParameterClient2::prepareSendData(
block->set_begin_pos(row * blockSize); block->set_begin_pos(row * blockSize);
/// block len /// block len
block->set_block_size(endDim - beginDim); block->set_block_size(endDim - beginDim);
if (sendingPara) { if (sendingPara) {
sendJob->parallelInputIovs[serverId].push_back( sendJob->parallelInputIovs[serverId].push_back(
{sendMat->getLocalRow(row), sizeof(real) * (size_t)blockSize}); {sendMat->getLocalRow(row), sizeof(real) * (size_t)blockSize});
......
...@@ -583,6 +583,7 @@ protected: ...@@ -583,6 +583,7 @@ protected:
#ifndef PADDLE_DISABLE_TIMER #ifndef PADDLE_DISABLE_TIMER
uint64_t forwardbackwordTime_; uint64_t forwardbackwordTime_;
#endif #endif
std::mutex sparseAutoGrowthMutex_;
/// map id to parameter used for decoding protobuf data /// map id to parameter used for decoding protobuf data
std::unordered_map<size_t, ParameterPtr> parameterMap_; std::unordered_map<size_t, ParameterPtr> parameterMap_;
......
...@@ -1032,8 +1032,8 @@ void ParameterServer2::loadValueVector(const LoadValueRequest& request, ...@@ -1032,8 +1032,8 @@ void ParameterServer2::loadValueVector(const LoadValueRequest& request,
Parameter::Header header; Parameter::Header header;
CHECK(fs.read(reinterpret_cast<char*>(&header), sizeof(header))) CHECK(fs.read(reinterpret_cast<char*>(&header), sizeof(header)))
<< "Fail to read parameters in pserver"; << "Fail to read parameters in pserver";
CHECK_EQ(header.version, Parameter::kFormatVersion) CHECK(Parameter::isHeaderFormatSupported(header.format))
<< "Incorrect format version: " << header.version; << "Incorrect format version: " << header.format;
CHECK_EQ(header.size, (size_t)size_) CHECK_EQ(header.size, (size_t)size_)
<< "The size (" << header.size << ") in the file does not match the size " << "The size (" << header.size << ") in the file does not match the size "
<< "(" << size_ << ") of the pserver: " << serverId_; << "(" << size_ << ") of the pserver: " << serverId_;
...@@ -1063,7 +1063,8 @@ void ParameterServer2::saveValueVector(const SaveValueRequest& request, ...@@ -1063,7 +1063,8 @@ void ParameterServer2::saveValueVector(const SaveValueRequest& request,
CpuVector& vec = vectors_[PARAMETER_APPLY] ? *vectors_[PARAMETER_APPLY] CpuVector& vec = vectors_[PARAMETER_APPLY] ? *vectors_[PARAMETER_APPLY]
: *vectors_[PARAMETER_VALUE]; : *vectors_[PARAMETER_VALUE];
Parameter::Header header; Parameter::Header header;
header.version = Parameter::kFormatVersion; // TODO(TJ): save param headerFormat_
header.format = PARAM_FORMAT_ORIGINAL;
header.valueSize = sizeof(real); header.valueSize = sizeof(real);
header.size = size_; header.size = size_;
......
...@@ -82,10 +82,6 @@ EOF ...@@ -82,10 +82,6 @@ EOF
fi fi
# To build documentation, we need to run cmake again after installing
# PaddlePaddle. This awkwardness is due to
# https://github.com/PaddlePaddle/Paddle/issues/1854. It also
# describes a solution.
if [[ ${WITH_DOC:-OFF} == "ON" ]]; then if [[ ${WITH_DOC:-OFF} == "ON" ]]; then
cat <<EOF cat <<EOF
======================================== ========================================
...@@ -93,11 +89,6 @@ Building documentation ... ...@@ -93,11 +89,6 @@ Building documentation ...
In /paddle/build_doc In /paddle/build_doc
======================================== ========================================
EOF EOF
# build documentation need install Paddle before
make install -j `nproc`
pip install /usr/local/opt/paddle/share/wheels/*.whl
paddle version
mkdir -p /paddle/build_doc mkdir -p /paddle/build_doc
pushd /paddle/build_doc pushd /paddle/build_doc
cmake .. \ cmake .. \
...@@ -106,7 +97,8 @@ EOF ...@@ -106,7 +97,8 @@ EOF
-DWITH_AVX=${WITH_AVX:-ON} \ -DWITH_AVX=${WITH_AVX:-ON} \
-DWITH_SWIG_PY=ON \ -DWITH_SWIG_PY=ON \
-DWITH_STYLE_CHECK=OFF -DWITH_STYLE_CHECK=OFF
make paddle_docs paddle_docs_cn make -j `nproc` gen_proto_py
make -j `nproc` paddle_docs paddle_docs_cn
popd popd
fi fi
...@@ -128,25 +120,6 @@ EOF ...@@ -128,25 +120,6 @@ EOF
/woboq/indexgenerator/codebrowser_indexgenerator $WOBOQ_OUT /woboq/indexgenerator/codebrowser_indexgenerator $WOBOQ_OUT
fi fi
# generate deb package for current build
# FIXME(typhoonzero): should we remove paddle/scripts/deb ?
if [[ ${WITH_DEB:-ON} == "ON" ]]; then
cat <<EOF
========================================
Generating .deb package ...
========================================
EOF
set +e
cpack -D CPACK_GENERATOR='DEB' -j `nproc` ..
err_code=$?
if [ ${err_code} -ne 0 ]; then
# cat error logs if cpack failed.
cat /paddle/build/_CPack_Packages/Linux/DEB/PreinstallOutput.log
exit ${err_code}
fi
set -e
fi
cat <<EOF cat <<EOF
======================================== ========================================
Generate /paddle/build/Dockerfile ... Generate /paddle/build/Dockerfile ...
...@@ -166,15 +139,15 @@ EOF ...@@ -166,15 +139,15 @@ EOF
fi fi
cat >> /paddle/build/Dockerfile <<EOF cat >> /paddle/build/Dockerfile <<EOF
# Use different deb file when building different type of images ADD python/dist/*.whl /
ADD *.deb /
# run paddle version to install python packages first # run paddle version to install python packages first
RUN apt-get update &&\ RUN apt-get update &&\
apt-get install -y wget python-pip && pip install -U pip && \ apt-get install -y wget python-pip && pip install -U pip && \
dpkg -i /*.deb ; apt-get install -f -y && \ pip install /*.whl; apt-get install -f -y && \
apt-get clean -y && \ apt-get clean -y && \
rm -f /*.deb && \ rm -f /*.whl && \
paddle version paddle version && \
ldconfig
${DOCKERFILE_CUDNN_DSO} ${DOCKERFILE_CUDNN_DSO}
${DOCKERFILE_GPU_ENV} ${DOCKERFILE_GPU_ENV}
ADD go/cmd/pserver/pserver /usr/bin/ ADD go/cmd/pserver/pserver /usr/bin/
...@@ -182,3 +155,7 @@ ADD go/cmd/master/master /usr/bin/ ...@@ -182,3 +155,7 @@ ADD go/cmd/master/master /usr/bin/
# default command shows the paddle version and exit # default command shows the paddle version and exit
CMD ["paddle", "version"] CMD ["paddle", "version"]
EOF EOF
set +xe
printf "If you need to install PaddlePaddle in develop docker image,"
printf "please make install or pip install build/python/dist/*.whl.\n"
...@@ -18,6 +18,8 @@ function version(){ ...@@ -18,6 +18,8 @@ function version(){
echo "PaddlePaddle @PADDLE_VERSION@, compiled with" echo "PaddlePaddle @PADDLE_VERSION@, compiled with"
echo " with_avx: @WITH_AVX@" echo " with_avx: @WITH_AVX@"
echo " with_gpu: @WITH_GPU@" echo " with_gpu: @WITH_GPU@"
echo " with_mkldnn: @WITH_MKLDNN"
echo " with_mklml: @WITH_MKLML@"
echo " with_double: @WITH_DOUBLE@" echo " with_double: @WITH_DOUBLE@"
echo " with_python: @WITH_PYTHON@" echo " with_python: @WITH_PYTHON@"
echo " with_rdma: @WITH_RDMA@" echo " with_rdma: @WITH_RDMA@"
...@@ -54,8 +56,7 @@ if [ -z "${PADDLE_NO_STAT+x}" ]; then ...@@ -54,8 +56,7 @@ if [ -z "${PADDLE_NO_STAT+x}" ]; then
fi fi
fi fi
PADDLE_BIN_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
MYDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
if [ ! -z "${DEBUGGER}" ]; then if [ ! -z "${DEBUGGER}" ]; then
echo "Using debug command ${DEBUGGER}" echo "Using debug command ${DEBUGGER}"
...@@ -91,34 +92,16 @@ else: ...@@ -91,34 +92,16 @@ else:
sys.exit(0) sys.exit(0)
EOF EOF
if [ $? -eq 1 ]; then # Older version installed, or not installed at all
echo "First time run paddle, need to install some python dependencies."
# setuptools normalizes package version, so we need to use normalized
# package version for paddle python package
PYTHON_PADDLE_VERSION=$(python -c 'import packaging.version
import setuptools
print str(packaging.version.Version("@PADDLE_VERSION@"))
' 2>/dev/null)
BASEDIR=$(dirname "$0")
pip install ${BASEDIR}/../opt/paddle/share/wheels/*-${PYTHON_PADDLE_VERSION}-*.whl
if [ $? -ne 0 ]; then
echo "pip install wheels failed. "
echo "Please use 'sudo paddle' at the first time you use PaddlePaddle"
echo "PaddlePaddle will install some python dependencies automatically."
exit 1
fi
echo "Python dependencies are installed."
fi
case "$1" in case "$1" in
"train") "train")
${DEBUGGER} $MYDIR/../opt/paddle/bin/paddle_trainer ${@:2} ${DEBUGGER} $PADDLE_BIN_PATH/paddle_trainer ${@:2}
;; ;;
"merge_model") "merge_model")
${DEBUGGER} $MYDIR/../opt/paddle/bin/paddle_merge_model ${@:2} ${DEBUGGER} $PADDLE_BIN_PATH/paddle_merge_model ${@:2}
;; ;;
"pserver") "pserver")
${DEBUGGER} $MYDIR/../opt/paddle/bin/paddle_pserver_main ${@:2} ${DEBUGGER} $PADDLE_BIN_PATH/paddle_pserver_main ${@:2}
;; ;;
"dump_config") "dump_config")
python -m paddle.utils.dump_config ${@:2} python -m paddle.utils.dump_config ${@:2}
...@@ -127,7 +110,7 @@ case "$1" in ...@@ -127,7 +110,7 @@ case "$1" in
python -m paddle.utils.make_model_diagram ${@:2} python -m paddle.utils.make_model_diagram ${@:2}
;; ;;
"usage") "usage")
$MYDIR/../opt/paddle/bin/paddle_usage ${@:2} $PADDLE_BIN_PATH/paddle_usage ${@:2}
;; ;;
"version") "version")
version version
......
...@@ -68,7 +68,7 @@ void NewRemoteParameterUpdater::init( ...@@ -68,7 +68,7 @@ void NewRemoteParameterUpdater::init(
LOG(INFO) << "paddle_begin_init_params start"; LOG(INFO) << "paddle_begin_init_params start";
// NOTE: convert V1 OptimizationConfig proto to V2 OptimizerConfig. // NOTE: convert V1 OptimizationConfig proto to V2 OptimizerConfig.
// This makes golang pserver compatible with handy V1 demos. // This makes golang pserver compatible with handy V1 demos.
// TODO: Refine or remove these ugly converting lines // TODO(wuyi): Refine or remove these ugly converting lines
OptimizerConfig optimizerConfigV2; OptimizerConfig optimizerConfigV2;
if (trainerConfig_.learning_method() == "momentum") { if (trainerConfig_.learning_method() == "momentum") {
optimizerConfigV2.set_optimizer(paddle::OptimizerConfig::SGD); optimizerConfigV2.set_optimizer(paddle::OptimizerConfig::SGD);
......
...@@ -29,7 +29,6 @@ DECLARE_bool(with_gpu); ...@@ -29,7 +29,6 @@ DECLARE_bool(with_gpu);
DECLARE_bool(parallel_nn); DECLARE_bool(parallel_nn);
DECLARE_string(config_args); DECLARE_string(config_args);
DECLARE_bool(use_mkldnn); DECLARE_bool(use_mkldnn);
DECLARE_bool(use_mkldnn_wgt);
const char *kConfigParserModuleName = "paddle.trainer.config_parser"; const char *kConfigParserModuleName = "paddle.trainer.config_parser";
const char *kConfigParserFuncName = "parse_config_and_serialize"; const char *kConfigParserFuncName = "parse_config_and_serialize";
...@@ -47,7 +46,6 @@ TrainerConfigHelper::TrainerConfigHelper(const std::string &configFilePath) ...@@ -47,7 +46,6 @@ TrainerConfigHelper::TrainerConfigHelper(const std::string &configFilePath)
<< ",with_cost=" << FLAGS_with_cost << ",use_gpu=" << FLAGS_use_gpu << ",with_cost=" << FLAGS_with_cost << ",use_gpu=" << FLAGS_use_gpu
<< ",parallel_nn=" << FLAGS_parallel_nn << ",parallel_nn=" << FLAGS_parallel_nn
<< ",use_mkldnn=" << FLAGS_use_mkldnn << ",use_mkldnn=" << FLAGS_use_mkldnn
<< ",use_mkldnn_wgt=" << FLAGS_use_mkldnn_wgt
<< ",cudnn_version=" << hl_get_cudnn_lib_version(); << ",cudnn_version=" << hl_get_cudnn_lib_version();
if (!FLAGS_config_args.empty()) { if (!FLAGS_config_args.empty()) {
configArgs << "," << FLAGS_config_args; configArgs << "," << FLAGS_config_args;
......
...@@ -27,7 +27,6 @@ DEFINE_bool(use_mkldnn, false, "Default still keep use CPU training"); ...@@ -27,7 +27,6 @@ DEFINE_bool(use_mkldnn, false, "Default still keep use CPU training");
DEFINE_bool(use_mkldnn, false, "Only support CPU training"); DEFINE_bool(use_mkldnn, false, "Only support CPU training");
#endif #endif
DEFINE_bool(use_mkldnn_wgt, false, "Init weight from CPU weight");
DEFINE_bool(parallel_nn, DEFINE_bool(parallel_nn,
false, false,
"Whether to use multi-threads to calculate one neural network." "Whether to use multi-threads to calculate one neural network."
......
...@@ -41,4 +41,3 @@ DECLARE_string(predict_file); ...@@ -41,4 +41,3 @@ DECLARE_string(predict_file);
DECLARE_bool(prev_batch_state); DECLARE_bool(prev_batch_state);
DECLARE_string(init_model_path); DECLARE_string(init_model_path);
DECLARE_bool(use_mkldnn); DECLARE_bool(use_mkldnn);
DECLARE_bool(use_mkldnn_wgt);
...@@ -21,6 +21,18 @@ if(WITH_GOLANG) ...@@ -21,6 +21,18 @@ if(WITH_GOLANG)
add_dependencies(copy_paddle_master paddle_master) add_dependencies(copy_paddle_master paddle_master)
endif(WITH_GOLANG) endif(WITH_GOLANG)
set(MKL_SHARED_LIBS "")
set(MKL_DEPENDS "")
if(WITH_MKLML)
list(APPEND MKL_SHARED_LIBS ${MKLML_LIB} ${MKLML_IOMP_LIB})
list(APPEND MKL_DEPENDS mklml)
endif()
if(WITH_MKLDNN)
list(APPEND MKL_SHARED_LIBS "${MKLDNN_LIB}" "${MKLDNN_LIB}.0")
list(APPEND MKL_DEPENDS mkldnn)
endif()
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in
${CMAKE_CURRENT_BINARY_DIR}/setup.py) ${CMAKE_CURRENT_BINARY_DIR}/setup.py)
...@@ -38,8 +50,11 @@ add_custom_command(OUTPUT ${PADDLE_PYTHON_BUILD_DIR}/.timestamp ...@@ -38,8 +50,11 @@ add_custom_command(OUTPUT ${PADDLE_PYTHON_BUILD_DIR}/.timestamp
COMMAND ${CMAKE_COMMAND} -E copy_directory ${PADDLE_PYTHON_BUILD_DIR}/lib* ${PADDLE_PYTHON_BUILD_DIR}/lib-python COMMAND ${CMAKE_COMMAND} -E copy_directory ${PADDLE_PYTHON_BUILD_DIR}/lib* ${PADDLE_PYTHON_BUILD_DIR}/lib-python
DEPENDS gen_proto_py copy_paddle_pybind framework_py_proto ${PY_FILES} ${external_project_dependencies} ${COPY_PADDLE_MASTER}) DEPENDS gen_proto_py copy_paddle_pybind framework_py_proto ${PY_FILES} ${external_project_dependencies} ${COPY_PADDLE_MASTER})
add_custom_target(paddle_python ALL DEPENDS set(paddle_python_deps ${PADDLE_PYTHON_BUILD_DIR}/.timestamp paddle_pserver_main paddle_trainer paddle_merge_model ${MKL_DEPENDS})
${PADDLE_PYTHON_BUILD_DIR}/.timestamp paddle_pserver_main paddle_trainer paddle_merge_model python_api_wheel) if(WITH_SWIG_PY)
list(APPEND paddle_python_deps python_api_wheel)
endif()
add_custom_target(paddle_python ALL DEPENDS ${paddle_python_deps})
set(PADDLE_PYTHON_PACKAGE_DIR ${CMAKE_CURRENT_BINARY_DIR}/dist/) set(PADDLE_PYTHON_PACKAGE_DIR ${CMAKE_CURRENT_BINARY_DIR}/dist/)
......
...@@ -2232,6 +2232,20 @@ class ClipLayer(LayerBase): ...@@ -2232,6 +2232,20 @@ class ClipLayer(LayerBase):
self.config.inputs[0].clip_conf.max = max self.config.inputs[0].clip_conf.max = max
@config_layer('scale_shift')
class ScaleShiftLayer(LayerBase):
def __init__(self, name, inputs, bias=True, **xargs):
super(ScaleShiftLayer, self).__init__(
name, 'scale_shift', 0, inputs=inputs, **xargs)
config_assert(
len(self.inputs) == 1,
'ScaleShiftLayer must have one and only one input.')
input_layer = self.get_input_layer(0)
self.set_layer_size(input_layer.size)
self.create_input_parameter(0, 1, [1, 1])
self.create_bias_parameter(bias, 1)
# key: cost type # key: cost type
# value: cost class # value: cost class
g_cost_map = {} g_cost_map = {}
......
...@@ -298,8 +298,8 @@ def pnpair_evaluator( ...@@ -298,8 +298,8 @@ def pnpair_evaluator(
input, input,
label, label,
info, info,
name=None, weight=None,
weight=None, ): name=None, ):
""" """
Positive-negative pair rate Evaluator, suited to ranking tasks such as Positive-negative pair rate Evaluator, suited to ranking tasks such as
learning to rank. This evaluator must contain at least three layers. learning to rank. This evaluator must contain at least three layers.
...@@ -308,27 +308,31 @@ def pnpair_evaluator( ...@@ -308,27 +308,31 @@ def pnpair_evaluator(
.. code-block:: python .. code-block:: python
eval = pnpair_evaluator(input, info, label) eval = pnpair_evaluator(input, label, info)
:param name: Evaluator name.
:type name: None|basestring
:param input: Input Layer name. The output prediction of network. :param input: Input Layer name. The output prediction of network.
:type input: LayerOutput :type input: LayerOutput
:param label: Label layer name. :param label: Label layer name.
:type label: LayerOutput :type label: LayerOutput
:param info: Label layer name. (TODO, explanation) :param info: Info layer name. (TODO, explanation)
:type info: LayerOutput :type info: LayerOutput
:param weight: Weight Layer name. It should be a matrix with size :param weight: Weight Layer name. It should be a matrix with size
[sample_num, 1]. (TODO, explanation) [sample_num, 1]. (TODO, explanation)
:type weight: LayerOutput :type weight: LayerOutput
:param name: Evaluator name.
:type name: None|basestring
""" """
if not isinstance(input, list):
input = [input]
if label:
input.append(label)
if info:
input.append(info)
evaluator_base( evaluator_base(
name=name,
type="pnpair",
input=input, input=input,
label=label, type="pnpair",
info=info, weight=weight,
weight=weight) name=name, )
@evaluator(EvaluatorAttribute.FOR_CLASSIFICATION) @evaluator(EvaluatorAttribute.FOR_CLASSIFICATION)
......
...@@ -134,6 +134,7 @@ __all__ = [ ...@@ -134,6 +134,7 @@ __all__ = [
'slice_projection', 'slice_projection',
'seq_slice_layer', 'seq_slice_layer',
'kmax_sequence_score_layer', 'kmax_sequence_score_layer',
'scale_shift_layer',
] ]
...@@ -232,6 +233,7 @@ class LayerType(object): ...@@ -232,6 +233,7 @@ class LayerType(object):
SEQ_SLICE = 'seq_slice' SEQ_SLICE = 'seq_slice'
KMAX_SEQ_SCORE = 'kmax_seq_score' KMAX_SEQ_SCORE = 'kmax_seq_score'
SCALE_SHIFT_LAYER = 'scale_shift'
@staticmethod @staticmethod
def is_layer_type(type_name): def is_layer_type(type_name):
...@@ -6278,3 +6280,43 @@ def kmax_sequence_score_layer(input, name=None, beam_size=1): ...@@ -6278,3 +6280,43 @@ def kmax_sequence_score_layer(input, name=None, beam_size=1):
return LayerOutput( return LayerOutput(
name, LayerType.KMAX_SEQ_SCORE, parents=[input], size=input.size) name, LayerType.KMAX_SEQ_SCORE, parents=[input], size=input.size)
@wrap_name_default("scale_shift")
@wrap_param_attr_default()
@wrap_bias_attr_default()
def scale_shift_layer(input, name=None, param_attr=None, bias_attr=None):
"""
This layer applies a linear transformation to each element in each row of
the input matrix: each element is first re-scaled and then shifted by a
bias.
It is similar to the SlopeInterceptLayer, except that the scale and the
bias here are trainable.
.. math::
y = w * x + b
.. code-block:: python
scale_shift = scale_shift_layer(input=input_layer, bias_attr=False)
:param name: The Layer Name.
:type name: basestring
:param input: The input layer.
:type input: LayerOutput.
:param param_attr: The parameter attribute of scaling.
:type param_attr: ParameterAttribute
:param bias_attr: The parameter attribute of shifting.
:type bias_attr: ParameterAttribute
:return: LayerOutput object.
:rtype: LayerOutput
"""
Layer(
name=name,
type=LayerType.SCALE_SHIFT_LAYER,
inputs=Input(input.name, **param_attr.attr),
bias=ParamAttr.to_bias(bias_attr))
return LayerOutput(
name, LayerType.SCALE_SHIFT_LAYER, parents=[input], size=input.size)
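Numerically, scale_shift_layer computes y = w * x + b with one trainable scale and one trainable bias broadcast over every element of the input row. A tiny C++ sketch with illustrative values:

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  // The layer's two trainable parameters (illustrative values).
  const float w = 2.0f;  // scale
  const float b = 0.5f;  // shift (bias)

  std::vector<float> x = {1.0f, -3.0f, 4.0f};  // one input row
  std::vector<float> y(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    y[i] = w * x[i] + b;  // y = w * x + b, broadcast over the row
  }
  for (float v : y) std::cout << v << ' ';  // prints: 2.5 -5.5 8.5
  std::cout << std::endl;
  return 0;
}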
...@@ -8,6 +8,7 @@ test_spp_layer test_bilinear_interp test_maxout test_bi_grumemory math_ops ...@@ -8,6 +8,7 @@ test_spp_layer test_bilinear_interp test_maxout test_bi_grumemory math_ops
test_seq_concat_reshape test_pad test_smooth_l1 test_multiplex_layer test_seq_concat_reshape test_pad test_smooth_l1 test_multiplex_layer
test_prelu_layer test_row_conv test_detection_output_layer test_multibox_loss_layer test_prelu_layer test_row_conv test_detection_output_layer test_multibox_loss_layer
test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer
test_seq_slice_layer test_kmax_seq_socre_layer test_seq_select_layers) test_kmax_seq_socre_layer test_seq_select_layers test_scale_shift_layer
test_seq_slice_layer)
export whole_configs=(test_split_datasource) export whole_configs=(test_split_datasource)
type: "nn"
layers {
name: "data"
type: "data"
size: 100
active_type: ""
}
layers {
name: "__scale_shift_0__"
type: "scale_shift"
size: 100
active_type: ""
inputs {
input_layer_name: "data"
input_parameter_name: "___scale_shift_0__.w0"
}
}
layers {
name: "__scale_shift_1__"
type: "scale_shift"
size: 100
active_type: ""
inputs {
input_layer_name: "data"
input_parameter_name: "___scale_shift_1__.w0"
}
bias_parameter_name: "___scale_shift_1__.wbias"
}
parameters {
name: "___scale_shift_0__.w0"
size: 1
initial_mean: 0.0
initial_std: 1.0
dims: 1
dims: 1
initial_strategy: 0
initial_smart: true
}
parameters {
name: "___scale_shift_1__.w0"
size: 1
initial_mean: 0.0
initial_std: 1.0
dims: 1
dims: 1
initial_strategy: 0
initial_smart: true
}
parameters {
name: "___scale_shift_1__.wbias"
size: 1
initial_mean: 0.0
initial_std: 0.0
dims: 1
dims: 1
initial_strategy: 0
initial_smart: false
}
input_layer_names: "data"
output_layer_names: "__scale_shift_0__"
output_layer_names: "__scale_shift_1__"
sub_models {
name: "root"
layer_names: "data"
layer_names: "__scale_shift_0__"
layer_names: "__scale_shift_1__"
input_layer_names: "data"
output_layer_names: "__scale_shift_0__"
output_layer_names: "__scale_shift_1__"
is_recurrent_layer_group: false
}
from paddle.trainer_config_helpers import *
data = data_layer(name='data', size=100)
scale = scale_shift_layer(input=data, bias_attr=False)
scale_shift = scale_shift_layer(input=data)
outputs(scale, scale_shift)
import paddle.v2.framework.core as core import paddle.v2.framework.core as core
import paddle.v2.framework.proto.op_proto_pb2 as op_proto_pb2 import paddle.v2.framework.proto.framework_pb2 as framework_pb2
import paddle.v2.framework.proto.op_desc_pb2 as op_desc_pb2
import paddle.v2.framework.proto.attribute_pb2 as attribute_pb2
def get_all_op_protos(): def get_all_op_protos():
...@@ -12,11 +10,15 @@ def get_all_op_protos(): ...@@ -12,11 +10,15 @@ def get_all_op_protos():
protostrs = core.get_all_op_protos() protostrs = core.get_all_op_protos()
ret_values = [] ret_values = []
for pbstr in protostrs: for pbstr in protostrs:
op_proto = op_proto_pb2.OpProto.FromString(str(pbstr)) op_proto = framework_pb2.OpProto.FromString(str(pbstr))
ret_values.append(op_proto) ret_values.append(op_proto)
return ret_values return ret_values
def is_str(s):
return isinstance(s, str) or isinstance(s, unicode)
class OpDescCreationMethod(object): class OpDescCreationMethod(object):
""" """
A Functor object to convert user input(use key word args) to OpDesc based on A Functor object to convert user input(use key word args) to OpDesc based on
...@@ -27,7 +29,7 @@ class OpDescCreationMethod(object): ...@@ -27,7 +29,7 @@ class OpDescCreationMethod(object):
""" """
def __init__(self, op_proto): def __init__(self, op_proto):
if not isinstance(op_proto, op_proto_pb2.OpProto): if not isinstance(op_proto, framework_pb2.OpProto):
raise TypeError("Argument should be OpProto") raise TypeError("Argument should be OpProto")
self.__op_proto__ = op_proto self.__op_proto__ = op_proto
...@@ -39,26 +41,34 @@ class OpDescCreationMethod(object): ...@@ -39,26 +41,34 @@ class OpDescCreationMethod(object):
""" """
if len(args) != 0: if len(args) != 0:
raise ValueError("Only keyword arguments is supported by Paddle") raise ValueError("Only keyword arguments is supported by Paddle")
op_desc = op_desc_pb2.OpDesc() op_desc = framework_pb2.OpDesc()
# Inputs for input_parameter in self.__op_proto__.inputs:
ipts, ipt_format, _ = OpDescCreationMethod.extract_input_or_output( input_arguments = kwargs.get(input_parameter.name, [])
"input", kwargs, self.__op_proto__.inputs) if is_str(input_arguments):
op_desc.inputs.extend(ipts) input_arguments = [input_arguments]
if ipt_format is not None:
op_desc.attrs.extend([ipt_format]) if not input_parameter.duplicable and len(input_arguments) > 1:
raise ValueError("Input %s only accepts one input, but give %d"
# Outputs % (input_parameter.name, len(input_arguments)))
outs, out_format, tmp_index = OpDescCreationMethod.extract_input_or_output(
"output", kwargs, self.__op_proto__.outputs) ipt = op_desc.inputs.add()
op_desc.outputs.extend(outs) ipt.parameter = input_parameter.name
if out_format is not None: ipt.arguments.extend(input_arguments)
op_desc.attrs.extend([out_format])
if len(tmp_index) != 0: for output_parameter in self.__op_proto__.outputs:
tmp_index_attr = op_desc.attrs.add() output_arguments = kwargs.get(output_parameter.name, [])
tmp_index_attr.type = attribute_pb2.INTS if is_str(output_arguments):
tmp_index_attr.name = "temporary_index" output_arguments = [output_arguments]
tmp_index_attr.ints.extend(tmp_index)
if not output_parameter.duplicable and len(output_arguments) > 1:
raise ValueError(
"Output %s only accepts one output, but give %d" %
(output_parameter.name, len(output_arguments)))
out = op_desc.outputs.add()
out.parameter = output_parameter.name
out.arguments.extend(output_arguments)
# Types # Types
op_desc.type = self.__op_proto__.type op_desc.type = self.__op_proto__.type
...@@ -72,17 +82,17 @@ class OpDescCreationMethod(object): ...@@ -72,17 +82,17 @@ class OpDescCreationMethod(object):
new_attr = op_desc.attrs.add() new_attr = op_desc.attrs.add()
new_attr.name = attr.name new_attr.name = attr.name
new_attr.type = attr.type new_attr.type = attr.type
if attr.type == attribute_pb2.INT: if attr.type == framework_pb2.INT:
new_attr.i = user_defined_attr new_attr.i = user_defined_attr
elif attr.type == attribute_pb2.FLOAT: elif attr.type == framework_pb2.FLOAT:
new_attr.f = user_defined_attr new_attr.f = user_defined_attr
elif attr.type == attribute_pb2.STRING: elif attr.type == framework_pb2.STRING:
new_attr.s = user_defined_attr new_attr.s = user_defined_attr
elif attr.type == attribute_pb2.INTS: elif attr.type == framework_pb2.INTS:
new_attr.ints.extend(user_defined_attr) new_attr.ints.extend(user_defined_attr)
elif attr.type == attribute_pb2.FLOATS: elif attr.type == framework_pb2.FLOATS:
new_attr.floats.extend(user_defined_attr) new_attr.floats.extend(user_defined_attr)
elif attr.type == attribute_pb2.STRINGS: elif attr.type == framework_pb2.STRINGS:
new_attr.strings.extend(user_defined_attr) new_attr.strings.extend(user_defined_attr)
else: else:
raise NotImplementedError("Not support attribute type " + raise NotImplementedError("Not support attribute type " +
...@@ -90,50 +100,6 @@ class OpDescCreationMethod(object): ...@@ -90,50 +100,6 @@ class OpDescCreationMethod(object):
return op_desc return op_desc
-    @staticmethod
-    def extract_input_or_output(in_out, kwargs, meta):
-        """
-        Extract input variable names or output variable names from key-word
-        arguments, which base on VarProtos.
-
-        :param in_out: "input" or "output"
-        :param kwargs: key-word arguments that user inputted.
-        :param meta: a list of VarProto
-        :return: The three object will be return. The variable names. The
-        input_format or output_format attribute(None if the input or output is
-        not multiple). The temporary variable index list.
-        """
-        multiple = OpDescCreationMethod.any_is_true((m.multiple for m in meta))
-        tmp_index = []
-        retv = []
-        if multiple:
-            var_format = op_desc_pb2.AttrDesc()
-            var_format.type = attribute_pb2.INTS
-            var_format.name = "%s_format" % in_out
-            var_format.ints.append(0)
-
-            for var in meta:
-                var_name = var.name
-
-                if var.temporary:
-                    var_name = [core.var_names.temp()]
-                    tmp_index.append(len(retv))
-                else:
-                    var_name = kwargs.get(var_name, [])
-                if not isinstance(var_name, list):
-                    var_name = [var_name]
-                retv.extend(var_name)
-                var_format.ints.append(len(var_name) + var_format.ints[-1])
-            return retv, var_format, tmp_index
-        else:
-            for var in meta:
-                if var.temporary:
-                    retv.append(kwargs.get(var.name, core.var_names.temp()))
-                    tmp_index.append(len(retv))
-                else:
-                    retv.append(kwargs.get(var.name, core.var_names.empty()))
-            return retv, None, tmp_index
    @staticmethod
    def any_is_true(generator):
        """
@@ -146,13 +112,12 @@ class OpDescCreationMethod(object):


class OpInfo(object):
-    def __init__(self, name, method, inputs, outputs, attrs, no_temp_outputs):
+    def __init__(self, name, method, inputs, outputs, attrs):
        self.name = name
        self.method = method
        self.inputs = inputs
        self.outputs = outputs
        self.attrs = attrs
-        self.no_temp_outputs = no_temp_outputs


def create_op_creation_method(op_proto):
@@ -170,10 +135,7 @@ def create_op_creation_method(op_proto):
        name=op_proto.type,
        inputs=[var.name for var in op_proto.inputs],
        outputs=[var.name for var in op_proto.outputs],
-        attrs=[attr.name for attr in op_proto.attrs],
-        no_temp_outputs=[
-            var.name for var in op_proto.outputs if not var.temporary
-        ])
+        attrs=[attr.name for attr in op_proto.attrs])
class OperatorFactory(object):
@@ -214,8 +176,27 @@ class OperatorFactory(object):
    def get_op_attr_names(self, type):
        return self.get_op_info(type).attrs

-    def get_op_no_temp_output_names(self, type):
-        return self.get_op_info(type).no_temp_outputs
+
+class __RecurrentOp__(object):
+    __proto__ = None
+    type = 'recurrent_op'
+
+    def __init__(self):
+        # cache recurrent_op's proto
+        if self.__proto__ is None:
+            for op_proto in get_all_op_protos():
+                if op_proto.type == self.type:
+                    self.__proto__ = op_proto
+
+    def __call__(self, *args, **kwargs):
+        if self.type not in args and 'type' not in kwargs:
+            kwargs['type'] = self.type
+        # create proto
+        create_method = OpDescCreationMethod(self.__proto__)
+        proto = create_method(*args, **kwargs)
+        # create rnnop
+        return core.RecurrentOp.create(proto.SerializeToString())


Operator = OperatorFactory()  # Default global factory
+RecurrentOp = __RecurrentOp__()
py_test(test_net SRCS test_net.py)
-py_test(test_fc_op SRCS test_fc_op.py)
py_test(test_scope SRCS test_scope.py)
py_test(test_tensor SRCS test_tensor.py)
@@ -23,5 +22,8 @@ py_test(test_rowwise_add_op SRCS test_rowwise_add_op.py)
py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py)
py_test(test_operator SRCS test_operator.py)
-# py_test(test_gaussian_random_op SRCS test_gaussian_random_op.py)
+py_test(test_gaussian_random_op SRCS test_gaussian_random_op.py)
py_test(test_uniform_random_op SRCS test_uniform_random_op.py)
+py_test(test_recurrent_op SRCS test_recurrent_op.py)
+py_test(test_sgd_op SRCS test_sgd_op.py)
+py_test(test_gradient_checker SRCS test_gradient_checker.py)
import unittest
import numpy
+import itertools
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
@@ -8,6 +9,7 @@ __all__ = ['get_numeric_gradient']


def create_op(op_type):
+    # TODO need to set attrs
    kwargs = dict()
    for in_name in Operator.get_op_input_names(op_type):
        kwargs[in_name] = in_name
@@ -53,17 +55,19 @@ def get_numeric_gradient(op,
        tensor.set(input_values[var_name], core.CPUPlace())

    # Create all output variable in local_scope
-    for output in op.outputs():
-        if local_scope.find_var(output) is None:
-            local_scope.new_var(output).get_tensor()
+    opts = op.outputs()
+    for key in opts:
+        for output in opts[key]:
+            if local_scope.find_var(output) is None:
+                local_scope.new_var(output).get_tensor()
    op.infer_shape(local_scope)

    # allocate output memory
-    for output in op.outputs():
-        local_scope.find_var(output).get_tensor().alloc_float(core.CPUPlace())
+    for key in opts:
+        for output in opts[key]:
+            local_scope.find_var(output).get_tensor().alloc_float(core.CPUPlace(
+            ))

-    # TODO(yuyang18): Only CPU is support now.
    cpu_ctx = core.DeviceContext.create(core.CPUPlace())

    def get_output():
@@ -73,31 +77,143 @@ def get_numeric_gradient(op,
    def product(dim):
        return reduce(lambda a, b: a * b, dim, 1)

+    # get the input tensor that we want to compute the numeric gradient of.
    tensor_to_check = local_scope.find_var(input_to_check).get_tensor()
    tensor_size = product(tensor_to_check.get_dims())
+    # prepare a numpy array to store the gradient.
    gradient_flat = numpy.zeros(shape=(tensor_size, ), dtype='float32')
+
+    # we only compute the gradient of one element each time.
+    # we use a for loop to compute the gradient of every element.
    for i in xrange(tensor_size):
+        # get one input element through its index i.
        origin = tensor_to_check.get_float_element(i)
+        # add delta to it, run the op and then get the sum of the result tensor.
        x_pos = origin + delta
        tensor_to_check.set_float_element(i, x_pos)
        y_pos = get_output()

+        # subtract delta from this element, run the op and get the sum of the
+        # result tensor.
        x_neg = origin - delta
        tensor_to_check.set_float_element(i, x_neg)
        y_neg = get_output()

-        tensor_to_check.set_float_element(i, origin)  # restore old value
+        # restore old value
+        tensor_to_check.set_float_element(i, origin)
+
+        # compute the gradient of this element and store it into a numpy array.
        gradient_flat[i] = (y_pos - y_neg) / delta / 2

+    # reshape the gradient result to the shape of the source tensor.
    return gradient_flat.reshape(tensor_to_check.get_dims())
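The loop above is plain central differencing. The same idea in a self-contained numpy sketch (independent of the framework's tensor types):

import numpy as np

def numeric_gradient(f, x, delta=1e-3):
    # central-difference gradient of a scalar-valued f at x, one element at a time
    grad = np.zeros_like(x)
    flat_x, flat_g = x.reshape(-1), grad.reshape(-1)   # views into x and grad
    for i in range(flat_x.size):
        origin = flat_x[i]
        flat_x[i] = origin + delta
        y_pos = f(x)
        flat_x[i] = origin - delta
        y_neg = f(x)
        flat_x[i] = origin                             # restore the element
        flat_g[i] = (y_pos - y_neg) / delta / 2
    return grad

# f(x) = sum(x ** 2) has gradient 2 * x
x = np.random.random((3, 2)).astype("float32")
assert np.allclose(numeric_gradient(lambda v: (v ** 2).sum(), x), 2 * x, atol=1e-2)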
class GradientChecker(unittest.TestCase):
-    def assert_is_close(self, numeric_grads, scope, max_relative_error,
-                        msg_prefix):
-        for name in numeric_grads:
-            b = numpy.array(scope.find_var(grad_var_name(name)).get_tensor())
-            a = numeric_grads[name]
+    def __get_gradient(self, forward_op, backward_op, input_value, grad_names,
+                       place):
+        """Get the input gradients after running forward and backward operators
+        on the given places.
+
+        :param forward_op: forward operator
+        :type forward_op: Operator
+        :param backward_op: backward operator
+        :type backward_op: Operator
+        :param input_value: input values.
+        :type input_value: dict{string:numpy.array}
+        :param grad_names: the names of returned input gradients.
+        :type grad_names: a list of string
+        :param place: the device type.
+        :type place: CPUPlace or GPUPlace
+        :return: the input gradients of given grad_names.
+        :rtype: a list of numpy.array
+        """
+        scope = core.Scope()
+        ctx = core.DeviceContext.create(place)
+
+        inputs = forward_op.inputs()
+        in_names = [item for k in inputs for item in inputs[k]]
+        outputs = forward_op.outputs()
+        out_names = [item for k in outputs for item in outputs[k]]
+
+        # create input var and set value
+        for name, value in input_value.iteritems():
+            if name not in in_names:
+                raise ValueError(name + "does not exist in Op's inputs.")
+            var = scope.new_var(name).get_tensor()
+            var.set_dims(value.shape)
+            var.set(value, place)
+
+        # run forward op
+        for out_name in out_names:
+            scope.new_var(out_name)
+        forward_op.infer_shape(scope)
+        forward_op.run(scope, ctx)
+
+        # set output var's shape
+        # set output grad to ones
+        for name in out_names:
+            out_tensor = scope.find_var(name).get_tensor()
+            grad_tensor = scope.new_var(grad_var_name(name)).get_tensor()
+            grad_tensor.set_dims(out_tensor.shape())
+            data = numpy.ones(out_tensor.shape(), dtype=numpy.float32)
+            grad_tensor.set(data, place)
+
+        # run backward op
+        for name in backward_op.outputs():
+            scope.new_var(name)
+        backward_op.infer_shape(scope)
+        backward_op.run(scope, ctx)
+
+        outs = [
+            numpy.array(scope.find_var(name).get_tensor())
+            for name in grad_names
+        ]
+        return outs
+
+    def compare_grad(self, forward_op, input_value):
+        """ Compare the input gradients between CPU and GPU for the given forward
+        operator.
+
+        :param forward_op: forward operator
+        :type forward_op: Operator
+        :param input_value: input values.
+        :type input_value: dict{string:numpy.array}
+        :raises: AssertionError, there is different gradient value.
+        """
+        backward_op = core.Operator.backward(forward_op, set())
+        # return if not compile with GPU or not implementing GPU kernel
+        if not (core.is_compile_gpu() and backward_op.support_gpu()):
+            return
+
+        outputs = backward_op.outputs()
+        out_names = [item for k in outputs for item in outputs[k]]
+        cpu_grads = self.__get_gradient(forward_op, backward_op, input_value,
+                                        out_names, core.CPUPlace())
+        gpu_grads = self.__get_gradient(forward_op, backward_op, input_value,
+                                        out_names, core.GPUPlace(0))
+
+        for c_grad, g_grad, name in itertools.izip(cpu_grads, gpu_grads,
+                                                   out_names):
+            self.assertTrue(
+                numpy.allclose(
+                    c_grad, g_grad, atol=1e-4),
+                "output name: " + name + " has diff")
+
+    def __assert_is_close(self, numeric_grads, analytic_grads, names,
+                          max_relative_error, msg_prefix):
+        """Use relative error for the comparison.
+
+        :param numeric_grads: the numerical gradients.
+        :type numeric_grads: a list of numpy.array
+        :param analytic_grads: the analytical gradients.
+        :type analytic_grads: a list of numpy.array
+        :param names: the names of gradients, used to print for debug.
+        :type names: a list of string
+        :param msg_prefix: string info, used to print for debug.
+        :type msg_prefix: string
+        """
+        for a, b, name in itertools.izip(numeric_grads, analytic_grads, names):
            abs_a = numpy.abs(a)
            # if abs_a is nearly zero, then use abs error for a, not relative
            # error.
@@ -136,107 +252,32 @@ class GradientChecker(unittest.TestCase):
        if no_grad_set is None:
            no_grad_set = set()

-        tmp_outs = forward_op.temp_outputs()
-        no_tmp_out = filter(lambda name: name not in tmp_outs,
-                            forward_op.outputs())
+        no_tmp_out = forward_op.no_intermediate_outputs()
        if len(no_tmp_out) != 1:
            raise ValueError("non temp out_names should be 1")

-        in_names = forward_op.inputs()
+        inputs = forward_op.inputs()
+        in_names = [item for k in inputs for item in inputs[k]]
        for no_grad in no_grad_set:
            if no_grad not in in_names:
                raise ValueError("no_grad should be in in_names")

        backward_op = core.Operator.backward(forward_op, no_grad_set)

        places = [core.CPUPlace()]
        if not only_cpu and core.is_compile_gpu() and backward_op.support_gpu():
            places.append(core.GPUPlace(0))

-        numeric_grad = dict()
-        # get numeric gradient
-        for check_name in inputs_to_check:
-            numeric_grad[check_name] = \
-                get_numeric_gradient(forward_op, input_vars, output_name,
-                                     check_name)
+        # get numerical gradients
+        numeric_grads = [
+            get_numeric_gradient(forward_op, input_vars, output_name, name)
+            for name in inputs_to_check
+        ]

-        # get operator gradient according to different device
+        check_names = [grad_var_name(name) for name in inputs_to_check]
        for place in places:
-            scope = core.Scope()
-            ctx = core.DeviceContext.create(place)
-
-            # create input var and set value
-            for name, value in input_vars.iteritems():
-                if name not in in_names:
-                    raise ValueError(name + " not in op.inputs_")
-                var = scope.new_var(name).get_tensor()
-                var.set_dims(value.shape)
-                var.set(value, place)
-
-            # create output var
-            for out_name in forward_op.outputs():
-                scope.new_var(out_name).get_tensor()
-
-            # infer the shape of output var and compute/set value of output var
-            forward_op.infer_shape(scope)
-            forward_op.run(scope, ctx)
-
-            # create output grad var
-            # set shape as the output var
-            # set value of this grad to ones
-            for name in forward_op.outputs():
-                out_tensor = scope.find_var(name).get_tensor()
-                grad_tensor = scope.new_var(grad_var_name(name)).get_tensor()
-                grad_tensor.set_dims(out_tensor.shape())
-                data = 1.0 * numpy.ones(out_tensor.shape())
-                grad_tensor.set(data, place)
-
-            # create input grad var
-            for name in backward_op.outputs():
-                scope.new_var(name).get_tensor()
-
-            # infer the shape of input gradient var and compute/set it's value
-            # with backward op
-            backward_op.infer_shape(scope)
-            backward_op.run(scope, ctx)
-
-            self.assert_is_close(numeric_grad, scope, max_relative_error,
-                                 "Gradient Check On %s" % str(place))
+            # get analytical gradients according to different device
+            analytic_grads = self.__get_gradient(forward_op, backward_op,
+                                                 input_vars, check_names, place)
+            self.__assert_is_close(numeric_grads, analytic_grads, check_names,
+                                   max_relative_error,
+                                   "Gradient Check On %s" % str(place))
-class GetNumericGradientTest(unittest.TestCase):
-    def test_add_op(self):
-        add_op = Operator('add_two', X="X", Y="Y", Out="Z")
-        x = numpy.random.random((10, 1)).astype("float32")
-        y = numpy.random.random((10, 1)).astype("float32")
-
-        arr = get_numeric_gradient(add_op, {'X': x, "Y": y}, 'Z', 'X')
-        self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-2)
-
-    def test_softmax_op(self):
-        def stable_softmax(x):
-            """Compute the softmax of vector x in a numerically stable way."""
-            shiftx = x - numpy.max(x)
-            exps = numpy.exp(shiftx)
-            return exps / numpy.sum(exps)
-
-        def label_softmax_grad(Y, dY):
-            dX = Y * 0.0
-            for i in range(Y.shape[0]):
-                d = numpy.dot(Y[i, :], dY[i, :])
-                dX[i, :] = Y[i, :] * (dY[i, :] - d)
-            return dX
-
-        softmax_op = Operator("softmax", X="X", Y="Y")
-
-        X = numpy.random.random((2, 2)).astype("float32")
-        Y = numpy.apply_along_axis(stable_softmax, 1, X)
-        dY = numpy.ones(Y.shape)
-        dX = label_softmax_grad(Y, dY)
-
-        arr = get_numeric_gradient(softmax_op, {"X": X}, 'Y', 'X')
-        numpy.testing.assert_almost_equal(arr, dX, decimal=1e-2)
-
-
-if __name__ == '__main__':
-    unittest.main()
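The comparison in __assert_is_close above amounts to a relative error with an absolute-error fallback where the numeric gradient is nearly zero. A minimal numpy sketch of that rule (not the exact framework code; the 1e-3 threshold is illustrative):

import numpy as np

def max_diff(numeric, analytic, abs_threshold=1e-3):
    a = np.abs(numeric)
    # where |numeric| is almost zero, divide by 1 instead, i.e. absolute error
    denom = np.where(a > abs_threshold, a, 1.0)
    return np.max(np.abs(numeric - analytic) / denom)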
@@ -64,7 +64,8 @@ class OpTestMeta(type):
            actual = numpy.array(scope.find_var(out_name).get_tensor())
            expect = self.outputs[out_name]
            self.assertTrue(
-                numpy.allclose(actual, expect),
+                numpy.allclose(
+                    actual, expect, atol=1e-05),
                "output name: " + out_name + "has diff")
        obj.test_all = test_all
...
@@ -19,14 +19,5 @@ class TestAddOp(unittest.TestCase):
        self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']}

-class TestAddGradOp(unittest.TestCase):
-    def test_add_grad(self):
-        op = Operator('add_two', X="X", Y="Y", Out="Out")
-        backward_op = core.Operator.backward(op, set())
-        self.assertEqual(backward_op.type(), "add_two_grad")
-        expected = '''Op(add_two_grad), inputs:(X, Y, Out, Out@GRAD), outputs:(X@GRAD, Y@GRAD).'''
-        self.assertEqual(expected, str(backward_op))

if __name__ == '__main__':
    unittest.main()
@@ -8,9 +8,8 @@ class TestCrossEntropy(unittest.TestCase):
    __metaclass__ = OpTestMeta

    def setUp(self):
-        # TODO this unit test is not passed
        self.type = "onehot_cross_entropy"
-        batch_size = 100
+        batch_size = 30
        class_num = 10
        X = numpy.random.random((batch_size, class_num)).astype("float32")
        label = 5 * numpy.ones(batch_size).astype("int32")
@@ -22,9 +21,9 @@ class TestCrossEntropy(unittest.TestCase):

class CrossEntropyGradOpTest(GradientChecker):
-    def test_softmax_grad(self):
+    def test_check_grad(self):
        op = create_op("onehot_cross_entropy")
-        batch_size = 100
+        batch_size = 30
        class_num = 10
        inputs = {
            "X": numpy.random.uniform(
...
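For context, onehot_cross_entropy with integer labels computes loss_i = -log(X[i, label_i]). A sketch of the expected forward output such a test checks against:

import numpy

batch_size, class_num = 30, 10
X = numpy.random.uniform(0.1, 1.0, (batch_size, class_num)).astype("float32")
label = 5 * numpy.ones(batch_size).astype("int32")

# negative log-likelihood of the labelled class, one value per row
loss = -numpy.log(X[numpy.arange(batch_size), label])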
import paddle.v2.framework.core as core
import unittest
import numpy
from paddle.v2.framework.op import Operator


class TestFc(unittest.TestCase):
    def test_fc(self):
        scope = core.Scope()
        place = core.CPUPlace()
        x = scope.new_var("X")
        x_tensor = x.get_tensor()
        x_tensor.set_dims([1000, 784])
        x_tensor.alloc_float(place)

        w = scope.new_var("W")
        w_tensor = w.get_tensor()
        w_tensor.set_dims([784, 100])
        w_tensor.alloc_float(place)

        w_tensor.set(numpy.random.random((784, 100)).astype("float32"), place)

        # Set a real numpy array here.
        # x_tensor.set(numpy.array([]))

        op = Operator("fc", X="X", Y="Y", W="W")

        for out in op.outputs():
            if scope.find_var(out) is None:
                scope.new_var(out).get_tensor()

        tensor = scope.find_var("Y").get_tensor()
        op.infer_shape(scope)
        self.assertEqual([1000, 100], tensor.shape())

        ctx = core.DeviceContext.create(place)
        op.run(scope, ctx)

        # After complete all ops, check Y is expect or not.


if __name__ == '__main__':
    unittest.main()
import unittest
import numpy
from paddle.v2.framework.op import Operator
from gradient_checker import GradientChecker
from gradient_checker import get_numeric_gradient


class GetNumericGradientTest(unittest.TestCase):
    def test_add_op(self):
        add_op = Operator('add_two', X="X", Y="Y", Out="Z")
        x = numpy.random.random((10, 1)).astype("float32")
        y = numpy.random.random((10, 1)).astype("float32")

        arr = get_numeric_gradient(add_op, {'X': x, "Y": y}, 'Z', 'X')
        self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4)

    def test_softmax_op(self):
        def stable_softmax(x):
            """Compute the softmax of vector x in a numerically stable way."""
            shiftx = x - numpy.max(x)
            exps = numpy.exp(shiftx)
            return exps / numpy.sum(exps)

        def label_softmax_grad(Y, dY):
            dX = Y * 0.0
            for i in range(Y.shape[0]):
                d = numpy.dot(Y[i, :], dY[i, :])
                dX[i, :] = Y[i, :] * (dY[i, :] - d)
            return dX

        softmax_op = Operator("softmax", X="X", Y="Y")

        X = numpy.random.random((2, 2)).astype("float32")
        Y = numpy.apply_along_axis(stable_softmax, 1, X)
        dY = numpy.ones(Y.shape)
        dX = label_softmax_grad(Y, dY)

        arr = get_numeric_gradient(softmax_op, {"X": X}, 'Y', 'X')
        numpy.testing.assert_almost_equal(arr, dX, decimal=1e-2)


if __name__ == '__main__':
    unittest.main()
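label_softmax_grad above is the standard softmax backward rule: for y = softmax(x) taken row-wise, dx = y * (dy - dot(y, dy)). With dY set to all ones, dot(y, 1) = 1 for each row, so the expected dX is exactly zero, and the central-difference estimate is checked against that.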
import unittest
from op_test_util import OpTestMeta
+from gradient_checker import GradientChecker, create_op
import numpy as np
@@ -12,5 +13,12 @@ class TestMeanOp(unittest.TestCase):
        self.outputs = {'Out': np.mean(self.inputs['X'])}

+class MeanGradOpTest(GradientChecker):
+    def test_normal(self):
+        op = create_op("mean")
+        inputs = {"X": np.random.random((10, 10)).astype("float32")}
+        self.check_grad(op, inputs, set("X"), "Out")

if __name__ == '__main__':
    unittest.main()
import unittest
-from op_test_util import OpTestMeta
import numpy as np
+from gradient_checker import GradientChecker, create_op
+from op_test_util import OpTestMeta


class TestMulOp(unittest.TestCase):
@@ -15,5 +16,19 @@ class TestMulOp(unittest.TestCase):
        self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}

+class MulGradOpTest(GradientChecker):
+    def test_mul(self):
+        op = create_op("mul")
+        inputs = {
+            'X': np.random.random((32, 84)).astype("float32"),
+            'Y': np.random.random((84, 100)).astype("float32")
+        }
+        # mul op will enlarge the relative error
+        self.check_grad(
+            op, inputs, set(["X", "Y"]), "Out", max_relative_error=0.5)

+# TODO(dzh,qijun) : mulgrad test case need transpose feature of blas library

if __name__ == '__main__':
    unittest.main()
@@ -3,25 +3,34 @@ from paddle.v2.framework.op import Operator
import unittest


+def fc(X, W, Y):
+    ret_v = core.Net.create()
+
+    ret_v.append_op(Operator("mul", X="X", Y="W", Out="pre_activation"))
+    ret_v.append_op(Operator("sigmoid", X="pre_activation", Y=Y))
+    ret_v.complete_add_op(True)
+    return ret_v


class TestNet(unittest.TestCase):
    def test_net_all(self):
        net = core.Net.create()
        op1 = Operator("add_two", X="X", Y="Y", Out="Out")
-        net.add_op(op1)
+        net.append_op(op1)

        net2 = core.Net.create()
-        net2.add_op(Operator("fc", X="X", W="w", Y="fc.out"))
+        net2.append_op(fc(X="X", W="w", Y="fc.out"))
        net2.complete_add_op(True)
-        net.add_op(net2)
+        net.append_op(net2)
        net.complete_add_op(True)

        expected = '''
-Op(plain_net), inputs:(@EMPTY@, X, Y, w), outputs:(@TEMP@fc@0, Out, fc.out).
-    Op(add_two), inputs:(X, Y), outputs:(Out).
-    Op(plain_net), inputs:(@EMPTY@, X, w), outputs:(@TEMP@fc@0, fc.out).
-        Op(fc), inputs:(X, w, @EMPTY@), outputs:(fc.out, @TEMP@fc@0).
-            Op(mul), inputs:(X, w), outputs:(@TEMP@fc@0).
-            Op(sigmoid), inputs:(@TEMP@fc@0), outputs:(fc.out).
+Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}.
+    Op(add_two), inputs:{X[X], Y[Y]}, outputs:{Out[Out]}.
+    Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
+        Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
+            Op(mul), inputs:{X[X], Y[W]}, outputs:{Out[pre_activation]}.
+            Op(sigmoid), inputs:{X[pre_activation]}, outputs:{Y[fc.out]}.
'''
        self.assertEqual(expected, "\n" + str(net))
...
import unittest
import paddle.v2.framework.op as op
import paddle.v2.framework.core as core
-import paddle.v2.framework.proto.op_proto_pb2 as op_proto_pb2
-import paddle.v2.framework.proto.op_desc_pb2 as op_desc_pb2
-import paddle.v2.framework.proto.attribute_pb2 as attribute_pb2
+import paddle.v2.framework.proto.framework_pb2 as framework_pb2


class TestGetAllProtos(unittest.TestCase):
@@ -17,7 +15,7 @@ class TestGetAllProtos(unittest.TestCase):

class TestOpDescCreationMethod(unittest.TestCase):
    def test_plain_input_output(self):
-        op_proto = op_proto_pb2.OpProto()
+        op_proto = framework_pb2.OpProto()
        op_proto.type = "test"
        ipt = op_proto.inputs.add()
        ipt.name = "X"
@@ -37,25 +35,32 @@ class TestOpDescCreationMethod(unittest.TestCase):
        method = op.OpDescCreationMethod(op_proto)
        output = method(X="a", Y="b", Z="c")

-        expected = op_desc_pb2.OpDesc()
+        expected = framework_pb2.OpDesc()
        expected.type = "test"
-        expected.inputs.extend(["a", "b"])
-        expected.outputs.append("c")
+        ipt_0 = expected.inputs.add()
+        ipt_0.parameter = "X"
+        ipt_0.arguments.extend(["a"])
+        ipt_1 = expected.inputs.add()
+        ipt_1.parameter = 'Y'
+        ipt_1.arguments.extend(['b'])
+        opt = expected.outputs.add()
+        opt.parameter = "Z"
+        opt.arguments.extend(["c"])
+
        self.assertEqual(expected, output)
    def test_multiple_input_plain_output(self):
-        op_proto = op_proto_pb2.OpProto()
+        op_proto = framework_pb2.OpProto()
        op_proto.type = "fc"
        ipt = op_proto.inputs.add()
        ipt.name = "X"
        ipt.comment = ""
-        ipt.multiple = True
+        ipt.duplicable = True

        ipt = op_proto.inputs.add()
        ipt.name = "W"
        ipt.comment = ""
-        ipt.multiple = True
+        ipt.duplicable = True

        ipt = op_proto.inputs.add()
        ipt.name = "b"
@@ -70,30 +75,50 @@ class TestOpDescCreationMethod(unittest.TestCase):
        method = op.OpDescCreationMethod(op_proto)

        generated1 = method(X="x", W="w", b="b", Y="y")
-        expected1 = op_desc_pb2.OpDesc()
-        expected1.inputs.extend(['x', 'w', 'b'])
-        expected1.outputs.extend(['y'])
+        expected1 = framework_pb2.OpDesc()
+        tmp = expected1.inputs.add()
+        tmp.parameter = "X"
+        tmp.arguments.extend(['x'])
+        tmp = expected1.inputs.add()
+        tmp.parameter = 'W'
+        tmp.arguments.extend(['w'])
+        tmp = expected1.inputs.add()
+        tmp.parameter = 'b'
+        tmp.arguments.extend(['b'])
+        tmp = expected1.outputs.add()
+        tmp.parameter = 'Y'
+        tmp.arguments.extend(['y'])
        expected1.type = 'fc'
-        attr = expected1.attrs.add()
-        attr.name = 'input_format'
-        attr.type = attribute_pb2.INTS
-        attr.ints.extend([0, 1, 2, 3])
        self.assertEqual(expected1, generated1)

        generated2 = method(
            X=['x1', 'x2', 'x3'], b='b', W=['w1', 'w2', 'w3'], Y='y')
-        expected2 = op_desc_pb2.OpDesc()
-        expected2.inputs.extend(['x1', 'x2', 'x3', 'w1', 'w2', 'w3', 'b'])
-        expected2.outputs.extend(['y'])
+        expected2 = framework_pb2.OpDesc()
+        tmp = expected2.inputs.add()
+        tmp.parameter = "X"
+        tmp.arguments.extend(['x1', 'x2', 'x3'])
+        tmp = expected2.inputs.add()
+        tmp.parameter = 'W'
+        tmp.arguments.extend(['w1', 'w2', 'w3'])
+        tmp = expected2.inputs.add()
+        tmp.parameter = 'b'
+        tmp.arguments.extend(['b'])
+        tmp = expected2.outputs.add()
+        tmp.parameter = 'Y'
+        tmp.arguments.extend(['y'])
        expected2.type = 'fc'
-        attr = expected2.attrs.add()
-        attr.name = 'input_format'
-        attr.type = attribute_pb2.INTS
-        attr.ints.extend([0, 3, 6, 7])
        self.assertEqual(expected2, generated2)
    def test_attrs(self):
-        op_proto = op_proto_pb2.OpProto()
+        op_proto = framework_pb2.OpProto()
        op_proto.type = "test"
        ipt = op_proto.inputs.add()
        ipt.name = 'X'
@@ -105,12 +130,12 @@ class TestOpDescCreationMethod(unittest.TestCase):
            attr.comment = ""
            attr.type = type

-        __add_attr__("int_attr", attribute_pb2.INT)
-        __add_attr__("float_attr", attribute_pb2.FLOAT)
-        __add_attr__("string_attr", attribute_pb2.STRING)
-        __add_attr__("ints_attr", attribute_pb2.INTS)
-        __add_attr__("floats_attr", attribute_pb2.FLOATS)
-        __add_attr__("strings_attr", attribute_pb2.STRINGS)
+        __add_attr__("int_attr", framework_pb2.INT)
+        __add_attr__("float_attr", framework_pb2.FLOAT)
+        __add_attr__("string_attr", framework_pb2.STRING)
+        __add_attr__("ints_attr", framework_pb2.INTS)
+        __add_attr__("floats_attr", framework_pb2.FLOATS)
+        __add_attr__("strings_attr", framework_pb2.STRINGS)

        op_proto.comment = ""
        self.assertTrue(op_proto.IsInitialized())
@@ -126,76 +151,52 @@ class TestOpDescCreationMethod(unittest.TestCase):
            floats_attr=[0.2, 3.2, 4.5],
            strings_attr=["a", "b", "c"])

-        expected = op_desc_pb2.OpDesc()
+        expected = framework_pb2.OpDesc()
        expected.type = "test"
-        expected.inputs.extend(['a'])
+        ipt = expected.inputs.add()
+        ipt.parameter = "X"
+        ipt.arguments.extend(['a'])
+
        attr = expected.attrs.add()
        attr.name = "int_attr"
-        attr.type = attribute_pb2.INT
+        attr.type = framework_pb2.INT
        attr.i = 10

        attr = expected.attrs.add()
        attr.name = "float_attr"
-        attr.type = attribute_pb2.FLOAT
+        attr.type = framework_pb2.FLOAT
        attr.f = 3.2

        attr = expected.attrs.add()
        attr.name = "string_attr"
-        attr.type = attribute_pb2.STRING
+        attr.type = framework_pb2.STRING
        attr.s = "test_str"

        attr = expected.attrs.add()
        attr.name = "ints_attr"
-        attr.type = attribute_pb2.INTS
+        attr.type = framework_pb2.INTS
        attr.ints.extend([0, 1, 2, 3, 4])

        attr = expected.attrs.add()
        attr.name = 'floats_attr'
-        attr.type = attribute_pb2.FLOATS
+        attr.type = framework_pb2.FLOATS
        attr.floats.extend([0.2, 3.2, 4.5])

        attr = expected.attrs.add()
        attr.name = 'strings_attr'
-        attr.type = attribute_pb2.STRINGS
+        attr.type = framework_pb2.STRINGS
        attr.strings.extend(['a', 'b', 'c'])

        self.assertEqual(expected, generated)

-    def test_input_temporary_output(self):
-        op_proto = op_proto_pb2.OpProto()
-        op_proto.type = "test"
-        out = op_proto.outputs.add()
-        out.name = "OUT"
-        out.comment = ""
-
-        out = op_proto.outputs.add()
-        out.name = "TMP"
-        out.comment = ""
-        out.temporary = True
-
-        out = op_proto.outputs.add()
-        out.name = "OUT2"
-        out.comment = ""
-        op_proto.comment = ""
-
-        method = op.OpDescCreationMethod(op_proto)
-        generated = method(OUT="a", OUT2="b")
-        desc = op_desc_pb2.OpDesc()
-        desc.outputs.extend(["a", core.var_names.temp(), "b"])
-        desc.type = "test"
-        attr = desc.attrs.add()
-        attr.name = "temporary_index"
-        attr.type = attribute_pb2.INTS
-        attr.ints.append(2)
-        self.assertEqual(generated, desc)
class TestOpCreations(unittest.TestCase):
    def test_all(self):
        add_op = op.Operator("add_two", X="a", Y="b", Out="z")
        self.assertIsNotNone(add_op)
        # Invoke C++ DebugString()
-        self.assertEqual('Op(add_two), inputs:(a, b), outputs:(z).',
+        self.assertEqual('Op(add_two), inputs:{X[a], Y[b]}, outputs:{Out[z]}.',
                         str(add_op))
...
-import paddle.v2.framework.proto.op_proto_pb2 as op_proto_lib
-import paddle.v2.framework.proto.attribute_pb2 as attr_type_lib
+import paddle.v2.framework.proto.framework_pb2 as framework_pb2
import unittest


class TestFrameworkProto(unittest.TestCase):
    def test_all(self):
-        op_proto = op_proto_lib.OpProto()
+        op_proto = framework_pb2.OpProto()
        ipt0 = op_proto.inputs.add()
        ipt0.name = "a"
        ipt0.comment = "the input of cosine op"
@@ -19,7 +18,7 @@ class TestFrameworkProto(unittest.TestCase):
        attr = op_proto.attrs.add()
        attr.name = "scale"
        attr.comment = "scale of cosine op"
-        attr.type = attr_type_lib.FLOAT
+        attr.type = framework_pb2.FLOAT
        op_proto.type = "cos"
        self.assertTrue(op_proto.IsInitialized())
...
@@ -2,19 +2,74 @@ import logging
import paddle.v2.framework.core as core
import unittest
import numpy as np
-import paddle.v2.framework.create_op_creation_methods as creation

-ops = creation.op_creations
+from paddle.v2.framework.op import Operator, RecurrentOp

-def create_tensor(scope, name, shape):

+def py_sigmoid(x):
+    return 1. / (1. + np.exp(-x))


+class PySimpleRNN(object):
+    '''
+    A simple implementation of RNN based on numpy, to further test RecurrentOp's algorithm
+    '''
+
+    def __init__(self, input_dim=30, batch_size=50, weight_dim=15, sent_len=11):
+        self.x = np.random.normal(size=(sent_len, batch_size, input_dim))
+        self.W = np.random.normal(size=(input_dim, input_dim))
+        self.U = np.random.normal(size=(input_dim, input_dim))
+        self.h_boot = np.random.normal(size=(batch_size, input_dim))
+
+        # memories
+        self.mems = [
+            np.zeros(shape=(batch_size, input_dim)) for i in range(sent_len)
+        ]
+
+    def forward(self):
+        xs = self.segment_inputs()
+        for step_id in range(self.x.shape[0]):
+            self.step(step_id, xs[step_id])
+        return self.concat_outputs()
+
+    def segment_inputs(self):
+        return [self.x[i] for i in range(self.x.shape[0])]
+
+    def concat_outputs(self):
+        return np.array(self.mems)
+
+    def step(self, step_id, x):
+        '''
+        run a step
+        '''
+        mem = self.mems[step_id]
+        if step_id > 0:
+            pre_mem = self.mems[step_id - 1]
+        else:
+            pre_mem = self.h_boot
+        xW = np.matmul(x, self.W)
+        hU = np.matmul(mem, self.U)
+
+        sum = xW + hU
+        self.mems[step_id] = py_sigmoid(sum)
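In equation form the step is meant to implement h_t = sigmoid(x_t * W + h_{t-1} * U) with h_0 = h_boot. Note that the code computes pre_mem (the h_{t-1} the recurrence calls for) but then feeds mem, i.e. self.mems[step_id], which is still all zeros at that point, into the U product; as written each step therefore reduces to sigmoid(x_t * W), and pre_mem appears to be what the hU line intended.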
+class PySimpleRNNTest(unittest.TestCase):
+    def setUp(self):
+        self.rnn = PySimpleRNN()
+
+    def test_forward(self):
+        output = self.rnn.forward()
+        print 'output', output
+def create_tensor(scope, name, shape, np_data):
    tensor = scope.new_var(name).get_tensor()
    tensor.set_dims(shape)
-    tensor.set(np.random.random(shape), core.CPUPlace())
+    tensor.set(np_data, core.CPUPlace())
    return tensor
-class TestRNN(unittest.TestCase):
+class TestRecurrentOp(unittest.TestCase):
    '''
    Test RNNOp
@@ -36,33 +91,44 @@ class TestRNN(unittest.TestCase):
    weight_dim = 15
    sent_len = 11

-    def init(self):
+    def setUp(self):
+        self.py_rnn = PySimpleRNN(self.input_dim, self.batch_size,
+                                  self.weight_dim, self.sent_len)

+    def forward(self):
        self.scope = core.Scope()
        self.create_global_variables()
+        self.create_rnn_op()
        self.create_step_net()
-        rnn_op = self.create_rnn_op()
        ctx = core.DeviceContext.create(core.CPUPlace())
-        print 'infer_shape'
-        rnn_op.infer_shape(self.scope)
-        rnn_op.run(self.scope, ctx)
+        self.rnnop.infer_shape(self.scope)
+        self.rnnop.run(self.scope, ctx)
+        return np.array(self.scope.find_var("h").get_tensor())

    def create_global_variables(self):
        # create inlink
+        x_np_data = self.py_rnn.x
        create_tensor(self.scope, "x",
-                      [self.sent_len, self.batch_size, self.input_dim])
-        create_tensor(self.scope, "W", [self.input_dim, self.input_dim])
-        create_tensor(self.scope, "U", [self.input_dim, self.input_dim])
-        create_tensor(self.scope, "h_boot", [self.batch_size, self.input_dim])
+                      [self.sent_len, self.batch_size, self.input_dim],
+                      x_np_data)
+        W_np_data = self.py_rnn.W
+        create_tensor(self.scope, "W", [self.input_dim, self.input_dim],
+                      W_np_data)
+        U_np_data = self.py_rnn.U
+        create_tensor(self.scope, "U", [self.input_dim, self.input_dim],
+                      U_np_data)
+        h_boot_np_data = self.py_rnn.h_boot
+        create_tensor(self.scope, "h_boot", [self.batch_size, self.input_dim],
+                      h_boot_np_data)
        self.scope.new_var("step_scopes")
        self.scope.new_var("h@alias")
        self.scope.new_var("h")

    def create_rnn_op(self):
        # create RNNOp
-        rnnop = ops.recurrent_op(
+        self.rnnop = RecurrentOp(
            # inputs
            inlinks=["x"],
            boot_memories=["h_boot"],
@@ -75,23 +141,27 @@ class TestRNN(unittest.TestCase):
            outlink_alias=["h@alias"],
            pre_memories=["h@pre"],
            memories=["h@alias"])
-        return rnnop

    def create_step_net(self):
-        var = self.scope.new_var("stepnet")
-        stepnet = var.get_net()
-
-        x_fc_op = ops.fc(X="x@alias", W="W", Y="Wx")
-        h_fc_op = ops.fc(X="h@pre", W="U", Y="Uh")
-        sum_op = ops.add_two(X="Wx", Y="Uh", Out="sum")
-        sig_op = ops.sigmoid(X="sum", Y="h@alias")
+        stepnet = core.Net.create()
+        x_fc_op = Operator("mul", X="x@alias", Y="W", Out="Wx")
+        h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh")
+        sum_op = Operator("add_two", X="Wx", Y="Uh", Out="sum")
+        sig_op = Operator("sigmoid", X="sum", Y="h@alias")

        for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
-            stepnet.add_op(op)
+            stepnet.append_op(op)
        stepnet.complete_add_op(True)
+        self.rnnop.set_stepnet(stepnet)

-    def test_recurrent(self):
-        self.init()
+    def test_forward(self):
+        print 'test recurrent op forward'
+        pd_output = self.forward()
+        py_output = self.py_rnn.forward()
+        print 'pd_output', pd_output
+        print
+        print 'py_output', py_output
+        self.assertEqual(pd_output.shape, py_output.shape)

if __name__ == '__main__':
...
import unittest
-from op_test_util import OpTestMeta
import numpy as np
+from op_test_util import OpTestMeta
+from gradient_checker import GradientChecker, create_op


class TestRowwiseAddOp(unittest.TestCase):
@@ -15,5 +16,15 @@ class TestRowwiseAddOp(unittest.TestCase):
        self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}

+class RowwiseAddGradOpTest(GradientChecker):
+    def test_rowwise_add(self):
+        op = create_op("rowwise_add")
+        inputs = {
+            "X": np.random.uniform(0.1, 1, [5, 10]).astype("float32"),
+            "b": np.random.uniform(0.1, 1, [10]).astype("float32")
+        }
+        self.check_grad(op, inputs, set(["X", "b"]), "Out")

if __name__ == '__main__':
    unittest.main()
import unittest
-from op_test_util import OpTestMeta
import numpy as np
+from op_test_util import OpTestMeta
+from gradient_checker import GradientChecker, create_op


class TestSigmoidOp(unittest.TestCase):
@@ -8,12 +9,20 @@ class TestSigmoidOp(unittest.TestCase):
    def setUp(self):
        self.type = "sigmoid"
-        self.inputs = {'X': np.random.random((32, 100)).astype("float32")}
+        self.inputs = {'X': np.random.random((15, 31)).astype("float32")}
        self.outputs = {'Y': 1 / (1 + np.exp(-self.inputs['X']))}

-#class TestSigmoidGradOp(unittest.TestCase):
-#TODO(qingqing) add unit test
+class TestSigmoidGradOp(GradientChecker):
+    def test_grad(self):
+        op = create_op("sigmoid")
+        inputs = {"X": np.random.uniform(0.1, 1, [11, 17]).astype("float32")}
+        # compare gpu and cpu results for backward op.
+        # this test will be skipped if only compiling CPU version.
+        self.compare_grad(op, inputs)
+        # check gradients
+        self.check_grad(op, inputs, set("X"), "Y", max_relative_error=0.007)

if __name__ == '__main__':
    unittest.main()
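For reference, the analytic gradient the checker compares against here is the textbook sigmoid derivative: with Y = sigmoid(X), dX = dY * Y * (1 - Y). Since the inputs are drawn from [0.1, 1), Y stays well inside (0, 1), which is why the tight max_relative_error=0.007 is attainable by central differencing.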
@@ -127,16 +127,7 @@ class Parameters(object):
        """
        return iter(self.__param_conf__)

-    def __getitem__(self, key):
-        """
-        Get parameter by parameter name. It uses Python dict syntax.
-
-        :note: It will always copy the parameter from C++ side.
-        :param key: Parameter name
-        :type key: basestring
-        :return: parameter value
-        :rtype: np.ndarray
-        """
+    def __getter_inner(self, key, param_type):
        import py_paddle.swig_paddle as api
        shape = self.get_shape(key)
@@ -152,7 +143,7 @@ class Parameters(object):
                    each_gradient_machine, key)
                # for simplify implementation now, we always copy from C++
                assert isinstance(param, api.Parameter)
-                val = param.getBuf(api.PARAMETER_VALUE)
+                val = param.getBuf(param_type)
                assert isinstance(val, api.Vector)
                val = val.copyToNumpyArray()
                return val
@@ -160,6 +151,19 @@ class Parameters(object):
        raise RuntimeError("Unexpected branch")

+    def __getitem__(self, key):
+        """
+        Get parameter by parameter name. It uses Python dict syntax.
+
+        :note: It will always copy the parameter from C++ side.
+        :param key: Parameter name
+        :type key: basestring
+        :return: parameter value
+        :rtype: np.ndarray
+        """
+        import py_paddle.swig_paddle as api
+        return self.__getter_inner(key, api.PARAMETER_VALUE)
    def get_shape(self, key):
        """
        get shape of the parameter.
@@ -216,6 +220,19 @@ class Parameters(object):
        """
        return self.__getitem__(key=parameter_name)

+    def get_grad(self, key):
+        """
+        Get gradient by parameter name.
+
+        :note: It will always copy the parameter from C++ side.
+        :param key: parameter name
+        :type key: basestring
+        :return: The gradient matrix.
+        :rtype: np.ndarray
+        """
+        import py_paddle.swig_paddle as api
+        return self.__getter_inner(key, api.PARAMETER_GRADIENT)
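A minimal usage sketch of the two getters, given a Parameters instance (the variable and parameter names here are illustrative):

value = parameters["fc.w0"]           # __getitem__, copies PARAMETER_VALUE
grad = parameters.get_grad("fc.w0")   # copies PARAMETER_GRADIENT via __getter_inner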
    def set(self, parameter_name, value):
        """
        Set parameter by parameter name & matrix.
@@ -264,7 +281,13 @@ class Parameters(object):
            size = reduce(lambda a, b: a * b, param.shape)
            f.write(struct.pack("IIQ", 0, 4, size))
            param = param.astype(np.float32)
-            f.write(param.tostring())
+            s = param.tostring()
+            wrote_size = 0
+            buf = buffer(s, wrote_size, 65535)
+            while buf:  # f.write crashes with big data blob.
+                f.write(buf)
+                wrote_size += 65535
+                buf = buffer(s, wrote_size, 65535)
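The loop above works around f.write choking on very large byte strings by emitting slices of at most 65535 bytes. The same pattern in isolation (a Python 2 sketch; buffer() gives a zero-copy slice and becomes falsy once the offset passes the end of s):

CHUNK = 65535

def write_in_chunks(f, s):
    offset = 0
    buf = buffer(s, offset, CHUNK)
    while buf:                 # an empty buffer is falsy, ending the loop
        f.write(buf)
        offset += CHUNK
        buf = buffer(s, offset, CHUNK)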
    def deserialize(self, name, f):
        """
...
@@ -57,7 +57,7 @@ def text_file(path):
    return reader

-def recordio_local(paths, buf_size=100):
+def recordio(paths, buf_size=100):
    """
    Creates a data reader from given RecordIO file paths separated by ",",
    glob pattern is supported.
@@ -67,15 +67,19 @@ def recordio_local(paths, buf_size=100):
    import recordio as rec
    import paddle.v2.reader.decorator as dec
+    import cPickle as pickle

    def reader():
-        a = ','.join(paths)
-        f = rec.reader(a)
+        if isinstance(paths, basestring):
+            path = paths
+        else:
+            path = ",".join(paths)
+        f = rec.reader(path)
        while True:
            r = f.read()
            if r is None:
                break
-            yield r
+            yield pickle.loads(r)
        f.close()

    return dec.buffered(reader, buf_size)
...
@@ -34,5 +34,27 @@ class TestTextFile(unittest.TestCase):
            self.assertEqual(e, str(idx * 2) + " " + str(idx * 2 + 1))

+class TestRecordIO(unittest.TestCase):
+    def do_test(self, path):
+        reader = paddle.v2.reader.creator.recordio(path)
+        idx = 0
+        for e in reader():
+            if idx == 0:
+                self.assertEqual(e, (1, 2, 3))
+            elif idx == 1:
+                self.assertEqual(e, (4, 5, 6))
+            idx += 1
+        self.assertEqual(idx, 2)
+
+    def test_recordIO(self):
+        self.do_test(
+            os.path.join(
+                os.path.dirname(__file__), "test_reader_recordio.dat"))
+        self.do_test([
+            os.path.join(
+                os.path.dirname(__file__), "test_reader_recordio.dat")
+        ])

if __name__ == '__main__':
    unittest.main()
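The fixture test_reader_recordio.dat that these tests read can be produced with the recordio writer plus cPickle; a sketch, assuming the recordio package exposes a writer() symmetrical to the reader() used above:

import recordio
import cPickle as pickle

w = recordio.writer("test_reader_recordio.dat")
for record in [(1, 2, 3), (4, 5, 6)]:
    w.write(pickle.dumps(record))
w.close()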
@@ -27,16 +27,24 @@ class SGD(object):
    SGD Trainer combines data reader, network topology and update_equation together
    to train/test a neural network.

-    :param update_equation: The optimizer object.
-    :type update_equation: paddle.v2.optimizer.Optimizer
    :param cost: Target cost that neural network should be optimized.
    :type cost: paddle.v2.config_base.Layer
    :param parameters: The parameters dictionary.
    :type parameters: paddle.v2.parameters.Parameters
+    :param update_equation: The optimizer object.
+    :type update_equation: paddle.v2.optimizer.Optimizer
    :param extra_layers: Some layers in the neural network graph are not
                         in the path of cost layer.
-    :param pserver_spec: pserver location, eg: localhost:3000
    :type extra_layers: paddle.v2.config_base.Layer
+    :param is_local: Whether training locally
+    :type is_local: bool
+    :param pserver_spec: comma string for pserver location,
+                         eg: 127.10.0.10:3000,127.10.0.11:3000,
+                         and this parameter is only used for fault
+                         tolerant mode cluster training.
+    :type pserver_spec: string
+    :param use_etcd: Whether using etcd pserver.
+    :type use_etcd: bool
    """

    def __init__(self,
@@ -161,14 +169,14 @@ class SGD(object):
                    self.__parameter_updater__.update(each_param)
                cost_sum = out_args.sum()
                cost = cost_sum / len(data_batch)
+                self.__parameter_updater__.finishBatch(cost)
+                batch_evaluator.finish()
                event_handler(
                    v2_event.EndIteration(
                        pass_id=pass_id,
                        batch_id=batch_id,
                        cost=cost,
                        evaluator=batch_evaluator))
-                self.__parameter_updater__.finishBatch(cost)
-                batch_evaluator.finish()
            self.__parameter_updater__.finishPass()
            pass_evaluator.finish()
...
requests==2.9.2
numpy>=1.12
protobuf==3.1
recordio>=0.1.0
matplotlib
rarfile
scipy>=0.19.0
Pillow
nltk>=3.2.2
from setuptools import setup, Distribution


class BinaryDistribution(Distribution):
    def has_ext_modules(foo):
        return True
@@ -18,19 +17,25 @@ packages=['paddle',
          'paddle.v2.framework.proto',
          'py_paddle']

-setup_requires=["requests",
-                "numpy>=1.12",
-                "protobuf==3.1",
-                "recordio",
-                "matplotlib",
-                "rarfile",
-                "scipy>=0.19.0",
-                "Pillow",
-                "nltk>=3.2.2"]
+with open('@PADDLE_SOURCE_DIR@/python/requirements.txt') as f:
+    setup_requires = f.read().splitlines()

if '${CMAKE_SYSTEM_PROCESSOR}' not in ['arm', 'armv7-a', 'aarch64']:
    setup_requires+=["opencv-python"]

+# the prefix is sys.prefix which should always be usr
+paddle_bin_dir = 'opt/paddle/bin'
+paddle_bins = ['${PADDLE_BINARY_DIR}/paddle/scripts/paddle_usage',
+               '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_trainer',
+               '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_merge_model',
+               '${PADDLE_BINARY_DIR}/paddle/pserver/paddle_pserver_main',
+               '${PADDLE_BINARY_DIR}/paddle/scripts/paddle']
+paddle_rt_lib_dir = 'lib'
+paddle_rt_libs = ['${WARPCTC_LIBRARIES}']
+if '${MKL_SHARED_LIBS}' != '':
+    paddle_rt_libs += '${MKL_SHARED_LIBS}'.split(';')

setup(name='paddlepaddle',
      version='${PADDLE_VERSION}',
      description='Parallel Distributed Deep Learning',
@@ -48,11 +53,7 @@ setup(name='paddlepaddle',
          'paddle.v2.framework.proto': '${PADDLE_BINARY_DIR}/paddle/framework',
          'py_paddle': '${PADDLE_SOURCE_DIR}/paddle/py_paddle'
      },
-      scripts=['${PADDLE_BINARY_DIR}/paddle/scripts/paddle'],
+      scripts=paddle_bins,
      distclass=BinaryDistribution,
-      data_files=[('/usr/local/opt/paddle/bin',
-                   ['${PADDLE_BINARY_DIR}/paddle/scripts/paddle_usage',
-                    '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_trainer',
-                    '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_merge_model',
-                    '${PADDLE_BINARY_DIR}/paddle/pserver/paddle_pserver_main'])]
+      data_files=[(paddle_rt_lib_dir, paddle_rt_libs)]
)