Commit bb58b63b, authored by caoying03

Merge branch 'develop' into softmax_with_cross_entropy_op

...@@ -66,7 +66,7 @@ endif()
if(ANDROID OR IOS)
  if(ANDROID)
    if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "16")
      message(FATAL_ERROR "Unsupported standalone toolchains with Android API level lower than 16")
    elseif(${CMAKE_SYSTEM_VERSION} VERSION_LESS "21")
      # TODO: support glog for Android api 16 ~ 19 in the future
......
...@@ -106,22 +106,22 @@ function(merge_static_libs TARGET_NAME)
  endforeach()
  list(REMOVE_DUPLICATES libs_deps)

  # To produce a library we need at least one source file.
  # It is created by add_custom_command below and will
  # also help to track dependencies.
  set(target_SRCS ${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}_dummy.c)

  if(APPLE) # Use OSX's libtool to merge archives
    # Make the generated dummy source file depend on all static input
    # libs. If an input lib changes, the source file is touched,
    # which causes the desired effect (relink).
    add_custom_command(OUTPUT ${target_SRCS}
      COMMAND ${CMAKE_COMMAND} -E touch ${target_SRCS}
      DEPENDS ${libs})

    # Generate dummy static lib
    file(WRITE ${target_SRCS} "const char *dummy = \"${target_SRCS}\";")
    add_library(${TARGET_NAME} STATIC ${target_SRCS})
    target_link_libraries(${TARGET_NAME} ${libs_deps})

    foreach(lib ${libs})
...@@ -130,11 +130,14 @@ function(merge_static_libs TARGET_NAME)
    endforeach()
    add_custom_command(TARGET ${TARGET_NAME} POST_BUILD
      COMMAND rm "${CMAKE_CURRENT_BINARY_DIR}/lib${TARGET_NAME}.a"
      COMMAND /usr/bin/libtool -static -o "${CMAKE_CURRENT_BINARY_DIR}/lib${TARGET_NAME}.a" ${libfiles}
      )
  else() # general UNIX: use "ar" to extract objects and re-add to a common lib
    set(target_DIR ${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}.dir)

    foreach(lib ${libs})
      set(objlistfile ${target_DIR}/${lib}.objlist) # list of objects in the input library
      set(objdir ${target_DIR}/${lib}.objdir)

      add_custom_command(OUTPUT ${objdir}
        COMMAND ${CMAKE_COMMAND} -E make_directory ${objdir}
...@@ -142,31 +145,32 @@ function(merge_static_libs TARGET_NAME)
      add_custom_command(OUTPUT ${objlistfile}
        COMMAND ${CMAKE_AR} -x "$<TARGET_FILE:${lib}>"
        COMMAND ${CMAKE_AR} -t "$<TARGET_FILE:${lib}>" > ${objlistfile}
        DEPENDS ${lib} ${objdir}
        WORKING_DIRECTORY ${objdir})
      list(APPEND target_OBJS "${objlistfile}")
    endforeach()

    # Make the generated dummy source file depend on all static input
    # libs. If an input lib changes, the source file is touched,
    # which causes the desired effect (relink).
    add_custom_command(OUTPUT ${target_SRCS}
      COMMAND ${CMAKE_COMMAND} -E touch ${target_SRCS}
      DEPENDS ${libs} ${target_OBJS})

    # Generate dummy static lib
    file(WRITE ${target_SRCS} "const char *dummy = \"${target_SRCS}\";")
    add_library(${TARGET_NAME} STATIC ${target_SRCS})
    target_link_libraries(${TARGET_NAME} ${libs_deps})

    # Get the file name of the generated library
    set(target_LIBNAME "$<TARGET_FILE:${TARGET_NAME}>")

    add_custom_command(TARGET ${TARGET_NAME} POST_BUILD
      COMMAND ${CMAKE_AR} crs ${target_LIBNAME} `find ${target_DIR} -name '*.o'`
      COMMAND ${CMAKE_RANLIB} ${target_LIBNAME}
      WORKING_DIRECTORY ${target_DIR})
  endif()
endfunction(merge_static_libs)
...@@ -196,7 +200,7 @@ function(cc_library TARGET_NAME)
    add_style_check_target(${TARGET_NAME} ${cc_library_SRCS} ${cc_library_HEADERS})
  else(cc_library_SRCS)
    if(cc_library_DEPS)
      merge_static_libs(${TARGET_NAME} ${cc_library_DEPS})
    else()
      message(FATAL_ERROR "Please specify source file or library in cc_library.")
......
...@@ -25,7 +25,7 @@ function(target_circle_link_libraries TARGET_NAME)
    endif()
  endforeach()
  if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
    if(NOT IOS_ENABLE_BITCODE)
      list(APPEND LIBS "-undefined dynamic_lookup")
    endif()
  endif()
......
...@@ -3,7 +3,7 @@
## Ingredients

As our design principle is to start from the essence: how could we
allow users to express and solve their problems as neural networks.
Some essential concepts that our API has to provide include:

1. A *topology* is an expression of *layers*.
...@@ -233,7 +233,7 @@ paddle.dist_train(model,
                  num_parameter_servers=15)
```

The pseudo code of `paddle.dist_train` is as follows:

```python
def dist_train(topology, parameters, trainer, reader, ...):
......
## Auto Gradient Checker Design

## Background:
- Generally, it is easy to check whether the forward computation of an Operator is correct or not. However, backpropagation is a notoriously difficult algorithm to debug and get right:
  1. You should get the right backpropagation formula according to the forward computation.
  2. You should implement it correctly in C++.
  3. It is difficult to prepare test data.

- Auto gradient checking computes a numerical gradient using only the forward Operator and uses it as a reference for the backward Operator's result. It has several advantages:
  1. The numerical gradient checker only needs the forward operator.
  2. The user only needs to prepare input data for the forward Operator.

## Mathematical Theory
The following two documents from Stanford give a detailed explanation of how to compute the numerical gradient and why it is useful.

- [Gradient checking and advanced optimization(en)](http://deeplearning.stanford.edu/wiki/index.php/Gradient_checking_and_advanced_optimization)
- [Gradient checking and advanced optimization(cn)](http://ufldl.stanford.edu/wiki/index.php/%E6%A2%AF%E5%BA%A6%E6%A3%80%E9%AA%8C%E4%B8%8E%E9%AB%98%E7%BA%A7%E4%BC%98%E5%8C%96)
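
As a quick illustration of the central difference formula those notes derive, here is a minimal, self-contained sketch (not part of PaddlePaddle itself):

```python
import numpy

def central_difference(f, x, delta=1e-5):
    # Approximate f'(x) by (f(x + delta) - f(x - delta)) / (2 * delta).
    return (f(x + delta) - f(x - delta)) / (2 * delta)

# For f(x) = x**2, the analytic gradient at x = 3.0 is 6.0.
approx = central_difference(lambda x: x * x, 3.0)
assert numpy.isclose(approx, 6.0)
```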
...@@ -20,7 +20,7 @@ The following two document from stanford has a detailed explanation of how to ge
## Numeric Gradient Implementation
### Python Interface
```python
def get_numerical_gradient(op,
                           input_values,
                           output_name,
                           input_to_check,
...@@ -30,13 +30,13 @@ def get_numeric_gradient(op,
    Get the numerical gradient of an operator's input.

    :param op: C++ operator instance, could be a network.
    :param input_values: The input variables. Should be a dictionary whose key is
      the variable name and whose value is a numpy array.
    :param output_name: The final output variable name.
    :param input_to_check: The input variable with respect to which to compute the gradient.
    :param delta: The perturbation value for the numerical gradient method. The
      smaller delta is, the more accurate the result will be. But if delta is
      too small, it will suffer from numerical stability problems.
    :param local_scope: The local scope used for get_numerical_gradient.
    :return: The gradient array in numpy format.
    """
...@@ -45,28 +45,28 @@ def get_numeric_gradient(op,
### Explanation:

- Why do we need `output_name`?
  - An Operator may have multiple outputs, and an independent gradient can be computed from each output. So the caller should specify the name of one output variable to calculate against.

- Why do we need `input_to_check`?
  - One operator may have multiple inputs. A Gradient Op can calculate the gradients of all these inputs at the same time, but the numerical gradient needs to calculate them one by one. So `get_numerical_gradient` is designed to calculate the gradient of one input. If you need to compute the gradients of multiple inputs, you can call `get_numerical_gradient` multiple times, as the sketch below shows.
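
Since the function handles one input at a time, checking an operator with several inputs is just a loop over input names. A hypothetical sketch, where `op`, `x_numpy`, and `y_numpy` stand in for a C++ operator instance and two numpy arrays:

```python
# Hypothetical sketch: compute the numerical gradient of each input in turn.
input_values = {"X": x_numpy, "Y": y_numpy}
numerical_grads = {}
for name in ["X", "Y"]:
    numerical_grads[name] = get_numerical_gradient(
        op, input_values, output_name="Out", input_to_check=name)
```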
### Core Algorithm Implementation

```python
# we only compute the gradient of one element at a time.
# we use a for loop to compute the gradient of each element.
for i in xrange(tensor_size):
    # get one input element by its index i.
    origin = tensor_to_check.get_float_element(i)

    # add delta to it, run op and then get the new value of the result tensor.
    x_pos = origin + delta
    tensor_to_check.set_float_element(i, x_pos)
    y_pos = get_output()

    # subtract delta from this element, run op and get the new value of the result tensor.
    x_neg = origin - delta
    tensor_to_check.set_float_element(i, x_neg)
    y_neg = get_output()
...@@ -85,15 +85,15 @@ def get_numeric_gradient(op,
Each Operator Kernel has three kinds of Gradient:

1. Numerical gradient
2. CPU kernel gradient
3. GPU kernel gradient (if supported)

The numerical gradient only relies on the forward Operator, so we use the numerical gradient as the reference value. The gradient checking is performed in the following three steps:

1. Calculate the numerical gradient.
2. Calculate the CPU kernel gradient with the backward Operator and compare it with the numerical gradient.
3. Calculate the GPU kernel gradient with the backward Operator and compare it with the numerical gradient (if supported).
#### Python Interface

...@@ -110,8 +110,8 @@ Numeric Gradient Only relies on forward Operator. So we use Numeric Gradient as
    :param forward_op: used to create backward_op
    :param input_vars: numpy value of input variable. The following
      computation will use these variables.
    :param inputs_to_check: the input variables with respect to which to compute the gradients.
    :param output_name: The final output variable name.
    :param max_relative_error: The relative tolerance parameter.
    :param no_grad_set: used when creating backward ops
    :param only_cpu: only compute and check the gradient on the CPU kernel.
...@@ -120,24 +120,24 @@ Numeric Gradient Only relies on forward Operator. So we use Numeric Gradient as
```
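
A hypothetical invocation of this interface, assuming it is exposed as a helper named `check_grad` taking exactly the parameters documented above (the helper name, op, and variable names are placeholders, not the actual test API):

```python
# Hypothetical usage sketch; only the parameter list follows the docstring.
check_grad(forward_op,
           input_vars={"X": x_numpy, "Y": y_numpy},
           inputs_to_check=["X", "Y"],
           output_name="Out",
           max_relative_error=0.005,
           no_grad_set=set(),
           only_cpu=False)
```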
### How to check if two numpy arrays are close enough?
If `abs_numerical_grad` is nearly zero, we use the absolute error for `numerical_grad`, not the relative error.

```python
numerical_grad = ...
operator_grad = numpy.array(scope.find_var(grad_var_name(name)).get_tensor())

abs_numerical_grad = numpy.abs(numerical_grad)
# if abs_numerical_grad is nearly zero, then use abs error for numerical_grad,
# not relative error.
abs_numerical_grad[abs_numerical_grad < 1e-3] = 1
diff_mat = numpy.abs(numerical_grad - operator_grad) / abs_numerical_grad
max_diff = numpy.max(diff_mat)
```
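
Putting the pieces together, the comparison can be wrapped in a small helper. A minimal sketch under the same assumptions as the snippet above:

```python
import numpy

def assert_grads_close(numerical_grad, operator_grad, max_relative_error):
    # Where the numerical gradient is nearly zero, fall back to absolute
    # error by clamping the denominator to 1.
    abs_numerical_grad = numpy.abs(numerical_grad)
    abs_numerical_grad[abs_numerical_grad < 1e-3] = 1
    diff_mat = numpy.abs(numerical_grad - operator_grad) / abs_numerical_grad
    max_diff = numpy.max(diff_mat)
    assert max_diff <= max_relative_error, \
        "gradient check failed, max diff: %f" % max_diff
```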
#### Notes:
The input data for the auto gradient checker should be reasonable, to avoid numerical stability problems.

#### Refs:
......
...@@ -53,12 +53,12 @@ Let's explain using an example. Suppose that we are going to compose the FC usi

```python
def operator.mul(X1, X2):
    O = Var()
    paddle.cpp.create_operator("mul", input={X1, X2}, output=O)
    return O

def operator.add(X1, X2):
    O = Var()
    paddle.cpp.create_operator("add", input={X1, X2}, output=O)
    return O
```
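
With these two wrappers, the FC composition described above reduces to chaining calls. A sketch in the same pseudocode style, assuming weight `W` and bias `b` variables already exist:

```python
def operator.fc(X, W, b):
    # fc(X) = X * W + b, composed purely from the mul and add wrappers above.
    return operator.add(operator.mul(X, W), b)
```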
......
...@@ -56,7 +56,7 @@ For each parameter, like W and b created by `layer.fc`, marked as double circles
## Block and Graph

The words block and graph are interchangeable in the design of PaddlePaddle. A [Block](https://github.com/PaddlePaddle/Paddle/pull/3708) is a metaphor of the code and local variables in a pair of curly braces in programming languages, where operators are like statements or instructions. A graph of operators and variables is a representation of the block.

A Block keeps operators in an array `BlockDesc::ops`

...@@ -67,4 +67,4 @@ message BlockDesc {
}
```

in the order that they appear in user programs, like the Python program at the beginning of this article. We can imagine that in `ops`, we have some forward operators, followed by some gradient operators, and then some optimization operators.
# Design Doc: The C++ Class `Parameters`

`Parameters` is a concept we designed in the PaddlePaddle V2 API. `Parameters` is a container of parameters, which makes PaddlePaddle capable of sharing parameters between topologies. We described usages of `Parameter` in [api.md](./api.md).

We used Python to implement Parameters when designing the V2 API before. There are several defects in the current implementation:
* We just use `memcpy` to share Parameters between topologies, but this is very inefficient.
* We did not support sharing Parameters while training. We just trigger `memcpy` when training starts.

It is necessary that we implement Parameters on the C++ side. However, it could result in a code refactoring of PaddlePaddle, because PaddlePaddle was designed for training only one topology before, i.e., each GradientMachine contains its Parameter as a data member. In the current PaddlePaddle implementation, there are three concepts associated with `Parameters`:

1. `paddle::Parameter`. A `Parameters` is a container for `paddle::Parameter`.
It is evident that we should use `paddle::Parameter` when developing `Parameters`.
However, the `Parameter` class contains many functions and does not have a clear interface.
It contains `create/store Parameter`, `serialize/deserialize`, `optimize (i.e. SGD)`, and `randomize/zero`.
When developing `Parameters`, we only use the `create/store Parameter` functionality.
We should extract the functionalities of Parameter into many classes to clean up the PaddlePaddle C++ implementation.

2. `paddle::GradientMachine` and its sub-classes, e.g., `paddle::MultiGradientMachine`, `paddle::NeuralNetwork`.
We should pass `Parameters` to `paddle::GradientMachine` during `forward/backward` to avoid `memcpy` between topologies.

...@@ -24,7 +24,7 @@ Also, we should handle multi-GPU/CPU training, because `forward` and `backward`
So `Parameters` should be used by `paddle::ParameterUpdater`, and `paddle::ParameterUpdater` should optimize `Parameters` (by SGD).

The step-by-step approach for implementing Parameters in the PaddlePaddle C++ core is listed below. Each step should be a PR and could be merged into PaddlePaddle one by one.

1. Clean the `paddle::Parameter` interface. Extract the functionalities of `paddle::Parameter` to prepare for the implementation of Parameters.
......
# Design Doc: ProgramDesc
The basic structure of a PaddlePaddle program is some nested blocks, just like a C++ or Java program.
As described in [graph.md](./graph.md), the first five lines of the following PaddlePaddle program
```python
x = layer.data("images")
l = layer.data("label")
y = layer.fc(x)
cost = layer.mse(y, l)
optimize(cost)
train(cost, reader=mnist.train())
```
generates, or compiles, a PaddlePaddle program, which is represented by the following protobuf message:
```protobuf
message ProgramDesc {
repeated BlockDesc blocks = 1;
}
message BlockDesc {
required int32 parent = 1;
repeated VarDesc vars = 2;
repeated OpDesc ops = 3;
}
message OpDesc {
AttrDesc attrs = 1;
...
}
message AttrDesc {
required AttrType type = 1;
// index into ProgramDesc::blocks when type==BLOCK
optional int32 block = 2;
...
}
```
When each of the first five lines runs, the related Python function, e.g., `layer.fc`, calls C++ InferShape functions. This InferShape function needs to access the properties of the VarDesc's accessed by the current OpDesc. These VarDesc's might not be defined in the current block, but in some ancestor block. This requires that we can trace the parent of a block; a sketch of this lookup follows the parameter list below.

A nested block is often an attribute of an operator, most likely an IfElseOp or a WhileOp. In the above solution, all blocks are in `ProgramDesc::blocks`; this implicitly assigns a zero-based ID to each block, namely the index of the block in `ProgramDesc::blocks`. So `AttrDesc::block` could be an integer block ID.
With this design, the InferShape function should take the following parameters:
```c++
void InferShape(int current_block,
int current_operator,
ProgramDesc* program // might change VarDesc values.
) {
...
}
```
where
- `current_block` indexes into `ProgramDesc::blocks`,
- `current_operator` indexes into `BlockDesc::ops`.
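
To make the parent tracing concrete, here is an illustrative Python sketch (the function name and field accesses are assumptions, as is using a negative `parent` to mark the root block) of how an InferShape implementation could resolve a `VarDesc` by walking parent blocks:

```python
# Illustrative sketch only: resolve a VarDesc by name, starting from the
# current block and walking up through parent blocks.
def find_var_desc(program, block_idx, var_name):
    while True:
        block = program.blocks[block_idx]
        for var in block.vars:
            if var.name == var_name:
                return var
        if block.parent < 0:  # assumption: a negative parent marks the root
            return None
        block_idx = block.parent
```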
...@@ -52,7 +52,7 @@ Here are valid outputs:
# a mini batch of three data items, each data item is a list (single column).
[([1,1,1],),
([2,2,2],),
([3,3,3],)]
```

Please note that each item inside the list must be a tuple; below is an invalid output:
......
...@@ -15,7 +15,7 @@ The goal of refactorizaiton include:

1. Users write Python programs to describe the graphs and run them (locally or remotely).
1. A graph is composed of *variables* and *operators*.
1. The description of graphs must be able to be serialized/deserialized, so it

...@@ -140,7 +140,7 @@ Compile Time -> IR -> Runtime
* `thrust` has the same API as the C++ standard library. Using `transform` can quickly implement a customized elementwise kernel.
* `thrust` also has more complex APIs, like `scan`, `reduce`, `reduce_by_key`.
* Hand-writing `GPUKernel` and `CPU` code
  * Do not write `.h`. The CPU kernel should be in `.cc`, and the GPU kernel should be in `.cu`. (`GCC` cannot compile GPU code.)
---
# Operator Register
......
# PaddlePaddle Release Process

PaddlePaddle uses the git-flow branching model to manage branches, and uses the [Semantic Versioning](http://semver.org/) standard for PaddlePaddle version numbers.

Each time PaddlePaddle releases a new version, it follows this process:

1. Fork a new branch from the `develop` branch, named `release/<version>`. For example, `release/0.10.0`.
2. Tag versions on the new branch as `<version>rc.<patch>`. The first tag is `0.10.0rc1`, the second is `0.10.0rc2`, and so on.

...@@ -27,14 +27,14 @@ Each time PaddlePaddle releases a new version, it follows this process:

Note that:

* Once a `release/<version>` branch is created, merging from `develop` into it is generally no longer allowed. This keeps the features of the `release/<version>` branch closed and makes it easier for testers to test PaddlePaddle's behavior.
* While a `release/<version>` branch exists, any bugfix branch must be merged into all three branches: `master`, `develop`, and `release/<version>`.

# PaddlePaddle Branching Conventions

PaddlePaddle development uses the [git-flow](http://nvie.com/posts/a-successful-git-branching-model/) branching conventions, with some adaptations to GitHub's features.

* The main PaddlePaddle repository follows the [git-flow](http://nvie.com/posts/a-successful-git-branching-model/) branching conventions, where:
  * The `master` branch is the stable branch. Every version on the `master` branch has passed unit tests and regression tests.
  * The `develop` branch is the development branch. Every version on the `develop` branch has passed unit tests, but not regression tests.
  * The `release/<version>` branch is a temporary branch created for each release. Code on this branch is undergoing regression testing.

...@@ -42,18 +42,18 @@ PaddlePaddle development uses the [git-flow](http://nvie.com/posts/a-successful-git-branch

* Other users' forked repositories do not need to strictly follow the [git-flow](http://nvie.com/posts/a-successful-git-branching-model/) conventions, but all branches of all forked repositories are effectively feature branches.
  * It is recommended that the `develop` branch of a developer's fork track the `develop` branch of the main repository.
  * It is recommended that developers fork their own feature branches from the `develop` branch of their fork.
  * When a feature branch is finished, submit a `Pull Request` to the main PaddlePaddle repository for code review.
    * During review, developers can keep committing code to their feature branch in response to feedback.
* Bugfix branches are also maintained in the developer's own fork. Unlike feature branches, a bugfix branch needs separate `Pull Request`s against the main repository's `master`, `develop`, and any existing `release/<version>` branches.

# PaddlePaddle Regression Test List

This list describes the features that must be tested before each PaddlePaddle release.

## All chapters of the PaddlePaddle Book

For every release, PaddlePaddle must first guarantee the correctness of all chapters in the PaddlePaddle Book. This includes verifying model training correctness both with PaddlePaddle's current `paddle_trainer` and with pure `Python` training.

| | Quick Start | Recognize Digits | Image Classification | Word Vectors | Sentiment Analysis | Semantic Role Labeling | Machine Translation | Personalized Recommendation |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
......
...@@ -17,7 +17,7 @@ Scope is an association of a name to variable. All variables belong to `Scope`.

1. Scope only contains a map of a name to variable.

   All parameters, data, and states in a Net should be variables stored inside a scope. Each op should get its inputs and outputs for computation from a scope, such as data buffers and state (momentum), etc.

1. A Variable can only be created by a Scope, and a variable can only be got from a Scope. Users cannot create or get a variable outside a scope. This is a constraint of our framework, and it keeps our framework simple and clear.

...@@ -32,7 +32,7 @@ Scope is an association of a name to variable. All variables belong to `Scope`.

1. Scope should destruct all Variables inside it when it is itself destructed. Users can never store a `Variable` pointer somewhere else.

   Because a Variable can only be got from a Scope, when destroying a Scope we also need to destroy all the Variables in it. If a user stores a `Variable` pointer in a private data member or some global variable, the pointer will become invalid when the associated `Scope` is destroyed.

```cpp
class Scope {

...@@ -50,7 +50,7 @@ class Scope {

Just like [scope](https://en.wikipedia.org/wiki/Scope_(computer_science)) in programming languages, `Scope` in the neural network can also be a local scope. There are two attributes of a local scope.

1. We can create local variables in a local scope. When that local scope is destroyed, all local variables should also be destroyed.
2. Variables in a parent scope can be retrieved from local scopes of that parent scope, i.e., when a user gets a variable from a scope, the search starts in the current scope; if there is no such variable in the local scope, `scope` keeps searching its parent, until the variable is found or there is no parent. This rule is sketched below.
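
A minimal Python sketch of this lookup rule (illustrative only; the actual interface is the C++ `Scope` class below):

```python
# Illustrative sketch of the scope-chain lookup described above.
class Scope(object):
    def __init__(self, parent=None):
        self.vars = {}
        self.parent = parent

    def new_var(self, name):
        # Creating a local variable never touches the parent scope.
        var = object()
        self.vars[name] = var
        return var

    def find_var(self, name):
        # Search the current scope first, then keep searching parents.
        if name in self.vars:
            return self.vars[name]
        return self.parent.find_var(name) if self.parent else None
```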
```cpp
...@@ -121,4 +121,4 @@ Also, as the parent scope is a `shared_ptr`, we can only `Create()` a scope shar
## Orthogonal interface

`FindVar` will return `nullptr` when `name` is not found. It can be used as a `Contains` method. `NewVar` will return an `Error` when there is a local name conflict. Combining `FindVar` and `NewVar`, we can implement `NewVar` easily.
...@@ -6,9 +6,9 @@ The Interaction between Python and C++ can be simplified as two steps:

1. C++ tells Python how many Ops there are, and what parameters users need to offer to initialize a new Op. Python then builds an API for each Op at compile time.
2. Users invoke the APIs built by Python and provide the necessary parameters. These parameters are sent to C++ to finish the Op construction task.

### Message from C++ to Python

We define a Protobuf message class `OpProto` to hold the message needed in the first step. What should an `OpProto` contain? This question is equivalent to “What message do we need to offer, to build a Python API which is legal and user oriented and can be used to describe a whole Op?”
...@@ -193,7 +193,7 @@ def fc_layer(input, size, with_bias, activation):
    elif:
        # ...
    return act_output;
```

### Low Level API
......
## Background

PaddlePaddle divides the description of a neural network computation graph into two stages: compile time and runtime.

PaddlePaddle uses proto messages to describe the compile-time graph because

1. The computation graph should be able to be saved to a file.
1. In distributed training, the graph will be serialized and sent to multiple workers.
......
...@@ -158,17 +158,23 @@ PaddlePaddle parameters use the name :code:`name` as the parameter ID; parameters with the same name

Here :code:`hidden_a` and :code:`hidden_b` use the same parameter and bias. The two inputs of the softmax layer also use the same parameter :code:`softmax_param`.

7. paddlepaddle\*.whl is not a supported wheel on this platform.
------------------------------------------------------------------------

The main cause of this problem is that no paddlepaddle package matching the current system could be found. The latest paddlepaddle Python packages support the Linux x86_64 and MacOS 10.12 operating systems, with python 2.7 and pip 9.0.1 installed.

The way to update the :code:`pip` package is:

.. code-block:: bash

    pip install --upgrade pip

If that still does not work, run :code:`python -c "import pip; print(pip.pep425tags.get_supported())"` to get the package suffixes supported by the current system, and check whether they match the suffix of the package being installed.

If the system supports :code:`linux_x86_64` while the package being installed is :code:`manylinux1_x86_64`, upgrade pip to the latest version;
if the system supports :code:`manylinux1_x86_64` while the (local) package is :code:`linux_x86_64`, you can rename the whl package to :code:`manylinux1_x86_64` and install it again.

8. None of the Python-related unit tests pass
--------------------------------
...@@ -310,7 +316,7 @@ The PaddlePaddle binary catches floating-point exceptions at runtime; whenever a floating-point exception occurs

* The model never converges, and diverges to very large values.
* There is a problem with the training data, causing the parameters to converge to singular values; or the input data is too large in scale, with some features reaching values in the millions, so matrix multiplication may overflow the floating-point range.

The main remedies are to reduce the learning rate or to normalize the data.

15. After building and installing, running import paddle.v2 as paddle raises ImportError: No module named v2
------------------------------------------------------------------------

...@@ -373,3 +379,15 @@ The model parameter file saved by PaddlePaddle consists of a 16-byte header and the network parameters

    parameters = paddle.parameters.create(my_cost)
    parameters.set('emb', load_parameter(emb_param_file, 30000, 256))

18. In multi-node cluster training, the logs contain only network communication errors
------------------------------

In multi-node cluster training, the logs report network communication errors, such as :code:`Connection reset by peer`.

Such errors are usually caused by an error on one node that makes its training process exit, which in turn makes the other nodes unable to connect to it. You can troubleshoot with the following steps:

* In :code:`train.log` and :code:`server.log`, find the earliest error and check whether it was triggered by some other failure (such as an FPE, running out of memory, or running out of disk space).
* If the earliest error is indeed a network communication problem, it is very likely a port conflict caused by non-exclusive execution. Contact the cluster operators to see whether the current MPI cluster supports submitting jobs with the resource=full parameter; if so, add this parameter and change the job port.
* If the current MPI cluster does not support an exclusive mode, contact the operators about switching to another cluster or upgrading the current one.
\ No newline at end of file
...@@ -54,9 +54,9 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -54,9 +54,9 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker {
public: public:
MulOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) MulOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The first input of mul op"); AddInput("X", "(Tensor), 2D tensor of size (M x K)");
AddInput("Y", "The second input of mul op"); AddInput("Y", "(Tensor), 2D tensor of size (K x N)");
AddOutput("Out", "The output of mul op"); AddOutput("Out", "(Tensor), 2D tensor of size (M x N)");
AddComment(R"DOC( AddComment(R"DOC(
Two Element Mul Operator. Two Element Mul Operator.
The equation is: Out = X * Y The equation is: Out = X * Y
...@@ -72,7 +72,7 @@ The equation is: Out = X * Y ...@@ -72,7 +72,7 @@ The equation is: Out = X * Y
构造函数里通过`AddInput`添加输入参数,通过`AddOutput`添加输出参数,通过`AddComment`添加Op的注释。这些函数会将对应内容添加到`OpProto`中。 构造函数里通过`AddInput`添加输入参数,通过`AddOutput`添加输出参数,通过`AddComment`添加Op的注释。这些函数会将对应内容添加到`OpProto`中。
上面的代码在`MulOp`中添加两个输入`X``Y`,添加了一个输出`Out`,并解释了各自含义,命名请遵守命名规范 上面的代码在`MulOp`中添加两个输入`X``Y`,添加了一个输出`Out`,并解释了各自含义,命名请遵守[命名规范](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/name_convention.md)
再以[`ScaleOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/scale_op.cc#L37)为例: 再以[`ScaleOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/scale_op.cc#L37)为例:
......
# Cluster bootstrapping tool survey
## Abstract
In order to bring up a cluster from bare metal machines to a fully functional Kubernetes cluster for PaddlePaddle to run on, we need to utilize some tools. Here we are going to compare [Sextant](https://github.com/k8sp/sextant) and [Tectonic installer](https://github.com/coreos/tectonic-installer).
## Basic assumptions
Here are some basic assumptions before we move on to the details:

1. You are an administrator of a bare metal machine cluster, which means:
    * you have full control of each of the machines.
    * you have full control of the network the machines are connected to.
2. Machines can be booted from the network with PXE or iPXE.
3. You understand the [general procedure to bring up a cluster](#appendix-general-procedure-to-bring-up-a-cluster).

If your cluster can check off all of the above items, keep reading.
## Comparing Sextant and Tectonic installer
### Sextant
Sextant is an end-to-end solution for bringing a bare metal cluster up to a fully functional k8s cluster; it integrates DHCP, name service, PXE, cloud-config service, and docker registry services altogether.

#### Pros

1. End-to-end: basically all an admin needs to do is configure cluster.yaml and power on the cluster.
2. Offline cluster configuration: Sextant has two phases, config time and deploy time. While configuring, the admin's machine needs internet connectivity to download images, etc. But at deploy time it is completely OK to go offline, since all dependencies are fetched at config time.
3. Docker registry integrated.
4. GPU machines are taken care of.

#### Cons

1. High availability of the k8s API server is not considered by default.
2. No grouping support.
3. No API interface; a one-off service.
### Tectonic installer
First of all, Tectonic is not free; it requires a coreos.com account as a step of installation, and free users can only create fewer than 10 nodes.

Tectonic is a suite of software which wraps around k8s and provides more dev-ops utilities.

The Tectonic installer, as its name suggests, installs Tectonic onto a bare metal cluster, which means it is not entirely an equivalent of Sextant. For the "booting a cluster" part, it mostly utilizes [Matchbox](https://github.com/coreos/matchbox), which is a general cluster bootstrapper.

Matchbox's approach is similar to Sextant's.
#### Pros

1. Supports grouping machines.
2. Supports running the provisioning service in rkt (not a big deal though).
3. Supports an http/gRPC API interface.
4. Supports multiple templates.

#### Cons

1. Not an end-to-end solution for bringing up a cluster; it needs a lot of extra work and other software.
2. [Not fully supporting](https://github.com/coreos/matchbox/issues/550) CentOS deployment yet.
## Conclusion
Sextant is a better solution overall for deploying Paddle Cloud to a bare metal cluster. It would be great if Sextant could also 1) deploy the k8s API server with high availability by default, and 2) not be designed as a one-off service.
## Appendix: General procedure to bring up a cluster
It is physically impossible for a cluster admin to manually install an OS and applications onto cluster nodes one by one. Here is what an admin would do in the cloud industry:

1. Set up a bootstrap machine with a static IP in the cluster, which provides the following services:
    * DHCP: assigns IP addresses to the rest of the nodes.
    * Name service: maps a node name to an IP.
    * PXE-related services: booting-related info is delivered to newly booted machines as their IPs are assigned via the DHCP service; the PXE service then provides further booting and installation info and images over the TFTP and HTTP protocols.
    * Cluster config service: provides cluster nodes with OS configs via HTTP.
    * Optional docker registry: a built-in docker registry makes the whole cluster independent of an internet connection, and speeds up software distribution.
2. When a new node powers on, it will
    * broadcast a request for an IP address;
    * the DHCP server assigns the IP address and delivers the PXE booting-related info to the node;
    * the cluster node requests config files, using the booting info delivered via DHCP, from the TFTP service, and in most cases the config file points to an HTTP service for the booting image;
    * since PXE is configured with initrd, it utilizes the cloud config service and does further installations, like CoreOS or k8s installations;
    * then the node restarts.
For further understanding, the following two links from Matchbox are good reading:
* [Machine lifecycle](https://github.com/coreos/matchbox/blob/master/Documentation/machine-lifecycle.md)
* [PXE booting](https://github.com/coreos/matchbox/blob/master/Documentation/network-booting.md)
...@@ -62,6 +62,7 @@ if(ANDROID)
          LIBRARY DESTINATION lib/${ANDROID_ABI})
  execute_process(
    COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -1
    WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}
    OUTPUT_VARIABLE GIT_COMMITS_LIST
    RESULT_VARIABLE GIT_COMMITS_LIST_RESULT
    ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
...@@ -81,8 +82,7 @@ if(ANDROID)
      )"
    )
else(ANDROID)
  install(TARGETS paddle_capi_whole ARCHIVE DESTINATION lib)
  if(NOT IOS)
    install(TARGETS paddle_capi_shared DESTINATION lib)
  endif()
......
...@@ -19,12 +19,14 @@ cc_test(scope_test SRCS scope_test.cc DEPS scope)
proto_library(framework_proto SRCS framework.proto)

cc_library(attribute SRCS attribute.cc DEPS framework_proto)
cc_library(op_proto_maker SRCS op_proto_maker.cc DEPS framework_proto attribute)
cc_test(op_proto_maker_test SRCS op_proto_maker_test.cc DEPS op_proto_maker)
cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto)
cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope)
cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry)
cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator)
cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder op_proto_maker)
cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry)
cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry add_op)
......
...@@ -19,6 +19,19 @@ limitations under the License. */
namespace paddle {
namespace framework {
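// The global ProgramDesc singleton; lazily created on the first call to
// GetProgramDesc() and never freed.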
static ProgramDesc* g_program_desc = nullptr;
ProgramDesc& GetProgramDesc() {
if (g_program_desc == nullptr) {
g_program_desc = new ProgramDesc();
}
return *g_program_desc;
}
template <>
AttrType AttrTypeID<bool>() {
return BOOLEAN;
}
template <>
AttrType AttrTypeID<int>() {
  return INT;
...@@ -32,6 +45,10 @@ AttrType AttrTypeID<std::string>() {
  return STRING;
}
template <>
AttrType AttrTypeID<std::vector<bool>>() {
  return BOOLEANS;
}
template <>
AttrType AttrTypeID<std::vector<int>>() {
  return INTS;
}
...@@ -47,40 +64,54 @@ template <>
AttrType AttrTypeID<std::vector<std::pair<int, int>>>() {
  return INT_PAIRS;
}
template <>
AttrType AttrTypeID<BlockDesc>() {
return BLOCK;
}
Attribute GetAttrValue(const OpDesc::Attr& attr_desc) {
  switch (attr_desc.type()) {
    case framework::AttrType::BOOLEAN: {
      return attr_desc.b();
    }
    case framework::AttrType::INT: {
      return attr_desc.i();
    }
    case framework::AttrType::FLOAT: {
      return attr_desc.f();
    }
    case framework::AttrType::STRING: {
      return attr_desc.s();
    }
    case framework::AttrType::BOOLEANS: {
      std::vector<bool> val(attr_desc.bools_size());
      for (int i = 0; i < attr_desc.bools_size(); ++i) {
        val[i] = attr_desc.bools(i);
      }
      return val;
    }
    case framework::AttrType::INTS: {
      std::vector<int> val(attr_desc.ints_size());
      for (int i = 0; i < attr_desc.ints_size(); ++i) {
        val[i] = attr_desc.ints(i);
      }
      return val;
    }
    case framework::AttrType::FLOATS: {
      std::vector<float> val(attr_desc.floats_size());
      for (int i = 0; i < attr_desc.floats_size(); ++i) {
        val[i] = attr_desc.floats(i);
      }
      return val;
    }
    case framework::AttrType::STRINGS: {
      std::vector<std::string> val(attr_desc.strings_size());
      for (int i = 0; i < attr_desc.strings_size(); ++i) {
        val[i] = attr_desc.strings(i);
      }
      return val;
    }
    case framework::AttrType::INT_PAIRS: {
      std::vector<std::pair<int, int>> val(attr_desc.int_pairs_size());
      for (int i = 0; i < attr_desc.int_pairs_size(); ++i) {
        val[i].first = attr_desc.int_pairs(i).first();
...@@ -88,6 +119,9 @@ Attribute GetAttrValue(const OpDesc::Attr& attr_desc) {
      }
      return val;
    }
    case framework::AttrType::BLOCK: {
      return GetProgramDesc().mutable_blocks(attr_desc.block_idx());
    }
  }
  PADDLE_ENFORCE(false, "Unknown OpDesc::AttrDesc::type !");
  return boost::blank();
......
...@@ -27,13 +27,16 @@ limitations under the License. */
namespace paddle {
namespace framework {

typedef boost::variant<boost::blank, bool, int, float, std::string,
                       std::vector<bool>, std::vector<int>, std::vector<float>,
                       std::vector<std::string>,
                       std::vector<std::pair<int, int>>, BlockDesc*>
    Attribute;

typedef std::unordered_map<std::string, Attribute> AttributeMap;

ProgramDesc& GetProgramDesc();

template <typename T>
AttrType AttrTypeID();
......
...@@ -166,9 +166,8 @@ static std::unique_ptr<OperatorBase> BackwardRecursive(
      // If part of input gradient of that operator is not calculated, fill
      // zero variables to that input gradient.
      net->AppendOp(OpRegistry::CreateOp("fill_zeros_like", {{"X", {prefix}}},
                                         {{"Y", {grad_input}}}, {}));
    }
    return false;
  });
......
...@@ -127,8 +127,8 @@ class FillZeroOpMaker : public OpProtoAndCheckerMaker {
 public:
  FillZeroOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "x");
    AddOutput("Y", "out");
    AddComment("");
  }
};
...@@ -325,10 +325,10 @@ TEST(Backward, op_part_of_output_are_not_need) {
  auto &fill_zero = *net->ops_[0];
  ASSERT_EQ("fill_zeros_like", fill_zero.Type());
  ASSERT_EQ(1UL, fill_zero.Inputs("X").size());
  ASSERT_EQ("Z", fill_zero.Input("X"));
  ASSERT_EQ(1UL, fill_zero.Outputs("Y").size());
  ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix, fill_zero.Output("Y"));

  auto &d_many_out = *net->ops_[1];
  ASSERT_EQ("many_output_op_grad", d_many_out.Type());
......
...@@ -292,5 +292,13 @@ DDim flatten_to_2d(const DDim& src, int num_col_dims) { ...@@ -292,5 +292,13 @@ DDim flatten_to_2d(const DDim& src, int num_col_dims) {
DDim flatten_to_1d(const DDim& src) { return make_ddim({product(src)}); } DDim flatten_to_1d(const DDim& src) { return make_ddim({product(src)}); }
DDim stride(const DDim& ddim) {
std::vector<int64_t> strides(ddim.size());
strides[ddim.size() - 1] = 1;
for (int i = ddim.size() - 2; i >= 0; --i) {
strides[i] = strides[i + 1] * ddim[i + 1];
}
return framework::make_ddim(strides);
}
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
...@@ -121,6 +121,7 @@ DDim flatten_to_2d(const DDim& src, int num_col_dims); ...@@ -121,6 +121,7 @@ DDim flatten_to_2d(const DDim& src, int num_col_dims);
DDim flatten_to_1d(const DDim& src); DDim flatten_to_1d(const DDim& src);
DDim stride(const DDim& ddim);
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
......
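For intuition about the new stride() helper, here is a hedged, standalone sketch of the same row-major computation (plain std::vector stands in for DDim):

```c++
#include <cassert>
#include <cstdint>
#include <vector>

// Row-major strides: the innermost dimension is contiguous, and each outer
// stride is the product of all dimensions to its right.
std::vector<int64_t> Stride(const std::vector<int64_t>& dims) {
  std::vector<int64_t> strides(dims.size());
  strides[dims.size() - 1] = 1;
  for (int i = static_cast<int>(dims.size()) - 2; i >= 0; --i) {
    strides[i] = strides[i + 1] * dims[i + 1];
  }
  return strides;
}

int main() {
  // For a {2, 3, 4} tensor, stepping along dim 0 skips 3 * 4 = 12 elements.
  assert((Stride({2, 3, 4}) == std::vector<int64_t>{12, 4, 1}));
  return 0;
}
```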
...@@ -23,6 +23,9 @@ enum AttrType { ...@@ -23,6 +23,9 @@ enum AttrType {
FLOATS = 4; FLOATS = 4;
STRINGS = 5; STRINGS = 5;
INT_PAIRS = 6; INT_PAIRS = 6;
BOOLEAN = 7;
BOOLEANS = 8;
BLOCK = 9;
} }
message IntPair { message IntPair {
...@@ -44,6 +47,9 @@ message OpDesc { ...@@ -44,6 +47,9 @@ message OpDesc {
repeated float floats = 7; repeated float floats = 7;
repeated string strings = 8; repeated string strings = 8;
repeated IntPair int_pairs = 9; repeated IntPair int_pairs = 9;
optional bool b = 10;
repeated bool bools = 11;
optional int32 block_idx = 12;
}; };
message Var { message Var {
...@@ -100,7 +106,7 @@ enum DataType { ...@@ -100,7 +106,7 @@ enum DataType {
message LoDTensorDesc { message LoDTensorDesc {
required DataType data_type = 1; required DataType data_type = 1;
repeated int32 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480] repeated int64 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480]
optional int32 lod_level = 3 [ default = 0 ]; optional int32 lod_level = 3 [ default = 0 ];
} }
...@@ -108,3 +114,12 @@ message VarDesc { ...@@ -108,3 +114,12 @@ message VarDesc {
required string name = 1; required string name = 1;
optional LoDTensorDesc lod_tensor = 2; optional LoDTensorDesc lod_tensor = 2;
} }
message BlockDesc {
required int32 idx = 1;
required int32 parent_idx = 2;
repeated VarDesc vars = 3;
repeated OpDesc ops = 4;
}
message ProgramDesc { repeated BlockDesc blocks = 1; }
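On the producer side, the new proto fields are filled through the usual protobuf-generated setters. A hedged sketch (`FillBlockAttr` is an illustrative helper; the message is assumed to be the OpDesc attribute that GetAttrValue consumes):

```c++
#include "paddle/framework/framework.pb.h"

// Illustrative only: type an attribute as BLOCK and point it at the first
// block of the enclosing ProgramDesc through the new block_idx field.
void FillBlockAttr(paddle::framework::OpDesc::Attr* attr) {
  attr->set_type(paddle::framework::AttrType::BLOCK);
  attr->set_block_idx(0);  // index into ProgramDesc.blocks
}
```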
...@@ -72,20 +72,16 @@ bool operator==(const LoD& a, const LoD& b) { ...@@ -72,20 +72,16 @@ bool operator==(const LoD& a, const LoD& b) {
return true; return true;
} }
void LoDTensor::SliceLevels(size_t level_begin, size_t level_end) { void LoDTensor::ShrinkLevels(size_t level_begin, size_t level_end) {
auto new_lod = framework::SliceLevels(lod_, level_begin, level_end); auto new_lod = framework::SliceLevels(lod_, level_begin, level_end);
lod_ = new_lod; lod_ = new_lod;
} }
void LoDTensor::SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) { void LoDTensor::ShrinkInLevel(size_t level, size_t elem_begin,
PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level, size_t elem_end) {
NumLevels()); PADDLE_ENFORCE_LT(level, NumLevels());
PADDLE_ENFORCE(elem_begin < NumElements(level), PADDLE_ENFORCE_LT(elem_begin, NumElements(level));
"element begin [%d] out of range [%d]", elem_begin, PADDLE_ENFORCE_LT(elem_end, NumElements(level) + 1);
NumElements(level));
PADDLE_ENFORCE(elem_end < NumElements(level) + 1,
"element end [%d] out of range [%d]", elem_end,
NumElements(level));
auto new_lod = framework::SliceInLevel(lod_, level, elem_begin, elem_end); auto new_lod = framework::SliceInLevel(lod_, level, elem_begin, elem_end);
lod_ = new_lod; lod_ = new_lod;
......
...@@ -89,15 +89,15 @@ class LoDTensor : public Tensor { ...@@ -89,15 +89,15 @@ class LoDTensor : public Tensor {
} }
/* /*
* Slice of levels[level_begin:level_end] * Shrink levels[level_begin:level_end]
*/ */
void SliceLevels(size_t level_begin, size_t level_end); void ShrinkLevels(size_t level_begin, size_t level_end);
/* /*
* Slice of elements of a level, [elem_begin: elem_end] * Shrink elements of a level, [elem_begin: elem_end]
* @note: low performance in slicing lod_. * @note: low performance in slicing lod_.
*/ */
void SliceInLevel(size_t level, size_t elem_begin, size_t elem_end); void ShrinkInLevel(size_t level, size_t elem_begin, size_t elem_end);
private: private:
LoD lod_; LoD lod_;
......
...@@ -4,7 +4,7 @@ PaddlePaddle's RNN doesn't require that all instances have the same length. To ...@@ -4,7 +4,7 @@ PaddlePaddle's RNN doesn't require that all instances have the same length. To
## Challenge of Variable-length Inputs ## Challenge of Variable-length Inputs
People usually represent a mini-batch by a Tensor. For example, a mini-batch of 10 images, each of size 32x32, is a 10x32x32 Tensor. So a transformation, T, of all images can be a matrix multiplication of the 32x32xO-dimensional tensor T and the 10x32x32 Tensor. People usually represent a mini-batch by a Tensor. For example, a mini-batch of 10 images, each of size 32x32, is a 10x32x32 Tensor. So a transformation, T, of all images can be a matrix multiplication of the 10xOx32-dimensional tensor T and the 10x32x32 Tensor.
Another example is that each mini-batch contains 32 sentences, where each word is a D-dimensional one-hot vector. If all sentences have the same length L, we can represent this mini-batch by a 32xLxD tensor. However, in most cases, sentences have variable lengths, and we will need an index data structure to record these variable lengths. Another example is that each mini-batch contains 32 sentences, where each word is a D-dimensional one-hot vector. If all sentences have the same length L, we can represent this mini-batch by a 32xLxD tensor. However, in most cases, sentences have variable lengths, and we will need an index data structure to record these variable lengths.
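For instance, a mini-batch of three sentences with lengths 2, 3, and 1 needs exactly such an index. A hedged illustration, assuming the offset-based encoding the C++ `LoD` type uses (the concrete numbers are illustrative):

```c++
// A single-level LoD for sequence lengths {2, 3, 1}: offsets mark where each
// sequence begins and ends along the first dimension of the 6xD tensor.
LoD lod = {{0, 2, 5, 6}};
```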
...@@ -54,7 +54,7 @@ In summary, as long as that the essential elements (words or images) have the s ...@@ -54,7 +54,7 @@ In summary, as long as that the essential elements (words or images) have the s
- The first dimension size L has an additional property -- a LoD index as a nested vector: - The first dimension size L has an additional property -- a LoD index as a nested vector:
```c++ ```c++
typedef std::vector<std::vector<size_t> > LoD; typedef std::vector<std::vector<size_t>> LoD;
``` ```
- The LoD index is not necessary when there are only two levels and all elements of the second level have length 1. - The LoD index is not necessary when there are only two levels and all elements of the second level have length 1.
...@@ -100,7 +100,7 @@ Let's go on slicing this slice. Its <1,1>-slice is ...@@ -100,7 +100,7 @@ Let's go on slicing this slice. Its <1,1>-slice is
The algorithm, with over-simplified data structure, is defined as The algorithm, with over-simplified data structure, is defined as
```c++ ```c++
typedef vector<vector<int> > LoD; typedef std::vector<std::vector<int>> LoD;
struct LoDTensor { struct LoDTensor {
LoD lod_; LoD lod_;
......
...@@ -56,11 +56,11 @@ TEST_F(LoDTensorTester, NumElements) { ...@@ -56,11 +56,11 @@ TEST_F(LoDTensorTester, NumElements) {
ASSERT_EQ(lod_tensor_.NumElements(2), 8UL); ASSERT_EQ(lod_tensor_.NumElements(2), 8UL);
} }
TEST_F(LoDTensorTester, SliceLevels) { TEST_F(LoDTensorTester, ShrinkLevels) {
// slice 1 level // slice 1 level
for (size_t level = 0; level < 3UL; ++level) { for (size_t level = 0; level < 3UL; ++level) {
LoDTensor new_lod_tensor = lod_tensor_; LoDTensor new_lod_tensor = lod_tensor_;
new_lod_tensor.SliceLevels(level, level + 1); new_lod_tensor.ShrinkLevels(level, level + 1);
ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL); ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level)); ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level));
ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>()); ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>());
...@@ -68,7 +68,7 @@ TEST_F(LoDTensorTester, SliceLevels) { ...@@ -68,7 +68,7 @@ TEST_F(LoDTensorTester, SliceLevels) {
// slice 2 level // slice 2 level
for (size_t level = 0; level < 2UL; ++level) { for (size_t level = 0; level < 2UL; ++level) {
LoDTensor new_lod_tensor = lod_tensor_; LoDTensor new_lod_tensor = lod_tensor_;
new_lod_tensor.SliceLevels(level, level + 2); new_lod_tensor.ShrinkLevels(level, level + 2);
ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level)); ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level));
ASSERT_EQ(new_lod_tensor.NumElements(1), ASSERT_EQ(new_lod_tensor.NumElements(1),
...@@ -77,10 +77,10 @@ TEST_F(LoDTensorTester, SliceLevels) { ...@@ -77,10 +77,10 @@ TEST_F(LoDTensorTester, SliceLevels) {
} }
} }
TEST_F(LoDTensorTester, SliceInLevel) { TEST_F(LoDTensorTester, ShrinkInLevel) {
size_t level = 0; size_t level = 0;
LoDTensor new_lod_tensor = lod_tensor_; LoDTensor new_lod_tensor = lod_tensor_;
new_lod_tensor.SliceInLevel(level, 0, 2); new_lod_tensor.ShrinkInLevel(level, 0, 2);
EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL); EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL);
EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL); EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL);
EXPECT_EQ(new_lod_tensor.NumElements(1), 4UL); EXPECT_EQ(new_lod_tensor.NumElements(1), 4UL);
...@@ -89,7 +89,7 @@ TEST_F(LoDTensorTester, SliceInLevel) { ...@@ -89,7 +89,7 @@ TEST_F(LoDTensorTester, SliceInLevel) {
level = 1; level = 1;
new_lod_tensor = lod_tensor_; new_lod_tensor = lod_tensor_;
new_lod_tensor.SliceInLevel(level, 0, 2); new_lod_tensor.ShrinkInLevel(level, 0, 2);
ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL); ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL); ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/op_proto_maker.h"
namespace paddle {
namespace framework {
void OpProtoAndCheckerMaker::Validate() {
validated_ = true;
CheckNoDuplicatedInOutAttrs();
}
OpProtoAndCheckerMaker::VariableBuilder OpProtoAndCheckerMaker::AddInput(
const std::string& name, const std::string& comment) {
auto* input = proto_->add_inputs();
input->set_name(name);
input->set_comment(comment);
return OpProtoAndCheckerMaker::VariableBuilder{input};
}
OpProtoAndCheckerMaker::VariableBuilder OpProtoAndCheckerMaker::AddOutput(
const std::string& name, const std::string& comment) {
auto* output = proto_->add_outputs();
output->set_name(name);
output->set_comment(comment);
return OpProtoAndCheckerMaker::VariableBuilder{output};
}
void OpProtoAndCheckerMaker::CheckNoDuplicatedInOutAttrs() {
std::unordered_set<std::string> names;
auto checker = [&](const std::string& name) {
PADDLE_ENFORCE(!names.count(name), "[%s] is duplicated", name);
names.insert(name);
};
for (auto& attr : proto_->attrs()) {
checker(attr.name());
}
for (auto& input : proto_->inputs()) {
checker(input.name());
}
for (auto& output : proto_->outputs()) {
checker(output.name());
}
}
} // namespace framework
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/attribute.h"
#include "paddle/framework/framework.pb.h"
namespace paddle {
namespace framework {
// This class not only builds the op proto, but also initializes the attribute checkers.
class OpProtoAndCheckerMaker {
public:
OpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker)
: proto_(proto), op_checker_(op_checker) {}
virtual ~OpProtoAndCheckerMaker() {
PADDLE_ENFORCE(validated_, "should call Validate after build");
}
void Validate();
protected:
struct VariableBuilder {
OpProto::Var* var_;
VariableBuilder& AsDuplicable() {
var_->set_duplicable(true);
return *this;
}
VariableBuilder& AsIntermediate() {
var_->set_intermediate(true);
return *this;
}
VariableBuilder& NotInGradient() {
var_->set_not_in_gradient(true);
return *this;
}
};
VariableBuilder AddInput(const std::string& name, const std::string& comment);
VariableBuilder AddOutput(const std::string& name,
const std::string& comment);
template <typename T>
TypedAttrChecker<T>& AddAttr(const std::string& name,
const std::string& comment,
bool generated = false) {
auto* attr = proto_->add_attrs();
attr->set_name(name);
attr->set_comment(comment);
attr->set_generated(generated);
attr->set_type(AttrTypeID<T>());
return op_checker_->AddAttrChecker<T>(name);
}
void AddComment(const std::string& comment) { proto_->set_comment(comment); }
private:
void CheckNoDuplicatedInOutAttrs();
OpProto* proto_;
OpAttrChecker* op_checker_;
bool validated_{false};
};
class NOPMaker : public OpProtoAndCheckerMaker {
public:
NOPMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {}
};
} // namespace framework
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/op_proto_maker.h"
#include "gtest/gtest.h"
class TestAttrProtoMaker : public paddle::framework::OpProtoAndCheckerMaker {
public:
TestAttrProtoMaker(paddle::framework::OpProto* proto,
paddle::framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddAttr<float>("scale", "scale of test op");
AddAttr<float>("scale", "scale of test op");
}
};
TEST(ProtoMaker, DuplicatedAttr) {
paddle::framework::OpProto op_proto;
paddle::framework::OpAttrChecker op_checker;
auto proto_maker = TestAttrProtoMaker(&op_proto, &op_checker);
ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet);
}
class TestInOutProtoMaker : public paddle::framework::OpProtoAndCheckerMaker {
public:
TestInOutProtoMaker(paddle::framework::OpProto* proto,
paddle::framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("input", "input of test op");
AddInput("input", "input of test op");
}
};
TEST(ProtoMaker, DuplicatedInOut) {
paddle::framework::OpProto op_proto;
paddle::framework::OpAttrChecker op_checker;
auto proto_maker = TestInOutProtoMaker(&op_proto, &op_checker);
ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet);
}
\ No newline at end of file
...@@ -24,6 +24,7 @@ limitations under the License. */ ...@@ -24,6 +24,7 @@ limitations under the License. */
#include "paddle/framework/framework.pb.h" #include "paddle/framework/framework.pb.h"
#include "paddle/framework/grad_op_builder.h" #include "paddle/framework/grad_op_builder.h"
#include "paddle/framework/op_info.h" #include "paddle/framework/op_info.h"
#include "paddle/framework/op_proto_maker.h"
#include "paddle/framework/operator.h" #include "paddle/framework/operator.h"
#include "paddle/framework/scope.h" #include "paddle/framework/scope.h"
......
...@@ -60,8 +60,8 @@ std::string OperatorBase::Output(const std::string& name) const { ...@@ -60,8 +60,8 @@ std::string OperatorBase::Output(const std::string& name) const {
const std::vector<std::string>& OperatorBase::Outputs( const std::vector<std::string>& OperatorBase::Outputs(
const std::string& name) const { const std::string& name) const {
auto it = outputs_.find(name); auto it = outputs_.find(name);
PADDLE_ENFORCE(it != outputs_.end(), "Op %s does not have output %s", type_, PADDLE_ENFORCE(it != outputs_.end(), "Op %s does not have output called %s",
name); type_, name);
return it->second; return it->second;
} }
...@@ -207,64 +207,25 @@ const std::vector<const Tensor*> InferShapeContext::MultiInput<Tensor>( ...@@ -207,64 +207,25 @@ const std::vector<const Tensor*> InferShapeContext::MultiInput<Tensor>(
} }
template <> template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const { Tensor* InferShapeContext::Output<Tensor>(const std::string& name) const {
auto* var = OutputVar(name); auto var = OutputVar(name);
return var == nullptr ? nullptr : const_cast<Tensor*>(GetTensorFromVar(var)); return var == nullptr ? nullptr : var->GetMutable<LoDTensor>();
} }
template <> template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>( std::vector<Tensor*> InferShapeContext::MultiOutput<Tensor>(
const std::string& name) const { const std::string& name) const {
auto names = op().Outputs(name); auto names = op().Outputs(name);
std::vector<Tensor*> res; std::vector<Tensor*> res;
res.reserve(names.size()); res.reserve(names.size());
std::transform(names.begin(), names.end(), std::back_inserter(res), std::transform(names.begin(), names.end(), std::back_inserter(res),
[&](const std::string& sub_name) { [&](const std::string& sub_name) {
auto var = scope().FindVar(sub_name); auto var = scope_.FindVar(sub_name);
return var == nullptr return var == nullptr ? nullptr
? nullptr : var->GetMutable<LoDTensor>();
: const_cast<Tensor*>(GetTensorFromVar(var));
}); });
return res; return res;
} }
void OpProtoAndCheckerMaker::Validate() {
validated_ = true;
CheckNoDuplicatedInOutAttrs();
}
OpProtoAndCheckerMaker::VariableBuilder OpProtoAndCheckerMaker::AddInput(
const std::string& name, const std::string& comment) {
auto* input = proto_->add_inputs();
input->set_name(name);
input->set_comment(comment);
return OpProtoAndCheckerMaker::VariableBuilder{input};
}
OpProtoAndCheckerMaker::VariableBuilder OpProtoAndCheckerMaker::AddOutput(
const std::string& name, const std::string& comment) {
auto* output = proto_->add_outputs();
output->set_name(name);
output->set_comment(comment);
return OpProtoAndCheckerMaker::VariableBuilder{output};
}
void OpProtoAndCheckerMaker::CheckNoDuplicatedInOutAttrs() {
std::unordered_set<std::string> names;
auto checker = [&](const std::string& name) {
PADDLE_ENFORCE(!names.count(name), "[%s] is duplicated", name);
names.insert(name);
};
for (auto& attr : proto_->attrs()) {
checker(attr.name());
}
for (auto& input : proto_->inputs()) {
checker(input.name());
}
for (auto& output : proto_->outputs()) {
checker(output.name());
}
}
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
...@@ -167,71 +167,6 @@ class NOP : public OperatorBase { ...@@ -167,71 +167,6 @@ class NOP : public OperatorBase {
} }
}; };
// this class not only make proto but also init attribute checkers.
class OpProtoAndCheckerMaker {
public:
OpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker)
: proto_(proto), op_checker_(op_checker) {}
~OpProtoAndCheckerMaker() {
PADDLE_ENFORCE(validated_, "should call Validate after build");
}
void Validate();
protected:
struct VariableBuilder {
OpProto::Var* var_;
VariableBuilder& AsDuplicable() {
var_->set_duplicable(true);
return *this;
}
VariableBuilder& AsIntermediate() {
var_->set_intermediate(true);
return *this;
}
VariableBuilder& NotInGradient() {
var_->set_not_in_gradient(true);
return *this;
}
};
VariableBuilder AddInput(const std::string& name, const std::string& comment);
VariableBuilder AddOutput(const std::string& name,
const std::string& comment);
template <typename T>
TypedAttrChecker<T>& AddAttr(const std::string& name,
const std::string& comment,
bool generated = false) {
auto* attr = proto_->add_attrs();
attr->set_name(name);
attr->set_comment(comment);
attr->set_generated(generated);
attr->set_type(AttrTypeID<T>());
return op_checker_->AddAttrChecker<T>(name);
}
void AddComment(const std::string& comment) { proto_->set_comment(comment); }
private:
void CheckNoDuplicatedInOutAttrs();
OpProto* proto_;
OpAttrChecker* op_checker_;
bool validated_{false};
};
class NOPMaker : public OpProtoAndCheckerMaker {
public:
NOPMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {}
};
class InferShapeContext { class InferShapeContext {
public: public:
InferShapeContext(const OperatorBase& op, const Scope& scope) InferShapeContext(const OperatorBase& op, const Scope& scope)
...@@ -277,9 +212,9 @@ class InferShapeContext { ...@@ -277,9 +212,9 @@ class InferShapeContext {
return res; return res;
} }
std::vector<const Variable*> MultiOutputVar(const std::string& name) const { std::vector<Variable*> MultiOutputVar(const std::string& name) const {
auto names = op_.Outputs(name); auto names = op_.Outputs(name);
std::vector<const Variable*> res; std::vector<Variable*> res;
res.reserve(names.size()); res.reserve(names.size());
std::transform(names.begin(), names.end(), std::back_inserter(res), std::transform(names.begin(), names.end(), std::back_inserter(res),
[this](const std::string& name) { [this](const std::string& name) {
...@@ -336,6 +271,20 @@ class InferShapeContext { ...@@ -336,6 +271,20 @@ class InferShapeContext {
return &var->Get<Tensor>(); return &var->Get<Tensor>();
} }
void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
size_t j = 0) const {
PADDLE_ENFORCE_LT(i, InputSize(in));
PADDLE_ENFORCE_LT(j, OutputSize(out));
auto* in_var = MultiInputVar(in)[i];
auto* out_var = MultiOutputVar(out)[j];
if (!in_var->IsType<LoDTensor>()) return;
PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
"The %d-th output of Output(%s) must be LoDTensor.", j, out);
auto in_tensor = in_var->Get<LoDTensor>();
auto* out_tensor = out_var->GetMutable<LoDTensor>();
out_tensor->set_lod(in_tensor.lod());
}
private: private:
const OperatorBase& op_; const OperatorBase& op_;
const Scope& scope_; const Scope& scope_;
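The new ShareLoD helper is intended to be called from an operator's InferShape; a hedged sketch of the pattern (`PassThroughOp` is an illustrative name, mirroring ActivationOp in activation_op.cc later in this commit):

```c++
// Illustrative only: forward the input's LoD to the output unchanged.
class PassThroughOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(const framework::InferShapeContext& ctx) const override {
    ctx.Output<framework::Tensor>("Out")->Resize(
        ctx.Input<framework::Tensor>("X")->dims());
    ctx.ShareLoD("X", /*->*/ "Out");  // element 0 of each slot by default
  }
};
```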
...@@ -348,6 +297,13 @@ template <> ...@@ -348,6 +297,13 @@ template <>
const std::vector<const Tensor*> InferShapeContext::MultiInput<Tensor>( const std::vector<const Tensor*> InferShapeContext::MultiInput<Tensor>(
const std::string& name) const; const std::string& name) const;
template <>
Tensor* InferShapeContext::Output<Tensor>(const std::string& name) const;
template <>
std::vector<Tensor*> InferShapeContext::MultiOutput<Tensor>(
const std::string& name) const;
template <typename T> template <typename T>
struct EigenDeviceConverter; struct EigenDeviceConverter;
...@@ -380,38 +336,10 @@ class ExecutionContext : public InferShapeContext { ...@@ -380,38 +336,10 @@ class ExecutionContext : public InferShapeContext {
return device_context_; return device_context_;
} }
// redefine Output function,
// use Variable::Get instead of Variable::GetMutable
template <typename T>
T* Output(const std::string& name) const {
auto var = OutputVar(name);
return var == nullptr ? nullptr : const_cast<T*>(&var->Get<T>());
}
// redefine MultiOutput function.
// use Variable::Get instead of Variable::GetMutable
template <typename T>
std::vector<T*> MultiOutput(const std::string& name) const {
auto names = op().Outputs(name);
std::vector<T*> res;
res.reserve(names.size());
std::transform(
names.begin(), names.end(), std::back_inserter(res),
[&](const std::string& sub_name) { return Output<T>(sub_name); });
return res;
}
private: private:
const platform::DeviceContext& device_context_; const platform::DeviceContext& device_context_;
}; };
template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const;
template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
const std::string& name) const;
class OpKernel { class OpKernel {
public: public:
/** /**
......
...@@ -264,37 +264,3 @@ TEST(Operator, Clone) { ...@@ -264,37 +264,3 @@ TEST(Operator, Clone) {
auto b = a.Clone(); auto b = a.Clone();
ASSERT_EQ(a.Type(), b->Type()); ASSERT_EQ(a.Type(), b->Type());
} }
class TestAttrProtoMaker : public paddle::framework::OpProtoAndCheckerMaker {
public:
TestAttrProtoMaker(paddle::framework::OpProto* proto,
paddle::framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddAttr<float>("scale", "scale of test op");
AddAttr<float>("scale", "scale of test op");
}
};
TEST(ProtoMaker, DuplicatedAttr) {
paddle::framework::OpProto op_proto;
paddle::framework::OpAttrChecker op_checker;
auto proto_maker = TestAttrProtoMaker(&op_proto, &op_checker);
ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet);
}
class TestInOutProtoMaker : public paddle::framework::OpProtoAndCheckerMaker {
public:
TestInOutProtoMaker(paddle::framework::OpProto* proto,
paddle::framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("input", "input of test op");
AddInput("input", "input of test op");
}
};
TEST(ProtoMaker, DuplicatedInOut) {
paddle::framework::OpProto op_proto;
paddle::framework::OpAttrChecker op_checker;
auto proto_maker = TestInOutProtoMaker(&op_proto, &op_checker);
ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet);
}
\ No newline at end of file
...@@ -58,6 +58,8 @@ class Scope { ...@@ -58,6 +58,8 @@ class Scope {
/// nullptr if cannot find. /// nullptr if cannot find.
Variable* FindVar(const std::string& name) const; Variable* FindVar(const std::string& name) const;
const Scope& parent() const { return *parent_; }
/// Find the scope or an ancestor scope that contains the given variable. /// Find the scope or an ancestor scope that contains the given variable.
const Scope* FindScope(const Variable* var) const; const Scope* FindScope(const Variable* var) const;
......
...@@ -29,16 +29,19 @@ limitations under the License. */ ...@@ -29,16 +29,19 @@ limitations under the License. */
namespace paddle { namespace paddle {
namespace framework { namespace pybind {
namespace details { namespace details {
template <bool less, size_t i, typename... args> template <bool less, size_t i, typename... args>
struct CastToPyBufferImpl; struct CastToPyBufferImpl;
} }
} // namespace pybind
namespace framework {
class Tensor { class Tensor {
public: public:
template <bool less, size_t i, typename... args> template <bool less, size_t i, typename... args>
friend struct details::CastToPyBufferImpl; friend struct pybind::details::CastToPyBufferImpl;
template <typename T, size_t D, int MajorType, typename IndexType> template <typename T, size_t D, int MajorType, typename IndexType>
friend struct EigenTensor; friend struct EigenTensor;
...@@ -165,12 +168,6 @@ class Tensor { ...@@ -165,12 +168,6 @@ class Tensor {
/*! points to dimensions of memory block. */ /*! points to dimensions of memory block. */
DDim dims_; DDim dims_;
/**
* A cache of the number of elements in a tensor.
* Would be 0 for an uninitialized tensor.
*/
int64_t numel_;
/** /**
* @brief A PlaceHolder may be shared by more than one tensor. * @brief A PlaceHolder may be shared by more than one tensor.
* *
......
...@@ -130,26 +130,29 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { ...@@ -130,26 +130,29 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
PADDLE_ENFORCE_LE(end_idx, dims_[0], "Slice end index is out of bound."); PADDLE_ENFORCE_LE(end_idx, dims_[0], "Slice end index is out of bound.");
PADDLE_ENFORCE_LT(begin_idx, end_idx, PADDLE_ENFORCE_LT(begin_idx, end_idx,
"Begin index must be less than end index."); "Begin index must be less than end index.");
PADDLE_ENFORCE_NE(dims_[0], 1, "Can not slice a tensor with dims_[0] = 1.");
size_t base = numel() / dims_[0]; if (dims_[0] == 1) {
Tensor dst; return *this;
dst.holder_ = holder_; } else {
DDim dst_dims = dims_; size_t base = numel() / dims_[0];
dst_dims[0] = end_idx - begin_idx; Tensor dst;
dst.Resize(dst_dims); dst.holder_ = holder_;
dst.offset_ = offset_ + begin_idx * base * sizeof(T); DDim dst_dims = dims_;
return dst; dst_dims[0] = end_idx - begin_idx;
dst.Resize(dst_dims);
dst.offset_ = offset_ + begin_idx * base * sizeof(T);
return dst;
}
} }
inline Tensor& Tensor::Resize(const DDim& dims) { inline Tensor& Tensor::Resize(const DDim& dims) {
dims_ = dims; dims_ = dims;
numel_ = product(dims_);
return *this; return *this;
} }
inline const DDim& Tensor::dims() const { return dims_; } inline const DDim& Tensor::dims() const { return dims_; }
inline int64_t Tensor::numel() const { return numel_; } inline int64_t Tensor::numel() const { return product(dims_); }
template <typename T> template <typename T>
inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) { inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
......
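The practical effect of dropping the cached numel_ field: numel() now recomputes product(dims_) on every call, so it can never go stale after a Resize. A hedged sketch (`NumelIsDerived` is an illustrative helper, not part of the commit):

```c++
#include <cassert>

#include "paddle/framework/tensor.h"

// Illustrative only: numel() is derived from dims_, so a Resize is all it
// takes to keep it consistent; there is no cache left to invalidate.
void NumelIsDerived() {
  paddle::framework::Tensor t;
  t.Resize(paddle::framework::make_ddim({2, 3, 4}));
  assert(t.numel() == 24);  // == product({2, 3, 4})
}
```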
...@@ -131,8 +131,9 @@ public: ...@@ -131,8 +131,9 @@ public:
fwdPD_.reset(new eltwise_fwd::primitive_desc(fwdDesc, eng)); fwdPD_.reset(new eltwise_fwd::primitive_desc(fwdDesc, eng));
// use inplace for forward but save input value before submit // use inplace for forward but save input value before submit
inVal_ = val_; inVal_ = val_;
if (act.grad) { copyInVal_ = nullptr;
// only copy when need do backward if (act.grad && algo == mkldnn::algorithm::eltwise_tanh) {
// tanh need save src input for backward
inVal_ = MKLDNNMatrix::create(nullptr, val_->getPrimitiveDesc()); inVal_ = MKLDNNMatrix::create(nullptr, val_->getPrimitiveDesc());
copyInVal_ = std::make_shared<mkldnn::reorder>(*val_, *inVal_); copyInVal_ = std::make_shared<mkldnn::reorder>(*val_, *inVal_);
CHECK(copyInVal_) << "should not be empty"; CHECK(copyInVal_) << "should not be empty";
......
...@@ -449,13 +449,14 @@ void MKLDNNConvLayer::resetOutGrad( ...@@ -449,13 +449,14 @@ void MKLDNNConvLayer::resetOutGrad(
cvtOutGrad_ = nullptr; cvtOutGrad_ = nullptr;
if (!outputIsOnlyMKLDNN()) { if (!outputIsOnlyMKLDNN()) {
const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).grad; const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).grad;
outMat->setData(cpuOut->getData());
// same PrimitiveDesc with cpuInVal_ // same PrimitiveDesc with cpuInVal_
CHECK(cpuOutVal_); CHECK(cpuOutVal_);
cpuOutGrad_ = MKLDNNMatrix::create(cpuOut, cpuOutVal_->getPrimitiveDesc()); cpuOutGrad_ = MKLDNNMatrix::create(cpuOut, cpuOutVal_->getPrimitiveDesc());
if (cpuOutGrad_->getPrimitiveDesc() == out->getPrimitiveDesc()) { if (cpuOutGrad_->getPrimitiveDesc() == out->getPrimitiveDesc()) {
outMat->setData(cpuOut->getData());
out = cpuOutGrad_; out = cpuOutGrad_;
} else { } else {
out = MKLDNNMatrix::create(nullptr, wgtPD->diff_dst_primitive_desc());
cvtOutGrad_ = MKLDNNMatrix::createReorder(cpuOutGrad_, out); cvtOutGrad_ = MKLDNNMatrix::createReorder(cpuOutGrad_, out);
CHECK(cvtOutGrad_); CHECK(cvtOutGrad_);
} }
......
...@@ -232,6 +232,7 @@ void MKLDNNFcLayer::resetBwdBuffers(MKLDNNMatrixPtr& in, ...@@ -232,6 +232,7 @@ void MKLDNNFcLayer::resetBwdBuffers(MKLDNNMatrixPtr& in,
void MKLDNNFcLayer::resetOutGrad(MKLDNNMatrixPtr& out) { void MKLDNNFcLayer::resetOutGrad(MKLDNNMatrixPtr& out) {
// TODO(TJ): merge outgrad // TODO(TJ): merge outgrad
int device = outputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE; int device = outputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE;
output_.grad->setData(getOutput(device).grad->getData());
// for MKLDNN device: // for MKLDNN device:
// can not directly cast outputgrad to mkldnnmatrix, // can not directly cast outputgrad to mkldnnmatrix,
// since each layer can not write the inputgrad to mkldnn inputgrad. // since each layer can not write the inputgrad to mkldnn inputgrad.
......
...@@ -141,18 +141,16 @@ public: ...@@ -141,18 +141,16 @@ public:
} }
void backward(const UpdateCallback& callback) override { void backward(const UpdateCallback& callback) override {
/* Do derivation */ { if (needResetBwd_) {
resetBwd(pipelineBwd_, inGrad_, wgtGrad_, biasGrad_, outGrad_);
needResetBwd_ = false;
}
{
REGISTER_TIMER_INFO("BpActTimer", getName().c_str()); REGISTER_TIMER_INFO("BpActTimer", getName().c_str());
backwardActivation(); backwardActivation();
} }
{ {
REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str()); REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str());
if (needResetBwd_) {
resetBwd(pipelineBwd_, inGrad_, wgtGrad_, biasGrad_, outGrad_);
needResetBwd_ = false;
}
stream_->submit(pipelineBwd_); stream_->submit(pipelineBwd_);
} }
......
...@@ -26,17 +26,26 @@ DECLARE_bool(thread_local_rand_use_global_seed); ...@@ -26,17 +26,26 @@ DECLARE_bool(thread_local_rand_use_global_seed);
DECLARE_bool(use_gpu); DECLARE_bool(use_gpu);
DECLARE_bool(use_mkldnn); DECLARE_bool(use_mkldnn);
struct testFCDesc { #define RUN_MKLDNN_TEST(DNN_CONFIG, REF_CONFIG, DESC) \
MKLDNNTester tester; \
for (auto bs : {DESC.bs, 1}) { \
tester.run(DNN_CONFIG, REF_CONFIG, bs, DESC.ih, DESC.iw); \
}
#define RUN_MKLDNN_TEST_LAYER(DNN_CONFIG, REF_TYPE, DESC) \
TestConfig ref = DNN_CONFIG; \
ref.layerConfig.set_type(REF_TYPE); \
RUN_MKLDNN_TEST(DNN_CONFIG, ref, DESC)
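Since the two macros nest, it may help to see what an invocation expands to; RUN_MKLDNN_TEST_LAYER(dnnConfig, "fc", pm) becomes, roughly:

```c++
TestConfig ref = dnnConfig;
ref.layerConfig.set_type("fc");
MKLDNNTester tester;
for (auto bs : {pm.bs, 1}) {
  tester.run(dnnConfig, ref, bs, pm.ih, pm.iw);
}
```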
struct testFcDesc {
int bs; int bs;
int ic; int ic;
int oc; int ih, iw; // oh == ow == 1
int ih, iw; // oh == ow == 1 int oc;
}; };
void testFcLayer(const testFCDesc& pm) { static void getMKLDNNFcConfig(TestConfig& cfg, const testFcDesc& pm) {
const std::string compareTypes[] = {"mkldnn_fc", "fc"}; cfg.layerConfig.set_type("mkldnn_fc");
TestConfig cfg;
cfg.layerConfig.set_type(compareTypes[0]);
cfg.layerConfig.set_size(pm.oc); cfg.layerConfig.set_size(pm.oc);
cfg.inputDefs.push_back( cfg.inputDefs.push_back(
{INPUT_DATA, {INPUT_DATA,
...@@ -44,25 +53,25 @@ void testFcLayer(const testFCDesc& pm) { ...@@ -44,25 +53,25 @@ void testFcLayer(const testFCDesc& pm) {
/* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw), /* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw),
/* size of weight= */ size_t(pm.oc * pm.ic * pm.ih * pm.iw)}); /* size of weight= */ size_t(pm.oc * pm.ic * pm.ih * pm.iw)});
cfg.layerConfig.add_inputs(); cfg.layerConfig.add_inputs();
}
MKLDNNTester tester; void testFcLayer(const testFcDesc& pm) {
TestConfig dnnConfig;
getMKLDNNFcConfig(dnnConfig, pm);
for (auto biasSize : {pm.oc, 0}) { for (auto biasSize : {pm.oc, 0}) {
cfg.biasSize = biasSize; dnnConfig.biasSize = biasSize;
TestConfig ref = cfg; RUN_MKLDNN_TEST_LAYER(dnnConfig, "fc", pm)
ref.layerConfig.set_type(compareTypes[1]);
for (auto bs : {pm.bs, 1}) {
tester.run(cfg, ref, bs, pm.ih, pm.iw);
}
} }
} }
TEST(MKLDNNLayer, FcLayer) { TEST(MKLDNNLayer, FcLayer) {
testFcLayer({/*bs*/ 2, /*ic*/ 2, /*oc*/ 3, /*ih*/ 1, /*iw*/ 1}); /* bs, ic, ih, iw, oc */
testFcLayer({/*bs*/ 3, /*ic*/ 7, /*oc*/ 19, /*ih*/ 1, /*iw*/ 1}); testFcLayer({2, 2, 1, 1, 3});
testFcLayer({/*bs*/ 8, /*ic*/ 16, /*oc*/ 32, /*ih*/ 13, /*iw*/ 13}); testFcLayer({3, 7, 1, 1, 19});
testFcLayer({/*bs*/ 4, /*ic*/ 12, /*oc*/ 18, /*ih*/ 13, /*iw*/ 11}); testFcLayer({8, 16, 13, 13, 32});
testFcLayer({/*bs*/ 2, /*ic*/ 64, /*oc*/ 32, /*ih*/ 16, /*iw*/ 16}); testFcLayer({4, 12, 13, 13, 18});
testFcLayer({/*bs*/ 15, /*ic*/ 3, /*oc*/ 6, /*ih*/ 16, /*iw*/ 16}); testFcLayer({2, 64, 16, 16, 32});
testFcLayer({15, 3, 16, 16, 6});
} }
struct testConvDesc { struct testConvDesc {
...@@ -75,13 +84,10 @@ struct testConvDesc { ...@@ -75,13 +84,10 @@ struct testConvDesc {
int dh, dw; int dh, dw;
}; };
void testConvLayer(const testConvDesc& pm) { static void getMKLDNNConvConfig(TestConfig& cfg, const testConvDesc& pm) {
const std::string compareTypes[] = {"mkldnn_conv", "exconv"}; cfg.layerConfig.set_type("mkldnn_conv");
TestConfig cfg;
cfg.layerConfig.set_type(compareTypes[0]);
cfg.layerConfig.set_num_filters(pm.oc); cfg.layerConfig.set_num_filters(pm.oc);
cfg.layerConfig.set_size(pm.oc * pm.oh * pm.ow); cfg.layerConfig.set_size(pm.oc * pm.oh * pm.ow);
// cfg.layerConfig.set_partial_sum(1); // TODO: check it
cfg.layerConfig.set_shared_biases(true); cfg.layerConfig.set_shared_biases(true);
cfg.inputDefs.push_back( cfg.inputDefs.push_back(
{INPUT_DATA, {INPUT_DATA,
...@@ -115,15 +121,14 @@ void testConvLayer(const testConvDesc& pm) { ...@@ -115,15 +121,14 @@ void testConvLayer(const testConvDesc& pm) {
int oh = outputSize(pm.ih, fh, pm.ph, pm.sh, true); int oh = outputSize(pm.ih, fh, pm.ph, pm.sh, true);
CHECK_EQ(ow, pm.ow) << "output size check failed"; CHECK_EQ(ow, pm.ow) << "output size check failed";
CHECK_EQ(oh, pm.oh) << "output size check failed"; CHECK_EQ(oh, pm.oh) << "output size check failed";
}
MKLDNNTester tester; void testConvLayer(const testConvDesc& pm) {
TestConfig dnnConfig;
getMKLDNNConvConfig(dnnConfig, pm);
for (auto biasSize : {pm.oc, 0}) { for (auto biasSize : {pm.oc, 0}) {
cfg.biasSize = biasSize; dnnConfig.biasSize = biasSize;
TestConfig ref = cfg; RUN_MKLDNN_TEST_LAYER(dnnConfig, "exconv", pm)
ref.layerConfig.set_type(compareTypes[1]);
for (auto bs : {pm.bs, 1}) {
tester.run(cfg, ref, bs, pm.ih, pm.iw);
}
} }
} }
...@@ -143,7 +148,7 @@ TEST(MKLDNNLayer, ConvLayer) { ...@@ -143,7 +148,7 @@ TEST(MKLDNNLayer, ConvLayer) {
} }
struct testPoolDesc { struct testPoolDesc {
int bs, ch; // input channel and output channel are the same int bs, ic; // input channel and output channel are the same
int ih, iw; int ih, iw;
int oh, ow; int oh, ow;
int fh, fw; int fh, fw;
...@@ -151,19 +156,18 @@ struct testPoolDesc { ...@@ -151,19 +156,18 @@ struct testPoolDesc {
int sh, sw; int sh, sw;
}; };
void testPoolLayer(const testPoolDesc& pm) { static void getMKLDNNPoolConfig(TestConfig& cfg, const testPoolDesc& pm) {
const std::string compareTypes[] = {"mkldnn_pool", "pool"}; cfg.layerConfig.set_type("mkldnn_pool");
TestConfig cfg; cfg.layerConfig.set_size(pm.ic * pm.oh * pm.ow);
cfg.layerConfig.set_type(compareTypes[0]);
cfg.layerConfig.set_size(pm.ch * pm.oh * pm.ow);
cfg.inputDefs.push_back( cfg.inputDefs.push_back(
{INPUT_DATA, {INPUT_DATA,
"layer_0", "layer_0",
/* size of input layer= */ size_t(pm.ch * pm.ih * pm.iw), /* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw),
0}); 0});
LayerInputConfig* input = cfg.layerConfig.add_inputs(); LayerInputConfig* input = cfg.layerConfig.add_inputs();
PoolConfig* pool = input->mutable_pool_conf(); PoolConfig* pool = input->mutable_pool_conf();
pool->set_channels(pm.ch); pool->set_pool_type("avg-projection");
pool->set_channels(pm.ic);
pool->set_img_size(pm.iw); pool->set_img_size(pm.iw);
pool->set_img_size_y(pm.ih); pool->set_img_size_y(pm.ih);
pool->set_output_x(pm.ow); pool->set_output_x(pm.ow);
...@@ -179,20 +183,21 @@ void testPoolLayer(const testPoolDesc& pm) { ...@@ -179,20 +183,21 @@ void testPoolLayer(const testPoolDesc& pm) {
int ow = outputSize(pm.iw, pm.fw, pm.pw, pm.sw, false); int ow = outputSize(pm.iw, pm.fw, pm.pw, pm.sw, false);
CHECK_EQ(ow, pm.ow) << "output size check failed"; CHECK_EQ(ow, pm.ow) << "output size check failed";
CHECK_EQ(oh, pm.oh) << "output size check failed"; CHECK_EQ(oh, pm.oh) << "output size check failed";
}
MKLDNNTester tester; void testPoolLayer(const testPoolDesc& pm) {
TestConfig dnnConfig;
getMKLDNNPoolConfig(dnnConfig, pm);
LayerInputConfig* input = dnnConfig.layerConfig.mutable_inputs(0);
PoolConfig* pool = input->mutable_pool_conf();
for (auto type : {"max-projection", "avg-projection"}) { for (auto type : {"max-projection", "avg-projection"}) {
pool->set_pool_type(type); pool->set_pool_type(type);
TestConfig ref = cfg; RUN_MKLDNN_TEST_LAYER(dnnConfig, "pool", pm)
ref.layerConfig.set_type(compareTypes[1]);
for (auto bs : {pm.bs, 1}) {
tester.run(cfg, ref, bs, pm.ih, pm.iw);
}
} }
} }
TEST(MKLDNNLayer, PoolLayer) { TEST(MKLDNNLayer, PoolLayer) {
/* bs, ch, ih, iw, oh, ow, fh, fw, ph, pw, sh, sw*/ /* bs, ch, ih, iw, oh, ow, fh, fw, ph, pw, sh, sw */
testPoolLayer({2, 1, 4, 4, 2, 2, 3, 3, 0, 0, 2, 2}); testPoolLayer({2, 1, 4, 4, 2, 2, 3, 3, 0, 0, 2, 2});
testPoolLayer({10, 8, 16, 16, 8, 8, 2, 2, 0, 0, 2, 2}); testPoolLayer({10, 8, 16, 16, 8, 8, 2, 2, 0, 0, 2, 2});
testPoolLayer({4, 2, 5, 5, 3, 3, 3, 3, 1, 1, 2, 2}); testPoolLayer({4, 2, 5, 5, 3, 3, 3, 3, 1, 1, 2, 2});
...@@ -204,44 +209,36 @@ TEST(MKLDNNLayer, PoolLayer) { ...@@ -204,44 +209,36 @@ TEST(MKLDNNLayer, PoolLayer) {
} }
struct testActDesc { struct testActDesc {
int bs, ch; int bs, ic, ih, iw;
int ih, iw;
}; };
static void getAddtoConfig(TestConfig& cfg, const testActDesc& pm) { static void getAddtoConfig(TestConfig& cfg, const testActDesc& pm) {
cfg.biasSize = 0; cfg.biasSize = 0;
cfg.layerConfig.set_type("addto"); cfg.layerConfig.set_type("addto");
cfg.layerConfig.set_size(pm.ch * pm.ih * pm.iw); size_t layerSize = pm.ic * pm.ih * pm.iw;
cfg.inputDefs.push_back( cfg.layerConfig.set_size(layerSize);
{INPUT_DATA, cfg.inputDefs.push_back({INPUT_DATA, "layer_0", layerSize, 0});
"layer_0",
/* size of input layer= */ size_t(pm.ch * pm.ih * pm.iw),
0});
cfg.layerConfig.add_inputs(); cfg.layerConfig.add_inputs();
} }
void testActivation(std::string& type, const testActDesc& pm) { void testActivation(std::string& actType, const testActDesc& pm) {
const std::string compareTypes[] = {type, type.erase(0, 7)}; // TODO(TJ): mkldnn_softmax is not implemented and Paddle does not have an elu activation
if (actType == "mkldnn_softmax" || actType == "mkldnn_elu") {
return;
}
const std::string compareTypes[] = {actType, actType.erase(0, 7)};
TestConfig cfg; TestConfig cfg;
getAddtoConfig(cfg, pm); getAddtoConfig(cfg, pm);
TestConfig ref = cfg; TestConfig ref = cfg;
cfg.layerConfig.set_active_type(compareTypes[0]); cfg.layerConfig.set_active_type(compareTypes[0]);
ref.layerConfig.set_active_type(compareTypes[1]); ref.layerConfig.set_active_type(compareTypes[1]);
MKLDNNTester tester; RUN_MKLDNN_TEST(cfg, ref, pm)
for (auto bs : {pm.bs, 1}) {
tester.run(cfg, ref, bs, pm.ih, pm.iw);
}
} }
TEST(MKLDNNActivation, Activations) { TEST(MKLDNNActivation, Activations) {
auto types = MKLDNNActivation::getAllRegisteredTypes(); auto types = MKLDNNActivation::getAllRegisteredTypes();
// TODO(TJ): mkldnn_softmax not implemented, paddle do not have elu activation
std::set<string> excluded{"mkldnn_softmax", "mkldnn_elu"};
for (auto type : types) { for (auto type : types) {
if (excluded.count(type)) { /* bs, c, h, w*/
continue;
}
testActivation(type, {16, 64, 32, 32}); testActivation(type, {16, 64, 32, 32});
} }
} }
......
...@@ -55,6 +55,13 @@ function(op_library TARGET) ...@@ -55,6 +55,13 @@ function(op_library TARGET)
set(pybind_flag 1) set(pybind_flag 1)
endif() endif()
# activation_op contains several operators
if ("${TARGET}" STREQUAL "activation_op")
set(pybind_flag 1)
# It's enough to just add one operator to pybind
file(APPEND ${pybind_file} "USE_OP(sigmoid);\n")
endif()
# pybind USE_NO_KERNEL_OP # pybind USE_NO_KERNEL_OP
file(READ ${TARGET}.cc TARGET_CONTENT) file(READ ${TARGET}.cc TARGET_CONTENT)
string(REGEX MATCH "OperatorWithKernel" regex_result "${TARGET_CONTENT}") string(REGEX MATCH "OperatorWithKernel" regex_result "${TARGET_CONTENT}")
...@@ -96,3 +103,4 @@ set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library") ...@@ -96,3 +103,4 @@ set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library")
cc_test(gather_test SRCS gather_test.cc DEPS tensor) cc_test(gather_test SRCS gather_test.cc DEPS tensor)
cc_test(net_op_test SRCS net_op_test.cc DEPS net_op) cc_test(net_op_test SRCS net_op_test.cc DEPS net_op)
cc_test(scatter_test SRCS scatter_test.cc DEPS tensor) cc_test(scatter_test SRCS scatter_test.cc DEPS tensor)
cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor paddle_memory)
...@@ -39,7 +39,8 @@ class AccuracyOp : public framework::OperatorWithKernel { ...@@ -39,7 +39,8 @@ class AccuracyOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_EQ(inference->dims()[0], label->dims()[0], PADDLE_ENFORCE_EQ(inference->dims()[0], label->dims()[0],
"inference size must be the same as label size"); "inference size must be the same as label size");
ctx.Output<framework::LoDTensor>("Accuracy")->Resize({1}); ctx.Output<framework::Tensor>("Accuracy")->Resize({1});
ctx.ShareLoD("Inference", /*->*/ "Accuracy");
} }
}; };
...@@ -54,11 +55,15 @@ class AccuracyOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -54,11 +55,15 @@ class AccuracyOpMaker : public framework::OpProtoAndCheckerMaker {
// TODO(typhoonzero): AddInput("Weight", ... // TODO(typhoonzero): AddInput("Weight", ...
AddOutput("Accuracy", "The accuracy of current batch"); AddOutput("Accuracy", "The accuracy of current batch");
AddComment( AddComment(R"DOC(
R"DOC(Accuracy. It will print accuracy rate for classification. Accuracy. It will print accuracy rate for classification.
The accuracy is: The accuracy is:
.. math:: .. math::
accuracy = \\frac{NumOfCorrectPredicts}{NumOfAllSamples})DOC"); accuracy = \\frac{NumOfCorrectPredicts}{NumOfAllSamples}
Both the input `Inference` and `Label` can carry the LoD (Level of Details)
information, or not. But the output only shares the LoD with input `Inference`.
)DOC");
} }
}; };
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/activation_op.h"
namespace paddle {
namespace operators {
class ActivationOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
ctx.Output<framework::Tensor>("Y")->Resize(
ctx.Input<framework::Tensor>("X")->dims());
ctx.ShareLoD("X", /*->*/ "Y");
}
};
class ActivationOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
ctx.Output<framework::Tensor>(framework::GradVarName("X"))
->Resize(ctx.Input<framework::Tensor>("Y")->dims());
}
};
class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
public:
SigmoidOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of Sigmoid operator");
AddOutput("Y", "Output of Sigmoid operator");
AddComment("Sigmoid activation operator, sigmoid = 1 / (1 + exp(-x))");
}
};
class ExpOpMaker : public framework::OpProtoAndCheckerMaker {
public:
ExpOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of Exp operator");
AddOutput("Y", "Output of Exp operator");
AddComment("Exp activation operator, exp(x) = e^x");
}
};
class ReluOpMaker : public framework::OpProtoAndCheckerMaker {
public:
ReluOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of Relu operator");
AddOutput("Y", "Output of Relu operator");
AddComment("Relu activation operator, relu(x) = max(x, 0)");
}
};
class TanhOpMaker : public framework::OpProtoAndCheckerMaker {
public:
TanhOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of Tanh operator");
AddOutput("Y", "Output of Tanh operator");
AddComment(
"Tanh activation operator, tanh = (exp(x) - exp(-x)) / (exp(x) + "
"exp(-x))");
}
};
class SqrtOpMaker : public framework::OpProtoAndCheckerMaker {
public:
SqrtOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of Sqrt operator");
AddOutput("Y", "Output of Sqrt operator");
AddComment("Sqrt activation operator, sqrt(x) = x^(1/2)");
}
};
class AbsOpMaker : public framework::OpProtoAndCheckerMaker {
public:
AbsOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of Abs operator");
AddOutput("Y", "Output of Abs operator");
AddComment("Abs activation operator, abs(x) = |x|");
}
};
class ReciprocalOpMaker : public framework::OpProtoAndCheckerMaker {
public:
ReciprocalOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of Reciprocal operator");
AddOutput("Y", "Output of Reciprocal operator");
AddComment("Reciprocal activation operator, reciprocal(x) = 1 / x");
}
};
class LogOpMaker : public framework::OpProtoAndCheckerMaker {
public:
LogOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of Log operator");
AddOutput("Y", "Output of Log operator");
AddComment("Log activation operator, log(x) = natural logarithm of x");
}
};
class SquareOpMaker : public framework::OpProtoAndCheckerMaker {
public:
SquareOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of Square operator");
AddOutput("Y", "Output of Square operator");
AddComment("Square activation operator, square(x) = x^2");
}
};
template <typename AttrType>
class BReluOpMaker : public framework::OpProtoAndCheckerMaker {
public:
BReluOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of BRelu operator");
AddOutput("Y", "Output of BRelu operator");
AddComment("BRelu activation operator, brelu = max(min(x, t_min), t_max)");
AddAttr<AttrType>("t_min", "The min marginal value of BRelu")
.SetDefault(static_cast<AttrType>(0));
AddAttr<AttrType>("t_max", "The max marginal value of BRelu")
.SetDefault(static_cast<AttrType>(24));
}
};
template <typename AttrType>
class SoftReluOpMaker : public framework::OpProtoAndCheckerMaker {
public:
SoftReluOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of SoftRelu operator");
AddOutput("Y", "Output of SoftRelu operator");
AddComment(
"SoftRelu activation operator, soft_relu = log(1 + exp(max(min(x, "
"threshold), threshold)))");
AddAttr<AttrType>("threshold", "The threshold value of SoftRelu")
.SetDefault(static_cast<AttrType>(40));
}
};
template <typename AttrType>
class PowOpMaker : public framework::OpProtoAndCheckerMaker {
public:
PowOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of Pow operator");
AddOutput("Y", "Output of Pow operator");
AddComment("Pow activation operator, pow(x, factor) = x^factor");
AddAttr<AttrType>("factor", "The exponential factor of Pow")
.SetDefault(static_cast<AttrType>(1));
}
};
template <typename AttrType>
class STanhOpMaker : public framework::OpProtoAndCheckerMaker {
public:
STanhOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of STanh operator");
AddOutput("Y", "Output of STanh operator");
AddComment("STanh activation operator, stanh = b * tanh(a * x)");
AddAttr<AttrType>("scale_a", "The scale parameter of a for the input")
.SetDefault(static_cast<AttrType>(2.0 / 3.0));  // 2 / 3 would be integer division (== 0)
AddAttr<AttrType>("scale_b", "The scale parameter of b for the input")
.SetDefault(static_cast<AttrType>(1.7159));
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(sigmoid, ops::ActivationOp, ops::SigmoidOpMaker, sigmoid_grad,
ops::ActivationOpGrad);
REGISTER_OP_CPU_KERNEL(sigmoid,
ops::ActivationKernel<paddle::platform::CPUPlace, float,
ops::SigmoidFunctor<float>>);
REGISTER_OP_CPU_KERNEL(
sigmoid_grad, ops::ActivationGradKernel<paddle::platform::CPUPlace, float,
ops::SigmoidGradFunctor<float>>);
REGISTER_OP(exp, ops::ActivationOp, ops::ExpOpMaker, exp_grad,
ops::ActivationOpGrad);
REGISTER_OP_CPU_KERNEL(
exp,
ops::ActivationKernel<paddle::platform::CPUPlace, float, ops::ExpFunctor>);
REGISTER_OP_CPU_KERNEL(exp_grad,
ops::ActivationGradKernel<paddle::platform::CPUPlace,
float, ops::ExpGradFunctor>);
REGISTER_OP(relu, ops::ActivationOp, ops::ReluOpMaker, relu_grad,
ops::ActivationOpGrad);
REGISTER_OP_CPU_KERNEL(relu,
ops::ActivationKernel<paddle::platform::CPUPlace, float,
ops::ReluFunctor<float>>);
REGISTER_OP_CPU_KERNEL(
relu_grad, ops::ActivationGradKernel<paddle::platform::CPUPlace, float,
ops::ReluGradFunctor<float>>);
REGISTER_OP(tanh, ops::ActivationOp, ops::TanhOpMaker, tanh_grad,
ops::ActivationOpGrad);
REGISTER_OP_CPU_KERNEL(
tanh,
ops::ActivationKernel<paddle::platform::CPUPlace, float, ops::TanhFunctor>);
REGISTER_OP_CPU_KERNEL(
tanh_grad, ops::ActivationGradKernel<paddle::platform::CPUPlace, float,
ops::TanhGradFunctor<float>>);
REGISTER_OP(sqrt, ops::ActivationOp, ops::SqrtOpMaker, sqrt_grad,
ops::ActivationOpGrad);
REGISTER_OP_CPU_KERNEL(
sqrt,
ops::ActivationKernel<paddle::platform::CPUPlace, float, ops::SqrtFunctor>);
REGISTER_OP_CPU_KERNEL(
sqrt_grad, ops::ActivationGradKernel<paddle::platform::CPUPlace, float,
ops::SqrtGradFunctor<float>>);
REGISTER_OP(abs, ops::ActivationOp, ops::AbsOpMaker, abs_grad,
ops::ActivationOpGrad);
REGISTER_OP_CPU_KERNEL(
abs,
ops::ActivationKernel<paddle::platform::CPUPlace, float, ops::AbsFunctor>);
REGISTER_OP_CPU_KERNEL(abs_grad,
ops::ActivationGradKernel<paddle::platform::CPUPlace,
float, ops::AbsGradFunctor>);
REGISTER_OP(reciprocal, ops::ActivationOp, ops::ReciprocalOpMaker,
reciprocal_grad, ops::ActivationOpGrad);
REGISTER_OP_CPU_KERNEL(reciprocal,
ops::ActivationKernel<paddle::platform::CPUPlace, float,
ops::ReciprocalFunctor<float>>);
REGISTER_OP_CPU_KERNEL(
reciprocal_grad,
ops::ActivationGradKernel<paddle::platform::CPUPlace, float,
ops::ReciprocalGradFunctor<float>>);
REGISTER_OP(log, ops::ActivationOp, ops::LogOpMaker, log_grad,
ops::ActivationOpGrad);
REGISTER_OP_CPU_KERNEL(
log,
ops::ActivationKernel<paddle::platform::CPUPlace, float, ops::LogFunctor>);
REGISTER_OP_CPU_KERNEL(
log_grad, ops::ActivationGradKernel<paddle::platform::CPUPlace, float,
ops::LogGradFunctor<float>>);
REGISTER_OP(square, ops::ActivationOp, ops::SquareOpMaker, square_grad,
ops::ActivationOpGrad);
REGISTER_OP_CPU_KERNEL(square,
ops::ActivationKernel<paddle::platform::CPUPlace, float,
ops::SquareFunctor>);
REGISTER_OP_CPU_KERNEL(
square_grad, ops::ActivationGradKernel<paddle::platform::CPUPlace, float,
ops::SquareGradFunctor<float>>);
REGISTER_OP(brelu, ops::ActivationOp, ops::BReluOpMaker<float>, brelu_grad,
ops::ActivationOpGrad);
REGISTER_OP_CPU_KERNEL(brelu,
ops::BReluKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(brelu_grad,
ops::BReluGradKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP(soft_relu, ops::ActivationOp, ops::SoftReluOpMaker<float>,
soft_relu_grad, ops::ActivationOpGrad);
REGISTER_OP_CPU_KERNEL(soft_relu,
ops::SoftReluKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
soft_relu_grad, ops::SoftReluGradKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP(pow, ops::ActivationOp, ops::PowOpMaker<float>, pow_grad,
ops::ActivationOpGrad);
REGISTER_OP_CPU_KERNEL(pow, ops::PowKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(pow_grad,
ops::PowGradKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP(stanh, ops::ActivationOp, ops::STanhOpMaker<float>, stanh_grad,
ops::ActivationOpGrad);
REGISTER_OP_CPU_KERNEL(stanh,
ops::STanhKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(stanh_grad,
ops::STanhGradKernel<paddle::platform::CPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/operators/activation_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(sigmoid,
ops::ActivationKernel<paddle::platform::GPUPlace, float,
ops::SigmoidFunctor<float>>);
REGISTER_OP_GPU_KERNEL(
sigmoid_grad, ops::ActivationGradKernel<paddle::platform::GPUPlace, float,
ops::SigmoidGradFunctor<float>>);
REGISTER_OP_GPU_KERNEL(
exp,
ops::ActivationKernel<paddle::platform::GPUPlace, float, ops::ExpFunctor>);
REGISTER_OP_GPU_KERNEL(exp_grad,
ops::ActivationGradKernel<paddle::platform::GPUPlace,
float, ops::ExpGradFunctor>);
REGISTER_OP_GPU_KERNEL(relu,
ops::ActivationKernel<paddle::platform::GPUPlace, float,
ops::ReluFunctor<float>>);
REGISTER_OP_GPU_KERNEL(
relu_grad, ops::ActivationGradKernel<paddle::platform::GPUPlace, float,
ops::ReluGradFunctor<float>>);
REGISTER_OP_GPU_KERNEL(
tanh,
ops::ActivationKernel<paddle::platform::GPUPlace, float, ops::TanhFunctor>);
REGISTER_OP_GPU_KERNEL(
tanh_grad, ops::ActivationGradKernel<paddle::platform::GPUPlace, float,
ops::TanhGradFunctor<float>>);
REGISTER_OP_GPU_KERNEL(
sqrt,
ops::ActivationKernel<paddle::platform::GPUPlace, float, ops::SqrtFunctor>);
REGISTER_OP_GPU_KERNEL(
sqrt_grad, ops::ActivationGradKernel<paddle::platform::GPUPlace, float,
ops::SqrtGradFunctor<float>>);
REGISTER_OP_GPU_KERNEL(
abs,
ops::ActivationKernel<paddle::platform::GPUPlace, float, ops::AbsFunctor>);
REGISTER_OP_GPU_KERNEL(abs_grad,
ops::ActivationGradKernel<paddle::platform::GPUPlace,
float, ops::AbsGradFunctor>);
REGISTER_OP_GPU_KERNEL(reciprocal,
ops::ActivationKernel<paddle::platform::GPUPlace, float,
ops::ReciprocalFunctor<float>>);
REGISTER_OP_GPU_KERNEL(
reciprocal_grad,
ops::ActivationGradKernel<paddle::platform::GPUPlace, float,
ops::ReciprocalGradFunctor<float>>);
REGISTER_OP_GPU_KERNEL(
log,
ops::ActivationKernel<paddle::platform::GPUPlace, float, ops::LogFunctor>);
REGISTER_OP_GPU_KERNEL(
log_grad, ops::ActivationGradKernel<paddle::platform::GPUPlace, float,
ops::LogGradFunctor<float>>);
REGISTER_OP_GPU_KERNEL(square,
ops::ActivationKernel<paddle::platform::GPUPlace, float,
ops::SquareFunctor>);
REGISTER_OP_GPU_KERNEL(
square_grad, ops::ActivationGradKernel<paddle::platform::GPUPlace, float,
ops::SquareGradFunctor<float>>);
REGISTER_OP_GPU_KERNEL(brelu,
ops::BReluKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(brelu_grad,
ops::BReluGradKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(soft_relu,
ops::SoftReluKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
soft_relu_grad, ops::SoftReluGradKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(pow, ops::PowKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(pow_grad,
ops::PowGradKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(stanh,
ops::STanhKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(stanh_grad,
ops::STanhGradKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
template <typename Place, typename T, typename Functor>
class ActivationKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* X = context.Input<framework::Tensor>("X");
auto* Y = context.Output<framework::Tensor>("Y");
Y->mutable_data<T>(context.GetPlace());
auto x = framework::EigenVector<T>::Flatten(*X);
auto y = framework::EigenVector<T>::Flatten(*Y);
auto place = context.GetEigenDevice<Place>();
Functor functor;
functor(place, x, y);
}
};
template <typename Place, typename T, typename Functor>
class ActivationGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* X = context.Input<framework::Tensor>("X");
auto* Y = context.Input<framework::Tensor>("Y");
auto* dY = context.Input<framework::Tensor>(framework::GradVarName("Y"));
auto* dX = context.Output<framework::Tensor>(framework::GradVarName("X"));
dX->mutable_data<T>(context.GetPlace());
auto dy = framework::EigenVector<T>::Flatten(*dY);
auto x = framework::EigenVector<T>::Flatten(*X);
auto y = framework::EigenVector<T>::Flatten(*Y);
auto dx = framework::EigenVector<T>::Flatten(*dX);
auto place = context.GetEigenDevice<Place>();
Functor functor;
functor(place, x, y, dy, dx);
}
};
// sigmoid(x) = 1 / (1 + exp(-x))
template <typename T>
struct SigmoidFunctor {
template <typename Device, typename X, typename Y>
void operator()(Device d, X x, Y y) {
y.device(d) = static_cast<T>(1) / (static_cast<T>(1) + (-x).exp());
}
};
template <typename T>
struct SigmoidGradFunctor {
template <typename Device, typename X, typename Y, typename dY, typename dX>
void operator()(Device d, X x, Y y, dY dy, dX dx) {
dx.device(d) = dy * y * (static_cast<T>(1) - y);
}
};
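// Derivation: with y = sigmoid(x), dy/dx = y * (1 - y), so the backward pass
// computes dx = dy * y * (1 - y) from the saved output y alone.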
// exp(x) = e^x
struct ExpFunctor {
template <typename Device, typename X, typename Y>
void operator()(Device d, X x, Y y) {
y.device(d) = x.exp();
}
};
struct ExpGradFunctor {
template <typename Device, typename X, typename Y, typename dY, typename dX>
void operator()(Device d, X x, Y y, dY dy, dX dx) {
dx.device(d) = dy * y;
}
};
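// Derivation: with y = exp(x), dy/dx = exp(x) = y, so dx = dy * y and the
// backward pass only needs the saved output y.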
// relu(x) = max(x, 0)
template <typename T>
struct ReluFunctor {
template <typename Device, typename X, typename Y>
void operator()(Device d, X x, Y y) {
y.device(d) = x.cwiseMax(static_cast<T>(0));
}
};
template <typename T>
struct ReluGradFunctor {
template <typename Device, typename X, typename Y, typename dY, typename dX>
void operator()(Device d, X x, Y y, dY dy, dX dx) {
dx.device(d) = dy * (x > static_cast<T>(0)).template cast<T>();
}
};
// tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))
struct TanhFunctor {
template <typename Device, typename X, typename Y>
void operator()(Device d, X x, Y y) {
y.device(d) = x.tanh();
}
};
template <typename T>
struct TanhGradFunctor {
template <typename Device, typename X, typename Y, typename dY, typename dX>
void operator()(Device d, X x, Y y, dY dy, dX dx) {
dx.device(d) = dy * (static_cast<T>(1) - y * y);
}
};
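// Derivation: with y = tanh(x), dy/dx = 1 - tanh(x)^2 = 1 - y^2, hence
// dx = dy * (1 - y * y).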
// sqrt(x) = x^(1/2)
struct SqrtFunctor {
template <typename Device, typename X, typename Y>
void operator()(Device d, X x, Y y) {
y.device(d) = x.sqrt();
}
};
template <typename T>
struct SqrtGradFunctor {
template <typename Device, typename X, typename Y, typename dY, typename dX>
void operator()(Device d, X x, Y y, dY dy, dX dx) {
const Y y_conj = Eigen::numext::conj(y);
dx.device(d) = static_cast<T>(0.5) * dy / y_conj;
}
};
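// Derivation: with y = sqrt(x), dy/dx = 1 / (2 * sqrt(x)) = 1 / (2 * y), so
// dx = 0.5 * dy / y. Eigen::numext::conj(y) is the identity for real scalar
// types and only matters if this functor were instantiated for complex types.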
// abs(x) = |x|
struct AbsFunctor {
template <typename Device, typename X, typename Y>
void operator()(Device d, X x, Y y) {
y.device(d) = x.abs();
}
};
struct AbsGradFunctor {
template <typename Device, typename X, typename Y, typename dY, typename dX>
void operator()(Device d, X x, Y y, dY dy, dX dx) {
dx.device(d) = dy * x.sign();
}
};
// reciprocal(x) = 1 / x
template <typename T>
struct ReciprocalFunctor {
template <typename Device, typename X, typename Y>
void operator()(Device d, X x, Y y) {
y.device(d) = static_cast<T>(1) / x;
}
};
template <typename T>
struct ReciprocalGradFunctor {
template <typename Device, typename X, typename Y, typename dY, typename dX>
void operator()(Device d, X x, Y y, dY dy, dX dx) {
dx.device(d) = dy * static_cast<T>(-1) * y * y;
}
};
// log(x) = natural logarithm of x
struct LogFunctor {
template <typename Device, typename X, typename Y>
void operator()(Device d, X x, Y y) {
y.device(d) = x.log();
}
};
template <typename T>
struct LogGradFunctor {
template <typename Device, typename X, typename Y, typename dY, typename dX>
void operator()(Device d, X x, Y y, dY dy, dX dx) {
dx.device(d) = dy * (static_cast<T>(1) / x);
}
};
// square(x) = x^2
struct SquareFunctor {
template <typename Device, typename X, typename Y>
void operator()(Device d, X x, Y y) {
y.device(d) = x.square();
}
};
template <typename T>
struct SquareGradFunctor {
template <typename Device, typename X, typename Y, typename dY, typename dX>
void operator()(Device d, X x, Y y, dY dy, dX dx) {
dx.device(d) = dy * static_cast<T>(2) * x;
}
};
template <typename Place, typename T, typename AttrType = T>
class BReluKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* X = context.Input<framework::Tensor>("X");
auto* Y = context.Output<framework::Tensor>("Y");
auto t_min = static_cast<T>(context.Attr<AttrType>("t_min"));
auto t_max = static_cast<T>(context.Attr<AttrType>("t_max"));
Y->mutable_data<T>(context.GetPlace());
auto x = framework::EigenVector<T>::Flatten(*X);
auto y = framework::EigenVector<T>::Flatten(*Y);
auto place = context.GetEigenDevice<Place>();
y.device(place) = x.cwiseMax(t_min).cwiseMin(t_max);
}
};
template <typename Place, typename T, typename AttrType = T>
class BReluGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* X = context.Input<framework::Tensor>("X");
auto* dY = context.Input<framework::Tensor>(framework::GradVarName("Y"));
auto* dX = context.Output<framework::Tensor>(framework::GradVarName("X"));
auto t_min = static_cast<T>(context.Attr<AttrType>("t_min"));
auto t_max = static_cast<T>(context.Attr<AttrType>("t_max"));
dX->mutable_data<T>(context.GetPlace());
auto dy = framework::EigenVector<T>::Flatten(*dY);
auto x = framework::EigenVector<T>::Flatten(*X);
auto dx = framework::EigenVector<T>::Flatten(*dX);
auto place = context.GetEigenDevice<Place>();
dx.device(place) = dy * ((x > t_min) * (x < t_max)).template cast<T>();
}
};
template <typename Place, typename T, typename AttrType = T>
class SoftReluKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* X = context.Input<framework::Tensor>("X");
auto* Y = context.Output<framework::Tensor>("Y");
auto threshold = static_cast<T>(context.Attr<AttrType>("threshold"));
Y->mutable_data<T>(context.GetPlace());
auto x = framework::EigenVector<T>::Flatten(*X);
auto y = framework::EigenVector<T>::Flatten(*Y);
auto place = context.GetEigenDevice<Place>();
auto temp = x.cwiseMax(-threshold).cwiseMin(threshold).eval();
y.device(place) = (static_cast<T>(1) + temp.exp()).log();
}
};
template <typename Place, typename T, typename AttrType = T>
class SoftReluGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* X = context.Input<framework::Tensor>("X");
auto* Y = context.Input<framework::Tensor>("Y");
auto* dY = context.Input<framework::Tensor>(framework::GradVarName("Y"));
auto* dX = context.Output<framework::Tensor>(framework::GradVarName("X"));
auto threshold = static_cast<T>(context.Attr<AttrType>("threshold"));
dX->mutable_data<T>(context.GetPlace());
auto x = framework::EigenVector<T>::Flatten(*X);
auto y = framework::EigenVector<T>::Flatten(*Y);
auto dy = framework::EigenVector<T>::Flatten(*dY);
auto dx = framework::EigenVector<T>::Flatten(*dX);
auto place = context.GetEigenDevice<Place>();
auto temp = ((x > -threshold) * (x < threshold)).template cast<T>().eval();
dx.device(place) = dy * (static_cast<T>(1) - (-y).exp()) * temp;
}
};
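// Derivation: the forward pass computes y = log(1 + exp(t)) with
// t = clip(x, -threshold, threshold), so dy/dt = exp(t) / (1 + exp(t))
// = 1 - exp(-y); `temp` is the 0/1 mask contributed by the clip, giving
// dx = dy * (1 - exp(-y)) * temp.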
template <typename Place, typename T, typename AttrType = T>
class PowKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* X = context.Input<framework::Tensor>("X");
auto* Y = context.Output<framework::Tensor>("Y");
auto factor = static_cast<T>(context.Attr<AttrType>("factor"));
Y->mutable_data<T>(context.GetPlace());
auto x = framework::EigenVector<T>::Flatten(*X);
auto y = framework::EigenVector<T>::Flatten(*Y);
auto place = context.GetEigenDevice<Place>();
y.device(place) = x.pow(factor);
}
};
template <typename Place, typename T, typename AttrType = T>
class PowGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* X = context.Input<framework::Tensor>("X");
auto* dY = context.Input<framework::Tensor>(framework::GradVarName("Y"));
auto* dX = context.Output<framework::Tensor>(framework::GradVarName("X"));
auto factor = static_cast<T>(context.Attr<AttrType>("factor"));
dX->mutable_data<T>(context.GetPlace());
auto dy = framework::EigenVector<T>::Flatten(*dY);
auto x = framework::EigenVector<T>::Flatten(*X);
auto dx = framework::EigenVector<T>::Flatten(*dX);
auto place = context.GetEigenDevice<Place>();
dx.device(place) = dy * factor * x.pow(factor - static_cast<T>(1));
}
};
template <typename Place, typename T, typename AttrType = T>
class STanhKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* X = context.Input<framework::Tensor>("X");
auto* Y = context.Output<framework::Tensor>("Y");
auto scale_a = static_cast<T>(context.Attr<AttrType>("scale_a"));
auto scale_b = static_cast<T>(context.Attr<AttrType>("scale_b"));
Y->mutable_data<T>(context.GetPlace());
auto x = framework::EigenVector<T>::Flatten(*X);
auto y = framework::EigenVector<T>::Flatten(*Y);
auto place = context.GetEigenDevice<Place>();
y.device(place) = scale_b * (scale_a * x).tanh();
}
};
template <typename Place, typename T, typename AttrType = T>
class STanhGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* X = context.Input<framework::Tensor>("X");
auto* dY = context.Input<framework::Tensor>(framework::GradVarName("Y"));
auto* dX = context.Output<framework::Tensor>(framework::GradVarName("X"));
auto scale_a = static_cast<T>(context.Attr<AttrType>("scale_a"));
auto scale_b = static_cast<T>(context.Attr<AttrType>("scale_b"));
dX->mutable_data<T>(context.GetPlace());
auto dy = framework::EigenVector<T>::Flatten(*dY);
auto x = framework::EigenVector<T>::Flatten(*X);
auto dx = framework::EigenVector<T>::Flatten(*dX);
auto place = context.GetEigenDevice<Place>();
auto temp = (scale_a * x).tanh() * (scale_a * x).tanh();
dx.device(place) = dy * scale_a * scale_b * (static_cast<T>(1) - temp);
}
};
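// Derivation: with y = scale_b * tanh(scale_a * x),
// dy/dx = scale_a * scale_b * (1 - tanh(scale_a * x)^2); `temp` above holds
// tanh(scale_a * x)^2.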
} // namespace operators
} // namespace paddle
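// Usage sketch (illustrative only, not part of this change): adding another
// element-wise activation to this header needs just a forward/backward
// functor pair plus the usual REGISTER_OP* calls. A hypothetical softsign op
// could look like:
//
//   // softsign(x) = x / (1 + |x|)
//   template <typename T>
//   struct SoftsignFunctor {
//     template <typename Device, typename X, typename Y>
//     void operator()(Device d, X x, Y y) {
//       y.device(d) = x / (static_cast<T>(1) + x.abs());
//     }
//   };
//
//   template <typename T>
//   struct SoftsignGradFunctor {
//     template <typename Device, typename X, typename Y, typename dY,
//               typename dX>
//     void operator()(Device d, X x, Y y, dY dy, dX dx) {
//       // d/dx [x / (1 + |x|)] = 1 / (1 + |x|)^2
//       dx.device(d) = dy / (static_cast<T>(1) + x.abs()).square();
//     }
//   };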
...@@ -33,7 +33,7 @@ class AddOp : public framework::OperatorWithKernel { ...@@ -33,7 +33,7 @@ class AddOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(), PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(),
ctx.Input<Tensor>("Y")->dims(), ctx.Input<Tensor>("Y")->dims(),
"Two input of Add Op's dimension must be same."); "Two input of Add Op's dimension must be same.");
ctx.Output<framework::LoDTensor>("Out")->Resize( ctx.Output<framework::Tensor>("Out")->Resize(
ctx.Input<Tensor>("X")->dims()); ctx.Input<Tensor>("X")->dims());
} }
}; };
......
...@@ -12,46 +12,64 @@ ...@@ -12,46 +12,64 @@
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/operators/sigmoid_op.h" #include "paddle/operators/clip_op.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
class SigmoidOp : public framework::OperatorWithKernel { class ClipOp : public framework::OperatorWithKernel {
public: public:
using framework::OperatorWithKernel::OperatorWithKernel; using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of SigmoidOp should not be null."); "Input(X) of ClipOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Y"), PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Y) of SigmoidOp should not be null."); "Output(Out) of ClipOp should not be null.");
auto x_dims = ctx.Input<Tensor>("X")->dims();
ctx.Output<framework::LoDTensor>("Y")->Resize( auto max = Attr<float>("max");
ctx.Input<Tensor>("X")->dims()); auto min = Attr<float>("min");
PADDLE_ENFORCE_LT(min, max, "max should be greater than min.");
ctx.Output<Tensor>("Out")->Resize(x_dims);
ctx.ShareLoD("X", /*->*/ "Out");
} }
}; };
class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker { template <typename AttrType>
class ClipOpMaker : public framework::OpProtoAndCheckerMaker {
public: public:
SigmoidOpMaker(framework::OpProto *proto, ClipOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "sigmoid input"); AddInput("X",
AddOutput("Y", "sigmoid output"); "(Tensor)The input of clip op."
AddComment("Sigmoid function"); "The input should be a k-D tensor(k > 0 and k < 7)");
AddOutput("Out", "(Tensor)The output of clip op with shape as input(X)");
AddAttr<AttrType>(
"min", "(float)Minimum value, under which element is replaced by min.");
AddAttr<AttrType>(
"max", "(float)Maximum value, above which element is replaced by max");
AddComment(R"DOC(
Clip operator limits the given input within an interval. The interval is
specified with arguments 'min' and 'max'.
)DOC");
} }
}; };
class SigmoidOpGrad : public framework::OperatorWithKernel { class ClipOpGrad : public framework::OperatorWithKernel {
public: public:
using framework::OperatorWithKernel::OperatorWithKernel; using framework::OperatorWithKernel::OperatorWithKernel;
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
ctx.Output<framework::LoDTensor>(framework::GradVarName("X")) PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null");
->Resize(ctx.Input<Tensor>("Y")->dims()); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
"Input(Out@GRAD) should not be null");
auto x_dims = ctx.Input<Tensor>("X")->dims();
auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
if (x_grad != nullptr) {
x_grad->Resize(x_dims);
}
} }
}; };
...@@ -59,9 +77,9 @@ class SigmoidOpGrad : public framework::OperatorWithKernel { ...@@ -59,9 +77,9 @@ class SigmoidOpGrad : public framework::OperatorWithKernel {
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker, sigmoid_grad, REGISTER_OP(clip, ops::ClipOp, ops::ClipOpMaker<float>, clip_grad,
ops::SigmoidOpGrad); ops::ClipOpGrad);
REGISTER_OP_CPU_KERNEL(sigmoid, REGISTER_OP_CPU_KERNEL(clip,
ops::SigmoidKernel<paddle::platform::CPUPlace, float>); ops::ClipKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL( REGISTER_OP_CPU_KERNEL(clip_grad,
sigmoid_grad, ops::SigmoidGradKernel<paddle::platform::CPUPlace, float>); ops::ClipGradKernel<paddle::platform::CPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/clip_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(clip,
ops::ClipKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(clip_grad,
ops::ClipGradKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/platform/transform.h"
namespace paddle {
namespace operators {
using framework::Tensor;
using platform::Transform;
template <typename T>
class ClipFunctor {
public:
explicit ClipFunctor(const T min, const T max) : min_(min), max_(max) {}
HOSTDEVICE T operator()(const T& x) const {
if (x < min_)
return min_;
else if (x > max_)
return max_;
else
return x;
}
private:
T min_;
T max_;
};
template <typename T>
class ClipGradFunctor {
public:
explicit ClipGradFunctor(const T min, const T max) : min_(min), max_(max) {}
HOSTDEVICE T operator()(const T& x, const T& y) const {
return (y > min_ && y < max_) ? x : 0;
}
private:
T min_;
T max_;
};
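// Note: the gradient flows only through elements that were strictly inside
// (min, max) in the forward pass; clamped elements get zero gradient, i.e.
// this is the subgradient of the clip function.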
template <typename Place, typename T>
class ClipKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto max = context.Attr<T>("max");
auto min = context.Attr<T>("min");
auto* x = context.Input<Tensor>("X");
auto* out = context.Output<Tensor>("Out");
T* out_data = out->mutable_data<T>(context.GetPlace());
const T* x_data = x->data<T>();
int64_t numel = x->numel();
Transform<Place> trans;
trans(context.device_context(), x_data, x_data + numel, out_data,
ClipFunctor<T>(min, max));
}
};
template <typename Place, typename T>
class ClipGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto max = context.Attr<T>("max");
auto min = context.Attr<T>("min");
auto* d_out = context.Input<Tensor>(framework::GradVarName("Out"));
auto* d_x = context.Output<Tensor>(framework::GradVarName("X"));
if (d_x != nullptr) {
auto* x = context.Input<Tensor>("X");
int64_t numel = d_out->numel();
auto* d_x_data = d_x->mutable_data<T>(context.GetPlace());
const T* d_out_data = d_out->data<T>();
const T* x_data = x->data<T>();
Transform<Place> trans;
trans(context.device_context(), d_out_data, d_out_data + numel, x_data,
d_x_data, ClipGradFunctor<T>(min, max));
}
}
};
} // namespace operators
} // namespace paddle
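// Usage note: platform::Transform dispatches on the Place type: a
// std::transform-style host loop for CPUPlace, and a thrust::transform on
// the CUDA stream for GPUPlace, so the same ClipFunctor/ClipGradFunctor
// serve both the CPU and GPU kernels registered for this op.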
...@@ -29,7 +29,7 @@ class ConcatOp : public framework::OperatorWithKernel { ...@@ -29,7 +29,7 @@ class ConcatOp : public framework::OperatorWithKernel {
"Output(Out) of ConcatOp should not be null."); "Output(Out) of ConcatOp should not be null.");
auto ins = ctx.MultiInput<framework::Tensor>("X"); auto ins = ctx.MultiInput<framework::Tensor>("X");
auto *out = ctx.Output<framework::LoDTensor>("Out"); auto *out = ctx.Output<framework::Tensor>("Out");
size_t axis = static_cast<size_t>(ctx.Attr<int>("axis")); size_t axis = static_cast<size_t>(ctx.Attr<int>("axis"));
size_t n = ins.size(); size_t n = ins.size();
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/gemm_conv2d_op.h"
namespace paddle {
namespace operators {
int outputSize(int input_size, int filter_size, int padding, int stride) {
int output_size = (input_size - filter_size + 2 * padding) / stride + 1;
return output_size;
}
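// Example: input_size = 32, filter_size = 3, padding = 1, stride = 1 gives
// output_size = (32 - 3 + 2 * 1) / 1 + 1 = 32, i.e. a "same"-size output.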
class Conv2DOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Input"),
"Input(Input) of Conv2DOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Filter"),
"Input(Filter) of Conv2DOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Output"),
"Output(Output) of Conv2DOp should not be null.");
auto in = ctx.Input<Tensor>("Input");
auto filter = ctx.Input<Tensor>("Filter");
auto out = ctx.Output<framework::Tensor>("Output");
std::vector<int> strides = Attr<std::vector<int>>("strides");
std::vector<int> paddings = Attr<std::vector<int>>("paddings");
int groups = Attr<int>("groups");
int input_channels = in->dims()[1];
int output_channels = filter->dims()[0];
PADDLE_ENFORCE_EQ(in->dims().size(), 4, "Conv2DOp input should be 4-D.");
PADDLE_ENFORCE_EQ(filter->dims().size(), 4,
"Conv2DOp filter should be 4-D.");
PADDLE_ENFORCE_EQ(input_channels, filter->dims()[1] * groups,
"The number of input channels should be equal to filter "
"channels * groups.");
PADDLE_ENFORCE_EQ(
output_channels % groups, 0,
"The number of output channels should be divided by groups.");
auto output_height =
outputSize(in->dims()[2], filter->dims()[2], paddings[0], strides[0]);
auto output_width =
outputSize(in->dims()[3], filter->dims()[3], paddings[1], strides[1]);
out->Resize(
{in->dims()[0], filter->dims()[0], output_height, output_width});
}
};
class Conv2DOpMaker : public framework::OpProtoAndCheckerMaker {
public:
Conv2DOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput(
"Input",
"The input tensor of convolution operator. "
"The format of input tensor is NCHW. Where N is batch size, C is the "
"number of channels, H and W is the height and width of image.");
AddInput(
"Filter",
"The filter tensor of convolution operator."
"The format of the filter tensor is MCHW, where M is the number of "
"output image channels, C is the number of input image channels, "
"H and W is height and width of filter. "
"If the groups attribute is greater than 1, C equal the number of "
"input image channels divided by the groups.");
AddOutput("Output",
"The output tensor of convolution operator."
"The format of output tensor is also NCHW.");
AddAttr<std::vector<int>>("strides", "strides of convolution operator.")
.SetDefault({1, 1});
AddAttr<std::vector<int>>("paddings", "paddings of convolution operator.")
.SetDefault({0, 0});
AddAttr<int>(
"groups",
"group size of convolution operator. "
"Refer to grouped convolution in Alex Krizhevsky's paper: "
"when group=2, the first half of the filters are only connected to the "
"first half of the input channels, and the second half only connected "
"to the second half.")
.SetDefault(1);
AddComment(R"DOC(
The convolution operation calculates the output based on the input, filter,
and the strides, paddings, and groups parameters. The size of each dimension
of the parameters is checked during shape inference.
)DOC");
}
};
class Conv2DOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
auto in = ctx.Input<Tensor>("Input");
auto filter = ctx.Input<Tensor>("Filter");
auto d_in = ctx.Output<framework::Tensor>(framework::GradVarName("Input"));
auto d_filter =
ctx.Output<framework::Tensor>(framework::GradVarName("Filter"));
if (d_in) d_in->Resize(in->dims());
if (d_filter) d_filter->Resize(filter->dims());
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(conv2d, ops::Conv2DOp, ops::Conv2DOpMaker, conv2d_grad,
ops::Conv2DOpGrad);
REGISTER_OP_CPU_KERNEL(
conv2d, ops::GemmConv2DKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
conv2d_grad, ops::GemmConvGrad2DKernel<paddle::platform::CPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/gemm_conv2d_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(
conv2d, ops::GemmConv2DKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
conv2d_grad, ops::GemmConvGrad2DKernel<paddle::platform::GPUPlace, float>);
...@@ -54,9 +54,10 @@ class CosSimOp : public framework::OperatorWithKernel { ...@@ -54,9 +54,10 @@ class CosSimOp : public framework::OperatorWithKernel {
" just 1 (which will be broadcasted to match Input(X))."); " just 1 (which will be broadcasted to match Input(X)).");
// resize tensor // resize tensor
ctx.Output<framework::LoDTensor>("Out")->Resize({x_dims[0], 1}); ctx.Output<framework::Tensor>("Out")->Resize({x_dims[0], 1});
ctx.Output<framework::LoDTensor>("XNorm")->Resize({x_dims[0], 1}); ctx.Output<framework::Tensor>("XNorm")->Resize({x_dims[0], 1});
ctx.Output<framework::LoDTensor>("YNorm")->Resize({y_dims[0], 1}); ctx.Output<framework::Tensor>("YNorm")->Resize({y_dims[0], 1});
ctx.ShareLoD("X", /*->*/ "Out");
} }
}; };
...@@ -81,10 +82,13 @@ Cosine Similarity Operator. ...@@ -81,10 +82,13 @@ Cosine Similarity Operator.
The equation is: Out = X^T * Y / (sqrt(X^T * X) * sqrt(Y^T * Y)). The equation is: Out = X^T * Y / (sqrt(X^T * X) * sqrt(Y^T * Y)).
Input(X) and Input(Y) must have the same shape, except that the 1st dimension The input `X` and `Y` must have the same shape, except that the 1st dimension
of Input(Y) could be just 1 (different from Input(X)), which will be of input `Y` could be just 1 (different from input `X`), which will be
broadcasted to match the shape of Input(X) before computing their cosine broadcasted to match the shape of input `X` before computing their cosine
similarity. similarity.
Both the input `X` and `Y` can carry the LoD (Level of Details) information,
or not. But the output only shares the LoD with input `X`.
)DOC"); )DOC");
} }
}; };
...@@ -139,10 +143,8 @@ class CosSimOpGrad : public framework::OperatorWithKernel { ...@@ -139,10 +143,8 @@ class CosSimOpGrad : public framework::OperatorWithKernel {
"Shape of Input(Out@Grad) must be [X.Dim(0), 1]."); "Shape of Input(Out@Grad) must be [X.Dim(0), 1].");
// resize tensor // resize tensor
auto *x_grad = auto *x_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
ctx.Output<framework::LoDTensor>(framework::GradVarName("X")); auto *y_grad = ctx.Output<framework::Tensor>(framework::GradVarName("Y"));
auto *y_grad =
ctx.Output<framework::LoDTensor>(framework::GradVarName("Y"));
if (x_grad) x_grad->Resize(x_dims); if (x_grad) x_grad->Resize(x_dims);
if (y_grad) y_grad->Resize(y_dims); if (y_grad) y_grad->Resize(y_dims);
} }
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/crop_op.h"
#include <boost/lexical_cast.hpp>
namespace paddle {
namespace operators {
using framework::Tensor;
class CropOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of CropOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of CropOp should not be null.");
auto x_dim = ctx.Input<Tensor>("X")->dims();
auto *y = ctx.Input<Tensor>("Y");
auto *out = ctx.Output<Tensor>("Out");
if (y == nullptr) {
auto shape = Attr<std::vector<int>>("shape");
PADDLE_ENFORCE_EQ(
int64_t(shape.size()), x_dim.size(),
"Shape size should be equal to dimention size of input tensor.");
std::vector<int64_t> tensor_shape(shape.size());
for (size_t i = 0; i < shape.size(); ++i) {
tensor_shape[i] = static_cast<int64_t>(shape[i]);
}
out->Resize(framework::make_ddim(tensor_shape));
} else {
PADDLE_ENFORCE_EQ(framework::arity(x_dim), framework::arity(y->dims()),
"Tensor rank of both CropOp's "
"inputs must be same.");
out->Resize(y->dims());
}
}
};
class CropOpMaker : public framework::OpProtoAndCheckerMaker {
public:
CropOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X",
"The input of pad op. "
"The input should be a k-D tensor(k > 0 and k < 7)");
AddInput("Y",
"The input used as reference for cropping"
" with the same dimension as X. ");
AddOutput("Out",
"The output of crop op "
"with the same dimension as X.");
AddAttr<std::vector<int>>("offsets",
"A list<int> describing offsets to be cropped."
"The size of offsets list should be as same as "
"dimension size of input X.");
AddAttr<std::vector<int>>("shape",
"A list<int> describing the shape of output."
"The size of shape list should be as same as "
"dimension size of input X.")
.SetDefault(std::vector<int>());
AddComment(R"DOC(
Crop Operator.
Crop input into output, as specified by offsets and shape.
There are two ways to set the shape:
1. reference input: crop input X to the shape of the reference input Y.
The dimension of the reference input should
be the same as that of input X.
2. shape list: crop input X to the shape described by a list<int>.
The size of the shape list should be the same as
the dimension size of input X.
The input should be a k-D tensor (k > 0 and k < 7). As an example:
Given:
X = [[0, 1, 2, 0, 0]
[0, 3, 4, 0, 0]
[0, 0, 0, 0, 0]]
and
offsets = [0, 1]
and
shape = [2, 2]
then we get
Out = [[1, 2],
[3, 4]]
)DOC");
}
};
class CropOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
"Input(Out@GRAD) should not be null");
auto x_dims = ctx.Input<Tensor>("X")->dims();
auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
if (x_grad != nullptr) {
x_grad->Resize(x_dims);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(crop, ops::CropOp, ops::CropOpMaker, crop_grad, ops::CropOpGrad);
REGISTER_OP_CPU_KERNEL(crop, ops::CropKernel<float>);
REGISTER_OP_CPU_KERNEL(crop_grad,
ops::CropGradKernel<paddle::platform::CPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/operators/crop_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(crop, ops::CropKernel<float>);
REGISTER_OP_GPU_KERNEL(crop_grad,
ops::CropGradKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/strided_memcpy.h"
namespace paddle {
namespace operators { // Internal
template <typename T, size_t D, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
using framework::Tensor;
template <typename T>
class CropKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* out = context.Output<Tensor>("Out");
const T* x_data = x->data<T>();
T* out_data = out->mutable_data<T>(context.GetPlace());
auto x_stride = framework::stride(x->dims());
auto out_stride = framework::stride(out->dims());
auto offsets = context.Attr<std::vector<int>>("offsets");
PADDLE_ENFORCE_EQ(
x->dims().size(), offsets.size(),
"Offsets size should be equal to dimension size of input tensor.");
int64_t offset = 0;
for (int i = 0; i < offsets.size(); ++i) {
offset += (x_stride[i] * offsets[i]);
}
StridedMemcpy<T>(context.device_context(), x_data + offset, x_stride,
out->dims(), out_stride, out_data);
}
};
template <typename Place, typename T, size_t D>
void CropGradFunction(const framework::ExecutionContext& context) {
auto* d_x = context.Output<Tensor>(framework::GradVarName("X"));
if (d_x != nullptr) {
auto* d_out = context.Input<Tensor>(framework::GradVarName("Out"));
d_x->mutable_data<T>(context.GetPlace());
auto offsets = context.Attr<std::vector<int>>("offsets");
Eigen::array<std::pair<int, int>, D> paddings;
for (int i = 0; i < D; ++i) {
paddings[i].first = offsets[i];
paddings[i].second = d_x->dims()[i] - d_out->dims()[i] - offsets[i];
}
auto d_x_tensor = EigenTensor<T, D>::From(*d_x);
auto d_out_tensor = EigenTensor<T, D>::From(*d_out);
d_x_tensor.device(context.GetEigenDevice<Place>()) =
d_out_tensor.pad(paddings, 0);
}
}
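// Note: the gradient of crop is zero-padding: offsets[i] becomes the front
// padding of dimension i and whatever was cut from the back becomes the rear
// padding, so d_out is padded with zeros back to the shape of X.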
template <typename Place, typename T>
class CropGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
size_t rank =
context.Input<Tensor>(framework::GradVarName("Out"))->dims().size();
switch (rank) {
case 1:
CropGradFunction<Place, T, 1>(context);
break;
case 2:
CropGradFunction<Place, T, 2>(context);
break;
case 3:
CropGradFunction<Place, T, 3>(context);
break;
case 4:
CropGradFunction<Place, T, 4>(context);
break;
case 5:
CropGradFunction<Place, T, 5>(context);
break;
case 6:
CropGradFunction<Place, T, 6>(context);
break;
default:
PADDLE_THROW(
"CropOp only support tensors with no more than 6 dimensions.");
}
}
};
} // namespace operators
} // namespace paddle
...@@ -17,8 +17,6 @@ limitations under the License. */ ...@@ -17,8 +17,6 @@ limitations under the License. */
namespace paddle { namespace paddle {
namespace operators { namespace operators {
using framework::LoDTensor;
class CrossEntropyOp : public framework::OperatorWithKernel { class CrossEntropyOp : public framework::OperatorWithKernel {
public: public:
using framework::OperatorWithKernel::OperatorWithKernel; using framework::OperatorWithKernel::OperatorWithKernel;
...@@ -35,23 +33,21 @@ class CrossEntropyOp : public framework::OperatorWithKernel { ...@@ -35,23 +33,21 @@ class CrossEntropyOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_EQ(x->dims().size(), 2, "Input(X)'s rank must be 2."); PADDLE_ENFORCE_EQ(x->dims().size(), 2, "Input(X)'s rank must be 2.");
PADDLE_ENFORCE_EQ(label->dims().size(), 2, PADDLE_ENFORCE_EQ(label->dims().size(), 2,
"Input(Label)'s rank must be 2."); "Input(Label)'s rank must be 2.");
// TODO(xinghai-sun): remove this check after swtiching to bool
PADDLE_ENFORCE(ctx.Attr<int>("soft_label") == 0 ||
ctx.Attr<int>("soft_label") == 1);
PADDLE_ENFORCE_EQ(x->dims()[0], label->dims()[0], PADDLE_ENFORCE_EQ(x->dims()[0], label->dims()[0],
"The 1st dimension of Input(X) and Input(Label) must " "The 1st dimension of Input(X) and Input(Label) must "
"be equal."); "be equal.");
if (ctx.Attr<int>("soft_label") == 1) { if (ctx.Attr<bool>("soft_label")) {
PADDLE_ENFORCE_EQ(x->dims()[1], label->dims()[1], PADDLE_ENFORCE_EQ(x->dims()[1], label->dims()[1],
"If Attr(soft_label) == 1, The 2nd dimension of " "If Attr(soft_label) == true, The 2nd dimension of "
"Input(X) and Input(Label) must be equal."); "Input(X) and Input(Label) must be equal.");
} else { } else {
PADDLE_ENFORCE_EQ(label->dims()[1], 1, PADDLE_ENFORCE_EQ(label->dims()[1], 1,
"If Attr(soft_label) == 0, The 2nd dimension of " "If Attr(soft_label) == false, The 2nd dimension of "
"Input(Label) must be 1."); "Input(Label) must be 1.");
} }
ctx.Output<LoDTensor>("Y")->Resize({x->dims()[0], 1}); ctx.Output<Tensor>("Y")->Resize({x->dims()[0], 1});
ctx.ShareLoD("X", /*->*/ "Y");
} }
}; };
...@@ -74,9 +70,6 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel { ...@@ -74,9 +70,6 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_EQ(dy->dims().size(), 2, "Input(Y@Grad)'s rank must be 2."); PADDLE_ENFORCE_EQ(dy->dims().size(), 2, "Input(Y@Grad)'s rank must be 2.");
PADDLE_ENFORCE_EQ(label->dims().size(), 2, PADDLE_ENFORCE_EQ(label->dims().size(), 2,
"Input(Label)'s rank must be 2."); "Input(Label)'s rank must be 2.");
// TODO(xinghai-sun): remove this check after swtiching to bool
PADDLE_ENFORCE(ctx.Attr<int>("soft_label") == 0 ||
ctx.Attr<int>("soft_label") == 1);
PADDLE_ENFORCE_EQ(x->dims()[0], label->dims()[0], PADDLE_ENFORCE_EQ(x->dims()[0], label->dims()[0],
"The 1st dimension of Input(X) and Input(Label) must " "The 1st dimension of Input(X) and Input(Label) must "
"be equal."); "be equal.");
...@@ -85,17 +78,17 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel { ...@@ -85,17 +78,17 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel {
"be equal."); "be equal.");
PADDLE_ENFORCE_EQ(dy->dims()[1], 1, PADDLE_ENFORCE_EQ(dy->dims()[1], 1,
"The 2nd dimension of Input(Y@Grad) must be 1."); "The 2nd dimension of Input(Y@Grad) must be 1.");
if (ctx.Attr<int>("soft_label") == 1) { if (ctx.Attr<bool>("soft_label")) {
PADDLE_ENFORCE_EQ(x->dims()[1], label->dims()[1], PADDLE_ENFORCE_EQ(x->dims()[1], label->dims()[1],
"If Attr(soft_label) == 1, The 2nd dimension of " "If Attr(soft_label) == true, The 2nd dimension of "
"Input(X) and Input(Label) must be equal."); "Input(X) and Input(Label) must be equal.");
} else { } else {
PADDLE_ENFORCE_EQ(label->dims()[1], 1, PADDLE_ENFORCE_EQ(label->dims()[1], 1,
"If Attr(soft_label) == 0, The 2nd dimension of " "If Attr(soft_label) == false, The 2nd dimension of "
"Input(Label) must be 1."); "Input(Label) must be 1.");
} }
auto dx = ctx.Output<LoDTensor>(framework::GradVarName("X")); auto dx = ctx.Output<Tensor>(framework::GradVarName("X"));
dx->Resize(x->dims()); dx->Resize(x->dims());
} }
}; };
...@@ -108,7 +101,8 @@ class CrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -108,7 +101,8 @@ class CrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker {
AddInput("X", "The first input of CrossEntropyOp"); AddInput("X", "The first input of CrossEntropyOp");
AddInput("Label", "The second input of CrossEntropyOp"); AddInput("Label", "The second input of CrossEntropyOp");
AddOutput("Y", "The output of CrossEntropyOp"); AddOutput("Y", "The output of CrossEntropyOp");
AddAttr<int>("soft_label", "Is soft label. Default zero.").SetDefault(0); AddAttr<bool>("soft_label", "Is soft label. Default zero.")
.SetDefault(false);
AddComment(R"DOC( AddComment(R"DOC(
CrossEntropy Operator. CrossEntropy Operator.
...@@ -116,12 +110,12 @@ CrossEntropy Operator. ...@@ -116,12 +110,12 @@ CrossEntropy Operator.
It supports both standard cross-entropy and soft-label cross-entropy loss It supports both standard cross-entropy and soft-label cross-entropy loss
computation. computation.
1) One-hot cross-entropy: 1) One-hot cross-entropy:
soft_label = 0, Label[i, 0] indicates the class index for sample i: soft_label = False, Label[i, 0] indicates the class index for sample i:
Y[i] = -log(X[i, Label[i]]) Y[i] = -log(X[i, Label[i]])
2) Soft-label cross-entropy: 2) Soft-label cross-entropy:
soft_label = 1, Label[i, j] indicates the soft label of class j soft_label = True, Label[i, j] indicates the soft label of class j
for sample i: for sample i:
Y[i] = \sum_j{-Label[i, j] * log(X[i, j])} Y[i] = \sum_j{-Label[i, j] * log(X[i, j])}
...@@ -133,6 +127,9 @@ computation. ...@@ -133,6 +127,9 @@ computation.
As a special case of 2), when each row of Input(Label) has only one As a special case of 2), when each row of Input(Label) has only one
non-zero element (equals 1), soft-label cross-entropy degenerates to a non-zero element (equals 1), soft-label cross-entropy degenerates to a
one-hot cross-entropy with one-hot label representation. one-hot cross-entropy with one-hot label representation.
Both the input `X` and `Label` can carry the LoD (Level of Details) information,
or not. But the output only shares the LoD with input `X`.
)DOC"); )DOC");
} }
}; };
......
...@@ -102,7 +102,7 @@ class CrossEntropyOpCUDAKernel : public framework::OpKernel { ...@@ -102,7 +102,7 @@ class CrossEntropyOpCUDAKernel : public framework::OpKernel {
int grid = (n + block - 1) / block; int grid = (n + block - 1) / block;
// TODO(qingqing) launch kernel on specified stream // TODO(qingqing) launch kernel on specified stream
// base on ExecutionContext. // base on ExecutionContext.
if (ctx.Attr<int>("soft_label") == 1) { if (ctx.Attr<bool>("soft_label")) {
auto* label_data = ctx.Input<Tensor>("Label")->data<T>(); auto* label_data = ctx.Input<Tensor>("Label")->data<T>();
SoftCrossEntropyKernel<T><<<grid, block>>>(y_data, x_data, label_data, n, SoftCrossEntropyKernel<T><<<grid, block>>>(y_data, x_data, label_data, n,
d); d);
...@@ -137,7 +137,7 @@ class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel { ...@@ -137,7 +137,7 @@ class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel {
grid = (n + block - 1) / block; grid = (n + block - 1) / block;
// TODO(qingqing): launch kernel on specified stream // TODO(qingqing): launch kernel on specified stream
// base on ExecutionContext. // base on ExecutionContext.
if (ctx.Attr<int>("soft_label") == 1) { if (ctx.Attr<bool>("soft_label")) {
auto* label_data = label->data<T>(); auto* label_data = label->data<T>();
SoftCrossEntropyGradientKernel<T><<<grid, block>>>( SoftCrossEntropyGradientKernel<T><<<grid, block>>>(
dx_data, dy_data, x_data, label_data, n, d); dx_data, dy_data, x_data, label_data, n, d);
......
...@@ -51,7 +51,7 @@ class CrossEntropyOpKernel : public framework::OpKernel { ...@@ -51,7 +51,7 @@ class CrossEntropyOpKernel : public framework::OpKernel {
int batch_size = x->dims()[0]; int batch_size = x->dims()[0];
int class_num = x->dims()[1]; int class_num = x->dims()[1];
if (ctx.Attr<int>("soft_label") == 1) { if (ctx.Attr<bool>("soft_label")) {
auto* label_data = ctx.Input<Tensor>("Label")->data<T>(); auto* label_data = ctx.Input<Tensor>("Label")->data<T>();
int index = 0; int index = 0;
for (int i = 0; i < batch_size; ++i) { for (int i = 0; i < batch_size; ++i) {
...@@ -92,7 +92,7 @@ class CrossEntropyGradientOpKernel : public framework::OpKernel { ...@@ -92,7 +92,7 @@ class CrossEntropyGradientOpKernel : public framework::OpKernel {
int class_num = x->dims()[1]; int class_num = x->dims()[1];
// TODO(qingqing): make zero setting an common function. // TODO(qingqing): make zero setting an common function.
if (ctx.Attr<int>("soft_label") == 1) { if (ctx.Attr<bool>("soft_label")) {
auto* label_data = ctx.Input<Tensor>("Label")->data<T>(); auto* label_data = ctx.Input<Tensor>("Label")->data<T>();
int index = 0; int index = 0;
for (int i = 0; i < batch_size; ++i) { for (int i = 0; i < batch_size; ++i) {
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/ddim.h"
#include "paddle/memory/memcpy.h"
#include "paddle/platform/device_context.h"
namespace paddle {
namespace operators {
namespace detail {
template <typename T, int Rank>
struct StridedMemcpyFunctor;
template <typename T>
struct StridedMemcpyFunctor<T, 1> {
void operator()(const platform::DeviceContext& dev_ctx, const T* src,
framework::Dim<1> src_stride, framework::Dim<1> dst_dim,
framework::Dim<1> dst_stride, T* dst) const {
auto place = dev_ctx.GetPlace();
if (platform::is_cpu_place(place)) {
auto& cpu_place = boost::get<platform::CPUPlace>(place);
memory::Copy(cpu_place, dst, cpu_place, src, sizeof(T) * dst_dim.head);
} else {
#ifndef PADDLE_ONLY_CPU
auto& gpu_place = boost::get<platform::GPUPlace>(place);
auto& cuda_ctx =
reinterpret_cast<const platform::CUDADeviceContext&>(dev_ctx);
memory::Copy(gpu_place, dst, gpu_place, src, sizeof(T) * dst_dim.head,
cuda_ctx.stream());
#else
PADDLE_THROW("Paddle is not compiled with GPU");
#endif
}
}
};
template <typename T, int Rank>
struct StridedMemcpyFunctor {
void operator()(const platform::DeviceContext& dev_ctx, const T* src,
framework::Dim<Rank> src_stride, framework::Dim<Rank> dst_dim,
framework::Dim<Rank> dst_stride, T* dst) const {
for (int64_t i = 0; i < dst_dim.head; ++i) {
StridedMemcpyFunctor<T, Rank - 1> func;
func(dev_ctx, src, src_stride.tail, dst_dim.tail, dst_stride.tail, dst);
src += src_stride.head;
dst += dst_stride.head;
}
}
};
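// The recursion peels one dimension per level: the general case copies
// dst_dim.head slices, advancing src and dst by their respective strides,
// while the Rank == 1 specialization performs the contiguous copy on the
// proper device as the base case.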
template <typename T>
struct StridedCopyDimVisitor : public boost::static_visitor<void> {
StridedCopyDimVisitor(const platform::DeviceContext& dev_ctx, const T* src,
const framework::DDim& src_stride,
const framework::DDim& dst_stride, T* dst)
: dev_ctx_(dev_ctx),
src_(src),
src_stride_(src_stride),
dst_stride_(dst_stride),
dst_(dst) {}
template <typename Dim>
void operator()(Dim dst_dim) const {
Dim src_stride = boost::get<Dim>(src_stride_);
Dim dst_stride = boost::get<Dim>(dst_stride_);
constexpr int dim = Dim::dimensions;
StridedMemcpyFunctor<T, dim> functor;
functor(dev_ctx_, src_, src_stride, dst_dim, dst_stride, dst_);
}
const platform::DeviceContext& dev_ctx_;
const T* src_;
const framework::DDim& src_stride_;
const framework::DDim& dst_stride_;
T* dst_;
};
} // namespace detail
} // namespace operators
} // namespace paddle
...@@ -18,7 +18,6 @@ namespace paddle { ...@@ -18,7 +18,6 @@ namespace paddle {
namespace operators { namespace operators {
using framework::Tensor; using framework::Tensor;
using framework::LoDTensor;
class DropoutOp : public framework::OperatorWithKernel { class DropoutOp : public framework::OperatorWithKernel {
public: public:
...@@ -29,15 +28,13 @@ class DropoutOp : public framework::OperatorWithKernel { ...@@ -29,15 +28,13 @@ class DropoutOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
PADDLE_ENFORCE_GE(ctx.Attr<float>("dropout_prob"), 0); PADDLE_ENFORCE_GE(ctx.Attr<float>("dropout_prob"), 0);
PADDLE_ENFORCE_LE(ctx.Attr<float>("dropout_prob"), 1); PADDLE_ENFORCE_LE(ctx.Attr<float>("dropout_prob"), 1);
// TODO(xinghai-sun): remove this check after swtiching to bool
PADDLE_ENFORCE(ctx.Attr<int>("is_training") == 0 ||
ctx.Attr<int>("is_training") == 1);
auto dims = ctx.Input<Tensor>("X")->dims(); auto dims = ctx.Input<Tensor>("X")->dims();
ctx.Output<LoDTensor>("Out")->Resize(dims); ctx.Output<Tensor>("Out")->Resize(dims);
if (ctx.Attr<int>("is_training") == 1) { if (ctx.Attr<bool>("is_training")) {
ctx.Output<LoDTensor>("Mask")->Resize(dims); ctx.Output<Tensor>("Mask")->Resize(dims);
} }
ctx.ShareLoD("X", /*->*/ "Out");
} }
}; };
...@@ -49,8 +46,7 @@ class DropoutOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -49,8 +46,7 @@ class DropoutOpMaker : public framework::OpProtoAndCheckerMaker {
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
AddAttr<AttrType>("dropout_prob", "Probability of setting units to zero.") AddAttr<AttrType>("dropout_prob", "Probability of setting units to zero.")
.SetDefault(.5f); .SetDefault(.5f);
// TODO(xinghai-sun): use bool for is_training after bool is supported. AddAttr<bool>("is_training", "Whether in training phase.").SetDefault(true);
AddAttr<int>("is_training", "Whether in training phase.").SetDefault(1);
AddAttr<int>("seed", "Dropout random seed.").SetDefault(0); AddAttr<int>("seed", "Dropout random seed.").SetDefault(0);
AddInput("X", "The input of dropout op."); AddInput("X", "The input of dropout op.");
AddOutput("Out", "The output of dropout op."); AddOutput("Out", "The output of dropout op.");
...@@ -59,7 +55,7 @@ class DropoutOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -59,7 +55,7 @@ class DropoutOpMaker : public framework::OpProtoAndCheckerMaker {
AddComment(R"DOC( AddComment(R"DOC(
Dropout Operator. Dropout Operator.
"Dropout" refers to randomly dropping out units in a nerual network. It is a 'Dropout' refers to randomly dropping out units in a nerual network. It is a
regularization technique for reducing overfitting by preventing neuron regularization technique for reducing overfitting by preventing neuron
co-adaptation during training. The dropout operator randomly sets (according to co-adaptation during training. The dropout operator randomly sets (according to
the given dropout probability) the outputs of some units to zero, while others the given dropout probability) the outputs of some units to zero, while others
...@@ -75,8 +71,8 @@ class DropoutOpGrad : public framework::OperatorWithKernel { ...@@ -75,8 +71,8 @@ class DropoutOpGrad : public framework::OperatorWithKernel {
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_EQ(ctx.Attr<int>("is_training"), 1, PADDLE_ENFORCE(ctx.Attr<bool>("is_training"),
"GradOp is only callable when is_training is true"); "GradOp is only callable when is_training is true");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Mask"), "Mask must not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Mask"), "Mask must not be null.");
...@@ -85,9 +81,6 @@ class DropoutOpGrad : public framework::OperatorWithKernel { ...@@ -85,9 +81,6 @@ class DropoutOpGrad : public framework::OperatorWithKernel {
PADDLE_ENFORCE_GE(ctx.Attr<AttrType>("dropout_prob"), 0); PADDLE_ENFORCE_GE(ctx.Attr<AttrType>("dropout_prob"), 0);
PADDLE_ENFORCE_LE(ctx.Attr<AttrType>("dropout_prob"), 1); PADDLE_ENFORCE_LE(ctx.Attr<AttrType>("dropout_prob"), 1);
// TODO(xinghai-sun): remove this check after switching to bool
PADDLE_ENFORCE(ctx.Attr<int>("is_training") == 0 ||
ctx.Attr<int>("is_training") == 1);
auto x_dims = ctx.Input<Tensor>("X")->dims(); auto x_dims = ctx.Input<Tensor>("X")->dims();
auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims(); auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
PADDLE_ENFORCE_EQ(x_dims, out_dims, PADDLE_ENFORCE_EQ(x_dims, out_dims,
...@@ -96,7 +89,7 @@ class DropoutOpGrad : public framework::OperatorWithKernel { ...@@ -96,7 +89,7 @@ class DropoutOpGrad : public framework::OperatorWithKernel {
PADDLE_ENFORCE_EQ(x_dims, mask_dims, PADDLE_ENFORCE_EQ(x_dims, mask_dims,
"Dimensions of Input(X) and Mask must be the same."); "Dimensions of Input(X) and Mask must be the same.");
auto *x_grad = ctx.Output<LoDTensor>(framework::GradVarName("X")); auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
x_grad->Resize(x_dims); x_grad->Resize(x_dims);
} }
}; };
......
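To make the semantics above concrete, here is a minimal standalone sketch (our own illustration, not Paddle code; the helper name DropoutForward is hypothetical) of what the dropout kernels compute in training mode: a 0/1 mask is sampled per element with drop probability dropout_prob and multiplied into the input, and the mask is kept for the backward pass.

#include <cstddef>
#include <cstdio>
#include <random>
#include <vector>

// Training-time dropout: zero each element with probability dropout_prob
// and record the sampled 0/1 mask so gradients can be routed identically.
void DropoutForward(const std::vector<float>& x, float dropout_prob,
                    unsigned seed, std::vector<float>* out,
                    std::vector<float>* mask) {
  std::minstd_rand engine(seed);
  std::uniform_real_distribution<float> dist(0.0f, 1.0f);
  out->resize(x.size());
  mask->resize(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    (*mask)[i] = dist(engine) < dropout_prob ? 0.0f : 1.0f;
    (*out)[i] = x[i] * (*mask)[i];
  }
}

int main() {
  std::vector<float> x = {1.f, 2.f, 3.f, 4.f}, out, mask;
  DropoutForward(x, /*dropout_prob=*/0.5f, /*seed=*/0, &out, &mask);
  for (float v : out) std::printf("%.1f ", v);  // roughly half are zeroed
  std::printf("\n");
  return 0;
}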
...@@ -59,7 +59,7 @@ class GPUDropoutKernel : public framework::OpKernel { ...@@ -59,7 +59,7 @@ class GPUDropoutKernel : public framework::OpKernel {
auto Y = EigenMatrix<T>::Reshape(*y, 1); auto Y = EigenMatrix<T>::Reshape(*y, 1);
auto place = context.GetEigenDevice<Place>(); auto place = context.GetEigenDevice<Place>();
if (context.Attr<int>("is_training") == 1) { if (context.Attr<bool>("is_training")) {
auto* mask = context.Output<Tensor>("Mask"); auto* mask = context.Output<Tensor>("Mask");
auto* mask_data = mask->mutable_data<T>(context.GetPlace()); auto* mask_data = mask->mutable_data<T>(context.GetPlace());
int size = framework::product(mask->dims()); int size = framework::product(mask->dims());
......
...@@ -35,7 +35,7 @@ class CPUDropoutKernel : public framework::OpKernel { ...@@ -35,7 +35,7 @@ class CPUDropoutKernel : public framework::OpKernel {
auto* y_data = y->mutable_data<T>(context.GetPlace()); auto* y_data = y->mutable_data<T>(context.GetPlace());
AttrType dropout_prob = context.Attr<AttrType>("dropout_prob"); AttrType dropout_prob = context.Attr<AttrType>("dropout_prob");
if (context.Attr<int>("is_training") == 1) { if (context.Attr<bool>("is_training")) {
auto* mask = context.Output<Tensor>("Mask"); auto* mask = context.Output<Tensor>("Mask");
auto* mask_data = mask->mutable_data<T>(context.GetPlace()); auto* mask_data = mask->mutable_data<T>(context.GetPlace());
int seed = context.Attr<int>("seed"); int seed = context.Attr<int>("seed");
...@@ -65,8 +65,8 @@ template <typename Place, typename T> ...@@ -65,8 +65,8 @@ template <typename Place, typename T>
class DropoutGradKernel : public framework::OpKernel { class DropoutGradKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
PADDLE_ENFORCE_EQ(context.Attr<int>("is_training"), 1, PADDLE_ENFORCE(context.Attr<bool>("is_training"),
"GradOp is only callable when is_training is true"); "GradOp is only callable when is_training is true");
auto* grad_x = context.Output<Tensor>(framework::GradVarName("X")); auto* grad_x = context.Output<Tensor>(framework::GradVarName("X"));
auto* grad_y = context.Input<Tensor>(framework::GradVarName("Out")); auto* grad_y = context.Input<Tensor>(framework::GradVarName("Out"));
......
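The grad kernel body is truncated by the diff, but given Out = X ⊙ Mask the chain rule fixes it: dX = dOut ⊙ Mask. A sketch continuing the hypothetical helper above:

#include <cstddef>
#include <vector>

// Backward pass matching DropoutForward: dX[i] = dOut[i] * Mask[i],
// since Out = X * Mask elementwise and Mask is constant w.r.t. X.
void DropoutBackward(const std::vector<float>& dout,
                     const std::vector<float>& mask,
                     std::vector<float>* dx) {
  dx->resize(dout.size());
  for (std::size_t i = 0; i < dout.size(); ++i) {
    (*dx)[i] = dout[i] * mask[i];
  }
}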
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/elementwise_add_op.h"
namespace paddle {
namespace operators {
class ElementwiseAddOpMaker : public ElementwiseOpMaker {
public:
ElementwiseAddOpMaker(framework::OpProto* proto,
framework::OpAttrChecker* op_checker)
: ElementwiseOpMaker(proto, op_checker) {
SetComment("add", "Out = X + Y");
AddComment(comment_);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(elementwise_add, ops::ElementwiseOp, ops::ElementwiseAddOpMaker,
elementwise_add_grad, ops::ElementwiseOpGrad);
REGISTER_OP_CPU_KERNEL(
elementwise_add,
ops::ElementwiseAddKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
elementwise_add_grad,
ops::ElementwiseAddGradKernel<paddle::platform::CPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/operators/elementwise_add_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(
elementwise_add,
ops::ElementwiseAddKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
elementwise_add_grad,
ops::ElementwiseAddGradKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/operators/elementwise_op.h"
namespace paddle {
namespace operators {
template <typename Place, typename T>
class ElementwiseAddKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
ElementwiseCompute<EigenAddFunctor, Place, T>(ctx);
}
};
template <typename T>
struct ElementwiseAddGradFunctor {
template <typename Device, typename X, typename Y, typename Z, typename dX,
typename dY, typename dZ>
void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) {
auto dz_e = framework::EigenVector<T>::Flatten(*dz);
if (dx) {
auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(d) = dz_e;
}
if (dy) {
auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(d) = dz_e;
}
}
};
template <typename T>
struct ElementwiseAddOneGradFunctor {
template <typename Device, typename X, typename Y, typename Z, typename dX,
typename dY, typename dZ>
void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) {
auto dz_e = framework::EigenVector<T>::Flatten(*dz);
if (dx) {
auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(d) = dz_e;
}
if (dy) {
auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(d) = dz_e.sum();
}
}
};
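The `.sum()` in ElementwiseAddOneGradFunctor is the standard broadcast-gradient rule. In our notation, when y is a single value added to every element of x:

\[
z_i = x_i + y \quad\Rightarrow\quad
\frac{\partial L}{\partial x_i} = \frac{\partial L}{\partial z_i},\qquad
\frac{\partial L}{\partial y} = \sum_i \frac{\partial L}{\partial z_i}
\]

so dx copies dz while dy reduces it. The BroadCast functors below apply the same rule, summing only over the dimensions along which y was replicated.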
template <typename T>
struct ElementwiseAddBroadCastGradFunctor {
template <typename Device, typename X, typename Y, typename Z, typename dX,
typename dY, typename dZ, typename Pre, typename N>
void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n) {
auto dz_e = framework::EigenVector<T>::Flatten(*dz);
if (dx) {
auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(d) = dz_e;
}
if (dy) {
auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(d) = dz_e.reshape(Eigen::DSizes<int, 2>(pre, n))
.sum(Eigen::array<int, 1>{{0}});
}
}
};
template <typename T>
struct ElementwiseAddBroadCast2GradFunctor {
template <typename Device, typename X, typename Y, typename Z, typename dX,
typename dY, typename dZ, typename Pre, typename N, typename Post>
void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n,
Post post) {
auto dz_e = framework::EigenVector<T>::Flatten(*dz);
if (dx) {
auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(d) = dz_e;
}
if (dy) {
auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(d) = dz_e.reshape(Eigen::DSizes<int, 3>(pre, n, post))
.sum(Eigen::array<int, 2>{{0, 2}});
}
}
};
template <typename Place, typename T>
class ElementwiseAddGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
ElementwiseGradCompute<Place, T, ElementwiseAddGradFunctor<T>,
ElementwiseAddOneGradFunctor<T>,
ElementwiseAddBroadCastGradFunctor<T>,
ElementwiseAddBroadCast2GradFunctor<T>>(ctx);
}
};
} // namespace operators
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/elementwise_div_op.h"
namespace paddle {
namespace operators {
class ElementwiseDivOpMaker : public ElementwiseOpMaker {
public:
ElementwiseDivOpMaker(framework::OpProto* proto,
framework::OpAttrChecker* op_checker)
: ElementwiseOpMaker(proto, op_checker) {
SetComment("Div", "Out = X / Y");
AddComment(comment_);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(elementwise_div, ops::ElementwiseOp, ops::ElementwiseDivOpMaker,
elementwise_div_grad, ops::ElementwiseOpGrad);
REGISTER_OP_CPU_KERNEL(
elementwise_div,
ops::ElementwiseDivKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
elementwise_div_grad,
ops::ElementwiseDivGradKernel<paddle::platform::CPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/operators/elementwise_div_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(
elementwise_div,
ops::ElementwiseDivKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
elementwise_div_grad,
ops::ElementwiseDivGradKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/operators/elementwise_op.h"
namespace paddle {
namespace operators {
template <typename Place, typename T>
class ElementwiseDivKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
ElementwiseCompute<EigenDivFunctor, Place, T>(ctx);
}
};
template <typename T>
struct ElementwiseDivGradFunctor {
template <typename Device, typename X, typename Y, typename Z, typename dX,
typename dY, typename dZ>
void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) {
auto y_e = framework::EigenVector<T>::Flatten(*y);
auto z_e = framework::EigenVector<T>::Flatten(*z);
auto dz_e = framework::EigenVector<T>::Flatten(*dz);
if (dx) {
auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(d) = dz_e / y_e;
}
if (dy) {
auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(d) = -1.0 * dz_e * z_e / y_e;
}
}
};
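The dy expression in ElementwiseDivGradFunctor follows from the quotient rule; writing it out (our notation):

\[
z = \frac{x}{y} \quad\Rightarrow\quad
\frac{\partial L}{\partial x} = \frac{1}{y}\,\frac{\partial L}{\partial z},\qquad
\frac{\partial L}{\partial y} = -\frac{x}{y^{2}}\,\frac{\partial L}{\partial z}
  = -\frac{z}{y}\,\frac{\partial L}{\partial z}
\]

which is exactly dx_e = dz_e / y_e and dy_e = -1.0 * dz_e * z_e / y_e, reusing the forward output z to avoid recomputing x / y.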
template <typename T>
struct ElementwiseDivBroadCastGradFunctor {
template <typename Device, typename X, typename Y, typename Z, typename dX,
typename dY, typename dZ, typename Pre, typename N>
void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n) {
auto x_e = framework::EigenVector<T>::Flatten(*x);
auto y_e = framework::EigenVector<T>::Flatten(*y);
auto dz_e = framework::EigenVector<T>::Flatten(*dz);
auto y_e_bcast = y_e.reshape(Eigen::DSizes<int, 2>(1, n))
.broadcast(Eigen::DSizes<int, 2>(pre, 1))
.reshape(Eigen::DSizes<int, 1>(x_e.size()));
if (dx) {
auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(d) = dz_e / y_e_bcast;
}
if (dy) {
auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(d) = (-1.0 * (x_e * dz_e) / (y_e_bcast * y_e_bcast))
.reshape(Eigen::DSizes<int, 2>(pre, n))
.sum(Eigen::array<int, 1>{{0}});
}
}
};
template <typename T>
struct ElementwiseDivBroadCast2GradFunctor {
template <typename Device, typename X, typename Y, typename Z, typename dX,
typename dY, typename dZ, typename Pre, typename N, typename Post>
void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n,
Post post) {
auto x_e = framework::EigenVector<T>::Flatten(*x);
auto y_e = framework::EigenVector<T>::Flatten(*y);
auto dz_e = framework::EigenVector<T>::Flatten(*dz);
auto y_e_bcast = y_e.reshape(Eigen::DSizes<int, 3>(1, n, 1))
.broadcast(Eigen::DSizes<int, 3>(pre, 1, post))
.reshape(Eigen::DSizes<int, 1>(x_e.size()));
if (dx) {
auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(d) = dz_e / y_e_bcast;
}
if (dy) {
auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(d) = (-1.0 * (x_e * dz_e) / (y_e_bcast * y_e_bcast))
.reshape(Eigen::DSizes<int, 3>(pre, n, post))
.sum(Eigen::array<int, 2>{{0, 2}});
}
}
};
template <typename Place, typename T>
class ElementwiseDivGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
ElementwiseGradCompute<Place, T, ElementwiseDivGradFunctor<T>,
ElementwiseDivGradFunctor<T>,
ElementwiseDivBroadCastGradFunctor<T>,
ElementwiseDivBroadCast2GradFunctor<T>>(ctx);
}
};
} // namespace operators
} // namespace paddle
...@@ -17,101 +17,25 @@ ...@@ -17,101 +17,25 @@
namespace paddle { namespace paddle {
namespace operators { namespace operators {
using Tensor = framework::Tensor; class ElementwiseMulOpMaker : public ElementwiseOpMaker {
class ElementWiseMulOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of ElementWiseMulOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
"Input(Y) of ElementWiseMulOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(
ctx.OutputVar("Out"),
"Output(Out) of ElementWiseMulOp should not be null.");
auto x_dim = ctx.Input<Tensor>("X")->dims();
auto y_dim = ctx.Input<Tensor>("Y")->dims();
PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(),
"Rank of first input must >= rank of second input.")
ctx.Output<framework::LoDTensor>("Out")->Resize(x_dim);
}
};
class ElementWiseMulOpMaker : public framework::OpProtoAndCheckerMaker {
public: public:
ElementWiseMulOpMaker(framework::OpProto *proto, ElementwiseMulOpMaker(framework::OpProto* proto,
framework::OpAttrChecker *op_checker) framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : ElementwiseOpMaker(proto, op_checker) {
AddInput("X", "The first input of elementwise mul op"); SetComment("Mul", "Out = X ⊙ Y");
AddInput("Y", "The second input of elementwise mul op"); AddComment(comment_);
AddAttr<int>("axis",
R"DOC(
When shape(Y) does not equal shape(X), Y will be broadcast
to match the shape of X, and axis is the dimension index in X to which Y is aligned
)DOC")
.SetDefault(-1)
.EqualGreaterThan(-1);
AddOutput("Out", "The output of elementwise mul op");
AddComment(R"DOC(
Limited elementwise multiply operator. The equation is: Out = X ⊙ Y.
1. The shape of Y should be the same as X, or
2. Y's shape is a subset of X.
Y will be broadcast to match the shape of X, and axis is the dimension index in X to which Y is aligned.
example:
shape(X) = (2, 3, 4, 5), shape(Y) = (,)
shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5)
shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
)DOC");
} }
}; };
class ElementWiseMulOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
"Input(Out@GRAD) should not be null");
auto x_dims = ctx.Input<Tensor>("X")->dims();
auto y_dims = ctx.Input<Tensor>("Y")->dims();
auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
auto *x_grad =
ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
auto *y_grad =
ctx.Output<framework::LoDTensor>(framework::GradVarName("Y"));
PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(),
"Rank of first input must >= rank of second input.")
if (x_grad) {
x_grad->Resize(x_dims);
}
if (y_grad) {
y_grad->Resize(y_dims);
}
}
};
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP(elementwise_mul, ops::ElementWiseMulOp, ops::ElementWiseMulOpMaker, REGISTER_OP(elementwise_mul, ops::ElementwiseOp, ops::ElementwiseMulOpMaker,
elementwise_mul_grad, ops::ElementWiseMulOpGrad); elementwise_mul_grad, ops::ElementwiseOpGrad);
REGISTER_OP_CPU_KERNEL( REGISTER_OP_CPU_KERNEL(
elementwise_mul, elementwise_mul,
ops::ElementWiseMulKernel<paddle::platform::CPUPlace, float>); ops::ElementwiseMulKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL( REGISTER_OP_CPU_KERNEL(
elementwise_mul_grad, elementwise_mul_grad,
ops::ElementWiseMulGradKernel<paddle::platform::CPUPlace, float>); ops::ElementwiseMulGradKernel<paddle::platform::CPUPlace, float>);
...@@ -19,7 +19,7 @@ namespace ops = paddle::operators; ...@@ -19,7 +19,7 @@ namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL( REGISTER_OP_GPU_KERNEL(
elementwise_mul, elementwise_mul,
ops::ElementWiseMulKernel<paddle::platform::GPUPlace, float>); ops::ElementwiseMulKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL( REGISTER_OP_GPU_KERNEL(
elementwise_mul_grad, elementwise_mul_grad,
ops::ElementWiseMulGradKernel<paddle::platform::GPUPlace, float>); ops::ElementwiseMulGradKernel<paddle::platform::GPUPlace, float>);
...@@ -13,171 +13,104 @@ ...@@ -13,171 +13,104 @@
limitations under the License. */ limitations under the License. */
#pragma once #pragma once
#include "paddle/framework/eigen.h" #include "paddle/operators/elementwise_op.h"
#include "paddle/framework/op_registry.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
/*
* Out = X ⊙ Y
* 1. shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
* pre=2, n=3*4, post=5
* 2. shape(X) = (2, 3, 4, 5), shape(Y) = (4,5)
* pre=2*3, n=4*5, post=1
*/
inline void get_mid_dims(const framework::DDim& x_dims,
const framework::DDim& y_dims, const int axis,
int& pre, int& n, int& post) {
pre = 1;
n = 1;
post = 1;
for (int i = 0; i < axis; ++i) {
pre *= x_dims[i];
}
for (int i = 0; i < y_dims.size(); ++i) {
PADDLE_ENFORCE_EQ(x_dims[i + axis], y_dims[i],
"Broadcast dimension mismatch.");
n *= y_dims[i];
}
for (int i = axis + y_dims.size(); i < x_dims.size(); ++i) {
post *= x_dims[i];
}
}
template <typename Place, typename T> template <typename Place, typename T>
class ElementWiseMulKernel : public framework::OpKernel { class ElementwiseMulKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
using Tensor = framework::Tensor; ElementwiseCompute<EigenMulFunctor, Place, T>(ctx);
}
auto* x = ctx.Input<Tensor>("X"); };
auto* y = ctx.Input<Tensor>("Y");
auto* z = ctx.Output<Tensor>("Out");
z->mutable_data<T>(ctx.GetPlace());
template <typename T>
struct ElementwiseMulGradFunctor {
template <typename Device, typename X, typename Y, typename Z, typename dX,
typename dY, typename dZ>
void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) {
auto x_e = framework::EigenVector<T>::Flatten(*x); auto x_e = framework::EigenVector<T>::Flatten(*x);
auto y_e = framework::EigenVector<T>::Flatten(*y); auto y_e = framework::EigenVector<T>::Flatten(*y);
auto z_e = framework::EigenVector<T>::Flatten(*z); auto dz_e = framework::EigenVector<T>::Flatten(*dz);
auto x_dims = x->dims(); if (dx) {
auto y_dims = y->dims(); auto dx_e = framework::EigenVector<T>::Flatten(*dx);
PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), dx_e.device(d) = dz_e * y_e;
"Rank of first input must >= rank of second input.")
if (x_dims == y_dims || product(y_dims) == 1) {
z_e.device(ctx.GetEigenDevice<Place>()) = x_e * y_e;
return;
} }
int axis = ctx.Attr<int>("axis"); if (dy) {
axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis); auto dy_e = framework::EigenVector<T>::Flatten(*dy);
PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(), dy_e.device(d) = x_e * dz_e;
"Axis should be in range [0, x_dims)");
int pre, n, post;
get_mid_dims(x_dims, y_dims, axis, pre, n, post);
if (post == 1) {
auto y_bcast = y_e.reshape(Eigen::DSizes<int, 2>(1, n))
.broadcast(Eigen::DSizes<int, 2>(pre, 1))
.reshape(Eigen::DSizes<int, 1>(x_e.size()));
z_e.device(ctx.GetEigenDevice<Place>()) = x_e * y_bcast;
return;
} else {
auto y_bcast = y_e.reshape(Eigen::DSizes<int, 3>(1, n, 1))
.broadcast(Eigen::DSizes<int, 3>(pre, 1, post))
.reshape(Eigen::DSizes<int, 1>(x_e.size()));
z_e.device(ctx.GetEigenDevice<Place>()) = x_e * y_bcast;
return;
} }
} }
}; };
template <typename Place, typename T> template <typename T>
class ElementWiseMulGradKernel : public framework::OpKernel { struct ElementwiseMulBroadCastGradFunctor {
public: template <typename Device, typename X, typename Y, typename Z, typename dX,
void Compute(const framework::ExecutionContext& ctx) const override { typename dY, typename dZ, typename Pre, typename N>
using Tensor = framework::Tensor; void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n) {
auto* x = ctx.Input<Tensor>("X");
auto* y = ctx.Input<Tensor>("Y");
auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto x_e = framework::EigenVector<T>::Flatten(*x); auto x_e = framework::EigenVector<T>::Flatten(*x);
auto y_e = framework::EigenVector<T>::Flatten(*y); auto y_e = framework::EigenVector<T>::Flatten(*y);
auto dout_e = framework::EigenVector<T>::Flatten(*dout); auto dz_e = framework::EigenVector<T>::Flatten(*dz);
auto x_dims = x->dims(); auto y_e_bcast = y_e.reshape(Eigen::DSizes<int, 2>(1, n))
auto y_dims = y->dims(); .broadcast(Eigen::DSizes<int, 2>(pre, 1))
.reshape(Eigen::DSizes<int, 1>(x_e.size()));
auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
auto* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));
if (dx) { if (dx) {
dx->mutable_data<T>(ctx.GetPlace()); auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(d) = dz_e * y_e_bcast;
} }
if (dy) { if (dy) {
dy->mutable_data<T>(ctx.GetPlace()); auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(d) = (x_e * dz_e)
.reshape(Eigen::DSizes<int, 2>(pre, n))
.sum(Eigen::array<int, 1>{{0}});
} }
}
};
if (x_dims == y_dims || product(y_dims) == 1) { template <typename T>
if (dx) { struct ElementwiseMulBroadCast2GradFunctor {
auto dx_e = framework::EigenVector<T>::Flatten(*dx); template <typename Device, typename X, typename Y, typename Z, typename dX,
dx_e.device(ctx.GetEigenDevice<Place>()) = dout_e * y_e; typename dY, typename dZ, typename Pre, typename N, typename Post>
} void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n,
Post post) {
if (dy) { auto x_e = framework::EigenVector<T>::Flatten(*x);
auto dy_e = framework::EigenVector<T>::Flatten(*dy); auto y_e = framework::EigenVector<T>::Flatten(*y);
dy_e.device(ctx.GetEigenDevice<Place>()) = x_e * dout_e; auto dz_e = framework::EigenVector<T>::Flatten(*dz);
}
return; auto y_e_bcast = y_e.reshape(Eigen::DSizes<int, 3>(1, n, 1))
.broadcast(Eigen::DSizes<int, 3>(pre, 1, post))
.reshape(Eigen::DSizes<int, 1>(x_e.size()));
if (dx) {
auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(d) = dz_e * y_e_bcast;
} }
int axis = ctx.Attr<int>("axis"); if (dy) {
axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis); auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(d) = (x_e * dz_e)
int pre, n, post; .reshape(Eigen::DSizes<int, 3>(pre, n, post))
get_mid_dims(x_dims, y_dims, axis, pre, n, post); .sum(Eigen::array<int, 2>{{0, 2}});
// TODO(gongweibao): wrap reshape into a function.
if (post == 1) {
auto y_e_bcast = y_e.reshape(Eigen::DSizes<int, 2>(1, n))
.broadcast(Eigen::DSizes<int, 2>(pre, 1))
.reshape(Eigen::DSizes<int, 1>(x_e.size()));
if (dx) {
auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(ctx.GetEigenDevice<Place>()) = dout_e * y_e_bcast;
}
if (dy) {
auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(ctx.GetEigenDevice<Place>()) =
(x_e * dout_e)
.reshape(Eigen::DSizes<int, 2>(pre, n))
.sum(Eigen::array<int, 1>{{0}});
}
return;
} else {
auto y_e_bcast = y_e.reshape(Eigen::DSizes<int, 3>(1, n, 1))
.broadcast(Eigen::DSizes<int, 3>(pre, 1, post))
.reshape(Eigen::DSizes<int, 1>(x_e.size()));
if (dx) {
auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(ctx.GetEigenDevice<Place>()) = dout_e * y_e_bcast;
}
if (dy) {
auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(ctx.GetEigenDevice<Place>()) =
(x_e * dout_e)
.reshape(Eigen::DSizes<int, 3>(pre, n, post))
.sum(Eigen::array<int, 2>{{0, 2}});
}
return;
} }
} }
}; };
template <typename Place, typename T>
class ElementwiseMulGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
ElementwiseGradCompute<Place, T, ElementwiseMulGradFunctor<T>,
ElementwiseMulGradFunctor<T>,
ElementwiseMulBroadCastGradFunctor<T>,
ElementwiseMulBroadCast2GradFunctor<T>>(ctx);
}
};
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <iostream>
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/math_function.h"
namespace paddle {
namespace operators {
/*
* Out = X ⊙ Y
* If Y's shape does not match X's shape, they will be reshaped.
* For example:
* 1. shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
* pre=2, n=3*4, post=5
* x.shape(2, 12, 5) * y.shape(1,12,1).broadcast(2,12,5)
* 2. shape(X) = (2, 3, 4, 5), shape(Y) = (4,5)
* pre=2*3, n=4*5, post=1
* x.shape(2, 3, 20) * y.shape(1,1,20).broadcast(2,3,20)
*/
inline void get_mid_dims(const framework::DDim& x_dims,
const framework::DDim& y_dims, const int axis,
int& pre, int& n, int& post) {
pre = 1;
n = 1;
post = 1;
for (int i = 0; i < axis; ++i) {
pre *= x_dims[i];
}
for (int i = 0; i < y_dims.size(); ++i) {
PADDLE_ENFORCE_EQ(x_dims[i + axis], y_dims[i],
"Broadcast dimension mismatch.");
n *= y_dims[i];
}
for (int i = axis + y_dims.size(); i < x_dims.size(); ++i) {
post *= x_dims[i];
}
}
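A quick standalone check of the (pre, n, post) decomposition described in the comment above, using plain shape vectors instead of framework::DDim (illustrative only; GetMidDims is our hypothetical stand-in):

#include <cassert>
#include <cstddef>
#include <vector>

// Same decomposition as get_mid_dims, on plain shape vectors:
// X is viewed as (pre, n, post) with Y matching the middle block.
void GetMidDims(const std::vector<int>& x, const std::vector<int>& y,
                int axis, int* pre, int* n, int* post) {
  *pre = *n = *post = 1;
  for (int i = 0; i < axis; ++i) *pre *= x[i];
  for (std::size_t i = 0; i < y.size(); ++i) {
    assert(x[i + axis] == y[i]);  // broadcast dimensions must match
    *n *= y[i];
  }
  for (std::size_t i = axis + y.size(); i < x.size(); ++i) *post *= x[i];
}

int main() {
  int pre, n, post;
  GetMidDims({2, 3, 4, 5}, {3, 4}, /*axis=*/1, &pre, &n, &post);
  assert(pre == 2 && n == 12 && post == 5);  // example 1 in the comment
  GetMidDims({2, 3, 4, 5}, {4, 5}, /*axis=*/2, &pre, &n, &post);
  assert(pre == 6 && n == 20 && post == 1);  // example 2 in the comment
  return 0;
}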
#define EIGEN_FUNCTOR(name, eigen_op) \
struct Eigen##name##Functor { \
template <typename Place, typename T> \
inline void Run(const framework::Tensor* x, const framework::Tensor* y, \
framework::Tensor* z, \
const framework::ExecutionContext& ctx) { \
auto x_e = framework::EigenVector<T>::Flatten(*x); \
auto y_e = framework::EigenVector<T>::Flatten(*y); \
auto z_e = framework::EigenVector<T>::Flatten(*z); \
z_e.device(ctx.GetEigenDevice<Place>()) = eigen_op(x_e, y_e); \
} \
template <typename Place, typename T> \
inline void RunBroadCast(const framework::Tensor* x, \
const framework::Tensor* y, framework::Tensor* z, \
const framework::ExecutionContext& ctx, int pre, \
int n) { \
auto x_e = framework::EigenVector<T>::Flatten(*x); \
auto y_e = framework::EigenVector<T>::Flatten(*y); \
auto z_e = framework::EigenVector<T>::Flatten(*z); \
auto y_bcast = y_e.reshape(Eigen::DSizes<int, 2>(1, n)) \
.broadcast(Eigen::DSizes<int, 2>(pre, 1)) \
.reshape(Eigen::DSizes<int, 1>(x_e.size())); \
z_e.device(ctx.GetEigenDevice<Place>()) = eigen_op(x_e, y_bcast); \
} \
template <typename Place, typename T> \
inline void RunBroadCast2(const framework::Tensor* x, \
const framework::Tensor* y, \
framework::Tensor* z, \
const framework::ExecutionContext& ctx, int pre, \
int n, int post) { \
auto x_e = framework::EigenVector<T>::Flatten(*x); \
auto y_e = framework::EigenVector<T>::Flatten(*y); \
auto z_e = framework::EigenVector<T>::Flatten(*z); \
auto y_bcast = y_e.reshape(Eigen::DSizes<int, 3>(1, n, 1)) \
.broadcast(Eigen::DSizes<int, 3>(pre, 1, post)) \
.reshape(Eigen::DSizes<int, 1>(x_e.size())); \
z_e.device(ctx.GetEigenDevice<Place>()) = eigen_op(x_e, y_bcast); \
} \
}
template <class functor, typename Place, typename T>
void ElementwiseCompute(const framework::ExecutionContext& ctx) {
using Tensor = framework::Tensor;
auto* x = ctx.Input<Tensor>("X");
auto* y = ctx.Input<Tensor>("Y");
auto* z = ctx.Output<Tensor>("Out");
z->mutable_data<T>(ctx.GetPlace());
auto x_dims = x->dims();
auto y_dims = y->dims();
PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(),
"Rank of first input must >= rank of second input.")
if (x_dims == y_dims || product(y_dims) == 1) {
functor f;
f.template Run<Place, T>(x, y, z, ctx);
return;
}
int axis = ctx.Attr<int>("axis");
axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(),
"Axis should be in range [0, x_dims)");
int pre, n, post;
get_mid_dims(x_dims, y_dims, axis, pre, n, post);
if (post == 1) {
functor f;
f.template RunBroadCast<Place, T>(x, y, z, ctx, pre, n);
return;
} else {
functor f;
f.template RunBroadCast2<Place, T>(x, y, z, ctx, pre, n, post);
return;
}
}
#define EIGEN_ADD(x, y) ((x) + (y))
EIGEN_FUNCTOR(Add, EIGEN_ADD);
#define EIGEN_SUB(x, y) ((x) - (y))
EIGEN_FUNCTOR(Sub, EIGEN_SUB);
#define EIGEN_MUL(x, y) ((x) * (y))
EIGEN_FUNCTOR(Mul, EIGEN_MUL);
#define EIGEN_DIV(x, y) ((x) / (y))
EIGEN_FUNCTOR(Div, EIGEN_DIV);
template <typename Place, typename T, typename functor, typename functor1,
typename broadcastfunctor, typename broadcast2functor>
void ElementwiseGradCompute(const framework::ExecutionContext& ctx) {
using Tensor = framework::Tensor;
auto* x = ctx.Input<Tensor>("X");
auto* y = ctx.Input<Tensor>("Y");
auto* out = ctx.Input<Tensor>("Out");
auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto place = ctx.GetEigenDevice<Place>();
auto x_dims = x->dims();
auto y_dims = y->dims();
auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
auto* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));
if (dx) {
dx->mutable_data<T>(ctx.GetPlace());
}
if (dy) {
dy->mutable_data<T>(ctx.GetPlace());
}
if (x_dims == y_dims) {
functor f;
f(place, x, y, out, dx, dy, dout);
return;
}
if (product(y_dims) == 1) {
functor1 f;
f(place, x, y, out, dx, dy, dout);
return;
}
int axis = ctx.Attr<int>("axis");
axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
int pre, n, post;
get_mid_dims(x_dims, y_dims, axis, pre, n, post);
if (post == 1) {
broadcastfunctor f;
f(place, x, y, out, dx, dy, dout, pre, n);
return;
} else {
broadcast2functor f;
f(place, x, y, out, dx, dy, dout, pre, n, post);
return;
}
}
class ElementwiseOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
using Tensor = framework::Tensor;
void InferShape(const framework::InferShapeContext& ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of elementwise op should not be null");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
"Input(Y) of elementwise op should not be null");
PADDLE_ENFORCE_NOT_NULL(
ctx.OutputVar("Out"),
"Output(Out) of elementwise op should not be null.");
auto x_dim = ctx.Input<Tensor>("X")->dims();
auto y_dim = ctx.Input<Tensor>("Y")->dims();
PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(),
"Rank of first input must >= rank of second input.")
ctx.Output<framework::Tensor>("Out")->Resize(x_dim);
ctx.ShareLoD("X", /*->*/ "Out");
}
};
class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
public:
ElementwiseOpMaker(framework::OpProto* proto,
framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", R"DOC(
The first input of elementwise op; it is a tensor of any dimensionality.
)DOC");
AddInput("Y", R"DOC(
The second input of elementwise op; it is a tensor whose dimensions
must be smaller than or equal to X's dimensions.
)DOC");
AddAttr<int>("axis",
R"DOC(
When shape(Y) does not equal shape(X), Y will be broadcast
to match the shape of X, and axis is the dimension index in X to which Y is aligned
)DOC")
.SetDefault(-1)
.EqualGreaterThan(-1);
AddOutput("Out", "The output of elementwise op");
comment_ = R"DOC(
Limited elementwise {name} operator. The equation is: Out = {equation}.
1. The shape of Y should be the same as X, or
2. Y's shape is a subset of X.
Y will be broadcast to match the shape of X, and axis is the dimension index in X to which Y is aligned.
example:
shape(X) = (2, 3, 4, 5), shape(Y) = (,)
shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5)
shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
Both inputs X and Y can carry the LoD (Level of Details) information,
or not. But the output only shares the LoD with input X.
)DOC";
AddComment(comment_);
}
protected:
std::string comment_;
void Replace(std::string& src, std::string from, std::string to) {
std::size_t len_from = from.size();
std::size_t len_to = to.size();
for (std::size_t pos = src.find(from); pos != std::string::npos;
pos = src.find(from, pos + len_to)) {
src.replace(pos, len_from, to);
}
}
void SetComment(std::string name, std::string equation) {
Replace(comment_, "{name}", name);
Replace(comment_, "{equation}", equation);
}
};
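To pin down the axis convention in the DOC above: the compute paths shown earlier replace the default axis of -1 with x rank minus y rank, so Y lines up with the trailing dimensions of X. A two-line illustration (hypothetical helper name):

// Default axis resolution used by ElementwiseCompute/ElementwiseGradCompute:
// axis == -1 aligns Y with the trailing dimensions of X.
int ResolveAxis(int axis, int x_rank, int y_rank) {
  return axis == -1 ? x_rank - y_rank : axis;
}
// e.g. shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5): axis resolves to 2.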
class ElementwiseOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
using Tensor = framework::Tensor;
protected:
void InferShape(const framework::InferShapeContext& ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
"Input(Out@GRAD) should not be null");
auto x_dims = ctx.Input<Tensor>("X")->dims();
auto y_dims = ctx.Input<Tensor>("Y")->dims();
auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
auto* x_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
auto* y_grad = ctx.Output<framework::Tensor>(framework::GradVarName("Y"));
PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(),
"Rank of first input must >= rank of second input.")
if (x_grad) {
x_grad->Resize(x_dims);
}
if (y_grad) {
y_grad->Resize(y_dims);
}
}
};
} // namespace operators
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/elementwise_sub_op.h"
namespace paddle {
namespace operators {
class ElementwiseSubOpMaker : public ElementwiseOpMaker {
public:
ElementwiseSubOpMaker(framework::OpProto* proto,
framework::OpAttrChecker* op_checker)
: ElementwiseOpMaker(proto, op_checker) {
SetComment("Sub", "Out = X - Y");
AddComment(comment_);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(elementwise_sub, ops::ElementwiseOp, ops::ElementwiseSubOpMaker,
elementwise_sub_grad, ops::ElementwiseOpGrad);
REGISTER_OP_CPU_KERNEL(
elementwise_sub,
ops::ElementwiseSubKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
elementwise_sub_grad,
ops::ElementwiseSubGradKernel<paddle::platform::CPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/operators/elementwise_sub_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(
elementwise_sub,
ops::ElementwiseSubKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
elementwise_sub_grad,
ops::ElementwiseSubGradKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/operators/elementwise_op.h"
namespace paddle {
namespace operators {
template <typename Place, typename T>
class ElementwiseSubKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
ElementwiseCompute<EigenSubFunctor, Place, T>(ctx);
}
};
template <typename T>
struct ElementwiseSubGradFunctor {
template <typename Device, typename X, typename Y, typename Z, typename dX,
typename dY, typename dZ>
void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) {
auto dz_e = framework::EigenVector<T>::Flatten(*dz);
if (dx) {
auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(d) = dz_e;
}
if (dy) {
auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(d) = (-1.0) * dz_e;
}
}
};
template <typename T>
struct ElementwiseSubOneGradFunctor {
template <typename Device, typename X, typename Y, typename Z, typename dX,
typename dY, typename dZ>
void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) {
auto dz_e = framework::EigenVector<T>::Flatten(*dz);
if (dx) {
auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(d) = dz_e;
}
if (dy) {
auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(d) = (-1.0) * dz_e.sum();
}
}
};
template <typename T>
struct ElementwiseSubBroadCastGradFunctor {
template <typename Device, typename X, typename Y, typename Z, typename dX,
typename dY, typename dZ, typename Pre, typename N>
void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n) {
auto dz_e = framework::EigenVector<T>::Flatten(*dz);
if (dx) {
auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(d) = dz_e;
}
if (dy) {
auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(d) = (-1.0) *
dz_e.reshape(Eigen::DSizes<int, 2>(pre, n))
.sum(Eigen::array<int, 1>{{0}});
}
}
};
template <typename T>
struct ElementwiseSubBroadCast2GradFunctor {
template <typename Device, typename X, typename Y, typename Z, typename dX,
typename dY, typename dZ, typename Pre, typename N, typename Post>
void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n,
Post post) {
auto dz_e = framework::EigenVector<T>::Flatten(*dz);
if (dx) {
auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(d) = dz_e;
}
if (dy) {
auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(d) = (-1.0) *
dz_e.reshape(Eigen::DSizes<int, 3>(pre, n, post))
.sum(Eigen::array<int, 2>{{0, 2}});
}
}
};
template <typename Place, typename T>
class ElementwiseSubGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
ElementwiseGradCompute<Place, T, ElementwiseSubGradFunctor<T>,
ElementwiseSubOneGradFunctor<T>,
ElementwiseSubBroadCastGradFunctor<T>,
ElementwiseSubBroadCast2GradFunctor<T>>(ctx);
}
};
} // namespace operators
} // namespace paddle
...@@ -186,6 +186,9 @@ W_i is a 2-D matrix of size (K x N), where N means the number of neurons ...@@ -186,6 +186,9 @@ W_i is a 2-D matrix of size (K x N), where N means the number of neurons
in the fully connected layer. B is a 1-D vector of size N. in the fully connected layer. B is a 1-D vector of size N.
Thus, the output Out is a 2-D matrix of size (M x N). Thus, the output Out is a 2-D matrix of size (M x N).
Activation type can be set to `identity` (default), `sigmoid` or `softmax`. Activation type can be set to `identity` (default), `sigmoid` or `softmax`.
All the inputs can carry the LoD (Level of Details) information,
or not. But the output only shares the LoD with the first input (`X[0]`).
)DOC"); )DOC");
} }
}; };
......
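In formula form (our notation, inferred from the shape description above), the layer computes

\[
\mathrm{Out} = \mathrm{Act}\Big(\sum_i X_i W_i + B\Big),\qquad
X_i \in \mathbb{R}^{M\times K},\;
W_i \in \mathbb{R}^{K\times N},\;
B \in \mathbb{R}^{N},\;
\mathrm{Out} \in \mathbb{R}^{M\times N}
\]

with Act one of identity, sigmoid, or softmax, and B broadcast over the M rows.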
...@@ -23,15 +23,14 @@ class FillZerosLikeOp : public framework::OperatorWithKernel { ...@@ -23,15 +23,14 @@ class FillZerosLikeOp : public framework::OperatorWithKernel {
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL( PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
ctx.InputVar("Src"), "Input(X) of FillZerosLikeOp should not be null.");
"Input(Src) of FillZerosLikeOp should not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Y"),
PADDLE_ENFORCE_NOT_NULL( "Output(Y) of FillZerosLikeOp should not be null.");
ctx.OutputVar("Dst"),
"Output(Dst) of FillZerosLikeOp should not be null."); ctx.Output<framework::Tensor>("Y")->Resize(
ctx.Input<framework::Tensor>("X")->dims());
ctx.Output<framework::LoDTensor>("Dst")->Resize( ctx.ShareLoD("X", /*->*/ "Y");
ctx.Input<framework::Tensor>("Src")->dims());
} }
}; };
...@@ -40,8 +39,8 @@ class FillZerosLikeOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -40,8 +39,8 @@ class FillZerosLikeOpMaker : public framework::OpProtoAndCheckerMaker {
FillZerosLikeOpMaker(framework::OpProto *proto, FillZerosLikeOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker) framework::OpAttrChecker *op_checker)
: framework::OpProtoAndCheckerMaker(proto, op_checker) { : framework::OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("Src", "The input of fill-zeros-like op."); AddInput("X", "The input of fill-zeros-like op.");
AddOutput("Dst", "The varibale will be filled up with zeros."); AddOutput("Y", "The varibale will be filled up with zeros.");
AddComment(R"DOC( AddComment(R"DOC(
Fill a variable with zeros. Fill a variable with zeros.
......
...@@ -23,7 +23,7 @@ template <typename Place, typename T> ...@@ -23,7 +23,7 @@ template <typename Place, typename T>
class FillZerosLikeKernel : public framework::OpKernel { class FillZerosLikeKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto* output = context.Output<framework::Tensor>("Dst"); auto* output = context.Output<framework::Tensor>("Y");
output->mutable_data<T>(context.GetPlace()); output->mutable_data<T>(context.GetPlace());
auto t = framework::EigenVector<T>::Flatten(*output); auto t = framework::EigenVector<T>::Flatten(*output);
t.device(context.GetEigenDevice<Place>()) = t.constant(static_cast<T>(0)); t.device(context.GetEigenDevice<Place>()) = t.constant(static_cast<T>(0));
......
...@@ -35,7 +35,7 @@ class GatherOp : public framework::OperatorWithKernel { ...@@ -35,7 +35,7 @@ class GatherOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_GE(batch_size, 0, "Batch size must be >=0"); PADDLE_ENFORCE_GE(batch_size, 0, "Batch size must be >=0");
framework::DDim output_dims(ctx.Input<Tensor>("X")->dims()); framework::DDim output_dims(ctx.Input<Tensor>("X")->dims());
output_dims[0] = batch_size; output_dims[0] = batch_size;
ctx.Output<framework::LoDTensor>("Out")->Resize(output_dims); ctx.Output<framework::Tensor>("Out")->Resize(output_dims);
} }
}; };
...@@ -45,7 +45,7 @@ class GatherGradOp : public framework::OperatorWithKernel { ...@@ -45,7 +45,7 @@ class GatherGradOp : public framework::OperatorWithKernel {
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
auto X_grad = ctx.Output<framework::LoDTensor>(framework::GradVarName("X")); auto X_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
auto X = ctx.Input<Tensor>("X"); auto X = ctx.Input<Tensor>("X");
X_grad->Resize(X->dims()); X_grad->Resize(X->dims());
......
...@@ -48,7 +48,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel { ...@@ -48,7 +48,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
ctx.OutputVar("Out"), ctx.OutputVar("Out"),
"Output(Out) of GaussianRandomOp should not be null."); "Output(Out) of GaussianRandomOp should not be null.");
auto* tensor = ctx.Output<framework::LoDTensor>("Out"); auto* tensor = ctx.Output<framework::Tensor>("Out");
auto dims = Attr<std::vector<int>>("dims"); auto dims = Attr<std::vector<int>>("dims");
std::vector<int64_t> temp; std::vector<int64_t> temp;
temp.reserve(dims.size()); temp.reserve(dims.size());
......
...@@ -32,9 +32,10 @@ class LookupTableOp : public framework::OperatorWithKernel { ...@@ -32,9 +32,10 @@ class LookupTableOp : public framework::OperatorWithKernel {
auto table_t = ctx.Input<Tensor>("W"); auto table_t = ctx.Input<Tensor>("W");
auto ids_t = ctx.Input<Tensor>("Ids"); auto ids_t = ctx.Input<Tensor>("Ids");
auto output_t = ctx.Output<framework::LoDTensor>("Out"); auto output_t = ctx.Output<framework::Tensor>("Out");
output_t->Resize({ids_t->dims()[0], table_t->dims()[1]}); output_t->Resize({ids_t->dims()[0], table_t->dims()[1]});
ctx.ShareLoD("Ids", /*->*/ "Out");
} }
}; };
...@@ -50,9 +51,13 @@ class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -50,9 +51,13 @@ class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker {
"An input with type int32 or int64" "An input with type int32 or int64"
"contains the ids to be looked up in W."); "contains the ids to be looked up in W.");
AddOutput("Out", "The lookup results, which have the same type with W."); AddOutput("Out", "The lookup results, which have the same type with W.");
AddComment( AddComment(R"DOC(
"This operator is used to perform lookups on the parameter W," This operator is used to perform lookups on the parameter W,
"then concatenated into a dense tensor."); then concatenated into a dense tensor.
The input `Ids` can carry the LoD (Level of Details) information,
or not. And the output only shares the LoD with the input `Ids`.
)DOC");
} }
}; };
...@@ -64,7 +69,7 @@ class LookupTableOpGrad : public framework::OperatorWithKernel { ...@@ -64,7 +69,7 @@ class LookupTableOpGrad : public framework::OperatorWithKernel {
void InferShape(const framework::InferShapeContext &context) const override { void InferShape(const framework::InferShapeContext &context) const override {
auto table = context.Input<Tensor>("W"); auto table = context.Input<Tensor>("W");
auto d_table = auto d_table =
context.Output<framework::LoDTensor>(framework::GradVarName("W")); context.Output<framework::Tensor>(framework::GradVarName("W"));
d_table->Resize(table->dims()); d_table->Resize(table->dims());
} }
}; };
......
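Shape-wise the lookup is a row gather, which is why Out is resized to {ids.dims()[0], table.dims()[1]} above: one row of W per id. A minimal illustrative sketch (plain row-major vectors; the function name is ours):

#include <cstddef>
#include <cstdint>
#include <vector>

// Row gather behind lookup_table: out row i is W's row Ids[i].
// w holds a (rows x width) matrix in row-major order.
std::vector<float> LookupRows(const std::vector<float>& w, int width,
                              const std::vector<int64_t>& ids) {
  std::vector<float> out(ids.size() * width);
  for (std::size_t i = 0; i < ids.size(); ++i) {
    for (int j = 0; j < width; ++j) {
      out[i * width + j] = w[ids[i] * width + j];
    }
  }
  return out;
}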