Commit 82bff6ee authored by: R ranqiu

Update the annotation of layers.py

......@@ -22,6 +22,7 @@ cmake-build-*
# generated while compiling
python/paddle/v2/framework/core.so
paddle/pybind/pybind.h
CMakeFiles
cmake_install.cmake
paddle/.timestamp
......
......@@ -67,6 +67,9 @@ endif()
if(ANDROID)
if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "16")
message(FATAL_ERROR "Unsupport standalone toolchains with Android API level lower than 16")
elseif(${CMAKE_SYSTEM_VERSION} VERSION_LESS "21")
# TODO: support glog for Android api 16 ~ 19 in the future
message(WARNING "Using the unofficial git repository <https://github.com/Xreki/glog.git> instead")
endif()
set(WITH_GPU OFF CACHE STRING
......
......@@ -6,13 +6,14 @@ RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ub
# ENV variables
ARG ANDROID_ABI
ARG ANDROID_API
ENV ANDROID_ABI=${ANDROID_ABI:-"armeabi-v7a"}
ENV ANDROID_API=${ANDROID_API:-21}
ENV HOME=/root \
ANDROID_NDK_HOME=/opt/android-ndk-linux \
ANDROID_ARM_STANDALONE_TOOLCHAIN=/opt/arm-toolchain \
ANDROID_ARM64_STANDALONE_TOOLCHAIN=/opt/arm64-toolchain
ANDROID_TOOLCHAINS_DIR=/opt/toolchains
RUN apt-get update && \
apt-get install -y \
......@@ -42,14 +43,12 @@ RUN pip install --upgrade pip && \
pip install pre-commit
# Android NDK
RUN mkdir /opt/android-ndk-tmp && \
RUN mkdir -p ${ANDROID_TOOLCHAINS_DIR} && \
mkdir -p /opt/android-ndk-tmp && \
cd /opt/android-ndk-tmp && \
wget -q https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip && \
unzip -q android-ndk-r14b-linux-x86_64.zip && \
mv android-ndk-r14b ${ANDROID_NDK_HOME} && \
${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm --platform=android-23 --install-dir=${ANDROID_ARM_STANDALONE_TOOLCHAIN} && \
${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm64 --platform=android-23 --install-dir=${ANDROID_ARM64_STANDALONE_TOOLCHAIN} && \
rm -rf /opt/android-ndk-tmp && \
rm -rf ${ANDROID_NDK_HOME}
rm -rf /opt/android-ndk-tmp
CMD ["bash", "/paddle/paddle/scripts/docker/build_android.sh"]
......@@ -26,9 +26,9 @@ set(IGNORE_PATTERN
.*ImportanceSampler.*
.*cblas\\.h.*
.*\\.pb\\.txt
.*LtrDataProvider.*
.*MultiDataProvider.*
.*pb.*)
.*pb.*
.*pybind.h)
# add_style_check_target
#
......
......@@ -18,9 +18,9 @@ SET(GFLAGS_SOURCES_DIR ${THIRD_PARTY_PATH}/gflags)
SET(GFLAGS_INSTALL_DIR ${THIRD_PARTY_PATH}/install/gflags)
SET(GFLAGS_INCLUDE_DIR "${GFLAGS_INSTALL_DIR}/include" CACHE PATH "gflags include directory." FORCE)
IF(WIN32)
set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
ELSE(WIN32)
set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
ENDIF(WIN32)
INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE_DIR})
......@@ -56,3 +56,12 @@ SET_PROPERTY(TARGET gflags PROPERTY IMPORTED_LOCATION ${GFLAGS_LIBRARIES})
ADD_DEPENDENCIES(gflags extern_gflags)
LIST(APPEND external_project_dependencies gflags)
IF(WITH_C_API)
INSTALL(DIRECTORY ${GFLAGS_INCLUDE_DIR} DESTINATION third_party/gflags)
IF(ANDROID)
INSTALL(FILES ${GFLAGS_LIBRARIES} DESTINATION third_party/gflags/lib/${ANDROID_ABI})
ELSE()
INSTALL(FILES ${GFLAGS_LIBRARIES} DESTINATION third_party/gflags/lib)
ENDIF()
ENDIF()
......@@ -19,9 +19,9 @@ SET(GLOG_INSTALL_DIR ${THIRD_PARTY_PATH}/install/glog)
SET(GLOG_INCLUDE_DIR "${GLOG_INSTALL_DIR}/include" CACHE PATH "glog include directory." FORCE)
IF(WIN32)
SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.lib" CACHE FILEPATH "glog library." FORCE)
SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.lib" CACHE FILEPATH "glog library." FORCE)
ELSE(WIN32)
SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.a" CACHE FILEPATH "glog library." FORCE)
SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.a" CACHE FILEPATH "glog library." FORCE)
ENDIF(WIN32)
INCLUDE_DIRECTORIES(${GLOG_INCLUDE_DIR})
......@@ -56,3 +56,12 @@ ADD_DEPENDENCIES(glog extern_glog gflags)
LINK_LIBRARIES(glog gflags)
LIST(APPEND external_project_dependencies glog)
IF(WITH_C_API)
INSTALL(DIRECTORY ${GLOG_INCLUDE_DIR} DESTINATION third_party/glog)
IF(ANDROID)
INSTALL(FILES ${GLOG_LIBRARIES} DESTINATION third_party/glog/lib/${ANDROID_ABI})
ELSE()
INSTALL(FILES ${GLOG_LIBRARIES} DESTINATION third_party/glog/lib)
ENDIF()
ENDIF()
......@@ -73,6 +73,26 @@ IF(NOT ${CBLAS_FOUND})
UPDATE_COMMAND ""
CONFIGURE_COMMAND ""
)
IF(WITH_C_API)
INSTALL(DIRECTORY ${CBLAS_INC_DIR} DESTINATION third_party/openblas)
# Because libopenblas.a is a symbolic link to another library, we need to
# install the whole directory.
IF(ANDROID)
SET(TMP_INSTALL_DIR third_party/openblas/lib/${ANDROID_ABI})
ELSE()
SET(TMP_INSTALL_DIR third_party/openblas/lib)
ENDIF()
INSTALL(CODE "execute_process(
COMMAND ${CMAKE_COMMAND} -E copy_directory ${CBLAS_INSTALL_DIR}/lib
destination ${CMAKE_INSTALL_PREFIX}/${TMP_INSTALL_DIR}
)"
)
INSTALL(CODE "MESSAGE(STATUS \"Installing: \"
\"${CBLAS_INSTALL_DIR}/lib -> ${CMAKE_INSTALL_PREFIX}/${TMP_INSTALL_DIR}\"
)"
)
ENDIF()
ENDIF(NOT ${CBLAS_FOUND})
MESSAGE(STATUS "BLAS library: ${CBLAS_LIBRARIES}")
......
......@@ -223,6 +223,15 @@ IF(NOT PROTOBUF_FOUND)
SET(PROTOBUF_PROTOC_LIBRARY ${extern_protobuf_PROTOC_LIBRARY}
CACHE FILEPATH "protoc library." FORCE)
IF(WITH_C_API)
INSTALL(DIRECTORY ${PROTOBUF_INCLUDE_DIR} DESTINATION third_party/protobuf)
IF(ANDROID)
INSTALL(FILES ${PROTOBUF_LIBRARY} DESTINATION third_party/protobuf/lib/${ANDROID_ABI})
ELSE()
INSTALL(FILES ${PROTOBUF_LIBRARY} DESTINATION third_party/protobuf/lib)
ENDIF()
ENDIF()
IF(CMAKE_CROSSCOMPILING)
PROMPT_PROTOBUF_LIB(protobuf_host extern_protobuf)
ELSE()
......
......@@ -49,3 +49,12 @@ ExternalProject_Add(
)
LIST(APPEND external_project_dependencies zlib)
IF(WITH_C_API)
INSTALL(DIRECTORY ${ZLIB_INCLUDE_DIR} DESTINATION third_party/zlib)
IF(ANDROID)
INSTALL(FILES ${ZLIB_LIBRARIES} DESTINATION third_party/zlib/lib/${ANDROID_ABI})
ELSE()
INSTALL(FILES ${ZLIB_LIBRARIES} DESTINATION third_party/zlib/lib)
ENDIF()
ENDIF()
# Design Doc: Block and Scope
## The Representation of Computation
Both deep learning systems and programming languages help users describe computation procedures. These systems use various representations of computation:
- Caffe, Torch, and Paddle: sequences of layers.
- TensorFlow, Caffe2, Mxnet: graphs of operators.
- PaddlePaddle: nested blocks, like C++ and Java programs.
## Block in Programming Languages and Deep Learning
In programming languages, a block is a pair of curly braces that includes local variable definitions and a sequence of instructions, or operators.
Blocks work with control flow structures like `if`, `else`, and `for`, which have equivalents in deep learning:
| programming languages | PaddlePaddle |
|-----------------------|-----------------------|
| for, while loop | RNN, WhileOp |
| if, if-else, switch | IfElseOp, SwitchOp |
| sequential execution | a sequence of layers |
A key difference is that a C++ program describes a one-pass computation, whereas a deep learning program describes both the forward and backward passes.
## Stack Frames and the Scope Hierarchy
The existence of the backward pass makes the execution of a block in traditional programs and in PaddlePaddle differ from each other:
| programming languages | PaddlePaddle |
|-----------------------|-------------------------------|
| stack | scope hierarchy |
| stack frame | scope |
| push at entering block| push at entering block |
| pop at leaving block | destroy when minibatch completes |
1. In traditional programs:
- When the execution enters the left curly brace of a block, the runtime pushes a frame into the stack, where it realizes local variables.
- After the execution leaves the right curly brace, the runtime pops the frame.
- The maximum number of frames in the stack is the maximum depth of nested blocks.
1. In PaddlePaddle:
- When the execution enters a block, PaddlePaddle adds a new scope, where it realizes variables.
- PaddlePaddle doesn't pop a scope after the execution of the block because variables therein are to be used by the backward pass. So it has a stack forest known as a *scope hierarchy*.
- The height of the highest tree is the maximum depth of nested blocks.
- After processing a minibatch, PaddlePaddle destroys the scope hierarchy; a minimal sketch of this scope tree follows.
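The sketch below is illustrative C++, not the actual `framework::Scope` API: child scopes keep a pointer to their parent, nothing is popped on block exit, and the whole tree is freed together when the root scope is destroyed at the end of the minibatch.
```c++
#include <map>
#include <memory>
#include <string>
#include <vector>

struct Variable { /* payload omitted */ };

class Scope {
 public:
  explicit Scope(Scope* parent = nullptr) : parent_(parent) {}

  Variable* NewVar(const std::string& name) {
    auto& slot = vars_[name];
    if (!slot) slot.reset(new Variable);
    return slot.get();
  }

  // Lookup falls back to the parent scope, mirroring lexical scoping.
  Variable* FindVar(const std::string& name) const {
    auto it = vars_.find(name);
    if (it != vars_.end()) return it->second.get();
    return parent_ ? parent_->FindVar(name) : nullptr;
  }

  // Entering a nested block adds a child; nothing is popped on exit.
  Scope& NewChildScope() {
    children_.emplace_back(new Scope(this));
    return *children_.back();
  }

 private:
  Scope* parent_;
  std::map<std::string, std::unique_ptr<Variable>> vars_;
  std::vector<std::unique_ptr<Scope>> children_;  // freed with the root,
                                                  // i.e. when the minibatch ends
};
```
Unlike a stack frame, a child scope outlives the block that created it, so the backward pass can still read the forward pass's variables.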
## Use Blocks in C++ and PaddlePaddle Programs
Let us consolidate the discussion by presenting some examples.
### Blocks with `if-else` and `IfElseOp`
The following C++ program shows how blocks are used with the `if-else` structure:
```c++
int x = 10;
int y = 20;
int out;
bool cond = false;
if (cond) {
  int z = x + y;
  out = softmax(z);
} else {
  int z = fc(x);
  out = z;
}
```
An equivalent PaddlePaddle program from the design doc of the [IfElseOp operator](./if_else_op.md) is as follows:
```python
import paddle as pd
x = var(10)
y = var(20)
cond = var(false)
ie = pd.create_ifelseop(inputs=[x], output_num=1)
with ie.true_block():
    x = ie.inputs(true, 0)
    z = operator.add(x, y)
    ie.set_output(true, 0, operator.softmax(z))
with ie.false_block():
    x = ie.inputs(false, 0)
    z = layer.fc(x)
    ie.set_output(false, 0, z)
out = ie(cond)
```
In both examples, the left branch computes `softmax(x+y)` and the right branch computes `fc(x)`.
A difference is that variables in the C++ program contain scalar values, whereas those in the PaddlePaddle programs are mini-batches of instances. The `ie.inputs(true, 0)` invocation returns the instances in the 0-th input, `x`, that correspond to true values in `cond`, as the local variable `x`, whereas `ie.inputs(false, 0)` returns the instances corresponding to false values.
### Blocks with `for` and `RNNOp`
The following RNN model from the [RNN design doc](./rnn.md)
```python
x = sequence([10, 20, 30])
m = var(0)
W = tensor()
U = tensor()
rnn = create_rnn(inputs=[x])
with rnn.stepnet() as net:
    x = net.set_inputs(0)
    h = net.add_memory(init=m)
    fc_out = pd.matmul(W, x)
    hidden_out = pd.matmul(U, h.pre(n=1))
    sum = pd.add_two(fc_out, hidden_out)
    act = pd.sigmoid(sum)
    h.update(act)  # update memory with act
    net.set_outputs(0, act, hidden_out)  # two outputs
o1, o2 = rnn()
print o1, o2
```
has its equivalent C++ program as follows
```c++
int x[] = {10, 20, 30};
int m = 0;
int W = some_value();
int U = some_other_value();
int mem[sizeof(x) / sizeof(x[0]) + 1];
int o1[sizeof(x) / sizeof(x[0]) + 1];
int o2[sizeof(x) / sizeof(x[0]) + 1];
for (int i = 1; i <= sizeof(x)/sizeof(x[0]); ++i) {
  int xi = x[i - 1];
  if (i == 1) mem[0] = m;
  int fc_out = W * xi;
  int hidden_out = U * mem[i - 1];
  int sum = fc_out + hidden_out;
  int act = sigmoid(sum);
  mem[i] = act;
  o1[i] = act;
  o2[i] = hidden_out;
}
print_array(o1);
print_array(o2);
```
## Compilation and Execution
Like TensorFlow programs, a PaddlePaddle program is written in Python. The first part describes a neural network as a protobuf message, and the rest executes the message for training or inference.
The generation of this protobuf message is like how a compiler generates a binary executable file. The execution of the message is like how the OS executes the binary file.
## The "Binary Executable File Format"
The definition of the protobuf message is as follows:
```protobuf
message BlockDesc {
  repeated VarDesc vars = 1;
  repeated OpDesc ops = 2;
}
```
The step-net in the above RNN example would look like:
```
BlockDesc {
  vars = {
    VarDesc {...} // x
    VarDesc {...} // h
    VarDesc {...} // fc_out
    VarDesc {...} // hidden_out
    VarDesc {...} // sum
    VarDesc {...} // act
  }
  ops = {
    OpDesc {...} // matmul
    OpDesc {...} // add_two
    OpDesc {...} // sigmoid
  }
};
```
Also, the RNN operator in the above example is serialized into a protobuf message of type `OpDesc` and would look like:
```
OpDesc {
  inputs = {0} // the index of x
  outputs = {5, 3} // indices of act and hidden_out
  attrs {
    "memories" : {1} // the index of h
    "step_net" : <above step net>
  }
};
```
This `OpDesc` value is in the `ops` field of the `BlockDesc` value representing the global block.
## The Compilation of Blocks
During the generation of the Protobuf message, the Block should store VarDesc (the Protobuf message which describes a Variable) and OpDesc (the Protobuf message which describes an Operator).
VarDescs in a block should have their own name scope to prevent local variables from affecting the parent block's name scope.
A child block's name scope should inherit the parent's, so that an OpDesc in a child block can reference a VarDesc stored in the parent block. For example:
```python
a = pd.Variable(shape=[20, 20])
b = pd.fc(a, params=["fc.w", "fc.b"])
rnn = pd.create_rnn()
with rnn.stepnet() as net:
    x = net.set_inputs(a)
    # reuse fc's parameter
    fc_without_b = pd.get_variable("fc.w")
    net.set_outputs(fc_without_b)
out = rnn()
```
The method `pd.get_variable` retrieves a Variable by name. A Variable may be stored in a parent block but retrieved in a child block, so a block should have a variable scope that supports inheritance.
In compiler design, the symbol table is a data structure created and maintained by compilers to store information about the occurrence of various entities such as variable names, function names, classes, etc.
To store the definition of variables and operators, we define a C++ class `SymbolTable`, like the one used in compilers.
`SymbolTable` can do the following:
- store the definitions (names and attributes) of variables and operators,
- verify whether a variable was declared,
- make it possible to implement type checking (offer Protobuf message pointers to `InferShape` handlers).
```c++
// Information in SymbolTable is enough to trace the dependency graph. So maybe
// an Eval() interface that takes a SymbolTable is enough.
class SymbolTable {
 public:
  SymbolTable(SymbolTable* parent) : parent_(parent) {}

  OpDesc* NewOp(const string& name="");

  // TODO determine whether name is generated by python or C++.
  // Currently we assume that a unique name will be generated by C++ if the
  // argument name is left as the default.
  VarDesc* NewVar(const string& name="");

  // Find a VarDesc by name. If recursive is true, search the parent's
  // SymbolTable recursively.
  // This interface is introduced to support InferShape: it finds the protobuf
  // messages of variables and operators and passes pointers into InferShape.
  //
  // NOTE maybe some C++ classes such as VarDescBuilder and OpDescBuilder should
  // be proposed and embedded into pybind to enable Python to operate on C++ pointers.
  VarDesc* FindVar(const string& name, bool recursive=true);

  OpDesc* FindOp(const string& name);

  BlockDesc Compile() const;

 private:
  SymbolTable* parent_;
  map<string, OpDesc> ops_;
  map<string, VarDesc> vars_;
};
```
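A brief usage sketch of the interface above; the table names and the `"fc.w"`/`"h"` variables are illustrative, and the snippet assumes only the declarations shown in the class:
```c++
// Hypothetical usage of the SymbolTable interface sketched above.
SymbolTable global(nullptr);           // symbol table of the global block
VarDesc* w = global.NewVar("fc.w");    // parameter defined in the parent block

SymbolTable step(&global);             // child block, e.g. an RNN step-net
step.NewVar("h");                      // local to the step block

// Recursive lookup lets an OpDesc in the child reference the parent's VarDesc.
VarDesc* found = step.FindVar("fc.w", /*recursive=*/true);  // finds w
BlockDesc step_desc = step.Compile();  // serialize only the step block
```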
After all the descriptions of variables and operators are added into the SymbolTable,
the block has enough information to run.
The `Block` class takes a `BlockDesc` as input, and provides `Run` and `InferShape` functions.
```c++
namespace {

class Block : public OperatorBase {
 public:
  Block(const BlockDesc& desc) : desc_(desc) {}

  void InferShape(const framework::Scope& scope) const override {
    if (!symbols_ready_) {
      CreateVariables(scope);
      CreateOperators();
    }
    // should run InferShape first.
    for (auto& op : runtime_table_.ops()) {
      op->InferShape(scope);
    }
  }

  void Run(const framework::Scope& scope,
           const platform::DeviceContext& dev_ctx) const override {
    PADDLE_ENFORCE(symbols_ready_, "operators and variables should be created first.");
    for (auto& op : runtime_table_.ops()) {
      op->Run(scope, dev_ctx);
    }
  }

  void CreateVariables(const framework::Scope& scope);
  void CreateOperators();

  // some other necessary interfaces of NetOp are listed below
  // ...

 private:
  BlockDesc desc_;
  bool symbols_ready_{false};
};

}  // namespace
```
## The Execution of Blocks
Block inherits from OperatorBase, which has a Run method.
Block's Run method will run its operators sequentially.
There is another important interface called `Eval`, which takes some arguments called targets and generates a minimal graph that treats the targets as end points, creating a new Block.
After `Run`, `Eval` will fetch the latest values and return the targets.
The definition of Eval is as follows:
```c++
// Prune a block description by targets using the corresponding dependency graph.
// Returns a new BlockDesc with a minimal number of operators.
// NOTE: it returns not a Block but the block's description, so that it can be
// distributed to a cluster.
BlockDesc Prune(const BlockDesc& desc, vector<string> targets);

void Block::Eval(const vector<string>& targets,
                 const framework::Scope& scope,
                 const platform::DeviceContext& dev_ctx) {
  BlockDesc min_desc = Prune(desc_, targets);
  Block min_block(min_desc);
  min_block.Run(scope, dev_ctx);
}
```
IfOp should have only one branch. An IfOp operator takes a `cond` variable whose value must be a vector of N boolean elements. Its return value has M (M<=N) instances, each corresponding to a true element in `cond`.
```python
import paddle as pd
x = var()
y = var()
cond = var()
b = pd.create_ifop(inputs=[x], output_num=1)
with b.true_block():
    x = b.inputs(0)
    z = operator.add(x, y)
    b.set_output(0, operator.softmax(z))
out = b(cond)
```
If we want the output still has N instances, we can use IfElseOp with a default value, whose minibatch size must be N:
IfOp should have only one branch. An IfOp operator takes a `cond` variable whose value must be a vector of N boolean elements. Its return value has N instances. If cond[i] == True, input instance input[i] will go through true_block() and generate output[i]; otherwise it will produce output from false_block().
```python
import paddle as pd
......@@ -39,7 +21,7 @@ with b.false_block():
out = b(cond)
```
If only true_block is set in an IfElseOp, we can have a default value for false as:
If only true_block is set in an IfElseOp, a special case is that we can provide a default value for the false branch:
```python
import paddle as pd
......
digraph G {
rnn [label="1-th level RNN" shape=box]
subgraph cluster0 {
label = "time step 0"
sent0 [label="sentence"]
sent1 [label="sentence"]
rnn1 [label="2-th level RNN" shape=box]
sent0 -> rnn1
sent1 -> rnn1
}
subgraph cluster1 {
label = "time step 1"
sent2 [label="sentence"]
sent3 [label="sentence"]
rnn2 [label="2-th level RNN" shape=box]
sent2 -> rnn2
sent3 -> rnn2
}
subgraph cluster2 {
label = "time step 2"
sent4 [label="sentence"]
sent5 [label="sentence"]
rnn3 [label="2-th level RNN" shape=box]
sent4 -> rnn3
sent5 -> rnn3
}
para0 [label="paragraph info 0"]
para1 [label="paragraph info 1"]
para2 [label="paragraph info 2"]
rnn1 -> para0
rnn2 -> para1
rnn3 -> para2
para0 -> rnn
para1 -> rnn
para2 -> rnn
chapter [label="chapter info"]
rnn -> chapter
}
digraph G {
label = "simple RNN implementation"
ranksep=2;
//graph [nodesep=1, ranksep=1];
node[nodesep=1]
subgraph cluster0 {
label = "global scope"
rankdir = TB
W
boot_memory
input
output
}
subgraph cluster1 {
label = "step-scope 0"
rankdir = TB
memory0[label="memory"]
prememory0[label="pre-memory"]
step_input0[label="step input"]
step_output0[label="step output"]
}
subgraph cluster2 {
label = "step-scope 1"
rankdir = TB
memory1[label="memory"]
prememory1[label="pre-memory"]
step_input1[label="step input"]
step_output1[label="step output"]
}
subgraph cluster3 {
label = "step-scope 2"
rankdir = TB
memory2[label="memory"]
prememory2[label="pre-memory"]
step_input2[label="step input"]
step_output2[label="step output"]
}
stepnet [shape=box]
stepnet0 [shape=box, style=dashed]
stepnet1 [shape=box, style=dashed]
stepnet2 [shape=box, style=dashed]
edge[color=blue]
boot_memory -> prememory0 [label="init" color="blue"]
memory0 -> prememory1 [label="copy/reference" color="blue"]
memory1 -> prememory2 [label="copy/reference" color="blue"]
edge[color=black]
W -> stepnet0[constraint=false, style=dashed]
W -> stepnet1[constraint=false, style=dashed]
W -> stepnet2[constraint=false, style=dashed]
memory0 -> stepnet0[style=dashed]
prememory0 -> stepnet0 -> step_output0[style=dashed]
memory1 -> stepnet1[style=dashed]
prememory1 -> stepnet1 -> step_output1[style=dashed]
memory2 -> stepnet2[style=dashed]
prememory2 -> stepnet2 -> step_output2[style=dashed]
input -> step_input0
input -> step_input1
input -> step_input2
step_input0 -> stepnet0 [style=dashed]
step_input1 -> stepnet1[style=dashed]
step_input2 -> stepnet2[style=dashed]
step_output0 -> output
step_output1 -> output
step_output2 -> output
stepnet0 -> stepnet[style=dashed]
stepnet1 -> stepnet[style=dashed]
stepnet2 -> stepnet[style=dashed]
}
digraph G {
chapter [label="chapter"]
subgraph cluster0 {
label = "paragraph 0"
top_rnn0[label="top rnn step 0" shape=box]
p0 [label="paragraph 0"]
p1 [label="paragraph 1"]
}
subgraph cluster1{
label = "paragraph 1"
top_rnn1[label="top rnn step 1" shape=box]
p2 [label="paragraph 0"]
p3 [label="paragraph 1"]
}
subgraph cluster_p0 {
label = "sentence 0"
low_rnn0 [label="low rnn step 0" shape=box]
s00 [label="sentence 0"]
s01 [label="sentence 1"]
low_rnn0 -> s00
low_rnn0 -> s01
}
subgraph cluster_p1 {
label = "sentence 1"
low_rnn1 [label="low rnn step 1" shape=box]
s10 [label="sentence 0"]
s11 [label="sentence 1"]
low_rnn1 -> s10
low_rnn1 -> s11
}
subgraph cluster_p2 {
label = "sentence 1"
low_rnn2 [label="low rnn step 0" shape=box]
s20 [label="sentence 0"]
s21 [label="sentence 1"]
low_rnn2 -> s20
low_rnn2 -> s21
}
subgraph cluster_p3 {
label = "sentence 1"
low_rnn3 [label="low rnn step 1" shape=box]
s30 [label="sentence 0"]
s31 [label="sentence 1"]
low_rnn3 -> s30
low_rnn3 -> s31
}
chapter -> top_rnn0
chapter -> top_rnn1
top_rnn0 -> p0
top_rnn0 -> p1
top_rnn1 -> p2
top_rnn1 -> p3
p0 -> low_rnn0
p1 -> low_rnn1
p2 -> low_rnn2
p3 -> low_rnn3
}
# RNNOp design
This document is about an RNN operator which requires that instances in a mini-batch have the same length. We will have a more flexible RNN operator.
## RNN Algorithm Implementation
<p align="center">
<img src="./images/rnn.jpg"/>
</p>
The above diagram shows an RNN unrolled into a full network.
There are several important concepts:
- *step-net*: the sub-graph to run at each step,
- *memory*, $h_t$, the state of the current step,
- *ex-memory*, $h_{t-1}$, the state of the previous step,
- *initial memory value*, the ex-memory of the first step.
### Step-scope
There could be local variables defined in step-nets. PaddlePaddle runtime realizes these variables in *step-scopes* -- scopes created for each step.
<p align="center">
<img src="./images/rnn.png"/><br/>
Figure 2 the RNN's data flow
</p>
Please be aware that all steps run the same step-net. Each step
1. creates the step-scope,
2. realizes local variables, including step-outputs, in the step-scope, and
3. runs the step-net, which could use these variables.
The RNN operator will compose its output from the step outputs in the step-scopes; a minimal sketch of this loop follows.
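A minimal sketch of that per-step loop, with illustrative `Scope` and `StepNet` stand-ins rather than the real framework classes:
```c++
#include <vector>

// Toy stand-ins: each step gets its own child scope, realizes its locals
// there, and runs the shared step-net; scopes are kept so outputs can be
// composed afterwards.
struct Scope { Scope* parent = nullptr; /* variables omitted */ };

struct StepNet {
  void Run(Scope& /*step_scope*/) const { /* run the ops of one step */ }
};

void RunRNN(const StepNet& step_net, Scope& rnn_scope, int seq_len) {
  std::vector<Scope> step_scopes(seq_len);
  for (int t = 0; t < seq_len; ++t) {
    step_scopes[t].parent = &rnn_scope;  // 1. create the step-scope
    // 2. realize local variables (step inputs/outputs) in step_scopes[t]
    step_net.Run(step_scopes[t]);        // 3. run the shared step-net
  }
  // Afterwards the RNN op composes its output from the step outputs.
}
```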
### Memory and Ex-memory
Let's give more details about memory and ex-memory using a simple example:
$$
h_t = U h_{t-1} + W x_t
$$
where $h_t$ and $h_{t-1}$ are the memory and the ex-memory of step $t$, respectively.
In the implementation, we can make an ex-memory variable either refer to the memory variable of the previous step,
or copy the value of the previous step's memory to the current ex-memory variable.
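As a toy illustration of the recurrence and of the copy-versus-reference choice, here is a self-contained sketch with plain scalars in place of tensors (the values of `U`, `W`, and `x` are made up):
```c++
#include <cstdio>
#include <vector>

int main() {
  const float U = 0.5f, W = 2.0f;
  std::vector<float> x = {1, 2, 3};
  std::vector<float> mem(x.size() + 1, 0.0f);  // mem[0] is the initial memory

  for (size_t t = 1; t <= x.size(); ++t) {
    // Option 1: the ex-memory "refers to" the previous step's memory.
    const float& ex_mem_ref = mem[t - 1];
    // Option 2: copy the previous value into the current ex-memory variable.
    float ex_mem_copy = mem[t - 1];

    mem[t] = U * ex_mem_ref + W * x[t - 1];  // both options give the same h_t
    (void)ex_mem_copy;
  }
  std::printf("h_3 = %f\n", mem[3]);
  return 0;
}
```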
### Usage in Python
For more information on Block, please refer to the [design doc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/block.md).
We can define an RNN's step-net using Block:
```python
import paddle as pd
X = some_op()  # X is some operator's output, and is a LoDTensor
a = some_op()
# declare parameters
W = pd.Variable(shape=[20, 30])
U = pd.Variable(shape=[20, 30])
rnn = pd.create_rnn_op(output_num=1)
with rnn.stepnet():
    x = rnn.add_input(X)
    # declare a memory (rnn's step)
    h = rnn.add_memory(init=a)
    # h.pre_state() means previous memory of rnn
    new_state = pd.add_two(pd.matmul(W, x), pd.matmul(U, h.pre_state()))
    # update current memory
    h.update(new_state)
    # indicate that h variables in all step scopes should be merged
    rnn.add_outputs(h)
out = rnn()
```
Python API functions in the above example:
- `rnn.add_input` indicates the parameter is a variable that will be segmented into step-inputs.
- `rnn.add_memory` creates a variable used as the memory.
- `rnn.add_outputs` marks the variables that will be concatenated across steps into the RNN output.
### Nested RNN and LoDTensor
An RNN whose step-net includes other RNN operators is known as a *nested RNN*.
For example, we could have a 2-level RNN, where the top level corresponds to paragraphs, and the lower level corresponds to sentences.
The following figure illustrates the feeding of text into the lower level, one sentence per step, and the feeding of step outputs to the top level. The final top-level output is about the whole text.
<p align="center">
<img src="./images/2_level_rnn.png"/>
</p>
```python
import paddle as pd
W = pd.Variable(shape=[20, 30])
U = pd.Variable(shape=[20, 30])
W0 = pd.Variable(shape=[20, 30])
U0 = pd.Variable(shape=[20, 30])
# a is output of some op
a = some_op()
# chapter_data is a set of 128-dim word vectors
# the first level of LoD is sentence
# the second level of LoD is chapter
chapter_data = pd.Variable(shape=[None, 128], type=pd.lod_tensor, level=2)
def lower_level_rnn(paragraph):
    '''
    paragraph: the input paragraph data
    '''
    rnn = pd.create_rnn_op(output_num=1)
    with rnn.stepnet():
        sentence = rnn.add_input(paragraph, level=0)
        h = rnn.add_memory(shape=[20, 30])
        h.update(
            pd.matmul(W, sentence) + pd.matmul(U, h.pre_state()))
        # get the last state as sentence's info
        rnn.add_outputs(h)
    return rnn

top_level_rnn = pd.create_rnn_op(output_num=1)
with top_level_rnn.stepnet():
    paragraph_data = top_level_rnn.add_input(chapter_data, level=1)
    low_rnn = lower_level_rnn(paragraph_data)
    paragraph_out = low_rnn()
    h = top_level_rnn.add_memory(init=a)
    h.update(
        pd.matmul(W0, paragraph_data) + pd.matmul(U0, h.pre_state()))
    top_level_rnn.add_outputs(h)

# just output the last step
chapter_out = top_level_rnn(output_all_steps=False)
```
In the above example, the construction of the `top_level_rnn` calls `lower_level_rnn`. The input is a LoD tensor. The top-level RNN segments the input text data into paragraphs, and the lower-level RNN segments each paragraph into sentences.
By default, the `RNNOp` concatenates the outputs from all the time steps.
If `output_all_steps` is set to False, it only outputs the final time step.
<p align="center">
<img src="images/rnn_2level_data.png"/>
</p>
......@@ -34,7 +34,7 @@ Kernel implementation | CPU and GPU share the Kernel implementation in a `.h` file; otherwise, the CPU
Registering the Op | Op registration goes in the `.cc` file; the CPU Kernel registration goes in the `.cc` file, and the GPU Kernel registration in the `.cu` file
New ops are added under the [paddle/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators) directory, with file names ending in `*_op.h` (if any), `*_op.cc`, and `*_op.cu` (if any).
New ops are added under the [paddle/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators) directory, with file names ending in `*_op.h` (if any), `*_op.cc`, and `*_op.cu` (if any). **The build system automatically builds the op and its corresponding Python extension based on the file names.**
The following uses the matrix multiplication operator, [MulOp](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc), as an example to show how to write an Operator with a Kernel.
......@@ -224,45 +224,15 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs,
### 5. Compilation
- Simple ops with **no special dependencies** do not require changes to CMakeLists.txt. [paddle/operators/CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/CMakeLists.txt) automatically adds new `*_op.cc` files under the `paddle/operators` directory to the build.
- More complex operators with **extra dependencies** still require changes to [paddle/operators/CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/CMakeLists.txt). For example, `mul_op` depends on `math_function`, so the following must be added to `CMakeLists.txt`:
Run the following command to compile:
```
op_library(mul_op SRCS mul_op.cc mul_op.cu DEPS math_function) +
```
- Run the following command to compile:
```
make mul_op
```
```
make mul_op
```
## Python Binding
- Python binding
In [`paddle/pybind/pybind.cc`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/pybind.cc), use `USE_OP` to tell the compiler which Op needs to be linked; see the [code comments](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_registry.h#L81) for a detailed explanation.
```
USE_OP(mul);
```
If only the CPU version is implemented, use `USE_CPU_ONLY_OP`:
```
USE_CPU_ONLY_OP(gather);
```
If the Op has no Kernel, use `USE_NO_KENREL_OP`:
```
USE_NO_KENREL_OP(recurrent);
```
- Generating the library
There is no need to modify [`paddle/pybind/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/CMakeLists.txt); new `*_op.cc` files under the `paddle/operators` directory are automatically linked into the generated library.
The system automatically generates the Python binding for a new op and links it into the generated library.
## Implementing Unit Tests
......@@ -354,11 +324,7 @@ class TestMulGradOp(GradientChecker):
### Compiling and Running Unit Tests
After a unit test is written, add the following to [`python/paddle/v2/framework/tests/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/CMakeLists.txt) to add it to the project:
```
py_test(test_mul_op SRCS test_mul_op.py)
```
New `test_*.py` unit tests under the `python/paddle/v2/framework/tests` directory are automatically added to the project and compiled.
Note that, **unlike compiling an Op, running its unit tests requires compiling the whole project**, and the build must have `WITH_TESTING` enabled, i.e. `cmake paddle_dir -DWITH_TESTING=ON`. After a successful build, run the following command to execute the unit tests:
......@@ -371,3 +337,10 @@ make test ARGS="-R test_mul_op -V"
```bash
ctest -R test_mul_op
```
## Notes
- Create separate `*_op.h` (if any), `*_op.cc`, and `*_op.cu` (if any) files for each Op. A single file must not contain more than one Op; this will cause compilation errors.
- The type name used when registering an Op must be the same as the Op's name. That is, registering `REGISTER_OP(B, ...)` inside `A_op.cc` is not allowed; this will cause unit test failures.
- If an Op has no GPU Kernel implementation, do not create an empty `*_op.cu`; this will cause unit test failures.
- If multiple Ops depend on some shared functions, these can be placed in files that do not follow the `*_op.*` naming pattern, e.g. `gather.h`.
......@@ -64,9 +64,29 @@ link_paddle_exe(paddle_capi_shared)
install(FILES ${CAPI_HEADERS} DESTINATION include/paddle)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/config.h DESTINATION include/paddle)
if(ANDROID)
execute_process(
COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -1
OUTPUT_VARIABLE GIT_COMMITS_LIST
RESULT_VARIABLE GIT_COMMITS_LIST_RESULT
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
if(${GIT_COMMITS_LIST_RESULT})
set(GIT_COMMITS_LIST "No commits.")
endif()
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library}
DESTINATION lib/${ANDROID_ABI})
install(TARGETS paddle_capi_shared DESTINATION lib/${ANDROID_ABI})
install(CODE "FILE(WRITE ${CMAKE_INSTALL_PREFIX}/lib/${ANDROID_ABI}/BUILD.txt
\"Compiler:\n\"
\"\\t${CMAKE_C_COMPILER}\\n\"
\"\\t${CMAKE_CXX_COMPILER}\\n\"
\"Compiler Flags:\\n\"
\"\\t${CMAKE_F_FLAGS}\\n\"
\"\\t${CMAKE_CXX_FLAGS}\\n\"
\"Android API: ${CMAKE_SYSTEM_VERSION}\\n\"
\"Lastest commit:\\n\"
\"\\t${GIT_COMMITS_LIST}\\n\"
)"
)
else(ANDROID)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library} DESTINATION lib)
install(TARGETS paddle_capi_shared DESTINATION lib)
......
......@@ -2,11 +2,22 @@
## Motivation
In a neural network, the backpropagation algorithm follows the chain rule, so we need to compose the gradient operators/expressions together according to the chain rule. Every forward network needs a backward network to construct the full computation graph; the operator/expression's backward pass is generated with respect to the forward pass.
In neural networks, many models are currently solved by the backpropagation algorithm (known as BP). Technically, it calculates the gradient of the loss function and then propagates it back through the network. Following the chain rule, we need a module that chains the gradient operators/expressions together to construct the backward pass. Every forward network needs a backward network to construct the full computation graph; the operator/expression's backward pass is generated with respect to the forward pass.
## Backward Operator Registry
## Implementation
A backward network is built up with several backward operators. Backward operators take forward operators' inputs outputs, and output gradients and then calculate its input gradients.
In this design doc, we export only one API for generating the backward pass:
```c++
std::unique_ptr<OperatorBase> Backward(const OperatorBase& forwardOp,
                                       const std::unordered_set<std::string>& no_grad_vars);
```
The implementation behind it can be divided into two parts, **Backward Operator Creating** and **Backward Operator Building**.
### Backward Operator Registry
A backward network is built up with several backward operators. Backward operators take the forward operators' inputs, outputs, and output gradients, and then calculate their input gradients.
| | forward operator | backward operator
| ---------------------- | ---------------- |------------------------- |
......@@ -25,7 +36,7 @@ REGISTER_OP(mul, MulOp, MulOpMaker, mul_grad, MulOpGrad);
`mul_grad` is the type of backward operator, and `MulOpGrad` is its class name.
## Backward Operator Creating
### Backward Operator Creating
Given a certain forward operator, we can get its corresponding backward operator by calling:
......@@ -43,40 +54,47 @@ The function `BuildGradOp` will sequentially execute the following processes:
4. Building backward operator with `inputs`, `outputs` and forward operator's attributes.
## Backward Network Building
A backward network is a series of backward operators. The main idea of building a backward network is creating backward operators in the inverted sequence and putting them together.
### Backward Network Building
In our design, the network itself is also a kind of operator, so the operators contained in a big network may themselves be small networks.
Given a forward network, it generates the backward network. We only care about the gradients: `OutputGradients` and `InputGradients`.
A backward network is a series of backward operators. The main idea of building a backward network is creating backward operators in the inverted sequence and appending them one by one. There are some corner cases that need to be handled specially.
1. Op
When the input forward network is an Op, return its gradient operator immediately.
When the input forward network is an Op, return its gradient operator immediately. If all of its outputs are in the no-gradient set, then return a special `NOP`.
2. NetOp
When the input forward network is a NetOp, it needs to call the backward functions of the sub NetOps/Operators recursively. During the process, we need to collect the `OutputGradients` names according to the forward NetOp.
In our design, the network itself is also a kind of operator (**NetOp**), so the operators contained in a big network may themselves be small networks. When the input forward network is a NetOp, it needs to call the backward functions of the sub NetOps/Operators recursively. During the process, we need to collect the `OutputGradients` names according to the forward NetOp.
3. RnnOp
RnnOp is a nested step-net operator. The backward module needs to recursively call `Backward` for every step-net.
4. Sharing Variables
**sharing variables**. As illustrated in the pictures, two operators share the same variable name `W@GRAD`, which will overwrite their shared input variable.
<p align="center">
<img src="./images/duplicate_op.png" width="50%" ><br/>
**shared variable**. As illustrated in the pictures, two operators' `Output` `Gradient` will overwrite their shared input variable.
pic 1. Sharing variables in operators.
<p align="center">
<img src="./images/duplicate_op.png" width="50%" ><br/>
</p>
1. Shared variable in operators.
Sharing a variable between operators, or using the same input variable in multiple operators, leads to a duplicate gradient variable. As the demo above shows, we need to rename the gradient names recursively and add a generic add operator to replace the overwriting links.
</p>
<p align="center">
<img src="images/duplicate_op2.png" width="40%" ><br/>
Sharing a variable between operators, or using the same input variable in multiple operators, leads to a duplicate gradient variable. As the demo above shows, we need to rename the gradient names recursively and add a generic add operator to replace the overwriting links.
pic 2. Replacing a shared variable's gradient with the `Add` operator.
<p align="center">
<img src="images/duplicate_op2.png" width="50%" ><br/>
</p>
2. Replacing a shared variable's gradient with the `Add` operator.
Because our framework finds variables according to their names, we need to rename the output links. We add a numeric suffix to represent each link's position, in clockwise order.
</p>
5. Part of Gradient is Zero.
In the whole graph, there are cases where one operator's gradient is not needed, but its input's gradient is a dependency link of another operator. We need to fill a gradient matrix of the same shape in that position. In our implementation, we insert a special `fillZeroLike` operator.
Then collect the subgraph's `OutputGradients`/`InputGradients` as the NetOp's and return them.
Following the rules above, we then collect the subgraph's `OutputGradients`/`InputGradients` as the NetOp's and return them. A toy sketch of these rules follows.
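The following is a toy sketch, not the real implementation, of how rules 1 and 2 above compose: a plain op maps to its gradient op (or a `NOP` when all of its outputs are in the no-gradient set), and a NetOp recurses over its children in inverted order.
```c++
#include <algorithm>
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>

struct Op {
  std::string type;
  std::vector<std::string> outputs;
  std::vector<std::unique_ptr<Op>> children;  // non-empty means a NetOp
};

std::unique_ptr<Op> Backward(const Op& fwd,
                             const std::unordered_set<std::string>& no_grad) {
  if (fwd.children.empty()) {  // rule 1: a plain Op
    bool all_no_grad =
        std::all_of(fwd.outputs.begin(), fwd.outputs.end(),
                    [&](const std::string& o) { return no_grad.count(o) > 0; });
    auto grad = std::make_unique<Op>();
    grad->type = all_no_grad ? "NOP" : fwd.type + "_grad";
    return grad;
  }
  // rule 2: a NetOp -- build the children's backward ops in inverted order.
  auto net_grad = std::make_unique<Op>();
  net_grad->type = "net_grad";
  for (auto it = fwd.children.rbegin(); it != fwd.children.rend(); ++it) {
    net_grad->children.push_back(Backward(**it, no_grad));
  }
  return net_grad;
}
```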
......@@ -51,18 +51,15 @@ bool operator==(const LoD& a, const LoD& b);
* LoDTensor (Level of details Tensor)
* see https://en.wikipedia.org/wiki/Level_of_details for reference.
*/
class LoDTensor {
class LoDTensor : public Tensor {
public:
LoDTensor() {}
LoDTensor(const LoD& lod, Tensor* t) : lod_(lod), tensor_(t) {}
void set_lod(const LoD& lod) { lod_ = lod; }
void set_tensor(Tensor* tensor) { tensor_ = tensor; }
explicit LoDTensor(const LoD& lod) : lod_(lod) {}
Tensor& tensor() { return *tensor_; }
void set_lod(const LoD& lod) { lod_ = lod; }
LoD lod() { return lod_; }
LoD lod() const { return lod_; }
/*
* Get a element from LoD.
......@@ -104,7 +101,6 @@ class LoDTensor {
private:
LoD lod_;
Tensor* tensor_; // not owned
};
} // namespace framework
} // namespace paddle
......@@ -36,69 +36,64 @@ class LoDTensorTester : public ::testing::Test {
ASSERT_EQ(lod.size(), 3UL);
tensor.Resize({20 /*batch size*/, 128 /*dim*/});
lod_tensor_.Resize({20 /*batch size*/, 128 /*dim*/});
// malloc memory
tensor.mutable_data<float>(place);
lod_tensor_.mutable_data<float>(place);
lod_tensor.set_lod(lod);
lod_tensor.set_tensor(&tensor);
lod_tensor_.set_lod(lod);
}
protected:
platform::CPUPlace place;
Tensor tensor;
LoDTensor lod_tensor;
LoDTensor lod_tensor_;
};
TEST_F(LoDTensorTester, NumLevels) { ASSERT_EQ(lod_tensor.NumLevels(), 3UL); }
TEST_F(LoDTensorTester, NumLevels) { ASSERT_EQ(lod_tensor_.NumLevels(), 3UL); }
TEST_F(LoDTensorTester, NumElements) {
ASSERT_EQ(lod_tensor.NumElements(0), 2UL);
ASSERT_EQ(lod_tensor.NumElements(1), 4UL);
ASSERT_EQ(lod_tensor.NumElements(2), 8UL);
ASSERT_EQ(lod_tensor_.NumElements(0), 2UL);
ASSERT_EQ(lod_tensor_.NumElements(1), 4UL);
ASSERT_EQ(lod_tensor_.NumElements(2), 8UL);
}
TEST_F(LoDTensorTester, SliceLevels) {
// slice 1 level
for (size_t level = 0; level < 3UL; ++level) {
LoDTensor new_lod_tensor = lod_tensor;
LoDTensor new_lod_tensor = lod_tensor_;
new_lod_tensor.SliceLevels(level, level + 1);
ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level));
ASSERT_EQ(new_lod_tensor.tensor().data<float>(),
lod_tensor.tensor().data<float>());
ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level));
ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>());
}
// slice 2 level
for (size_t level = 0; level < 2UL; ++level) {
LoDTensor new_lod_tensor = lod_tensor;
LoDTensor new_lod_tensor = lod_tensor_;
new_lod_tensor.SliceLevels(level, level + 2);
ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level));
ASSERT_EQ(new_lod_tensor.NumElements(1), lod_tensor.NumElements(level + 1));
ASSERT_EQ(new_lod_tensor.tensor().data<float>(),
lod_tensor.tensor().data<float>());
ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level));
ASSERT_EQ(new_lod_tensor.NumElements(1),
lod_tensor_.NumElements(level + 1));
ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>());
}
}
TEST_F(LoDTensorTester, SliceInLevel) {
size_t level = 0;
LoDTensor new_lod_tensor = lod_tensor;
LoDTensor new_lod_tensor = lod_tensor_;
new_lod_tensor.SliceInLevel(level, 0, 2);
EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL);
EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL);
EXPECT_EQ(new_lod_tensor.NumElements(1), 4UL);
EXPECT_EQ(new_lod_tensor.NumElements(2), 8UL);
ASSERT_EQ(new_lod_tensor.tensor().data<float>(),
lod_tensor.tensor().data<float>());
ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>());
level = 1;
new_lod_tensor = lod_tensor;
new_lod_tensor = lod_tensor_;
new_lod_tensor.SliceInLevel(level, 0, 2);
ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
ASSERT_EQ(new_lod_tensor.tensor().data<float>(),
lod_tensor.tensor().data<float>());
ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>());
}
} // namespace framework
......
......@@ -26,18 +26,16 @@ __global__ void test(size_t* a, int size) {
}
TEST(LoDTensor, LoDInGPU) {
paddle::framework::Tensor tensor;
paddle::framework::LoDTensor lod_tensor;
paddle::platform::GPUPlace place(0);
paddle::framework::LoD src_lod;
src_lod.push_back(std::vector<size_t>{0, 2, 4, 6, 8, 10, 12, 14});
tensor.Resize({14, 16});
tensor.mutable_data<float>(place);
lod_tensor.Resize({14, 16});
lod_tensor.mutable_data<float>(place);
lod_tensor.set_lod(src_lod);
lod_tensor.set_tensor(&tensor);
CHECK_EQ(lod_tensor.lod_element(0, 2), 4);
CHECK_EQ(lod_tensor.lod_element(0, 4), 8);
......
......@@ -186,6 +186,48 @@ void OperatorBase::GenerateTemporaryNames() {
}
}
template <>
const Tensor* InferShapeContext::Input<Tensor>(const std::string& name) const {
auto* var = InputVar(name);
return var == nullptr ? nullptr : GetTensorFromVar(var);
}
template <>
const std::vector<const Tensor*> InferShapeContext::MultiInput<Tensor>(
const std::string& name) const {
auto names = op().Inputs(name);
std::vector<const Tensor*> res;
res.reserve(names.size());
std::transform(names.begin(), names.end(), std::back_inserter(res),
[&](const std::string& sub_name) {
auto var = scope_.FindVar(sub_name);
return var == nullptr ? nullptr : GetTensorFromVar(var);
});
return res;
}
template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
auto* var = OutputVar(name);
return var == nullptr ? nullptr : const_cast<Tensor*>(GetTensorFromVar(var));
}
template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
const std::string& name) const {
auto names = op().Outputs(name);
std::vector<Tensor*> res;
res.reserve(names.size());
std::transform(names.begin(), names.end(), std::back_inserter(res),
[&](const std::string& sub_name) {
auto var = scope().FindVar(sub_name);
return var == nullptr
? nullptr
: const_cast<Tensor*>(GetTensorFromVar(var));
});
return res;
}
void OpProtoAndCheckerMaker::Validate() {
validated_ = true;
CheckNoDuplicatedInOutAttrs();
......
......@@ -22,6 +22,7 @@ limitations under the License. */
#include "op_info.h"
#include "paddle/framework/attribute.h"
#include "paddle/framework/framework.pb.h"
#include "paddle/framework/lod_tensor.h"
#include "paddle/framework/scope.h"
#include "paddle/framework/tensor.h"
#include "paddle/platform/device_context.h"
......@@ -326,11 +327,27 @@ class InferShapeContext {
return res;
}
const Tensor* GetTensorFromVar(const Variable* var) const {
if (var->IsType<LoDTensor>()) {
return &var->Get<LoDTensor>();
}
PADDLE_ENFORCE(var->IsType<Tensor>(),
"The Input(%s) must be LoDTensor or Tensor.");
return &var->Get<Tensor>();
}
private:
const OperatorBase& op_;
const Scope& scope_;
};
template <>
const Tensor* InferShapeContext::Input<Tensor>(const std::string& name) const;
template <>
const std::vector<const Tensor*> InferShapeContext::MultiInput<Tensor>(
const std::string& name) const;
template <typename T>
struct EigenDeviceConverter;
......@@ -363,9 +380,37 @@ class ExecutionContext : public InferShapeContext {
return device_context_;
}
// redefine Output function,
// use Variable::Get instead of Variable::GetMutable
template <typename T>
T* Output(const std::string& name) const {
auto var = OutputVar(name);
return var == nullptr ? nullptr : const_cast<T*>(&var->Get<T>());
}
// redefine MultiOutput function.
// use Variable::Get instead of Variable::GetMutable
template <typename T>
std::vector<T*> MultiOutput(const std::string& name) const {
auto names = op().Outputs(name);
std::vector<T*> res;
res.reserve(names.size());
std::transform(
names.begin(), names.end(), std::back_inserter(res),
[&](const std::string& sub_name) { return Output<T>(sub_name); });
return res;
}
const platform::DeviceContext* device_context_;
};
template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const;
template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
const std::string& name) const;
class OpKernel {
public:
/**
......
......@@ -81,6 +81,9 @@ class Tensor {
/*! Return the dimensions of the memory block. */
inline const DDim& dims() const;
/*! Return the numel of the memory block. */
inline int64_t numel() const;
/*! Resize the dimensions of the memory block. */
inline Tensor& Resize(const DDim& dims);
......@@ -162,6 +165,12 @@ class Tensor {
/*! points to dimensions of memory block. */
DDim dims_;
/**
* A cache of the number of elements in a tensor.
* Would be 0 for an uninitialized tensor.
*/
int64_t numel_;
/**
* @brief A PlaceHolder may be shared by more than one tensor.
*
......
......@@ -22,9 +22,9 @@ namespace framework {
template <typename T>
inline void Tensor::check_memory_size() const {
PADDLE_ENFORCE_NOT_NULL(
holder_, "Tenosr holds no memory. Call Tensor::mutable_data first.");
holder_, "Tensor holds no memory. Call Tensor::mutable_data first.");
PADDLE_ENFORCE_GE(
holder_->size(), product(dims_) * sizeof(T) + offset_,
holder_->size(), numel() * sizeof(T) + offset_,
"Tensor's dims_ is out of bound. Call Tensor::mutable_data "
"first to re-allocate memory.\n"
"or maybe the required data-type mismatches the data already stored.");
......@@ -54,11 +54,11 @@ inline T* Tensor::mutable_data(DDim dims, platform::Place place) {
template <typename T>
inline T* Tensor::mutable_data(platform::Place place) {
static_assert(std::is_pod<T>::value, "T must be POD");
PADDLE_ENFORCE_GT(product(dims_), 0,
PADDLE_ENFORCE_GT(numel(), 0,
"Tensor's numel must be larger than zero to call "
"Tensor::mutable_data. Call Tensor::set_dim first.");
/* some versions of boost::variant don't have operator!= */
int64_t size = product(dims_) * sizeof(T);
int64_t size = numel() * sizeof(T);
if (holder_ == nullptr || !(holder_->place() == place) ||
holder_->size() < size + offset_) {
if (platform::is_cpu_place(place)) {
......@@ -97,7 +97,7 @@ inline void Tensor::CopyFrom(const Tensor& src,
auto dst_ptr = static_cast<void*>(mutable_data<T>(dst_place));
auto size = product(src.dims_) * sizeof(T);
auto size = src.numel() * sizeof(T);
if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
memory::Copy(boost::get<platform::CPUPlace>(dst_place), dst_ptr,
......@@ -131,7 +131,7 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
PADDLE_ENFORCE_LT(begin_idx, end_idx,
"Begin index must be less than end index.");
PADDLE_ENFORCE_NE(dims_[0], 1, "Can not slice a tensor with dims_[0] = 1.");
size_t base = product(dims_) / dims_[0];
size_t base = numel() / dims_[0];
Tensor dst;
dst.holder_ = holder_;
DDim dst_dims = dims_;
......@@ -143,11 +143,14 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
inline Tensor& Tensor::Resize(const DDim& dims) {
dims_ = dims;
numel_ = product(dims_);
return *this;
}
inline const DDim& Tensor::dims() const { return dims_; }
inline int64_t Tensor::numel() const { return numel_; }
template <typename T>
inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
Tensor res;
......
......@@ -36,7 +36,7 @@ TEST(Tensor, DataAssert) {
} catch (paddle::platform::EnforceNotMet err) {
caught = true;
std::string msg =
"holder_ should not be null\nTenosr holds no memory. Call "
"holder_ should not be null\nTensor holds no memory. Call "
"Tensor::mutable_data first.";
const char* what = err.what();
for (size_t i = 0; i < msg.length(); ++i) {
......@@ -112,7 +112,7 @@ TEST(Tensor, ShareDataWith) {
} catch (paddle::platform::EnforceNotMet err) {
caught = true;
std::string msg =
"holder_ should not be null\nTenosr holds no memory. Call "
"holder_ should not be null\nTensor holds no memory. Call "
"Tensor::mutable_data first.";
const char* what = err.what();
for (size_t i = 0; i < msg.length(); ++i) {
......@@ -274,4 +274,4 @@ TEST(Tensor, ReshapeToMatrix) {
Tensor res = ReshapeToMatrix<int>(src, 2);
ASSERT_EQ(res.dims()[0], 2 * 3);
ASSERT_EQ(res.dims()[1], 4 * 9);
}
\ No newline at end of file
}
......@@ -594,7 +594,7 @@ struct StridePadding {
float32x4_t s1 = vdupq_n_f32(0.f);
for (int s = 0; s < step; s++) {
float32x4_t s0 = vld1q_f32(input);
float32x4x2_t v = {s0, s1};
float32x4x2_t v = {{s0, s1}};
vst2q_f32(inputPadding, v);
input += 4;
inputPadding += 8;
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "ExpandConvBaseLayer.h"
#include "paddle/utils/Logging.h"
namespace paddle {
bool ExpandConvBaseLayer::init(const LayerMap &layerMap,
const ParameterMap &parameterMap) {
/* Initialize the basic convolutional parent class */
ConvBaseLayer::init(layerMap, parameterMap);
int index = 0;
for (auto &inputConfig : config_.inputs()) {
const ConvConfig &conf = inputConfig.conv_conf();
/* Consistent caffe mode for multiple input */
caffeMode_ = conf.caffe_mode();
// create a new weight
size_t height, width;
height = filterPixels_[index] * filterChannels_[index];
width = (!isDeconv_) ? numFilters_ : channels_[index];
CHECK_EQ(parameters_[index]->getSize(), width * height);
Weight *w = new Weight(height, width, parameters_[index]);
weights_.emplace_back(w);
index++;
}
if (biasParameter_.get()) {
if (sharedBiases_) {
CHECK_EQ((size_t)numFilters_, biasParameter_->getSize());
biases_ =
std::unique_ptr<Weight>(new Weight(numFilters_, 1, biasParameter_));
} else {
biases_ =
std::unique_ptr<Weight>(new Weight(getSize(), 1, biasParameter_));
}
}
getOutputSize();
return true;
}
size_t ExpandConvBaseLayer::getOutputSize() {
CHECK_NE(inputLayers_.size(), 0UL);
size_t layerSize = ConvBaseLayer::calOutputSize();
return layerSize;
}
void ExpandConvBaseLayer::addSharedBias() {
size_t mapW = getOutputSize() / numFilters_;
size_t mapH = getOutputValue()->getElementCnt() / mapW;
MatrixPtr out =
Matrix::create(getOutputValue()->getData(), mapH, mapW, false, useGpu_);
Matrix::resizeOrCreate(transOutValue_, mapW, mapH, false, useGpu_);
out->transpose(transOutValue_, false); // false means no memory allocation
transOutValue_->reshape(transOutValue_->getElementCnt() / numFilters_,
numFilters_);
MatrixPtr bias = Matrix::create(biases_->getW()->getData(),
1,
biases_->getW()->getElementCnt(),
false,
useGpu_);
transOutValue_->addBias(*bias, 1.0f);
transOutValue_->reshape(mapW, mapH);
transOutValue_->transpose(out, false); // false means no memory allocation
out->clear();
bias->clear();
}
void ExpandConvBaseLayer::addUnsharedBias() {
MatrixPtr outValue = getOutputValue();
MatrixPtr bias = Matrix::create(biases_->getW()->getData(),
1,
biases_->getW()->getElementCnt(),
false,
useGpu_);
outValue->addBias(*bias, 1.0f);
}
void ExpandConvBaseLayer::bpropSharedBias(MatrixPtr biases, MatrixPtr v) {
size_t mapW = getOutputSize() / numFilters_;
size_t mapH = v->getElementCnt() / mapW;
MatrixPtr vTmp = Matrix::create(v->getData(), mapH, mapW, false, useGpu_);
Matrix::resizeOrCreate(transOutValue_, mapW, mapH, false, useGpu_);
vTmp->transpose(transOutValue_, false); // false means no memory allocation
transOutValue_->reshape(transOutValue_->getElementCnt() / numFilters_,
numFilters_);
biases->collectBias(*transOutValue_, 1.0f);
}
void ExpandConvBaseLayer::bpropBiases(MatrixPtr v) {
MatrixPtr biases = Matrix::create(biases_->getWGrad()->getData(),
1,
biases_->getWGrad()->getElementCnt(),
false,
useGpu_);
if (sharedBiases_) {
bpropSharedBias(biases, v);
} else {
biases->collectBias(*v, 1.0f);
}
biases->clear();
}
} // namespace paddle
......@@ -36,7 +36,36 @@ inline bool isDepthwiseConv(int channels, int groups) {
bool ExpandConvLayer::init(const LayerMap &layerMap,
const ParameterMap &parameterMap) {
/* Initialize the basic convolutional parent class */
ExpandConvBaseLayer::init(layerMap, parameterMap);
ConvBaseLayer::init(layerMap, parameterMap);
int index = 0;
for (auto &inputConfig : config_.inputs()) {
const ConvConfig &conf = inputConfig.conv_conf();
/* Consistent caffe mode for multiple input */
caffeMode_ = conf.caffe_mode();
// create a new weight
size_t height, width;
height = filterPixels_[index] * filterChannels_[index];
width = (!isDeconv_) ? numFilters_ : channels_[index];
CHECK_EQ(parameters_[index]->getSize(), width * height);
Weight *w = new Weight(height, width, parameters_[index]);
weights_.emplace_back(w);
index++;
}
if (biasParameter_.get()) {
if (sharedBiases_) {
CHECK_EQ((size_t)numFilters_, biasParameter_->getSize());
biases_ = std::unique_ptr<Weight>(
new Weight(1, numFilters_, biasParameter_, 0));
} else {
biases_ =
std::unique_ptr<Weight>(new Weight(1, getSize(), biasParameter_, 0));
}
}
getOutputSize();
size_t numInputs = config_.inputs_size();
inputShape_.resize(numInputs);
......@@ -108,6 +137,12 @@ bool ExpandConvLayer::init(const LayerMap &layerMap,
return true;
}
size_t ExpandConvLayer::getOutputSize() {
CHECK_NE(inputLayers_.size(), 0UL);
size_t layerSize = ConvBaseLayer::calOutputSize();
return layerSize;
}
// i is the index of input layers
#define BACKWARD_INPUT(i, inputs, outputs) \
backward_[2 * i]->calc(inputs, outputs)
......@@ -155,11 +190,7 @@ void ExpandConvLayer::forward(PassType passType) {
/* add the bias-vector */
if (biases_.get()) {
if (sharedBiases_) {
addSharedBias();
} else {
addUnsharedBias();
}
output_.value->addBias(*biases_->getW(), 1.0, sharedBiases_);
}
/* activation */
......@@ -171,7 +202,7 @@ void ExpandConvLayer::backward(const UpdateCallback &callback) {
MatrixPtr outGrad = getOutputGrad();
if (biases_ && biases_->getWGrad()) {
bpropBiases(outGrad);
biases_->getWGrad()->collectBias(*getOutputGrad(), 1, sharedBiases_);
/* Increasing the number of gradient */
biases_->getParameterPtr()->incUpdate(callback);
}
......
......@@ -15,7 +15,7 @@ limitations under the License. */
#pragma once
#include <vector>
#include "ExpandConvBaseLayer.h"
#include "ConvBaseLayer.h"
#include "paddle/math/Matrix.h"
namespace paddle {
......@@ -28,10 +28,9 @@ namespace paddle {
* The config file api is img_conv_layer.
*/
class ExpandConvLayer : public ExpandConvBaseLayer {
class ExpandConvLayer : public ConvBaseLayer {
public:
explicit ExpandConvLayer(const LayerConfig& config)
: ExpandConvBaseLayer(config) {}
explicit ExpandConvLayer(const LayerConfig& config) : ConvBaseLayer(config) {}
~ExpandConvLayer() {}
......@@ -41,6 +40,8 @@ public:
void forward(PassType passType) override;
void backward(const UpdateCallback& callback) override;
size_t getOutputSize();
protected:
std::vector<TensorShape> inputShape_;
std::vector<TensorShape> filterShape_;
......
/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "MKLDNNConvLayer.h"
#include "paddle/math/MathUtils.h"
#include "paddle/utils/Logging.h"
using namespace mkldnn; // NOLINT
typedef memory::format format;
namespace paddle {
REGISTER_LAYER(mkldnn_conv, MKLDNNConvLayer);
bool MKLDNNConvLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
if (!MKLDNNLayer::init(layerMap, parameterMap)) {
return false;
}
CHECK_EQ(inputLayers_.size(), 1) << "Only support one input layer yet";
CHECK_EQ(inputLayers_.size(), parameters_.size());
CHECK(config_.shared_biases()) << "Only support shared biases yet";
oc_ = config_.num_filters();
const ConvConfig& conf = config_.inputs(0).conv_conf();
ic_ = conf.channels();
fw_ = conf.filter_size();
fh_ = conf.filter_size_y();
pw_ = conf.padding();
ph_ = conf.padding_y();
dw_ = conf.dilation();
dh_ = conf.dilation_y();
sw_ = conf.stride();
sh_ = conf.stride_y();
gp_ = conf.groups();
oh_ = conf.output_y();
ow_ = conf.output_x();
ih_ = conf.img_size_y();
iw_ = conf.img_size();
caffeMode_ = conf.caffe_mode();
CHECK(caffeMode_) << "Only support caffe mode yet";
CHECK(dh_ == 1 && dw_ == 1) << "Only support dilation 1 yet";
// check group setting
CHECK_EQ((oc_ / gp_) * gp_, oc_) << "group is indivisible for oc";
CHECK_EQ((ic_ / gp_) * gp_, ic_) << "group is indivisible for ic";
// create weight
size_t height = oc_ / gp_;
size_t width = ic_ * fh_ * fw_;
CHECK_EQ(parameters_[0]->getSize(), height * width);
weight_ =
std::unique_ptr<Weight>(new Weight(height, width, parameters_[0], 0));
// create biases
if (biasParameter_.get() != NULL) {
biases_ = std::unique_ptr<Weight>(new Weight(1, oc_, biasParameter_));
}
return true;
}
void MKLDNNConvLayer::convertWeightsFromPaddle() {
if (hasInitedWgt_) {
return;
}
CHECK(wgtVal_) << "should have been initialized";
// the paddle weight format is oihw or goihw
auto targetDim = wgtVal_->getDims();
auto srcFmt = (gp_ == 1) ? memory::format::oihw : memory::format::goihw;
wgtVal_->reorderDataFrom(wgtVal_, srcFmt, targetDim);
hasInitedWgt_ = true;
}
void MKLDNNConvLayer::convertWeightsToPaddle() {
CHECK(wgtVal_) << "should have been initialized";
auto targetDim = wgtVal_->getDims();
auto dstFmt = (gp_ == 1) ? memory::format::oihw : memory::format::goihw;
wgtVal_->reorderDataTo(wgtVal_, dstFmt, targetDim);
}
void MKLDNNConvLayer::reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) {
reshapeInput(bs, ih, iw);
// calculate output sizes
// oc cannot be changed
int fh = (fh_ - 1) * dh_ + 1;
int fw = (fw_ - 1) * dw_ + 1;
oh = outputSize(ih, fh, ph_, sh_, caffeMode_);
ow = outputSize(iw, fw, pw_, sw_, caffeMode_);
reshapeOutput(oh, ow);
resizeOutput(bs, oc * oh * ow);
printSizeInfo();
}
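// For reference, outputSize() above is assumed to follow the usual caffe-mode
// convolution arithmetic from paddle/math/MathUtils.h:
//   out = (imageSize - filterSize + 2 * padding) / stride + 1               (caffe mode)
//   out = (imageSize - filterSize + 2 * padding + stride - 1) / stride + 1  (otherwise)
// e.g. ih = 13, fh = 3, ph = 1, sh = 2 (dilation 1) gives oh = (13 - 3 + 2) / 2 + 1 = 7,
// which matches the {ih, iw} = {13, 13} -> {oh, ow} = {7, 7} case in test_MKLDNN below.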
void MKLDNNConvLayer::resetFwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
resetFwdPD(fwdPD_);
resetFwdBuffers(fwdPD_, in, wgt, bias, out);
resetFwdPipeline(pipeline, fwdPD_, in, wgt, bias, out);
printValueFormatFlow();
}
void MKLDNNConvLayer::resetBwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
std::shared_ptr<conv_bwdWgt::primitive_desc> bwdWgtPD;
std::shared_ptr<conv_bwdData::primitive_desc> bwdDataPD;
resetBwdWgtPD(bwdWgtPD);
resetBwdDataPD(bwdDataPD);
resetBwdBuffers(bwdWgtPD, bwdDataPD, in, wgt, bias, out);
resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgt, bias, out);
printGradFormatFlow();
}
void MKLDNNConvLayer::updateInputData() {
cpuInVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
}
void MKLDNNConvLayer::updateWeights(const UpdateCallback& callback) {
weight_->getParameterPtr()->incUpdate(callback);
if (biases_ && biases_->getWGrad()) {
biases_->getParameterPtr()->incUpdate(callback);
}
}
void MKLDNNConvLayer::loadConvSettings(memory::dims& wgt,
memory::dims& bias,
memory::dims& stride,
memory::dims& dilation,
memory::dims& padL,
memory::dims& padR) {
wgt = (gp_ == 1) ? memory::dims{oc_, ic_, fh_, fw_}
: memory::dims{gp_, oc_ / gp_, ic_ / gp_, fh_, fw_};
bias = memory::dims{oc_};
stride = memory::dims{sh_, sw_};
padL = memory::dims{ph_, pw_};
padR = getPaddingR();
// note: mkldnn dilation starts from 0
dilation = memory::dims{dh_ - 1, dw_ - 1};
}
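// For illustration: with gp_ = 2, oc_ = 8, ic_ = 4 and a 3x3 filter, the goihw
// weight dims above become {2, 8 / 2, 4 / 2, 3, 3} = {2, 4, 2, 3, 3}, and the
// paddle dilation dh_ = dw_ = 1 (meaning "no dilation") maps to the mkldnn
// dilation {0, 0}.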
void MKLDNNConvLayer::resetFwdPD(
std::shared_ptr<conv_fwd::primitive_desc>& pd) {
// dims for conv
memory::dims inDims = memory::dims{bs_, ic_, ih_, iw_};
memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_};
memory::dims wgtDims, biasDims, strides, dilations, padL, padR;
loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR);
prop_kind pk = passType_ == PASS_TEST ? prop_kind::forward_scoring
: prop_kind::forward_training;
algorithm algo = algorithm::convolution_direct;
padding_kind padKind = padding_kind::zero;
conv_fwd::desc fwdDesc =
biases_ && biases_->getW()
? conv_fwd::desc(pk,
algo,
MKLDNNMatrix::createMemoryDesc(inDims),
MKLDNNMatrix::createMemoryDesc(wgtDims),
MKLDNNMatrix::createMemoryDesc(biasDims),
MKLDNNMatrix::createMemoryDesc(outDims),
strides,
dilations,
padL,
padR,
padKind)
: conv_fwd::desc(pk,
algo,
MKLDNNMatrix::createMemoryDesc(inDims),
MKLDNNMatrix::createMemoryDesc(wgtDims),
MKLDNNMatrix::createMemoryDesc(outDims),
strides,
dilations,
padL,
padR,
padKind);
pd.reset(new conv_fwd::primitive_desc(fwdDesc, engine_));
}
void MKLDNNConvLayer::resetFwdBuffers(
std::shared_ptr<conv_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
CHECK(pd);
resetInValue(pd, in);
resetWgtBiasValue(pd, wgt, bias);
resetOutValue(pd, out);
}
void MKLDNNConvLayer::resetFwdPipeline(
std::vector<primitive>& pipeline,
std::shared_ptr<conv_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
pipeline.clear();
if (cvtInVal_) {
pipeline.push_back(*cvtInVal_);
}
if (bias) {
fwd_.reset(new conv_fwd(*pd, *in, *wgt, *bias, *out));
} else {
fwd_.reset(new conv_fwd(*pd, *in, *wgt, *out));
}
pipeline.push_back(*fwd_);
if (cvtOutVal_) {
pipeline.push_back(*cvtOutVal_);
}
}
void MKLDNNConvLayer::resetInValue(
std::shared_ptr<conv_fwd::primitive_desc>& pd, MKLDNNMatrixPtr& in) {
const MatrixPtr& inMat = inputLayers_[0]->getOutput().value;
in = MKLDNNMatrix::create(inMat, pd->src_primitive_desc());
// create buffer and reorder if the input value does not match
cpuInVal_ = nullptr;
cvtInVal_ = nullptr;
if (inputIsOnlyMKLDNN()) {
MKLDNNMatrixPtr dnnIn = std::dynamic_pointer_cast<MKLDNNMatrix>(inMat);
CHECK(dnnIn) << "Input should be MKLDNNMatrix";
if (dnnIn->getPrimitiveDesc() != in->getPrimitiveDesc()) {
CHECK_EQ(dnnIn->getFormat(), format::nc);
CHECK(ih_ == 1 && iw_ == 1) << "when input is nc format";
// create a new one with nchw format and same data
memory::dims inDims = memory::dims{bs_, ic_, 1, 1};
dnnIn = MKLDNNMatrix::create(inMat, inDims, format::nchw, engine_);
CHECK(dnnIn->getPrimitiveDesc() == in->getPrimitiveDesc());
}
in = dnnIn;
} else {
const MatrixPtr& cpuIn = getInputValue(0, CPU_DEVICE);
memory::dims inDims = memory::dims{bs_, ic_, ih_, iw_};
cpuInVal_ = MKLDNNMatrix::create(cpuIn, inDims, format::nchw, engine_);
if (cpuInVal_->getPrimitiveDesc() != in->getPrimitiveDesc()) {
// create new mkldnn matrix
in = MKLDNNMatrix::create(nullptr, pd->src_primitive_desc());
cvtInVal_ = MKLDNNMatrix::createReorder(cpuInVal_, in);
CHECK(cvtInVal_) << "should not be empty";
} else {
in = cpuInVal_;
}
}
}
void MKLDNNConvLayer::resetWgtBiasValue(
std::shared_ptr<conv_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias) {
wgt = MKLDNNMatrix::create(weight_->getW(), pd->weights_primitive_desc());
VLOG(MKLDNN_FMTS) << "Weight value format: " << wgt->getFormat();
bias = (biases_ && biases_->getW())
? MKLDNNMatrix::create(biases_->getW(), pd->bias_primitive_desc())
: nullptr;
}
void MKLDNNConvLayer::resetOutValue(
std::shared_ptr<conv_fwd::primitive_desc>& pd, MKLDNNMatrixPtr& out) {
out = MKLDNNMatrix::create(output_.value, pd->dst_primitive_desc());
// change original output value from cpu matrix to mkldnn matrix
output_.value = std::dynamic_pointer_cast<Matrix>(out);
// create reorder if the output value is on the CPU device and the pd does not match
cpuOutVal_ = nullptr;
cvtOutVal_ = nullptr;
if (!outputIsOnlyMKLDNN()) {
const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).value;
memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_};
cpuOutVal_ = MKLDNNMatrix::create(cpuOut, outDims, format::nchw, engine_);
if (cpuOutVal_->getPrimitiveDesc() != out->getPrimitiveDesc()) {
cvtOutVal_ = MKLDNNMatrix::createReorder(out, cpuOutVal_);
CHECK(cvtOutVal_) << "should not be empty";
} else {
// CPU output shares the same data as the MKLDNN output
cpuOut->setData(out->getData());
cpuOutVal_ = out;
}
}
}
void MKLDNNConvLayer::resetBwdWgtPD(
std::shared_ptr<conv_bwdWgt::primitive_desc>& pd) {
memory::dims wgtDims, biasDims, strides, dilations, padL, padR;
loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR);
// create backward weight using input, output and weight value memory desc
CHECK(inVal_) << "Should have input value";
CHECK(outVal_) << "Should have output value";
CHECK(wgtVal_) << "Should have weight value";
algorithm algo = algorithm::convolution_direct;
padding_kind padKind = padding_kind::zero;
auto bwdWgtDesc = biasVal_ != nullptr
? conv_bwdWgt::desc(algo,
inVal_->getMemoryDesc(),
wgtVal_->getMemoryDesc(),
biasVal_->getMemoryDesc(),
outVal_->getMemoryDesc(),
strides,
padL,
padR,
padKind)
: conv_bwdWgt::desc(algo,
inVal_->getMemoryDesc(),
wgtVal_->getMemoryDesc(),
outVal_->getMemoryDesc(),
strides,
padL,
padR,
padKind);
pd.reset(new conv_bwdWgt::primitive_desc(bwdWgtDesc, engine_, *fwdPD_));
CHECK(pd->src_primitive_desc() == inVal_->getPrimitiveDesc())
<< "primitive desc of in value should equal";
CHECK(pd->diff_dst_primitive_desc() == outVal_->getPrimitiveDesc())
<< "primitive desc of out grad should equal the out value";
CHECK(pd->diff_weights_primitive_desc() == wgtVal_->getPrimitiveDesc())
<< "primitive desc of weight grad should equal the weight value";
}
void MKLDNNConvLayer::resetBwdDataPD(
std::shared_ptr<conv_bwdData::primitive_desc>& pd) {
pd = nullptr;
if (inputLayers_[0]->getOutput().grad == nullptr) {
return;
}
memory::dims wgtDims, biasDims, strides, dilations, padL, padR;
loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR);
CHECK(inVal_) << "Should have input value";
CHECK(outVal_) << "Should have output value";
// create backward data using input and output value memory desc
// but using weight memory desc with any format
auto bwdDataDesc = conv_bwdData::desc(algorithm::convolution_direct,
inVal_->getMemoryDesc(),
MKLDNNMatrix::createMemoryDesc(wgtDims),
outVal_->getMemoryDesc(),
strides,
padL,
padR,
padding_kind::zero);
pd.reset(new conv_bwdData::primitive_desc(bwdDataDesc, engine_, *fwdPD_));
CHECK(pd->diff_src_primitive_desc() == inVal_->getPrimitiveDesc())
<< "primitive desc of in grad should equal the in value";
CHECK(pd->diff_dst_primitive_desc() == outVal_->getPrimitiveDesc())
<< "primitive desc of out grad should equal";
}
void MKLDNNConvLayer::resetBwdBuffers(
std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
CHECK(wgtPD);
resetOutGrad(wgtPD, out);
resetWgtBiasGrad(wgtPD, wgt, bias);
resetInGrad(dataPD, in);
resetWgtValBwdData(dataPD, wgtValBwdData_);
}
void MKLDNNConvLayer::resetBwdPipeline(
std::vector<primitive>& pipeline,
std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
pipeline.clear();
if (cvtOutGrad_) {
pipeline.push_back(*cvtOutGrad_);
}
// add bwdWgt handle
if (bias) {
bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVal_, *out, *wgt, *bias));
} else {
bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVal_, *out, *wgt));
}
pipeline.push_back(*bwdWgt_);
if (dataPD == nullptr) {
return;
}
if (cvtWgtVal_) {
pipeline.push_back(*cvtWgtVal_);
}
// add bwdData handle
CHECK(wgtValBwdData_) << "Should have weight memory";
bwdData_.reset(new conv_bwdData(*dataPD, *out, *wgtValBwdData_, *in));
pipeline.push_back(*bwdData_);
if (cvtInGrad_) {
pipeline.push_back(*cvtInGrad_);
}
}
void MKLDNNConvLayer::resetOutGrad(
std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD, MKLDNNMatrixPtr& out) {
const MatrixPtr& outMat = output_.grad;
out = MKLDNNMatrix::create(outMat, wgtPD->diff_dst_primitive_desc());
CHECK(outVal_ != nullptr &&
out->getPrimitiveDesc() == outVal_->getPrimitiveDesc())
<< "primitive desc of out grad and value should be equal";
// TODO(TJ): merge outgrad
// create reorder if the output grad does not match
cpuOutGrad_ = nullptr;
cvtOutGrad_ = nullptr;
if (!outputIsOnlyMKLDNN()) {
const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).grad;
// same primitive desc as cpuOutVal_
CHECK(cpuOutVal_);
cpuOutGrad_ = MKLDNNMatrix::create(cpuOut, cpuOutVal_->getPrimitiveDesc());
if (cpuOutGrad_->getPrimitiveDesc() == out->getPrimitiveDesc()) {
outMat->setData(cpuOut->getData());
out = cpuOutGrad_;
} else {
cvtOutGrad_ = MKLDNNMatrix::createReorder(cpuOutGrad_, out);
CHECK(cvtOutGrad_);
}
}
}
void MKLDNNConvLayer::resetWgtBiasGrad(
std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias) {
wgt = MKLDNNMatrix::create(weight_->getWGrad(),
wgtPD->diff_weights_primitive_desc());
CHECK(nullptr != wgtVal_ &&
wgt->getPrimitiveDesc() == wgtVal_->getPrimitiveDesc())
<< "primitive desc of weight grad and value should be equal";
VLOG(MKLDNN_FMTS) << "weight grad format: " << wgt->getFormat();
bias = nullptr;
if (biasVal_ == nullptr) {
return;
}
bias = MKLDNNMatrix::create(biases_->getWGrad(),
wgtPD->diff_bias_primitive_desc());
CHECK(bias->getPrimitiveDesc() == biasVal_->getPrimitiveDesc())
<< "primitive desc of bias grad should equal the bias value";
}
void MKLDNNConvLayer::resetInGrad(
std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
MKLDNNMatrixPtr& in) {
if (dataPD == nullptr) {
return;
}
// TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done
in = MKLDNNMatrix::create(inputLayers_[0]->getOutput().grad,
dataPD->diff_src_primitive_desc());
CHECK(nullptr != inVal_ &&
in->getPrimitiveDesc() == inVal_->getPrimitiveDesc())
<< "primitive desc of input grad and value should be equal";
// create reorder if the input grad does not match
cpuInGrad_ = nullptr;
cvtInGrad_ = nullptr;
if (!inputIsOnlyMKLDNN()) {
const MatrixPtr& cpuIn = getInputGrad(0, CPU_DEVICE);
// same primitive desc as cpuInVal_
CHECK(cpuInVal_);
cpuInGrad_ = MKLDNNMatrix::create(cpuIn, cpuInVal_->getPrimitiveDesc());
if (cpuInGrad_->getPrimitiveDesc() != in->getPrimitiveDesc()) {
const MatrixPtr& dnnIn = getInputGrad(0, MKLDNN_DEVICE);
in = MKLDNNMatrix::create(dnnIn, in->getPrimitiveDesc());
cvtInGrad_ = MKLDNNMatrix::createReorder(in, cpuInGrad_);
CHECK(cvtInGrad_);
} else {
in = cpuInGrad_;
}
}
}
void MKLDNNConvLayer::resetWgtValBwdData(
std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
MKLDNNMatrixPtr& wgt) {
if (dataPD == nullptr) {
return;
}
// create new weight value for backward data, and create reorder if necessary
// since the primitive_desc would be different from wgtVal_
CHECK(wgtVal_) << "should have weight value";
if (dataPD->weights_primitive_desc() != wgtVal_->getPrimitiveDesc()) {
wgtValBwdData_ =
MKLDNNMatrix::create(nullptr, dataPD->weights_primitive_desc());
cvtWgtVal_ = MKLDNNMatrix::createReorder(wgtVal_, wgtValBwdData_);
CHECK(cvtWgtVal_);
} else {
wgtValBwdData_ = wgtVal_;
}
VLOG(MKLDNN_FMTS) << "weight value format for backward data"
<< wgtValBwdData_->getFormat();
}
} // namespace paddle
/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "MKLDNNLayer.h"
#include "mkldnn.hpp"
namespace paddle {
typedef mkldnn::convolution_forward conv_fwd;
typedef mkldnn::convolution_backward_weights conv_bwdWgt;
typedef mkldnn::convolution_backward_data conv_bwdData;
/**
* @brief A subclass of MKLDNNLayer that implements the conv layer.
*
* The config file api is mkldnn_conv.
*/
class MKLDNNConvLayer : public MKLDNNLayer {
protected:
// padding height and width
int ph_, pw_;
// stride height and width
int sh_, sw_;
// dilation height and width
int dh_, dw_;
// filter (kernel) height and width
int fh_, fw_;
// group number
int gp_;
// in resetBwdData, the format of wgtValBwdData_ is different from wgtVal_
MKLDNNMatrixPtr wgtValBwdData_;
// convert handle from wgtVal_ to wgtValBwdData_
std::shared_ptr<mkldnn::reorder> cvtWgtVal_;
// save the forward primitive_desc, which can be reused in backward
std::shared_ptr<conv_fwd::primitive_desc> fwdPD_;
// MKLDNNMatrixPtr which should be created from CPU Device
MKLDNNMatrixPtr cpuInVal_;
MKLDNNMatrixPtr cpuInGrad_;
MKLDNNMatrixPtr cpuOutVal_;
MKLDNNMatrixPtr cpuOutGrad_;
// convert handle between CPU device and MKLDNN device
std::shared_ptr<mkldnn::reorder> cvtInVal_;
std::shared_ptr<mkldnn::reorder> cvtInGrad_;
std::shared_ptr<mkldnn::reorder> cvtOutVal_;
std::shared_ptr<mkldnn::reorder> cvtOutGrad_;
// whether the weight has been initialized
bool hasInitedWgt_;
// true by default, which impacts the calculation of the output image size.
// for details, refer to paddle/math/MathUtils.h
bool caffeMode_;
// weight and bias
std::unique_ptr<Weight> weight_;
std::unique_ptr<Weight> biases_;
public:
explicit MKLDNNConvLayer(const LayerConfig& config)
: MKLDNNLayer(config), hasInitedWgt_(false), caffeMode_(true) {}
~MKLDNNConvLayer() {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
void reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override;
void resetFwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) override;
void resetBwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) override;
void updateInputData() override;
void updateWeights(const UpdateCallback& callback) override;
void convertWeightsFromPaddle() override;
void convertWeightsToPaddle() override;
void printSizeInfo() override {
MKLDNNLayer::printSizeInfo();
VLOG(MKLDNN_SIZES) << getName() << ": fh: " << fh_ << ", fw: " << fw_
<< ": ph: " << ph_ << ", pw: " << pw_ << ", sh: " << sh_
<< ", sw: " << sw_ << ", dh: " << dh_ << ", dw: " << dw_;
}
void printValueFormatFlow() override {
if (cpuInVal_) {
VLOG(MKLDNN_FMTS) << cpuInVal_->getFormat() << " >>>";
}
MKLDNNLayer::printValueFormatFlow();
if (cpuOutVal_) {
VLOG(MKLDNN_FMTS) << " >>> " << cpuOutVal_->getFormat();
}
}
void printGradFormatFlow() override {
if (cpuInGrad_) {
VLOG(MKLDNN_FMTS) << cpuInGrad_->getFormat() << " <<<";
}
MKLDNNLayer::printGradFormatFlow();
if (cpuOutGrad_) {
VLOG(MKLDNN_FMTS) << " <<< " << cpuOutGrad_->getFormat();
}
}
protected:
/**
* load the dims settings of this conv
*/
void loadConvSettings(mkldnn::memory::dims& wgt,
mkldnn::memory::dims& bias,
mkldnn::memory::dims& stride,
mkldnn::memory::dims& dilation,
mkldnn::memory::dims& padL,
mkldnn::memory::dims& padR);
/**
* reset the forward primitive descriptor.
*/
void resetFwdPD(std::shared_ptr<conv_fwd::primitive_desc>& pd);
/**
* reset the MKLDNNMatrix buffers used in forward.
*/
void resetFwdBuffers(std::shared_ptr<conv_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
/**
* reset the forward pipeline.
*/
void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
std::shared_ptr<conv_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
/**
* reset MKLDNNMatrix of input value
*/
void resetInValue(std::shared_ptr<conv_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in);
/**
* reset MKLDNNMatrix of weight and bias value
*/
void resetWgtBiasValue(std::shared_ptr<conv_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias);
/**
* reset MKLDNNMatrix of output value
*/
void resetOutValue(std::shared_ptr<conv_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr& out);
/**
* reset the backward weight primitive descriptor.
*/
void resetBwdWgtPD(std::shared_ptr<conv_bwdWgt::primitive_desc>& pd);
/**
* reset the backward data primitive descriptor.
*/
void resetBwdDataPD(std::shared_ptr<conv_bwdData::primitive_desc>& pd);
/**
* reset the MKLDNNMatrix buffers used in backward.
*/
void resetBwdBuffers(std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
/**
* reset the backward pipeline.
*/
void resetBwdPipeline(std::vector<mkldnn::primitive>& pipeline,
std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
/**
* reset MKLDNNMatrix of output grad
*/
void resetOutGrad(std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
MKLDNNMatrixPtr& out);
/**
* reset MKLDNNMatrix of weight and bias grad
*/
void resetWgtBiasGrad(std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias);
/**
* reset MKLDNNMatrix of input grad
*/
void resetInGrad(std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
MKLDNNMatrixPtr& in);
/**
* reset MKLDNNMatrix of weight value for backward data
* since the primitive_desc would be different from wgtVal_
*/
void resetWgtValBwdData(std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
MKLDNNMatrixPtr& wgt);
/**
* get padding_r according to
* https://github.com/01org/mkl-dnn/blob/master/tests/gtests/
* test_convolution_forward_common.hpp
* @note: mkldnn dilation starts from 0 while paddle starts from 1
*/
mkldnn::memory::dims getPaddingR() const {
mkldnn::memory::dims padR = {ph_, pw_};
for (int i = 0; i < 2; ++i) {
if ((ih_ - ((fh_ - 1) * dh_ + 1) + ph_ + padR[0]) / sh_ + 1 != oh_) {
++padR[0];
}
if ((iw_ - ((fw_ - 1) * dw_ + 1) + pw_ + padR[1]) / sw_ + 1 != ow_) {
++padR[1];
}
}
return padR;
}
};
} // namespace paddle
......@@ -17,9 +17,6 @@ limitations under the License. */
using namespace mkldnn; // NOLINT
typedef memory::format format;
typedef inner_product_forward fc_fwd;
typedef inner_product_backward_weights fc_bwdWgt;
typedef inner_product_backward_data fc_bwdData;
namespace paddle {
......@@ -93,35 +90,88 @@ void MKLDNNFcLayer::reshape(
printSizeInfo();
}
void MKLDNNFcLayer::resetFwd(std::vector<mkldnn::primitive>& pipeline,
void MKLDNNFcLayer::resetFwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
pipeline.clear();
bool hasBias = biases_ && biases_->getW();
const MatrixPtr& wgtVal = weight_->getW();
const MatrixPtr& biasVal = hasBias ? biases_->getW() : nullptr;
const MatrixPtr& outVal = output_.value;
resetFwdBuffers(in, wgt, bias, out);
resetFwdPD(fwdPD_, in, wgt, bias, out);
resetFwdPipeline(pipeline, fwdPD_, in, wgt, bias, out);
printValueFormatFlow();
}
void MKLDNNFcLayer::resetBwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
std::shared_ptr<fc_bwdWgt::primitive_desc> bwdWgtPD;
std::shared_ptr<fc_bwdData::primitive_desc> bwdDataPD;
resetBwdBuffers(in, wgt, bias, out);
resetBwdWgtPD(bwdWgtPD, wgt, bias, out);
resetBwdDataPD(bwdDataPD, in, out);
resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgt, bias, out);
printGradFormatFlow();
}
void MKLDNNFcLayer::updateInputData() {
inVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
}
void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) {
weight_->getParameterPtr()->incUpdate(callback);
if (biases_ && biases_->getWGrad()) {
biases_->getParameterPtr()->incUpdate(callback);
}
}
void MKLDNNFcLayer::resetFwdBuffers(MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
resetInValue(in);
resetWgtBiasValue(wgt, bias);
resetOutValue(out);
}
void MKLDNNFcLayer::resetInValue(MKLDNNMatrixPtr& in) {
if (inputIsOnlyMKLDNN()) {
const MatrixPtr& inVal = getInputValue(0);
in = std::dynamic_pointer_cast<MKLDNNMatrix>(inVal);
const MatrixPtr& dnnIn = getInputValue(0);
in = std::dynamic_pointer_cast<MKLDNNMatrix>(dnnIn);
CHECK(in) << "Input should be MKLDNNMatrix";
} else {
CHECK_EQ(getPrev(0)->getDeviceId(), CPU_DEVICE) << "Only support CPU yet";
const MatrixPtr& inVal = getInputValue(0, CPU_DEVICE);
const MatrixPtr& cpuIn = getInputValue(0, CPU_DEVICE);
in = MKLDNNMatrix::create(
inVal, memory::dims{bs_, ic_, ih_, iw_}, format::nchw, engine_);
cpuIn, {bs_, ic_, ih_, iw_}, format::nchw, engine_);
}
in->downSpatial();
}
void MKLDNNFcLayer::resetWgtBiasValue(MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias) {
wgt = MKLDNNMatrix::create(
wgtVal, memory::dims{oc_, ic_, ih_, iw_}, format::oihw, engine_);
weight_->getW(), {oc_, ic_, ih_, iw_}, format::oihw, engine_);
wgt->downSpatial();
bias = hasBias ? MKLDNNMatrix::create(biasVal, {oc_}, format::x, engine_)
: nullptr;
out = MKLDNNMatrix::create(outVal, {bs_, oc_}, format::nc, engine_);
bias = (biases_ && biases_->getW())
? MKLDNNMatrix::create(biases_->getW(), {oc_}, format::x, engine_)
: nullptr;
}
void MKLDNNFcLayer::resetOutValue(MKLDNNMatrixPtr& out) {
out = MKLDNNMatrix::create(output_.value, {bs_, oc_}, format::nc, engine_);
// change original output value to mkldnn output value
output_.value = std::dynamic_pointer_cast<Matrix>(out);
if (!outputIsOnlyMKLDNN()) {
......@@ -129,46 +179,59 @@ void MKLDNNFcLayer::resetFwd(std::vector<mkldnn::primitive>& pipeline,
// just share the data pointer
getOutput(CPU_DEVICE).value->setData(output_.value->getData());
}
}
// create forward handle
void MKLDNNFcLayer::resetFwdPD(std::shared_ptr<fc_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr in,
MKLDNNMatrixPtr wgt,
MKLDNNMatrixPtr bias,
MKLDNNMatrixPtr out) {
CHECK(in);
CHECK(wgt);
CHECK(out);
prop_kind pk = prop_kind::forward;
fc_fwd::desc fwdDesc = hasBias ? fc_fwd::desc(pk,
in->getMemoryDesc(),
wgt->getMemoryDesc(),
bias->getMemoryDesc(),
out->getMemoryDesc())
: fc_fwd::desc(pk,
in->getMemoryDesc(),
wgt->getMemoryDesc(),
out->getMemoryDesc());
fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_);
if (hasBias) {
fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *bias, *out));
fc_fwd::desc fwdDesc = bias != nullptr ? fc_fwd::desc(pk,
in->getMemoryDesc(),
wgt->getMemoryDesc(),
bias->getMemoryDesc(),
out->getMemoryDesc())
: fc_fwd::desc(pk,
in->getMemoryDesc(),
wgt->getMemoryDesc(),
out->getMemoryDesc());
pd.reset(new fc_fwd::primitive_desc(fwdDesc, engine_));
}
void MKLDNNFcLayer::resetFwdPipeline(
std::vector<primitive>& pipeline,
std::shared_ptr<fc_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
pipeline.clear();
if (bias) {
fwd_.reset(new fc_fwd(*pd, *in, *wgt, *bias, *out));
} else {
fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *out));
fwd_.reset(new fc_fwd(*pd, *in, *wgt, *out));
}
printValueFormatFlow();
pipeline.push_back(*fwd_);
}
void MKLDNNFcLayer::resetBwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
pipeline.clear();
if (!needResetBwd_) {
return;
}
needResetBwd_ = false;
bool hasBias = biases_ && biases_->getWGrad();
void MKLDNNFcLayer::resetBwdBuffers(MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
resetOutGrad(out);
resetWgtBiasGrad(wgt, bias);
/// backward weight
CHECK(inVal_) << "Should have input value";
const MatrixPtr& wgtGrad = weight_->getWGrad();
const MatrixPtr& biasGrad = hasBias ? biases_->getWGrad() : nullptr;
resetInGrad(in);
}
void MKLDNNFcLayer::resetOutGrad(MKLDNNMatrixPtr& out) {
// TODO(TJ): merge outgrad
int device = outputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE;
// for MKLDNN device:
......@@ -178,66 +241,88 @@ void MKLDNNFcLayer::resetBwd(std::vector<mkldnn::primitive>& pipeline,
// for CPU device:
// fc does not need to convert from the cpu device since its output is always in nc format,
// it only needs to be created from the cpu device
const MatrixPtr& outGrad = getOutput(device).grad;
out = MKLDNNMatrix::create(outGrad, outVal_->getPrimitiveDesc());
wgt = MKLDNNMatrix::create(wgtGrad, wgtVal_->getPrimitiveDesc());
bias = hasBias ? MKLDNNMatrix::create(biasGrad, biasVal_->getPrimitiveDesc())
: nullptr;
// create memory primitive desc
fc_fwd::desc fwdDesc = fc_fwd::desc(prop_kind::forward,
inVal_->getMemoryDesc(),
wgt->getMemoryDesc(),
out->getMemoryDesc());
fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_);
fc_bwdWgt::desc bwdWgtDesc = hasBias
? fc_bwdWgt::desc(inVal_->getMemoryDesc(),
wgt->getMemoryDesc(),
bias->getMemoryDesc(),
out->getMemoryDesc())
: fc_bwdWgt::desc(inVal_->getMemoryDesc(),
wgt->getMemoryDesc(),
out->getMemoryDesc());
fc_bwdWgt::primitive_desc bwdWgtPD =
fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, fwdPD);
if (hasBias) {
bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt, *bias));
} else {
bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt));
CHECK(outVal_);
out =
MKLDNNMatrix::create(getOutput(device).grad, outVal_->getPrimitiveDesc());
}
void MKLDNNFcLayer::resetWgtBiasGrad(MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias) {
CHECK(wgtVal_);
wgt = MKLDNNMatrix::create(weight_->getWGrad(), wgtVal_->getPrimitiveDesc());
bias = nullptr;
if (biasVal_ == nullptr) {
return;
}
pipeline.push_back(*bwdWgt_);
bias =
MKLDNNMatrix::create(biases_->getWGrad(), biasVal_->getPrimitiveDesc());
}
/// backward data
void MKLDNNFcLayer::resetInGrad(MKLDNNMatrixPtr& in) {
in = nullptr;
const MatrixPtr& inGrad = inputLayers_[0]->getOutput().grad;
if (inGrad == nullptr) {
return;
}
if (getInput(0, MKLDNN_DEVICE).getAllCount() > 1) {
// TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done
} else {
in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc());
}
fc_bwdData::desc bwdDataDesc = fc_bwdData::desc(
inVal_->getMemoryDesc(), wgt->getMemoryDesc(), out->getMemoryDesc());
fc_bwdData::primitive_desc bwdDataPD =
fc_bwdData::primitive_desc(bwdDataDesc, engine_, fwdPD);
// TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done
CHECK(inVal_);
in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc());
}
CHECK(wgtVal_) << "Should have weight memory";
bwdData_.reset(new fc_bwdData(bwdDataPD, *out, *wgtVal_, *in));
printGradFormatFlow();
pipeline.push_back(*bwdData_);
void MKLDNNFcLayer::resetBwdWgtPD(
std::shared_ptr<fc_bwdWgt::primitive_desc>& pd,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
CHECK(inVal_);
fc_bwdWgt::desc bwdWgtDesc = bias ? fc_bwdWgt::desc(inVal_->getMemoryDesc(),
wgt->getMemoryDesc(),
bias->getMemoryDesc(),
out->getMemoryDesc())
: fc_bwdWgt::desc(inVal_->getMemoryDesc(),
wgt->getMemoryDesc(),
out->getMemoryDesc());
pd.reset(new fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, *fwdPD_));
}
void MKLDNNFcLayer::updateInputData() {
inVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
void MKLDNNFcLayer::resetBwdDataPD(
std::shared_ptr<fc_bwdData::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& out) {
pd = nullptr;
if (in == nullptr) {
return;
}
CHECK(wgtVal_);
fc_bwdData::desc bwdDataDesc = fc_bwdData::desc(
in->getMemoryDesc(), wgtVal_->getMemoryDesc(), out->getMemoryDesc());
pd.reset(new fc_bwdData::primitive_desc(bwdDataDesc, engine_, *fwdPD_));
}
void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) {
weight_->getParameterPtr()->incUpdate(callback);
if (biases_ && biases_->getWGrad()) {
biases_->getParameterPtr()->incUpdate(callback);
void MKLDNNFcLayer::resetBwdPipeline(
std::vector<primitive>& pipeline,
std::shared_ptr<fc_bwdWgt::primitive_desc>& bwdWgtPD,
std::shared_ptr<fc_bwdData::primitive_desc>& bwdDataPD,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
pipeline.clear();
CHECK(inVal_);
if (bias) {
bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVal_, *out, *wgt, *bias));
} else {
bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVal_, *out, *wgt));
}
pipeline.push_back(*bwdWgt_);
if (bwdDataPD == nullptr) {
return;
}
CHECK(wgtVal_) << "Should have weight memory";
bwdData_.reset(new fc_bwdData(*bwdDataPD, *out, *wgtVal_, *in));
pipeline.push_back(*bwdData_);
}
} // namespace paddle
......@@ -18,6 +18,9 @@ limitations under the License. */
#include "mkldnn.hpp"
namespace paddle {
typedef mkldnn::inner_product_forward fc_fwd;
typedef mkldnn::inner_product_backward_weights fc_bwdWgt;
typedef mkldnn::inner_product_backward_data fc_bwdData;
/**
* @brief A subclass of MKLDNNLayer that implements the fc layer.
......@@ -32,6 +35,9 @@ protected:
// whether the weight has already been initialized
bool hasInitedWgt_;
// save the forward primitive_desc, which can be reused in backward
std::shared_ptr<fc_fwd::primitive_desc> fwdPD_;
// fc weight and bias
std::unique_ptr<Weight> weight_;
std::unique_ptr<Weight> biases_;
......@@ -67,6 +73,59 @@ public:
void convertWeightsFromPaddle() override;
void convertWeightsToPaddle() override;
protected:
/**
* Forward functions: reset buffers(input, output, weight and bias),
* reset primitive descriptor,
* reset pipeline.
*/
void resetFwdBuffers(MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
void resetInValue(MKLDNNMatrixPtr& in);
void resetWgtBiasValue(MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias);
void resetOutValue(MKLDNNMatrixPtr& out);
void resetFwdPD(std::shared_ptr<fc_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr in,
MKLDNNMatrixPtr wgt,
MKLDNNMatrixPtr bias,
MKLDNNMatrixPtr out);
void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
std::shared_ptr<fc_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
/**
* Backward functions: reset buffers(input, output, weight and bias),
* reset primitive descriptor for backward weight,
* reset primitive descriptor for backward data,
* reset pipeline.
*/
void resetBwdBuffers(MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
void resetOutGrad(MKLDNNMatrixPtr& out);
void resetWgtBiasGrad(MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias);
void resetInGrad(MKLDNNMatrixPtr& in);
void resetBwdWgtPD(std::shared_ptr<fc_bwdWgt::primitive_desc>& pd,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
void resetBwdDataPD(std::shared_ptr<fc_bwdData::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& out);
void resetBwdPipeline(std::vector<mkldnn::primitive>& pipeline,
std::shared_ptr<fc_bwdWgt::primitive_desc>& bwdWgtPD,
std::shared_ptr<fc_bwdData::primitive_desc>& bwdDataPD,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
};
} // namespace paddle
......@@ -83,8 +83,7 @@ void SwitchOrderLayer::forward(PassType passType) {
setOutDims();
resetOutput(outDims_[0], outDims_[1] * outDims_[2] * outDims_[3]);
if (heightAxis_.size() > 0) {
getOutputValue()->reshape(reshapeHeight_, reshapeWidth_);
getOutputGrad()->reshape(reshapeHeight_, reshapeWidth_);
resetOutput(reshapeHeight_, reshapeWidth_);
}
// switch NCHW to NHWC
......
......@@ -17,6 +17,7 @@ limitations under the License. */
#include <vector>
#include "MKLDNNTester.h"
#include "ModelConfig.pb.h"
#include "paddle/math/MathUtils.h"
using namespace paddle; // NOLINT
......@@ -63,6 +64,83 @@ TEST(MKLDNNLayer, FcLayer) {
testFcLayer({/*bs*/ 15, /*ic*/ 3, /*oc*/ 6, /*ih*/ 16, /*iw*/ 16});
}
struct testConvDesc {
int bs, gp;
int ic, ih, iw;
int oc, oh, ow;
int fh, fw;
int ph, pw;
int sh, sw;
int dh, dw;
};
void testConvLayer(const testConvDesc& pm) {
const std::string compareTypes[] = {"mkldnn_conv", "exconv"};
TestConfig cfg;
cfg.layerConfig.set_type(compareTypes[0]);
cfg.layerConfig.set_num_filters(pm.oc);
cfg.layerConfig.set_size(pm.oc * pm.oh * pm.ow);
// cfg.layerConfig.set_partial_sum(1); // TODO: check it
cfg.layerConfig.set_shared_biases(true);
cfg.inputDefs.push_back(
{INPUT_DATA,
"layer_0",
/* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw),
/* size of weight= */ size_t(pm.oc * pm.ic * pm.fh * pm.fw / pm.gp)});
LayerInputConfig* input = cfg.layerConfig.add_inputs();
ConvConfig* conv = input->mutable_conv_conf();
conv->set_groups(pm.gp);
conv->set_img_size(pm.iw);
conv->set_img_size_y(pm.ih);
conv->set_output_x(pm.ow);
conv->set_output_y(pm.oh);
conv->set_filter_size(pm.fw);
conv->set_filter_size_y(pm.fh);
conv->set_channels(pm.ic);
conv->set_padding(pm.pw);
conv->set_padding_y(pm.ph);
conv->set_stride(pm.sw);
conv->set_stride_y(pm.sh);
conv->set_dilation(pm.dw);
conv->set_dilation_y(pm.dh);
conv->set_caffe_mode(true);
conv->set_filter_channels(conv->channels() / conv->groups());
CHECK_EQ(conv->filter_channels() * pm.gp, conv->channels())
<< "channels must be divisible by groups";
int fh = (pm.fh - 1) * pm.dh + 1;
int fw = (pm.fw - 1) * pm.dw + 1;
int ow = outputSize(pm.iw, fw, pm.pw, pm.sw, true);
int oh = outputSize(pm.ih, fh, pm.ph, pm.sh, true);
CHECK_EQ(ow, pm.ow) << "output size check failed";
CHECK_EQ(oh, pm.oh) << "output size check failed";
MKLDNNTester tester;
for (auto biasSize : {pm.oc, 0}) {
cfg.biasSize = biasSize;
TestConfig ref = cfg;
ref.layerConfig.set_type(compareTypes[1]);
for (auto bs : {pm.bs, 1}) {
tester.run(cfg, ref, bs, pm.ih, pm.iw);
}
}
}
TEST(MKLDNNLayer, ConvLayer) {
/* bs, gp, ic, ih, iw, oc, oh, ow, fh, fw, ph, pw, sh, sw, dh, dw */
testConvLayer({2, 1, 3, 32, 32, 16, 32, 32, 3, 3, 1, 1, 1, 1, 1, 1});
testConvLayer({2, 1, 8, 16, 16, 8, 16, 16, 3, 3, 1, 1, 1, 1, 1, 1});
testConvLayer({3, 1, 16, 32, 32, 3, 32, 32, 3, 3, 1, 1, 1, 1, 1, 1});
testConvLayer({8, 1, 16, 18, 18, 32, 18, 18, 3, 3, 1, 1, 1, 1, 1, 1});
testConvLayer({16, 1, 1, 42, 31, 32, 23, 11, 4, 5, 3, 2, 2, 3, 1, 1});
testConvLayer({2, 1, 8, 16, 16, 8, 8, 8, 3, 3, 1, 1, 2, 2, 1, 1});
testConvLayer({3, 1, 8, 13, 13, 8, 7, 7, 3, 3, 1, 1, 2, 2, 1, 1});
// with groups
testConvLayer({2, 2, 4, 5, 5, 8, 5, 5, 3, 3, 1, 1, 1, 1, 1, 1});
testConvLayer({2, 3, 3, 5, 5, 3, 5, 5, 3, 3, 1, 1, 1, 1, 1, 1});
testConvLayer({4, 4, 16, 3, 3, 16, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1});
}
// TODO(TJ): add branch test
int main(int argc, char** argv) {
......
......@@ -49,6 +49,27 @@ MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m,
return create(m, memory::primitive_desc(memory::desc(dims, dtype, fmt), eg));
}
std::shared_ptr<reorder> MKLDNNMatrix::createReorder(const MKLDNNMatrixPtr& src,
const MKLDNNMatrixPtr& dst,
bool checkData) {
if (src == dst || src->getPrimitiveDesc() == dst->getPrimitiveDesc()) {
return nullptr;
}
if (checkData && (src->getData() == dst->getData())) {
LOG(FATAL) << "can not create reorder with inplace data";
return nullptr;
}
memory::dims srcDims = src->getDims();
memory::dims dstDims = dst->getDims();
CHECK_EQ(srcDims.size(), dstDims.size());
for (size_t i = 0; i < srcDims.size(); ++i) {
CHECK_EQ(srcDims[i], dstDims[i]);
}
return std::make_shared<reorder>(*src, *dst);
}
void MKLDNNMatrix::reorderDataFrom(const MKLDNNMatrixPtr& m,
memory::format srcFmt,
memory::dims targetDim) {
......
......@@ -52,6 +52,32 @@ public:
mkldnn::engine& eg,
mkldnn::memory::data_type dtype = mkldnn::memory::data_type::f32);
/**
* Create a memory descriptor.
* Defaults to any format and f32 dtype.
*/
static mkldnn::memory::desc createMemoryDesc(
const mkldnn::memory::dims& dims,
const mkldnn::memory::format& fmt = mkldnn::memory::format::any,
const mkldnn::memory::data_type& dtype = mkldnn::memory::data_type::f32) {
return mkldnn::memory::desc(dims, dtype, fmt);
}
/**
* Create a reorder primitive.
* Create a mkldnn::reorder handle for converting the src MKLDNNMatrix to dst.
* checkData: whether to check the data handles of src and dst.
*            If true, the data handles are checked and are not allowed to be
*            equal; otherwise they are not checked, so the created reorder
*            may operate on an inplace buffer.
*            Do not set it to false unless you can guarantee that the inplace
*            logic works with your reorder.
*/
static std::shared_ptr<mkldnn::reorder> createReorder(
const MKLDNNMatrixPtr& src,
const MKLDNNMatrixPtr& dst,
bool checkData = true);
public:
/**
* Reorder this MKLDNNMatrix from other format.
......
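The following is a minimal usage sketch of the createReorder() helper declared above; the include path, function name, shapes, and the eager-stream submission are illustrative assumptions rather than part of this change:

#include <vector>
#include "paddle/math/MKLDNNMatrix.h"  // assumed location of MKLDNNMatrix

namespace paddle {

// Convert a CPU nchw value into the layout that `dstPD` expects.
// Returns true if a reorder was actually needed and executed.
bool reorderToSketch(const MatrixPtr& cpuMat,
                     const mkldnn::memory::primitive_desc& dstPD,
                     mkldnn::engine& eng) {
  // wrap the plain CPU matrix as an nchw MKLDNNMatrix (bs=1, ic=3, ih=iw=4)
  MKLDNNMatrixPtr src = MKLDNNMatrix::create(
      cpuMat, {1, 3, 4, 4}, mkldnn::memory::format::nchw, eng);
  // allocate a destination buffer with the primitive's preferred layout
  MKLDNNMatrixPtr dst = MKLDNNMatrix::create(nullptr, dstPD);
  auto cvt = MKLDNNMatrix::createReorder(src, dst);  // nullptr if layouts match
  if (cvt == nullptr) {
    return false;
  }
  std::vector<mkldnn::primitive> pipeline;
  pipeline.push_back(*cvt);
  mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();
  return true;
}

}  // namespace paddle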
......@@ -62,6 +62,24 @@ void Copy<platform::GPUPlace, platform::GPUPlace>(platform::GPUPlace dst_place,
}
}
template <>
void Copy<platform::CPUPlace, platform::GPUPlace>(platform::CPUPlace dst_place,
void* dst,
platform::GPUPlace src_place,
const void* src, size_t num) {
platform::SetDeviceId(src_place.device);
platform::GpuMemcpySync(dst, src, num, cudaMemcpyDeviceToHost);
}
template <>
void Copy<platform::GPUPlace, platform::CPUPlace>(platform::GPUPlace dst_place,
void* dst,
platform::CPUPlace src_place,
const void* src, size_t num) {
platform::SetDeviceId(dst_place.device);
platform::GpuMemcpySync(dst, src, num, cudaMemcpyHostToDevice);
}
#endif // PADDLE_ONLY_CPU
} // namespace memory
......
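A short usage sketch of the GPU<->CPU Copy specializations above; the include paths, function name, and the place constructors are assumptions for illustration only:

#include <cstddef>

#include "paddle/memory/memcpy.h"   // assumed header declaring memory::Copy
#include "paddle/platform/place.h"  // assumed header declaring CPUPlace/GPUPlace

// Copy `bytes` bytes from a device buffer back to host memory; this is the
// synchronous device-to-host path added above.
void CopyToHostSketch(void* host_dst, const void* gpu_src, std::size_t bytes) {
  paddle::platform::CPUPlace cpu_place;
  paddle::platform::GPUPlace gpu_place(0);  // device 0
  paddle::memory::Copy(cpu_place, host_dst, gpu_place, gpu_src, bytes);
}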
file(GLOB GENERAL_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*_op.cc")
string(REPLACE ".cc" "" GENERAL_OPS "${GENERAL_OPS}")
set(pybind_file ${PADDLE_SOURCE_DIR}/paddle/pybind/pybind.h)
file(WRITE ${pybind_file} "// Generated by the paddle/operator/CMakeLists.txt. DO NOT EDIT!\n\n")
function(op_library TARGET)
# op_library is a function to create an op library. The interface is the same
# as cc_library, but it handles splitting GPU/CPU code and linking some common
# libraries.
......@@ -7,10 +9,11 @@ function(op_library TARGET)
set(OP_LIBRARY ${TARGET} ${OP_LIBRARY} PARENT_SCOPE)
set(cc_srcs)
set(cu_srcs)
set(op_common_deps operator op_registry)
set(op_common_deps operator op_registry math_function)
set(options "")
set(oneValueArgs "")
set(multiValueArgs SRCS DEPS)
set(pybind_flag 0)
cmake_parse_arguments(op_library "${options}" "${oneValueArgs}"
"${multiValueArgs}" ${ARGN})
......@@ -46,22 +49,42 @@ function(op_library TARGET)
cc_library(${TARGET} SRCS ${cc_srcs} DEPS ${op_library_DEPS}
${op_common_deps})
endif()
# net_op doesn't need pybind
if ("${TARGET}" STREQUAL "net_op")
set(pybind_flag 1)
endif()
# pybind USE_NO_KERNEL_OP
file(READ ${TARGET}.cc TARGET_CONTENT)
string(REGEX MATCH "OperatorWithKernel" regex_result "${TARGET_CONTENT}")
string(REPLACE "_op" "" TARGET "${TARGET}")
if (${pybind_flag} EQUAL 0 AND regex_result STREQUAL "")
file(APPEND ${pybind_file} "USE_NO_KERNEL_OP(${TARGET});\n")
set(pybind_flag 1)
endif()
# pybind USE_CPU_ONLY_OP
list(LENGTH cu_srcs cu_srcs_len)
if (${pybind_flag} EQUAL 0 AND ${cu_srcs_len} EQUAL 0)
file(APPEND ${pybind_file} "USE_CPU_ONLY_OP(${TARGET});\n")
set(pybind_flag 1)
endif()
# pybind USE_OP
if (${pybind_flag} EQUAL 0)
file(APPEND ${pybind_file} "USE_OP(${TARGET});\n")
endif()
endfunction()
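# For illustration, each processed op appends exactly one USE_* line to the
# generated paddle/pybind/pybind.h (the op names below are hypothetical):
#   USE_NO_KERNEL_OP(no_kernel_op);   # op that never mentions OperatorWithKernel
#   USE_CPU_ONLY_OP(cpu_only_op);     # op without a .cu source
#   USE_OP(plain_op);                 # everything else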
add_subdirectory(math)
set(DEPS_OPS
identity_op
minus_op
mul_op
recurrent_op
scale_op)
op_library(identity_op DEPS scale_op)
op_library(minus_op DEPS scale_op)
op_library(mul_op DEPS math_function)
cond_op)
op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc
DEPS framework_proto tensor operator net_op)
op_library(scale_op DEPS net_op)
DEPS framework_proto tensor net_op)
op_library(cond_op SRCS cond_op.cc DEPS framework_proto tensor operator net_op)
list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS})
foreach(src ${GENERAL_OPS})
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/accuracy_op.h"
namespace paddle {
namespace operators {
class AccuracyOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(
ctx.InputVar("Inference"),
"Input(Inference) of AccuracyOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Label"),
"Input(Label) of AccuracyOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(
ctx.OutputVar("Accuracy"),
"Output(Accuracy) of AccuracyOp should not be null.");
auto *inference = ctx.Input<framework::Tensor>("Inference");
auto *label = ctx.Input<framework::Tensor>("Label");
PADDLE_ENFORCE_EQ(label->dims().size(), 1, "label must be a vector");
PADDLE_ENFORCE_EQ(inference->dims()[0], label->dims()[0],
"inference size must be the same as label size");
ctx.Output<framework::LoDTensor>("Accuracy")->Resize({1});
}
};
class AccuracyOpMaker : public framework::OpProtoAndCheckerMaker {
public:
AccuracyOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
// TODO(typhoonzero): support both inference value and indices.
AddInput("Inference", "topk(indices) the network output");
AddInput("Label", "Label of the training data");
// TODO(typhoonzero): AddInput("Weight", ...
AddOutput("Accuracy", "The accuracy of current batch");
AddComment(
R"DOC(Accuracy. It will print accuracy rate for classification.
The accuracy is:
.. math::
accuracy = \\frac{NumOfCorrectPredicts}{NumOfAllSamples})DOC");
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(accuracy, ops::AccuracyOp, ops::AccuracyOpMaker);
REGISTER_OP_CPU_KERNEL(accuracy,
ops::AccuracyKernel<paddle::platform::CPUPlace, float>);
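As a concrete illustration of the formula above (the numbers are made up): with N = 3 samples, top-2 inference indices {{1, 3}, {0, 2}, {5, 4}} and labels {3, 1, 4}, samples 0 and 2 contain their label while sample 1 does not, so accuracy = 2 / 3 ≈ 0.667.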
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#include "paddle/operators/accuracy_op.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
template <int BlockSize>
__global__ void AccuracyCudaKernel(const int N, const int D, const int* Xdata,
const int* labeldata, float* accuracy) {
int count = 0;
__shared__ int total[BlockSize];
// support only 1 block
for (int i = threadIdx.x; i < (N); i += BlockSize) {
for (int j = 0; j < D; ++j) {
if (Xdata[i * D + j] == labeldata[i]) {
++count;
break;
}
}
}
total[threadIdx.x] = count;
__syncthreads();
// reduce the count with init value 0, and output accuracy.
int result = thrust::reduce(thrust::device, total, total + BlockSize, 0);
if (threadIdx.x == 0) {
*accuracy = static_cast<float>(result) / static_cast<float>(N);
}
}
template <typename T>
class AccuracyOpCUDAKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use GPUPlace.");
auto* inference = ctx.Input<Tensor>("Inference");
auto* label = ctx.Input<Tensor>("Label");
auto* accuracy = ctx.Output<Tensor>("Accuracy");
// FIXME(typhoonzero): only support indices currently
// if add support for output values, how to detect the data type?
const int* inference_data = inference->data<int>();
const int* label_data = label->data<int>();
float* accuracy_data = accuracy->mutable_data<float>(ctx.GetPlace());
size_t num_samples = inference->dims()[0];
size_t infer_width = inference->dims()[1];
cudaMemset(accuracy_data, 0, sizeof(float));
if (num_samples == 0) {
return;
}
AccuracyCudaKernel<PADDLE_CUDA_NUM_THREADS><<<1, PADDLE_CUDA_NUM_THREADS>>>(
num_samples, infer_width, inference_data, label_data, accuracy_data);
}
};
} // namespace operators
} // namespace paddle
REGISTER_OP_GPU_KERNEL(accuracy,
paddle::operators::AccuracyOpCUDAKernel<float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenScalar = framework::EigenScalar<T, MajorType, IndexType>;
template <typename Place, typename T>
class AccuracyKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* inference = ctx.Input<Tensor>("Inference");
auto* label = ctx.Input<Tensor>("Label");
auto* accuracy = ctx.Output<Tensor>("Accuracy");
float* accuracy_data = accuracy->mutable_data<float>(ctx.GetPlace());
const T* inference_data = inference->data<T>();
const T* label_data = label->data<T>();
size_t num_samples = inference->dims()[0];
size_t class_dim = inference->dims()[1];
*accuracy_data = 0.0f;
if (num_samples == 0) {
return;
}
int num_correct = 0;
// assume inference is already the topk of the output
for (size_t i = 0; i < num_samples; ++i) {
PADDLE_ENFORCE_GE(label_data[i], 0, "label must >= 0");
for (size_t j = 0; j < class_dim; ++j) {
if (inference_data[i * class_dim + j] == label_data[i]) {
++num_correct;
break;
}
}
}
// FIXME(typhoonzero): we don't accumulate the accuracy for now.
*accuracy_data =
static_cast<float>(num_correct) / static_cast<float>(num_samples);
}
};
} // namespace operators
} // namespace paddle
......@@ -23,10 +23,18 @@ class AddOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of AddOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
"Input(Y) of AddOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of AddOp should not be null.");
PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(),
ctx.Input<Tensor>("Y")->dims(),
"Two input of Add Op's dimension must be same.");
ctx.Output<Tensor>("Out")->Resize(ctx.Input<Tensor>("X")->dims());
ctx.Output<framework::LoDTensor>("Out")->Resize(
ctx.Input<Tensor>("X")->dims());
}
};
......
......@@ -25,8 +25,11 @@ class ConcatOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of ConcatOp should not be null.");
auto ins = ctx.MultiInput<framework::Tensor>("X");
auto *out = ctx.Output<framework::Tensor>("Out");
auto *out = ctx.Output<framework::LoDTensor>("Out");
size_t axis = static_cast<size_t>(ctx.Attr<int>("axis"));
size_t n = ins.size();
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/operators/concat_op.h"
namespace ops = paddle::operators;
// TODO(Yancey1989) Add GPU kernel
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/cond_op.h"
#include <cstring>
#include <sstream>
#include "paddle/framework/op_registry.h"
#include "paddle/operators/gather.h"
#include "paddle/operators/net_op.h"
#include "paddle/operators/scatter.h"
namespace paddle {
namespace operators {
using Scope = framework::Scope;
using Variable = framework::Variable;
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using DDim = framework::DDim;
void CondOp::CreateScope(const Scope& scope) const {
auto sub_scopes_var = scope.FindVar("SubScopes");
PADDLE_ENFORCE_NOT_NULL(sub_scopes_var,
"Output(SubScopes) of CondOp should not be null.");
auto sub_scopes = sub_scopes_var->GetMutable<std::vector<Scope*>>();
auto& sub_scope = scope.NewScope();
sub_scopes->push_back(&sub_scope);
}
void CondOp::CreateIndexTensor(const Scope& scope) const {
auto index_tensors_var = scope.FindVar("IndexTensors");
PADDLE_ENFORCE_NOT_NULL(index_tensors_var,
"Output(IndexTensors) of CondOp should not be null.");
auto& index_tensors =
*index_tensors_var->GetMutable<std::vector<LoDTensor>>();
index_tensors.push_back(LoDTensor());
}
void CondOp::InferShape(const Scope& scope) const {
auto sub_scopes_var = scope.FindVar("SubScopes");
PADDLE_ENFORCE_NOT_NULL(sub_scopes_var,
"Output(SubScopes) of CondOp should not be null.");
auto& sub_scopes = *sub_scopes_var->GetMutable<std::vector<Scope*>>();
for (int i = 0; i < 2; ++i) {
// Create two sub scopes for true and false branches
// sub_scopes[0] for the true branch and sub_scopes[1] for the false
// branch
CreateScope(scope);
// Create two tensors for true and false indices
// index_tensors[0] for the true branch and index_tensors[1] for the false
// branch
CreateIndexTensor(scope);
PADDLE_ENFORCE(!Inputs("Xs").empty(),
"Inputs(Xs) of CondOp can't be empty.");
for (auto& input : Inputs("Xs")) {
// Create a new tensor in sub-scope for input-type tensor
Variable* v = sub_scopes[i]->NewVar(input);
LoDTensor* sub_input = v->GetMutable<LoDTensor>();
sub_input->Resize(scope.FindVar(input)->GetMutable<LoDTensor>()->dims());
}
for (auto& output : (*sub_net_op_[i]).Outputs()) {
for (auto& var_name : output.second) {
sub_scopes[i]->NewVar(var_name);
}
}
// each net calls InferShape
sub_net_op_[i]->InferShape(*sub_scopes[i]);
}
for (auto& output : Outputs("Outs")) {
LoDTensor* tensor_t_out =
sub_scopes[0]->FindVar(output)->GetMutable<LoDTensor>();
PADDLE_ENFORCE_NOT_NULL(tensor_t_out, "True output should not be NULL");
LoDTensor* tensor_f_out =
sub_scopes[1]->FindVar(output)->GetMutable<LoDTensor>();
PADDLE_ENFORCE_NOT_NULL(tensor_f_out, "False output should not be NULL");
auto* tensor_out_var = scope.FindVar(output);
PADDLE_ENFORCE_NOT_NULL(tensor_out_var, "Output not found");
LoDTensor* tensor_out = tensor_out_var->GetMutable<LoDTensor>();
PADDLE_ENFORCE_NOT_NULL(tensor_t_out,
"True output tensor should not be NULL");
// check output size should be same
PADDLE_ENFORCE_EQ(tensor_t_out->dims(), tensor_f_out->dims(),
"Outputs not of the same shape");
tensor_out->Resize(tensor_t_out->dims());
// tensor_out->mutable_data<float>(tensor_out->dims(),
// platform::CPUPlace());
tensor_out->mutable_data<float>(platform::CPUPlace());
}
}
void CondOp::Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const {
auto* sub_scopes_var = scope.FindVar("SubScopes");
PADDLE_ENFORCE_NOT_NULL(sub_scopes_var,
"Output(SubScopes) of CondOp should not be null.");
auto sub_scopes = sub_scopes_var->Get<std::vector<Scope*>>();
auto* index_tensors_var = scope.FindVar("IndexTensors");
PADDLE_ENFORCE_NOT_NULL(index_tensors_var,
"Output(IndexTensors) of CondOp should not be null.");
auto index_tensors = index_tensors_var->Get<std::vector<LoDTensor>>();
std::string cond_name = Input("Cond");
Variable* cond_var = scope.FindVar(cond_name);
PADDLE_ENFORCE_NOT_NULL(cond_var,
"Input(Cond) of CondOp should not be null.");
const LoDTensor* cond = cond_var->GetMutable<LoDTensor>();
// Step 1: get the true/false index at runtime
// index_[0]: vector<int>, contains all indices for cond[i] == true
// index_[1]: vector<int>, contains all indices for cond[i] == false
for (int i = 0; i < 2; ++i) index_[i].clear();
const int* cond_data = cond->data<int>();
for (int i = 0; i < cond->dims()[0]; ++i) {
if (cond_data[i])
index_[0].push_back(i);
else
index_[1].push_back(i);
}
// put index_[0] and index_[1] into two tensors:
// index_tensor_[0] and index_tensor_[1]
DDim dim = paddle::framework::make_ddim({0});
for (int i = 0; i < 2; ++i) {
dim[0] = index_[i].size();
int* tmp_ptr =
index_tensors[i].mutable_data<int>(dim, platform::CPUPlace());
index_tensors[i].Resize(dim);
memcpy(tmp_ptr, index_[i].data(), dim[0] * sizeof(int));
}
// Step 2: collect data by calling gather
for (int i = 0; i < 2; ++i) {
// i = 0/1 for the True and False branches respectively
for (auto& input : Inputs("Xs")) {
// find Tensor
Variable* v = scope.FindVar(input);
PADDLE_ENFORCE_NOT_NULL(v);
LoDTensor* tensor_parent = v->GetMutable<LoDTensor>();
v = sub_scopes[i]->FindVar(input);
PADDLE_ENFORCE_NOT_NULL(v);
LoDTensor* tensor_child = v->GetMutable<LoDTensor>();
// Resize child
DDim dim = tensor_child->dims();
dim[0] = index_[i].size();
tensor_child->Resize(dim);
tensor_child->mutable_data<float>(dim, platform::CPUPlace());
Gather<float>(dev_ctx.GetPlace(), tensor_parent, &index_tensors[i],
tensor_child);
}
}
// Step 3: run
for (int i = 0; i < 2; ++i) {
sub_net_op_[i]->Run(*sub_scopes[i], dev_ctx);
}
// Step 4: merge output results
PADDLE_ENFORCE(!Outputs("Outs").empty(),
"Outputs(Outs) of CondOp can't be empty.");
for (int i = 0; i < 2; ++i) {
// i= 0/i for True and False branches respectively
for (auto& output : Outputs("Outs")) {
// find Tensor
Variable* v = scope.FindVar(output);
PADDLE_ENFORCE_NOT_NULL(v);
LoDTensor* tensor_parent = v->GetMutable<LoDTensor>();
v = sub_scopes[i]->FindVar(output);
PADDLE_ENFORCE_NOT_NULL(v);
LoDTensor* tensor_child = v->GetMutable<LoDTensor>();
ScatterUpdate<float>(dev_ctx.GetPlace(), tensor_child, &index_tensors[i],
tensor_parent);
}
}
}
class CondOpProtoAndCheckerMaker : public framework::OpProtoAndCheckerMaker {
public:
CondOpProtoAndCheckerMaker(framework::OpProto* proto,
framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("Cond", "The condition, which is a bool vector");
AddInput("Xs", "Inputs of Subnets").AsDuplicable();
AddOutput("Outs", "Outputs of Cond_Op after merge").AsDuplicable();
AddOutput("SubScopes", "sub scopes for true and false branches");
AddOutput("IndexTensors", "Index Tensors contains indices for true/false");
AddComment(R"DOC(
Sample dependent Cond Operator:
Given Cond[i] as a 1/0 vector to indicate true/false
The equation is:
Out[i] = subnet_t[i], if Cond[i] == true
Out[i] = subnet_f[i], if Cond[i] == false
)DOC");
}
};
} // namespace operators
} // namespace paddle
REGISTER_OP_WITHOUT_GRADIENT(cond, paddle::operators::CondOp,
paddle::operators::CondOpProtoAndCheckerMaker);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "glog/logging.h"
#include "paddle/framework/ddim.h"
#include "paddle/framework/eigen.h"
#include "paddle/framework/operator.h"
#include "paddle/framework/tensor.h"
#include "paddle/operators/net_op.h"
namespace paddle {
namespace operators {
/*
* @brief CondOp is a dynamic if-else Operator
*
* It has a input tensor named cond indicating which netop each instance will
* run.
*
* if cond == 1, it will run true_net, which is a NetOp.
*
* if cond == 0, it will run false_net, which is another NetOp.
*/
class CondOp : public framework::OperatorBase {
public:
CondOp(const std::string& type, const framework::VariableNameMap& inputs,
const framework::VariableNameMap& outputs,
const framework::AttributeMap& attrs)
: OperatorBase(type, inputs, outputs, attrs) {
index_.resize(2);
sub_net_op_.resize(2);
}
CondOp(const CondOp& o)
: framework::OperatorBase(
static_cast<const framework::OperatorBase&>(o)) {
// TODO(yuyang18): Implement copy ctor well.
PADDLE_THROW("Not implemented");
}
void CreateScope(const framework::Scope& scope) const;
void CreateIndexTensor(const framework::Scope& scope) const;
/*
* InferShape must be called before Run.
*/
void InferShape(const framework::Scope& scope) const override;
/*
* Set True Block
*/
void set_truenet(std::unique_ptr<OperatorBase>&& net) {
sub_net_op_[0] = std::move(net);
}
/*
* Set False Block
*/
void set_falsenet(std::unique_ptr<OperatorBase>&& net) {
sub_net_op_[1] = std::move(net);
}
void Run(const framework::Scope& scope,
const platform::DeviceContext& dev_ctx) const override;
private:
// sub_net_op_[0]: subnet_t
// sub_net_op_[1]: subnet_f
std::vector<std::unique_ptr<framework::OperatorBase>> sub_net_op_;
// index_[0]: True_index;
// index_[1]: False_index;
mutable std::vector<std::vector<int>> index_;
};
} // namespace operators
} // namespace paddle
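For readers skimming the commit, the following standalone sketch (not part of the change) illustrates the control flow that `CondOp::InferShape`/`Run` implement above: split the row indices by the boolean condition, gather each branch's rows, run the two sub-networks, and scatter the branch outputs back into a single result. The lambdas stand in for the true/false `NetOp`s; all names and values are illustrative only.

```c++
#include <cstdio>
#include <functional>
#include <vector>

using Rows = std::vector<std::vector<float>>;
using Net = std::function<Rows(const Rows&)>;

// Mirrors CondOp::Run: Step 1 split indices, Step 2 gather rows,
// Step 3 run each branch, Step 4 scatter-merge the outputs.
Rows RunCondSketch(const std::vector<int>& cond, const Rows& x,
                   const Net& true_net, const Net& false_net) {
  std::vector<std::vector<size_t>> index(2);  // index[0]: true rows, index[1]: false rows
  for (size_t i = 0; i < cond.size(); ++i) {
    index[cond[i] ? 0 : 1].push_back(i);
  }
  Rows out(x.size());
  const Net* nets[2] = {&true_net, &false_net};
  for (int b = 0; b < 2; ++b) {
    Rows sub_in;  // gather the rows selected for this branch
    for (size_t i : index[b]) sub_in.push_back(x[i]);
    Rows sub_out = (*nets[b])(sub_in);  // run the branch's sub-network
    for (size_t k = 0; k < index[b].size(); ++k) {
      out[index[b][k]] = sub_out[k];  // scatter back to the original rows
    }
  }
  return out;
}

int main() {
  auto negate = [](const Rows& in) {
    Rows r = in;
    for (auto& row : r)
      for (auto& v : row) v = -v;
    return r;
  };
  auto identity = [](const Rows& in) { return in; };
  auto out = RunCondSketch({1, 0, 1}, {{1.f}, {2.f}, {3.f}}, negate, identity);
  std::printf("%g %g %g\n", out[0][0], out[1][0], out[2][0]);  // -1 2 -3
  return 0;
}
```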
......@@ -25,16 +25,38 @@ class CosSimOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) must not be null.");
PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(),
ctx.Input<Tensor>("Y")->dims(),
"Dimensions of Input(X) and Input(Y) must be the same.");
auto dims = ctx.Input<Tensor>("X")->dims();
ctx.Output<Tensor>("Out")->Resize({dims[0], 1});
ctx.Output<Tensor>("XNorm")->Resize({dims[0], 1});
ctx.Output<Tensor>("YNorm")->Resize({dims[0], 1});
// notnull check
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of CosSimOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
"Input(Y) of CosSimOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of CosSimOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("XNorm"),
"Output(XNorm) of CosSimOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("YNorm"),
"Output(YNorm) of CosSimOp should not be null.");
// shape check
auto x_dims = ctx.Input<Tensor>("X")->dims();
auto y_dims = ctx.Input<Tensor>("Y")->dims();
PADDLE_ENFORCE_EQ(x_dims.size(), y_dims.size(),
"Ranks of Input(X) and Input(Y) must be equal.");
PADDLE_ENFORCE_GE(x_dims.size(), 2,
"Rank of Input(X) must not be less than 2.");
PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 1, x_dims.size()),
framework::slice_ddim(y_dims, 1, y_dims.size()),
"All dimensions except the 1st of Input(X) and Input(Y) "
"must be equal.");
PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1,
"The 1st dimension of Input(Y) must be equal to Input(X) or"
" just 1 (which will be broadcasted to match Input(X)).");
// resize tensor
ctx.Output<framework::LoDTensor>("Out")->Resize({x_dims[0], 1});
ctx.Output<framework::LoDTensor>("XNorm")->Resize({x_dims[0], 1});
ctx.Output<framework::LoDTensor>("YNorm")->Resize({y_dims[0], 1});
}
};
......@@ -42,16 +64,27 @@ class CosSimOpMaker : public framework::OpProtoAndCheckerMaker {
public:
CosSimOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The first input of cos_sim op.");
AddInput("Y", "The second input of cos_sim op.");
AddInput("X", "The 1st input of cos_sim op.");
AddInput("Y", "The 2nd input of cos_sim op.");
AddOutput("Out", "The output of cos_sim op.");
AddOutput("XNorm", "Row norm of the first input.").AsIntermediate();
AddOutput("YNorm", "Row norm of the second input.").AsIntermediate();
AddOutput("XNorm",
"Norm of the first input, reduced along the 1st "
"dimension.")
.AsIntermediate();
AddOutput("YNorm",
"Norm of the second input, reduced along the 1st "
"dimension.")
.AsIntermediate();
AddComment(R"DOC(
Cosine Similarity Operator.
The equation is: Out = X^T * Y / (sqrt(X^T * X) * sqrt(Y^T * Y))
The equation is: Out = X^T * Y / (sqrt(X^T * X) * sqrt(Y^T * Y)).
Input(X) and Input(Y) must have the same shape, except that the 1st dimension
of Input(Y) could be just 1 (different from Input(X)), which will be
broadcasted to match the shape of Input(X) before computing their cosine
similarity.
)DOC");
}
};
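As a plain-C++ illustration of the equation and the broadcasting rule described above (independent of the operator code): when Input(Y) has a single row, that row is compared against every row of Input(X), and Out has shape (rows_x, 1). Names and values below are illustrative only.

```c++
#include <cmath>
#include <cstdio>
#include <vector>

using Matrix = std::vector<std::vector<float>>;

// Row-wise cosine similarity; Y is broadcast when it has only one row.
std::vector<float> CosSimSketch(const Matrix& X, const Matrix& Y) {
  std::vector<float> out;
  for (size_t i = 0; i < X.size(); ++i) {
    const auto& y = (Y.size() == 1) ? Y[0] : Y[i];
    float xy = 0.f, xx = 0.f, yy = 0.f;
    for (size_t j = 0; j < X[i].size(); ++j) {
      xy += X[i][j] * y[j];
      xx += X[i][j] * X[i][j];
      yy += y[j] * y[j];
    }
    out.push_back(xy / (std::sqrt(xx) * std::sqrt(yy)));
  }
  return out;
}

int main() {
  // X has 2 rows, Y has 1 row -> Y is broadcast and Out has shape (2, 1).
  auto out = CosSimSketch({{1.f, 0.f}, {0.f, 1.f}}, {{1.f, 0.f}});
  std::printf("%g %g\n", out[0], out[1]);  // 1 0
  return 0;
}
```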
......@@ -62,34 +95,54 @@ class CosSimOpGrad : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
// notnull check
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("XNorm"),
"Input(XNorm) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("YNorm"),
"Input(YNorm) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Out"),
"Input(Out) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
"Input(Out@GRAD) must not be null.");
// shape check
auto x_dims = ctx.Input<Tensor>("X")->dims();
auto y_dims = ctx.Input<Tensor>("Y")->dims();
auto xnorm_dims = ctx.Input<Tensor>("XNorm")->dims();
auto ynorm_dims = ctx.Input<Tensor>("YNorm")->dims();
auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
PADDLE_ENFORCE_EQ(x_dims, y_dims,
"Dimensions of Input(X) and Input(Y) must be the same.");
PADDLE_ENFORCE_EQ(xnorm_dims[0], x_dims[0],
"1st dimension of XNorm must equal that of Input(X).");
PADDLE_ENFORCE_EQ(xnorm_dims[1], 1, "2nd dimension of XNorm must be one.");
PADDLE_ENFORCE_EQ(ynorm_dims[0], y_dims[0],
"1st dimension of YNorm must equal that of Input(Y).");
PADDLE_ENFORCE_EQ(ynorm_dims[1], 1, "2nd dimension of YNorm must be one.");
PADDLE_ENFORCE_EQ(out_dims[0], x_dims[0],
"1st dimension of Out@GRAD must equal that of Input(X)");
PADDLE_ENFORCE_EQ(out_dims[1], 1, "2nd dimension of Out@GRAD must be one.");
auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *y_grad = ctx.Output<Tensor>(framework::GradVarName("Y"));
auto out_dims = ctx.Input<Tensor>("Out")->dims();
auto out_grad_dims =
ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
PADDLE_ENFORCE_EQ(x_dims.size(), y_dims.size(),
"Ranks of Input(X) and Input(Y) must be equal.");
PADDLE_ENFORCE_GE(x_dims.size(), 2,
"Rank of Input(X) must not be less than 2.");
PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 1, x_dims.size()),
framework::slice_ddim(y_dims, 1, y_dims.size()),
"All dimensions except the 1st of Input(X) and Input(Y) "
"must be equal.");
PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1,
"The 1st dimension of Input(Y) must be equal to Input(X) or"
" just 1 (which will be broadcasted to match Input(X)).");
auto target_xnorm_dims = framework::make_ddim({x_dims[0], 1});
auto target_ynorm_dims = framework::make_ddim({y_dims[0], 1});
PADDLE_ENFORCE_EQ(xnorm_dims, target_xnorm_dims,
"Shape of Input(XNorm) must be [X.Dim(0), 1].");
PADDLE_ENFORCE_EQ(ynorm_dims, target_ynorm_dims,
"Shape of Input(YNorm) must be [Y.Dim(0), 1].");
PADDLE_ENFORCE_EQ(out_dims, target_xnorm_dims,
"Shape of Input(Out) must be [X.Dim(0), 1].");
PADDLE_ENFORCE_EQ(out_grad_dims, target_xnorm_dims,
"Shape of Input(Out@Grad) must be [X.Dim(0), 1].");
// resize tensor
auto *x_grad =
ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
auto *y_grad =
ctx.Output<framework::LoDTensor>(framework::GradVarName("Y"));
if (x_grad) x_grad->Resize(x_dims);
if (y_grad) y_grad->Resize(y_dims);
}
......
......@@ -31,30 +31,38 @@ template <typename Place, typename T>
class CosSimKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* input_x = context.Input<Tensor>("X");
auto* input_y = context.Input<Tensor>("Y");
auto* output_z = context.Output<Tensor>("Out");
auto* output_x_norm = context.Output<Tensor>("XNorm");
auto* output_y_norm = context.Output<Tensor>("YNorm");
// get Tensor
auto* in_x = context.Input<Tensor>("X");
auto* in_y = context.Input<Tensor>("Y");
auto* out_z = context.Output<Tensor>("Out");
auto* out_x_norm = context.Output<Tensor>("XNorm");
auto* out_y_norm = context.Output<Tensor>("YNorm");
out_z->mutable_data<T>(context.GetPlace());
out_x_norm->mutable_data<T>(context.GetPlace());
out_y_norm->mutable_data<T>(context.GetPlace());
output_z->mutable_data<T>(context.GetPlace());
output_x_norm->mutable_data<T>(context.GetPlace());
output_y_norm->mutable_data<T>(context.GetPlace());
auto dims = input_x->dims();
int size = static_cast<int>(framework::product(dims));
auto new_dims = framework::make_ddim({dims[0], size / dims[0]});
auto x = EigenMatrix<T>::From(*input_x, new_dims);
auto y = EigenMatrix<T>::From(*input_y, new_dims);
auto z = EigenVector<T>::Flatten(*output_z);
auto x_norm = EigenVector<T>::Flatten(*output_x_norm);
auto y_norm = EigenVector<T>::Flatten(*output_y_norm);
// convert Tensor to Eigen Tensor
int rows_x = in_x->dims()[0];
int rows_y = in_y->dims()[0];
auto x = EigenMatrix<T>::Reshape(*in_x, 1);
auto y = EigenMatrix<T>::Reshape(*in_y, 1);
auto z = EigenVector<T>::Flatten(*out_z);
auto x_norm = EigenVector<T>::Flatten(*out_x_norm);
auto y_norm = EigenVector<T>::Flatten(*out_y_norm);
// compute
auto place = context.GetEigenDevice<Place>();
auto xy = (x * y).sum(Eigen::array<int, 1>({{1}}));
x_norm.device(place) = x.square().sum(Eigen::array<int, 1>({{1}})).sqrt();
y_norm.device(place) = y.square().sum(Eigen::array<int, 1>({{1}})).sqrt();
z.device(place) = xy / x_norm / y_norm;
auto row_along = Eigen::array<int, 1>({{1}});
x_norm.device(place) = x.square().sum(row_along).sqrt();
y_norm.device(place) = y.square().sum(row_along).sqrt();
if (rows_x == rows_y) {
auto xy = (x * y).sum(Eigen::array<int, 1>({{1}}));
z.device(place) = xy / x_norm / y_norm;
} else {
Eigen::DSizes<int, 2> bcast(rows_x, 1);
auto xy = (x * y.broadcast(bcast)).sum(row_along);
z.device(place) = xy / x_norm / y_norm.broadcast(bcast);
}
}
};
......@@ -62,43 +70,72 @@ template <typename Place, typename T>
class CosSimGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* input_x = context.Input<Tensor>("X");
auto* input_y = context.Input<Tensor>("Y");
auto* input_z = context.Input<Tensor>("Out");
auto* input_x_norm = context.Input<Tensor>("XNorm");
auto* input_y_norm = context.Input<Tensor>("YNorm");
auto* output_grad_x = context.Output<Tensor>(framework::GradVarName("X"));
auto* output_grad_y = context.Output<Tensor>(framework::GradVarName("Y"));
auto* input_grad_z = context.Input<Tensor>(framework::GradVarName("Out"));
// get Tensor
auto* in_x = context.Input<Tensor>("X");
auto* in_y = context.Input<Tensor>("Y");
auto* in_z = context.Input<Tensor>("Out");
auto* in_x_norm = context.Input<Tensor>("XNorm");
auto* in_y_norm = context.Input<Tensor>("YNorm");
auto* out_grad_x = context.Output<Tensor>(framework::GradVarName("X"));
auto* out_grad_y = context.Output<Tensor>(framework::GradVarName("Y"));
auto* in_grad_z = context.Input<Tensor>(framework::GradVarName("Out"));
auto dims = input_x->dims();
int size = static_cast<int>(framework::product(dims));
auto new_dims = framework::make_ddim({dims[0], size / dims[0]});
auto x = EigenMatrix<T>::From(*input_x, new_dims);
auto y = EigenMatrix<T>::From(*input_y, new_dims);
auto z = EigenMatrix<T>::From(*input_z);
auto x_norm = EigenMatrix<T>::From(*input_x_norm);
auto y_norm = EigenMatrix<T>::From(*input_y_norm);
auto dz = EigenMatrix<T>::From(*input_grad_z);
// convert Tensor to Eigen Tensor
auto x = EigenMatrix<T>::Reshape(*in_x, 1);
auto y = EigenMatrix<T>::Reshape(*in_y, 1);
auto z = EigenMatrix<T>::Reshape(*in_z, 1);
auto x_norm = EigenMatrix<T>::Reshape(*in_x_norm, 1);
auto y_norm = EigenMatrix<T>::Reshape(*in_y_norm, 1);
auto dz = EigenMatrix<T>::Reshape(*in_grad_z, 1);
Eigen::DSizes<int, 2> bcast(1, new_dims[1]);
auto z_bcast = z.broadcast(bcast);
auto dz_bcast = dz.broadcast(bcast);
// compute gradident
int rows_x = in_x->dims()[0];
int rows_y = in_y->dims()[0];
int cols = framework::product(in_x->dims()) / rows_x;
Eigen::DSizes<int, 2> bcast_cols(1, cols);
auto z_bcast = z.broadcast(bcast_cols);
auto dz_bcast = dz.broadcast(bcast_cols);
auto x_snorm_bcast = x_norm.square().eval().broadcast(bcast_cols);
auto place = context.GetEigenDevice<Place>();
auto x_snorm_bcast = x_norm.square().eval().broadcast(bcast);
auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast);
auto norm_prod_bcast = (x_norm * y_norm).eval().broadcast(bcast);
if (output_grad_x) {
output_grad_x->mutable_data<T>(context.GetPlace());
auto dx = EigenMatrix<T>::From(*output_grad_x, new_dims);
dx.device(place) =
dz_bcast * (y / norm_prod_bcast - z_bcast * x / x_snorm_bcast);
}
if (output_grad_y) {
output_grad_y->mutable_data<T>(context.GetPlace());
auto dy = EigenMatrix<T>::From(*output_grad_y, new_dims);
dy.device(place) =
dz_bcast * (x / norm_prod_bcast - z_bcast * y / y_snorm_bcast);
if (rows_x == rows_y) {
auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast_cols);
auto norm_prod_bcast = (x_norm * y_norm).eval().broadcast(bcast_cols);
// compute dx
if (out_grad_x) {
out_grad_x->mutable_data<T>(context.GetPlace());
auto dx = EigenMatrix<T>::Reshape(*out_grad_x, 1);
auto grad = y / norm_prod_bcast - z_bcast * x / x_snorm_bcast;
dx.device(place) = dz_bcast * grad;
}
// compute dy
if (out_grad_y) {
out_grad_y->mutable_data<T>(context.GetPlace());
auto dy = EigenMatrix<T>::Reshape(*out_grad_y, 1);
auto grad = x / norm_prod_bcast - z_bcast * y / y_snorm_bcast;
dy.device(place) = dz_bcast * grad;
}
} else {
Eigen::DSizes<int, 2> bcast_rows(rows_x, 1);
Eigen::DSizes<int, 2> bcast_rows_cols(rows_x, cols);
auto y_bcast = y.broadcast(bcast_rows);
auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast_rows_cols);
auto norm_prod_bcast = (x_norm * y_norm.eval().broadcast(bcast_rows))
.eval()
.broadcast(bcast_cols);
// compute dx
if (out_grad_x) {
out_grad_x->mutable_data<T>(context.GetPlace());
auto dx = EigenMatrix<T>::Reshape(*out_grad_x, 1);
auto grad = y_bcast / norm_prod_bcast - z_bcast * x / x_snorm_bcast;
dx.device(place) = dz_bcast * grad;
}
// compute dy
if (out_grad_y) {
out_grad_y->mutable_data<T>(context.GetPlace());
auto dy = EigenMatrix<T>::Reshape(*out_grad_y, 1);
auto grad = x / norm_prod_bcast - z_bcast * y_bcast / y_snorm_bcast;
dy.device(place) = (dz_bcast * grad).sum(Eigen::array<int, 1>({{0}}));
}
}
}
};
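For reference (an editor's note, not part of the commit), the per-row gradient computed by the kernel above for the equal-rows case follows from differentiating the cosine similarity; the dy case is symmetric:

```latex
z = \frac{x \cdot y}{\lVert x \rVert\, \lVert y \rVert},
\qquad
\frac{\partial z}{\partial x_j}
  = \frac{y_j}{\lVert x \rVert\, \lVert y \rVert}
  - z\, \frac{x_j}{\lVert x \rVert^{2}},
\qquad
\mathrm{d}x_j
  = \mathrm{d}z \left( \frac{y_j}{\lVert x \rVert\, \lVert y \rVert}
  - z\, \frac{x_j}{\lVert x \rVert^{2}} \right)
```

which matches `dx = dz_bcast * (y / norm_prod_bcast - z_bcast * x / x_snorm_bcast)` in the code.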
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/elementwise_mul_op.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
class ElementWiseMulOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of ElementWiseMulOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
"Input(Y) of ElementWiseMulOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(
ctx.OutputVar("Out"),
"Output(Out) of ElementWiseMulOp should not be null.");
auto x_dim = ctx.Input<Tensor>("X")->dims();
auto y_dim = ctx.Input<Tensor>("Y")->dims();
PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(),
"Rank of first input must >= rank of second input.")
ctx.Output<framework::LoDTensor>("Out")->Resize(x_dim);
}
};
class ElementWiseMulOpMaker : public framework::OpProtoAndCheckerMaker {
public:
ElementWiseMulOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The first input of elementwise mul op");
AddInput("Y", "The second input of elementwise mul op");
AddAttr<int>("axis",
R"DOC(
When shape(Y) does not equal shape(X), Y will be broadcast
to match the shape of X; axis is the index of the dimension of X
at which Y's first dimension is aligned
)DOC")
.SetDefault(-1)
.EqualGreaterThan(-1);
AddOutput("Out", "The output of elementwise mul op");
AddComment(R"DOC(
Limited elementwise multiplication operator. The equation is: Out = X ⊙ Y.
Either:
1. The shape of Y is the same as that of X, or
2. Y's shape is a contiguous subsequence of X's shape, in which case Y will be
   broadcast to match the shape of X; axis is the index of the dimension of X
   at which Y's first dimension is aligned.
Examples:
shape(X) = (2, 3, 4, 5), shape(Y) = (,)
shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5)
shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
)DOC");
}
};
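To make the broadcasting rule concrete, here is a standalone sketch (not part of the operator) of the default-axis case in which Y's shape matches the trailing dimension of X; shapes and values are illustrative only.

```c++
#include <cstdio>
#include <vector>

int main() {
  // shape(X) = (2, 3), shape(Y) = (3,); with the default axis = -1 the
  // operator aligns Y with the trailing dimension of X, so
  // Out[i][j] = X[i][j] * Y[j].
  std::vector<std::vector<float>> X = {{1, 2, 3}, {4, 5, 6}};
  std::vector<float> Y = {10, 100, 1000};
  for (size_t i = 0; i < X.size(); ++i) {
    for (size_t j = 0; j < X[i].size(); ++j) {
      std::printf("%g ", X[i][j] * Y[j]);
    }
    std::printf("\n");  // prints: 10 200 3000 / 40 500 6000
  }
  return 0;
}
```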
class ElementWiseMulOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
"Input(Out@GRAD) should not be null");
auto x_dims = ctx.Input<Tensor>("X")->dims();
auto y_dims = ctx.Input<Tensor>("Y")->dims();
auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
auto *x_grad =
ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
auto *y_grad =
ctx.Output<framework::LoDTensor>(framework::GradVarName("Y"));
PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(),
"Rank of first input must >= rank of second input.")
if (x_grad) {
x_grad->Resize(x_dims);
}
if (y_grad) {
y_grad->Resize(y_dims);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(elementwise_mul, ops::ElementWiseMulOp, ops::ElementWiseMulOpMaker,
elementwise_mul_grad, ops::ElementWiseMulOpGrad);
REGISTER_OP_CPU_KERNEL(
elementwise_mul,
ops::ElementWiseMulKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
elementwise_mul_grad,
ops::ElementWiseMulGradKernel<paddle::platform::CPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/operators/elementwise_mul_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(
elementwise_mul,
ops::ElementWiseMulKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
elementwise_mul_grad,
ops::ElementWiseMulGradKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
/*
* Out = X ⊙ Y
* 1. shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
* pre=2, n=3*4, post=5
* 2. shape(X) = (2, 3, 4, 5), shape(Y) = (4,5)
* pre=2*3, n=4*5, post=1
*/
inline void get_mid_dims(const framework::DDim& x_dims,
const framework::DDim& y_dims, const int axis,
int& pre, int& n, int& post) {
pre = 1;
n = 1;
post = 1;
for (int i = 0; i < axis; ++i) {
pre *= x_dims[i];
}
for (int i = 0; i < y_dims.size(); ++i) {
PADDLE_ENFORCE_EQ(x_dims[i + axis], y_dims[i],
"Broadcast dimension mismatch.");
n *= y_dims[i];
}
for (int i = axis + y_dims.size(); i < x_dims.size(); ++i) {
post *= x_dims[i];
}
}
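As a quick check of the decomposition documented above (an editor's sketch, not part of the header), calling `get_mid_dims` for case 1 yields pre = 2, n = 12 and post = 5; `framework::make_ddim` is the helper already used elsewhere in this commit, and the snippet assumes it is compiled against the same source tree.

```c++
#include "paddle/operators/elementwise_mul_op.h"  // for get_mid_dims above

void GetMidDimsExample() {
  namespace f = paddle::framework;
  int pre = 0, n = 0, post = 0;
  paddle::operators::get_mid_dims(f::make_ddim({2, 3, 4, 5}),
                                  f::make_ddim({3, 4}), /*axis=*/1,
                                  pre, n, post);
  // pre == 2, n == 3 * 4 == 12, post == 5: the kernels reshape Y to
  // (1, n, 1) and tile it to (pre, n, post) before multiplying.
}
```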
template <typename Place, typename T>
class ElementWiseMulKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
using Tensor = framework::Tensor;
auto* x = ctx.Input<Tensor>("X");
auto* y = ctx.Input<Tensor>("Y");
auto* z = ctx.Output<Tensor>("Out");
z->mutable_data<T>(ctx.GetPlace());
auto x_e = framework::EigenVector<T>::Flatten(*x);
auto y_e = framework::EigenVector<T>::Flatten(*y);
auto z_e = framework::EigenVector<T>::Flatten(*z);
auto x_dims = x->dims();
auto y_dims = y->dims();
PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(),
"Rank of first input must >= rank of second input.")
if (x_dims == y_dims || product(y_dims) == 1) {
z_e.device(ctx.GetEigenDevice<Place>()) = x_e * y_e;
return;
}
int axis = ctx.Attr<int>("axis");
axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(),
"Axis should be in range [0, x_dims)");
int pre, n, post;
get_mid_dims(x_dims, y_dims, axis, pre, n, post);
if (post == 1) {
auto y_bcast = y_e.reshape(Eigen::DSizes<int, 2>(1, n))
.broadcast(Eigen::DSizes<int, 2>(pre, 1))
.reshape(Eigen::DSizes<int, 1>(x_e.size()));
z_e.device(ctx.GetEigenDevice<Place>()) = x_e * y_bcast;
return;
} else {
auto y_bcast = y_e.reshape(Eigen::DSizes<int, 3>(1, n, 1))
.broadcast(Eigen::DSizes<int, 3>(pre, 1, post))
.reshape(Eigen::DSizes<int, 1>(x_e.size()));
z_e.device(ctx.GetEigenDevice<Place>()) = x_e * y_bcast;
return;
}
}
};
template <typename Place, typename T>
class ElementWiseMulGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
using Tensor = framework::Tensor;
auto* x = ctx.Input<Tensor>("X");
auto* y = ctx.Input<Tensor>("Y");
auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto x_e = framework::EigenVector<T>::Flatten(*x);
auto y_e = framework::EigenVector<T>::Flatten(*y);
auto dout_e = framework::EigenVector<T>::Flatten(*dout);
auto x_dims = x->dims();
auto y_dims = y->dims();
auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
auto* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));
if (dx) {
dx->mutable_data<T>(ctx.GetPlace());
}
if (dy) {
dy->mutable_data<T>(ctx.GetPlace());
}
if (x_dims == y_dims || product(y_dims) == 1) {
if (dx) {
auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(ctx.GetEigenDevice<Place>()) = dout_e * y_e;
}
if (dy) {
auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(ctx.GetEigenDevice<Place>()) = x_e * dout_e;
}
return;
}
int axis = ctx.Attr<int>("axis");
axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
int pre, n, post;
get_mid_dims(x_dims, y_dims, axis, pre, n, post);
// TODO(gongweibao): wrap reshape to a function.
if (post == 1) {
auto y_e_bcast = y_e.reshape(Eigen::DSizes<int, 2>(1, n))
.broadcast(Eigen::DSizes<int, 2>(pre, 1))
.reshape(Eigen::DSizes<int, 1>(x_e.size()));
if (dx) {
auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(ctx.GetEigenDevice<Place>()) = dout_e * y_e_bcast;
}
if (dy) {
auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(ctx.GetEigenDevice<Place>()) =
(x_e * dout_e)
.reshape(Eigen::DSizes<int, 2>(pre, n))
.sum(Eigen::array<int, 1>{{0}});
}
return;
} else {
auto y_e_bcast = y_e.reshape(Eigen::DSizes<int, 3>(1, n, 1))
.broadcast(Eigen::DSizes<int, 3>(pre, 1, post))
.reshape(Eigen::DSizes<int, 1>(x_e.size()));
if (dx) {
auto dx_e = framework::EigenVector<T>::Flatten(*dx);
dx_e.device(ctx.GetEigenDevice<Place>()) = dout_e * y_e_bcast;
}
if (dy) {
auto dy_e = framework::EigenVector<T>::Flatten(*dy);
dy_e.device(ctx.GetEigenDevice<Place>()) =
(x_e * dout_e)
.reshape(Eigen::DSizes<int, 3>(pre, n, post))
.sum(Eigen::array<int, 2>{{0, 2}});
}
return;
}
}
};
} // namespace operators
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/op_registry.h"
#include "paddle/operators/net_op.h"
namespace paddle {
namespace operators {
class FCOp : public NetOp {
public:
FCOp(const std::string &type, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: NetOp(type, inputs, outputs, attrs) {
PADDLE_ENFORCE(!Inputs("X").empty(),
"Inputs(X) of FCOp should not be null.");
PADDLE_ENFORCE(!Inputs("W").empty(),
"Inputs(W) of FCOp should not be null.");
PADDLE_ENFORCE(!Outputs("MulOut").empty(),
"Outputs(MulOut) of FCOp should not be null.");
PADDLE_ENFORCE_NE(Output("Out"), framework::kEmptyVarName,
"Output(Out) of FCOp should not be null.");
auto x = Inputs("X");
auto w = Inputs("W");
auto mul_out = Outputs("MulOut");
PADDLE_ENFORCE_EQ(
x.size(), w.size(),
"The size of inputs X(%d) should be the same as that of weights W(%d).",
x.size(), w.size());
PADDLE_ENFORCE_EQ(mul_out.size(), x.size(),
"The size of intermediate mul_out(%d) should be the same "
"as that of inputs X(%d).",
mul_out.size(), x.size());
size_t n = x.size();
PADDLE_ENFORCE_GE(n, static_cast<size_t>(1),
"The size of inputs X(%d) should be no less than 1.", n);
auto x_num_col_dims = Attr<std::vector<int>>("xNumColDims");
// Set all values or set no values (use the default value)
if (!x_num_col_dims.empty()) {
PADDLE_ENFORCE_EQ(x_num_col_dims.size(), n,
"The size of attribute xNumColDims(%d) should be the "
"same as that of inputs X(%d).",
x_num_col_dims.size(), n);
} else {
x_num_col_dims.resize(n);
for (size_t i = 0; i < n; i++) {
x_num_col_dims[i] = 1;
}
}
// mul_out[i] = X[i] * W[i]
for (size_t i = 0; i < n; i++) {
framework::AttributeMap mul_attr;
mul_attr["x_num_col_dims"] = static_cast<int>(x_num_col_dims[i]);
mul_attr["y_num_col_dims"] = static_cast<int>(1);
AppendOp(
framework::OpRegistry::CreateOp("mul", {{"X", {x[i]}}, {"Y", {w[i]}}},
{{"Out", {mul_out[i]}}}, mul_attr));
}
// sum_out = X[0] * W[0] + ... + X[n-1] * W[n-1]
auto sum_out = mul_out[0];
if (n > 1) {
PADDLE_ENFORCE_NE(Output("SumOut"), framework::kEmptyVarName,
"Output(SumOut) of FCOp should not be null when the "
"size of Inputs(X) > 1.");
sum_out = Output("SumOut");
AppendOp(framework::OpRegistry::CreateOp("sum", {{"X", {mul_out}}},
{{"Out", {sum_out}}}, {}));
} else {
if (Output("SumOut") != framework::kEmptyVarName) {
this->Rename(Output("SumOut"), framework::kEmptyVarName);
}
}
// add_out = sum_out + b
auto b = Input("B");
auto add_out = sum_out;
if (b != framework::kEmptyVarName) {
PADDLE_ENFORCE_NE(
Output("AddOut"), framework::kEmptyVarName,
"Output(AddOut) of FCOp should not be null when Input(B) is set.");
add_out = Output("AddOut");
AppendOp(framework::OpRegistry::CreateOp(
"rowwise_add", {{"X", {sum_out}}, {"b", {Input("B")}}},
{{"Out", {add_out}}}, {}));
} else {
if (Output("AddOut") != framework::kEmptyVarName) {
this->Rename(Output("AddOut"), framework::kEmptyVarName);
}
}
auto activation = Attr<std::string>("activation");
AppendOp(framework::OpRegistry::CreateOp(activation, {{"X", {add_out}}},
{{"Y", {Output("Out")}}}, {}));
CompleteAddOp(false);
}
};
class FCOpMaker : public framework::OpProtoAndCheckerMaker {
public:
FCOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X",
"(A vector of Tensors) each input Tensor can be of arbitrary "
"dimension, and will be reshaped to a 2-D matrix of size "
"(minibatch, number_of_input_features) according to attribute "
"xNumColDims.")
.AsDuplicable();
AddInput("W",
"(A vector of Tensors) the weights of FC operator, a "
"vector of 2-D matrix of size "
"(number_of_input_features, number_of_neurons).")
.AsDuplicable();
AddInput("B",
"(Tensor) the bias of FC operator, a 1-D vector of size "
"number_of_neurons.");
AddOutput("Out",
"(Tensor) the activated output matrix of FC operator, a 2-D "
"matrix of size (minibatch, number_of_neurons).");
AddOutput("MulOut",
"(A vector of Tensors) the intermediate outputs of FC operator, "
"each Tensor saving the product of X_i * W_i.")
.AsIntermediate()
.AsDuplicable();
AddOutput(
"SumOut",
"(Tensor) the intermediate output of FC operator, "
"saving the sum of the products of X and W, that is sum{X_i * W_i}.")
.AsIntermediate();
AddOutput("AddOut",
"(Tensor) the non-actived output of FC operator, "
"saving sum{X_i * W_i} + B.")
.AsIntermediate();
AddAttr<std::string>(
"activation",
"(string, default identity) the activation type of FC operator.")
.SetDefault("identity")
.InEnum({"identity", "sigmoid", "softmax"});
AddAttr<std::vector<int>>(
"xNumColDims",
"(std::vector<int>) The inputs Tensors of FC operator can be of "
"more than 2 dimensions. In that case, each input Tensor `X_i` will be "
"reshaped to a 2-D matrix. The matrix's first dimension "
"(the length of column) will be the product of `X_i`'s last "
"`xNumColDims_i` dimensions, that is "
"`X_i.dims[0] x ... x X_i.dims[xNumColDims_i - 1]`. "
"The matrix's second dimension (the length of row) will be the product "
"of `X_i`'s first `rank - xNumColDims_i` dimensions, that is "
"`X_i.dims[xNumColDims_i] x ... x X_i.dims[rank - 1]`)")
.SetDefault(std::vector<int>{});
AddComment(R"DOC(
Fully Connected Operator, known as Fully Connected Layer or Inner Product Layer
in Convolutional Neural Networks. Neurons in a fully connected layer have
full connections to all activations in the previous layer.
It computes an inner product of a set of
learned weights with a matrix multiplication followed by a bias offset
(optionally).
Equation:
Out = Act(sum_n{X_i * W_i} + B)
where X_i is Tensor that will be reshaped to a 2-D matrix of size (M x K),
usually M is the minibatch size and K is the number of input features.
W_i is a 2-D matrix of size (K x N), where N means the number of neurons
in the fully connected layer. B is a 1-D vector of size N.
Thus, the output Out is a 2-D matrix of size (M x N).
Activation type can be set to `identity` (default), `sigmoid` or `softmax`.
)DOC");
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(fc, ops::FCOp, ops::FCOpMaker);
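The equation in the comment above, Out = Act(sum_n{X_i * W_i} + B), can be checked with a small standalone sketch; the shapes, the sigmoid activation and the values below are illustrative and independent of the Paddle API.

```c++
#include <cmath>
#include <cstdio>
#include <vector>

using Matrix = std::vector<std::vector<float>>;

Matrix MatMul(const Matrix& a, const Matrix& b) {
  Matrix c(a.size(), std::vector<float>(b[0].size(), 0.f));
  for (size_t i = 0; i < a.size(); ++i)
    for (size_t k = 0; k < b.size(); ++k)
      for (size_t j = 0; j < b[0].size(); ++j) c[i][j] += a[i][k] * b[k][j];
  return c;
}

int main() {
  // Two inputs: X0 is (M=1, K0=2), X1 is (1, K1=1); weights W0 is (2, N=2),
  // W1 is (1, 2); bias B is (2,). Out = sigmoid(X0*W0 + X1*W1 + B), shape (1, 2).
  Matrix X0 = {{1.f, 2.f}}, W0 = {{0.5f, -1.f}, {0.25f, 1.f}};
  Matrix X1 = {{3.f}}, W1 = {{1.f, 0.f}};
  std::vector<float> B = {0.f, 1.f};

  Matrix mul0 = MatMul(X0, W0), mul1 = MatMul(X1, W1);
  for (size_t j = 0; j < B.size(); ++j) {
    float add_out = mul0[0][j] + mul1[0][j] + B[j];  // sum{X_i * W_i} + B
    float out = 1.f / (1.f + std::exp(-add_out));    // sigmoid activation
    std::printf("%f ", out);                         // roughly 0.982 0.881
  }
  std::printf("\n");
  return 0;
}
```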
......@@ -23,7 +23,14 @@ class FillZerosLikeOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
ctx.Output<framework::Tensor>("Dst")->Resize(
PADDLE_ENFORCE_NOT_NULL(
ctx.InputVar("Src"),
"Input(Src) of FillZerosLikeOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(
ctx.OutputVar("Dst"),
"Output(Dst) of FillZerosLikeOp should not be null.");
ctx.Output<framework::LoDTensor>("Dst")->Resize(
ctx.Input<framework::Tensor>("Src")->dims());
}
};
......
......@@ -24,11 +24,18 @@ class GatherOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of GatherOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Index"),
"Input(Index) of GatherOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of GatherOp should not be null.");
int batch_size = ctx.Input<Tensor>("Index")->dims()[0];
PADDLE_ENFORCE_GE(batch_size, 0, "Batch size must be >= 0.");
framework::DDim output_dims(ctx.Input<Tensor>("X")->dims());
output_dims[0] = batch_size;
ctx.Output<Tensor>("Out")->Resize(output_dims);
ctx.Output<framework::LoDTensor>("Out")->Resize(output_dims);
}
};
......@@ -38,7 +45,7 @@ class GatherGradOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
auto X_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto X_grad = ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
auto X = ctx.Input<Tensor>("X");
X_grad->Resize(X->dims());
......
......@@ -31,7 +31,7 @@ class CPUGaussianRandomKernel : public framework::OpKernel {
}
engine.seed(seed);
std::normal_distribution<T> dist(mean, std);
int64_t size = framework::product(tensor->dims());
int64_t size = tensor->numel();
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(engine);
}
......@@ -43,8 +43,12 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext& context) const override {
auto* tensor = context.Output<framework::Tensor>("Out");
void InferShape(const framework::InferShapeContext& ctx) const override {
PADDLE_ENFORCE_NOT_NULL(
ctx.OutputVar("Out"),
"Output(Out) of GaussianRandomOp should not be null.");
auto* tensor = ctx.Output<framework::LoDTensor>("Out");
auto dims = Attr<std::vector<int>>("dims");
std::vector<int64_t> temp;
temp.reserve(dims.size());
......
......@@ -50,8 +50,8 @@ class GPUGaussianRandomKernel : public framework::OpKernel {
T mean = static_cast<T>(context.Attr<float>("mean"));
T std = static_cast<T>(context.Attr<float>("std"));
thrust::counting_iterator<unsigned int> index_sequence_begin(0);
ssize_t N = framework::product(tensor->dims());
thrust::transform(index_sequence_begin, index_sequence_begin + N,
int64_t size = tensor->numel();
thrust::transform(index_sequence_begin, index_sequence_begin + size,
thrust::device_ptr<T>(data),
GaussianGenerator<T>(mean, std, seed));
}
......
......@@ -27,7 +27,7 @@ class IdentityOpMaker : public framework::OpProtoAndCheckerMaker {
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The input tensor of identity operator.");
AddOutput("Out", "The output tensor of identity operator.");
AddOutput("Y", "The output tensor of identity operator.");
AddComment(R"DOC(
The identity operator is an alias of the scale operator
with the attribute scale fixed to 1.0.
......@@ -42,9 +42,15 @@ class IdentityOp : public NetOp {
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: NetOp(type, inputs, outputs, attrs) {
PADDLE_ENFORCE_NE(Input("X"), framework::kEmptyVarName,
"Input(X) of IdentityOp should not be null.");
PADDLE_ENFORCE_NE(Output("Y"), framework::kEmptyVarName,
"Output(Y) of IdentityOp should not be null.");
AppendOp(framework::OpRegistry::CreateOp(
"scale", {{"X", {Input("X")}}}, {{"Out", {Output("Out")}}},
"scale", {{"X", {Input("X")}}}, {{"Out", {Output("Y")}}},
{{"scale", static_cast<AttrType>(1)}}));
CompleteAddOp(false);
}
};
......
......@@ -22,10 +22,17 @@ class LookupTableOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &context) const override {
auto table_t = context.Input<Tensor>("W");
auto ids_t = context.Input<Tensor>("Ids");
auto output_t = context.Output<Tensor>("Out");
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("W"),
"Input(W) of LookupTableOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Ids"),
"Input(Ids) of LookupTableOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of LookupTableOp should not be null.");
auto table_t = ctx.Input<Tensor>("W");
auto ids_t = ctx.Input<Tensor>("Ids");
auto output_t = ctx.Output<framework::LoDTensor>("Out");
output_t->Resize({ids_t->dims()[0], table_t->dims()[1]});
}
......@@ -56,7 +63,8 @@ class LookupTableOpGrad : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &context) const override {
auto table = context.Input<Tensor>("W");
auto d_table = context.Output<Tensor>(framework::GradVarName("W"));
auto d_table =
context.Output<framework::LoDTensor>(framework::GradVarName("W"));
d_table->Resize(table->dims());
}
};
......
......@@ -70,7 +70,7 @@ class LookupTableCUDAKernel : public framework::OpKernel {
size_t N = table_t->dims()[0];
size_t D = table_t->dims()[1];
size_t K = product(ids_t->dims());
size_t K = ids_t->numel();
auto ids = ids_t->data<int32_t>();
auto table = table_t->data<T>();
auto output = output_t->mutable_data<T>(context.GetPlace());
......@@ -91,7 +91,7 @@ class LookupTableGradCUDAKernel : public framework::OpKernel {
int N = d_table_t->dims()[0];
int D = d_table_t->dims()[1];
int K = product(ids_t->dims());
int K = ids_t->numel();
const int32_t* ids = ids_t->data<int32_t>();
const T* d_output = d_output_t->data<T>();
T* d_table = d_table_t->mutable_data<T>(context.GetPlace());
......
......@@ -35,7 +35,7 @@ class LookupTableKernel : public framework::OpKernel {
auto ids = ids_t->data<int32_t>();
auto table = table_t->data<T>();
auto output = output_t->mutable_data<T>(context.GetPlace());
for (ssize_t i = 0; i < product(ids_t->dims()); ++i) {
for (int64_t i = 0; i < ids_t->numel(); ++i) {
PADDLE_ENFORCE_LT(ids[i], N);
PADDLE_ENFORCE_GE(ids[i], 0);
memcpy(output + i * D, table + ids[i] * D, D * sizeof(T));
......@@ -61,7 +61,7 @@ class LookupTableGradKernel : public framework::OpKernel {
t.device(context.GetEigenDevice<platform::CPUPlace>()) =
t.constant(static_cast<T>(0));
for (ssize_t i = 0; i < product(ids_t->dims()); ++i) {
for (int64_t i = 0; i < ids_t->numel(); ++i) {
PADDLE_ENFORCE_LT(ids[i], N);
PADDLE_ENFORCE_GE(ids[i], 0);
for (int j = 0; j < D; ++j) {
......
......@@ -24,8 +24,10 @@ class MeanOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input of MeanOp must be initialized.");
ctx.Output<Tensor>("Out")->Resize({1});
"Input(X) of MeanOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of MeanOp should not be null.");
ctx.Output<framework::LoDTensor>("Out")->Resize({1});
}
};
......@@ -45,7 +47,7 @@ class MeanGradOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
ctx.Output<Tensor>(framework::GradVarName("X"))
ctx.Output<framework::LoDTensor>(framework::GradVarName("X"))
->Resize(ctx.Input<Tensor>("X")->dims());
}
};
......
......@@ -49,12 +49,11 @@ class MeanGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto OG = context.Input<Tensor>(framework::GradVarName("Out"));
PADDLE_ENFORCE(framework::product(OG->dims()) == 1,
"Mean Gradient should be scalar");
PADDLE_ENFORCE(OG->numel() == 1, "Mean Gradient should be scalar");
auto IG = context.Output<Tensor>(framework::GradVarName("X"));
IG->mutable_data<T>(context.GetPlace());
T ig_size = (T)framework::product(IG->dims());
T ig_size = static_cast<T>(IG->numel());
Eigen::DSizes<int, 1> bcast(ig_size);
EigenVector<T>::Flatten(*IG).device(context.GetEigenDevice<Place>()) =
......
......@@ -27,14 +27,20 @@ class MinusOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of MinusOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
"Input(Y) of MinusOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of MinusOp should not be null.");
auto *left_tensor = ctx.Input<framework::Tensor>("X");
auto *right_tensor = ctx.Input<framework::Tensor>("Y");
PADDLE_ENFORCE_EQ(
framework::product(left_tensor->dims()),
framework::product(right_tensor->dims()),
left_tensor->numel(), right_tensor->numel(),
"Minus operator must take two tensor with same num of elements");
ctx.Output<framework::Tensor>("Out")->Resize(left_tensor->dims());
ctx.Output<framework::LoDTensor>("Out")->Resize(left_tensor->dims());
}
};
......@@ -65,7 +71,7 @@ class MinusGradOp : public NetOp {
// x_grad = out_grad
AppendOp(framework::OpRegistry::CreateOp("identity", {{"X", {out_grad}}},
{{"Out", {x_grad}}}, {}));
{{"Y", {x_grad}}}, {}));
framework::AttributeMap scale_attr;
scale_attr["scale"] = static_cast<AttrType>(-1);
......@@ -78,8 +84,6 @@ class MinusGradOp : public NetOp {
} // namespace operators
} // namespace paddle
USE_OP(scale);
USE_NO_KERNEL_OP(identity);
namespace ops = paddle::operators;
REGISTER_OP(minus, ops::MinusOp, ops::MinusOpMaker, minus_grad,
ops::MinusGradOp<float>);
......
......@@ -18,6 +18,7 @@ namespace paddle {
namespace operators {
using framework::Tensor;
using framework::LoDTensor;
class MulOp : public framework::OperatorWithKernel {
public:
......@@ -25,6 +26,13 @@ class MulOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of MulOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
"Input(Y) of MulOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of MulOp should not be null.");
auto x_dims = ctx.Input<Tensor>("X")->dims();
auto y_dims = ctx.Input<Tensor>("Y")->dims();
int x_num_col_dims = Attr<int>("x_num_col_dims");
......@@ -45,7 +53,8 @@ class MulOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_EQ(
x_mat_dims[1], y_mat_dims[0],
"First matrix's width must be equal with second matrix's height.");
ctx.Output<Tensor>("Out")->Resize({x_mat_dims[0], y_mat_dims[1]});
ctx.Output<framework::LoDTensor>("Out")->Resize(
{x_mat_dims[0], y_mat_dims[1]});
}
};
......@@ -94,8 +103,10 @@ class MulOpGrad : public framework::OperatorWithKernel {
auto x_dims = ctx.Input<Tensor>("X")->dims();
auto y_dims = ctx.Input<Tensor>("Y")->dims();
auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *y_grad = ctx.Output<Tensor>(framework::GradVarName("Y"));
auto *x_grad =
ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
auto *y_grad =
ctx.Output<framework::LoDTensor>(framework::GradVarName("Y"));
auto x_mat_dims =
framework::flatten_to_2d(x_dims, Attr<int>("x_num_col_dims"));
......
## Operator's Parameter Name Convention
To make an operator's documentation clearer, we recommend that operator names obey the conventions listed below.
### OpProtoMaker names
When defining an operator in Paddle, a corresponding [OpProtoMaker](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L170) (TODO: OpProtoMaker Doc) needs to be defined. All the Inputs/Outputs and Attributes will be written into the [OpProto](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L61), and will be used in the client language to create the operator.
- Input/Output.
  - Input/Output names follow **CamelCase**, e.g. `X`, `Y`, `Matrix`, `LastAxisInMatrix`. Inputs/Outputs are much like Variables, so meaningful English words are preferred.
  - If an operator's Inputs/Outputs are tensors in the mathematical sense and do not correspond to any meaningful word, input names should start from `X`, e.g. `X`, `Y`, and output names should start from `Out`, e.g. `Out`. This rule is intended to keep operators with few inputs/outputs unified.
- Attribute.
  - Attribute names follow **camelCase**, e.g. `x`, `y`, `axis`, `rowwiseMatrix`. Again, meaningful English words are preferred.
- Comments.
  - Input/Output/Attr comments follow the format **(type, default value) usage**, describing which type the item can take and how it is used in the operator, e.g. the attribute `gamma` in Accumulator: `(float, default 1.0) Accumulation multiplier`.
  - Operator comments use the format `R"DOC(your comment here)DOC"`. Explain the operator's inputs/outputs first. If the operator performs a mathematical calculation, write the equation in the comment, e.g. `Out = X + Y`.
- Order.
  - Follow the order of Input/Output, then Attribute, then Comments. See the example in the best practice section below.
### Best Practice
Here we give some examples to show how these rules will be used.
- The operator has one input and one output, e.g. `relu`: inputs: `X`, outputs: `Out`.
- The operator has two inputs and one output, e.g. `rowwise_add`: inputs: `X`, `Y`, outputs: `Out`.
- The operator has an attribute, e.g. `cosine`: inputs: `X`, attribute: `axis`, outputs: `Out`.
We give a full example of the Accumulate operator below.
```c++
class AccumulateOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  AccumulateOpMaker(framework::OpProto *proto,
                    framework::OpAttrChecker *op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X",
             "(Tensor) The input tensor that has to be accumulated to the "
             "output tensor. If the output size is not the same as input "
             "size, the output tensor is first reshaped and initialized to "
             "zero, and only then, accumulation is done.");
    AddOutput("Out", "(Tensor) Accumulated output tensor");
    AddAttr<float>("gamma", "(float, default 1.0) Accumulation multiplier")
        .SetDefault(1.0f);
    AddComment(R"DOC(
Accumulate operator accumulates the input tensor to the output tensor. If the
output tensor already has the right size, we add to it; otherwise, we first
initialize the output tensor to all zeros, and then do accumulation. Any
further calls to the operator, given that no one else fiddles with the output
in the interim, will do simple accumulations.

Accumulation is done as shown:

Out = 1*X + gamma*Out

where X is the input tensor, Out is the output tensor and gamma is the
multiplier argument.
)DOC");
  }
};
```
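Assuming an `AccumulateOp` kernel class is defined alongside this maker (it is not shown here), the registration would follow the same pattern used by the operators in this commit; the line below is only a hypothetical illustration.

```c++
// Hypothetical registration; AccumulateOp itself is not part of this document.
namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(accumulate, ops::AccumulateOp, ops::AccumulateOpMaker);
```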
......@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/cross_entropy_op.h"
#include "paddle/operators/onehot_cross_entropy_op.h"
namespace paddle {
namespace operators {
......@@ -23,13 +23,23 @@ class OnehotCrossEntropyOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(
ctx.InputVar("X"),
"Input(X) of OnehotCrossEntropyOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(
ctx.InputVar("label"),
"Input(label) of OnehotCrossEntropyOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(
ctx.OutputVar("Y"),
"Output(Y) of OnehotCrossEntropyOp should not be null.");
auto *X = ctx.Input<Tensor>("X");
auto *label = ctx.Input<Tensor>("label");
PADDLE_ENFORCE_EQ(X->dims().size(), 2, "X's dimension must be 2.");
PADDLE_ENFORCE_EQ(label->dims().size(), 1, "label's dimension must be 1.");
PADDLE_ENFORCE_EQ(X->dims()[0], label->dims()[0]);
ctx.Output<Tensor>("Y")->Resize({X->dims()[0]});
ctx.Output<framework::LoDTensor>("Y")->Resize({X->dims()[0], 1});
}
};
......@@ -39,7 +49,7 @@ class OnehotCrossEntropyGradientOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
auto dX = ctx.Output<Tensor>(framework::GradVarName("X"));
auto dX = ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
auto X = ctx.Input<Tensor>("X");
dX->Resize(X->dims());
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/operators/pad_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(pad, ops::PadKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(pad_grad,
ops::PadGradKernel<paddle::platform::GPUPlace, float>);
......@@ -53,8 +53,8 @@ class GPUUniformRandomKernel : public framework::OpKernel {
T min = static_cast<T>(context.Attr<float>("min"));
T max = static_cast<T>(context.Attr<float>("max"));
thrust::counting_iterator<unsigned int> index_sequence_begin(0);
ssize_t N = framework::product(tensor->dims());
thrust::transform(index_sequence_begin, index_sequence_begin + N,
int64_t size = tensor->numel();
thrust::transform(index_sequence_begin, index_sequence_begin + size,
thrust::device_ptr<T>(data),
UniformGenerator<T>(min, max, seed));
}
......