diff --git a/CMakeLists.txt b/CMakeLists.txt index 4921226ec1c90a969fa1cfc383823820500c7757..4783095194dc9c6409dc31c95588f46c9bee7c61 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -86,6 +86,14 @@ if(ANDROID OR IOS) "Disable MKLDNN when cross-compiling for Android and iOS" FORCE) set(WITH_MKLML OFF CACHE STRING "Disable MKLML package when cross-compiling for Android and iOS" FORCE) + + # Compile PaddlePaddle mobile inference library + if (NOT WITH_C_API) + set(WITH_C_API ON CACHE STRING + "Always compile the C_API when cross-compiling for Android and iOS" FORCE) + endif() + set(MOBILE_INFERENCE ON) + add_definitions(-DPADDLE_MOBILE_INFERENCE) endif() set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING @@ -160,9 +168,11 @@ endif(USE_NNPACK) add_subdirectory(proto) -# "add_subdirectory(go)" should be placed after the following loine, -# because it depends on paddle/optimizer. -add_subdirectory(paddle/optimizer) +if(NOT MOBILE_INFERENCE) + # "add_subdirectory(go)" should be placed after the following loine, + # because it depends on paddle/optimizer. + add_subdirectory(paddle/optimizer) +endif() # "add_subdirectory(paddle)" and "add_subdirectory(python)" should be # placed after this block, because they depends on it. diff --git a/cmake/util.cmake b/cmake/util.cmake index d1aee3e170a2d143ac06b438725e907e96f041c8..117ab7f49cdf4a568cd203b2b17767643d0b2d50 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -73,25 +73,43 @@ function(link_paddle_exe TARGET_NAME) generate_rdma_links() endif() - target_circle_link_libraries(${TARGET_NAME} - ARCHIVE_START - paddle_gserver - paddle_function - ARCHIVE_END - paddle_pserver - paddle_trainer_lib - paddle_network - paddle_math - paddle_utils - paddle_parameter - paddle_proto - paddle_cuda - paddle_optimizer - ${EXTERNAL_LIBS} - ${CMAKE_THREAD_LIBS_INIT} - ${CMAKE_DL_LIBS} - ${RDMA_LD_FLAGS} - ${RDMA_LIBS}) + if(MOBILE_INFERENCE) + target_circle_link_libraries(${TARGET_NAME} + ARCHIVE_START + paddle_gserver + paddle_function + ARCHIVE_END + paddle_math + paddle_utils + paddle_parameter + paddle_proto + paddle_cuda + ${EXTERNAL_LIBS} + ${CMAKE_THREAD_LIBS_INIT} + ${CMAKE_DL_LIBS} + ${RDMA_LD_FLAGS} + ${RDMA_LIBS}) + else() + target_circle_link_libraries(${TARGET_NAME} + ARCHIVE_START + paddle_gserver + paddle_function + ARCHIVE_END + paddle_pserver + paddle_trainer_lib + paddle_network + paddle_math + paddle_utils + paddle_parameter + paddle_proto + paddle_cuda + paddle_optimizer + ${EXTERNAL_LIBS} + ${CMAKE_THREAD_LIBS_INIT} + ${CMAKE_DL_LIBS} + ${RDMA_LD_FLAGS} + ${RDMA_LIBS}) + endif() if(ANDROID) target_link_libraries(${TARGET_NAME} log) diff --git a/doc/design/block.md b/doc/design/block.md index be8800122035984df281692fc40009c397565046..4d5dd4ba95a686d18b2339c69f0316c340681909 100644 --- a/doc/design/block.md +++ b/doc/design/block.md @@ -55,17 +55,23 @@ Let us consolidate the discussion by presenting some examples. 
The following C++ programs shows how blocks are used with the `if-else` structure:

```c++
+namespace pd = paddle;
+
int x = 10;
-int y = 20;
-int out;
+int y = 1;
+int z = 10;
bool cond = false;
+int o1, o2;
if (cond) {
  int z = x + y;
-  out = softmax(z);
+  o1 = z;
+  o2 = pd::layer::softmax(z);
} else {
-  int z = fc(x);
-  out = z;
+  int d = pd::layer::fc(z);
+  o1 = d;
+  o2 = d+1;
}
+
```

An equivalent PaddlePaddle program from the design doc of the [IfElseOp operator](./if_else_op.md) is as follows:
@@ -73,57 +79,55 @@ An equivalent PaddlePaddle program from the design doc of the [IfElseOp operator
```python
import paddle as pd

-x = var(10)
-y = var(20)
-cond = var(false)
-ie = pd.create_ifelseop(inputs=[x], output_num=1)
+x = minibatch([10, 20, 30]) # shape=[None, 1]
+y = var(1) # shape=[1], value=1
+z = minibatch([10, 20, 30]) # shape=[None, 1]
+cond = larger_than(x, 15) # [false, true, true]
+
+ie = pd.ifelse()
with ie.true_block():
-    x = ie.inputs(true, 0)
-    z = operator.add(x, y)
-    ie.set_output(true, 0, operator.softmax(z))
+    d = pd.layer.add_scalar(x, y)
+    ie.output(d, pd.layer.softmax(d))
with ie.false_block():
-    x = ie.inputs(false, 0)
-    z = layer.fc(x)
-    ie.set_output(true, 0, operator.softmax(z))
-out = b(cond)
+    d = pd.layer.fc(z)
+    ie.output(d, d+1)
+o1, o2 = ie(cond)
```

-In both examples, the left branch computes `softmax(x+y)` and the right branch computes `fc(x)`.
+In both examples, the left branch computes `x+y` and `softmax(x+y)`, while the right branch computes `fc(z)` and `fc(z)+1`.

A difference is that variables in the C++ program contain scalar values, whereas those in the PaddlePaddle programs are mini-batches of instances.
The `ie.input(true, 0)` invocation returns instances in the 0-th input, `x`, that corresponds to true values in `cond` as the local variable `x`, where `ie.input(false, 0)` returns instances corresponding to false values.
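To make the instance-splitting semantics concrete, here is a minimal, framework-free Python sketch of the behavior described above; the helper names `ifelse`, `true_fn`, and `false_fn` are illustrative and are not part of the PaddlePaddle API. Instances selected by `cond` run through the true branch, the rest run through the false branch, and the per-instance results are merged back in the original order.

```python
# Illustrative sketch only: per-instance branching over a mini-batch,
# mimicking the IfElse semantics above with plain Python lists.

def ifelse(cond, x, true_fn, false_fn):
    # Split the mini-batch into the instances selected by cond and the rest.
    true_part = [v for c, v in zip(cond, x) if c]
    false_part = [v for c, v in zip(cond, x) if not c]
    # Run each branch on its own sub-batch.
    true_out = [true_fn(v) for v in true_part]
    false_out = [false_fn(v) for v in false_part]
    # Scatter the branch outputs back to the original instance positions.
    merged, it_true, it_false = [], iter(true_out), iter(false_out)
    for c in cond:
        merged.append(next(it_true) if c else next(it_false))
    return merged

x = [10, 20, 30]
cond = [v > 15 for v in x]  # [False, True, True]
print(ifelse(cond, x, lambda v: v + 1, lambda v: v * 2))  # [20, 21, 31]
```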
+
### Blocks with `for` and `RNNOp`

The following RNN model from the [RNN design doc](./rnn.md)

```python
-x = sequence([10, 20, 30])
-m = var(0)
-W = tensor()
-U = tensor()
-
-rnn = create_rnn(inputs=[input])
-with rnn.stepnet() as net:
-    x = net.set_inputs(0)
-    h = net.add_memory(init=m)
-    fc_out = pd.matmul(W, x)
-    hidden_out = pd.matmul(U, h.pre(n=1))
-    sum = pd.add_two(fc_out, hidden_out)
-    act = pd.sigmoid(sum)
-    h.update(act) # update memory with act
-    net.set_outputs(0, act, hidden_out) # two outputs
-
+x = sequence([10, 20, 30]) # shape=[None, 1]
+m = var(0) # shape=[1]
+W = var(0.314, param=true) # shape=[1]
+U = var(0.375, param=true) # shape=[1]
+
+rnn = pd.rnn()
+with rnn.step():
+    h = rnn.memory(init = m)
+    hh = rnn.previous_memory(h)
+    a = layer.fc(W, x)
+    b = layer.fc(U, hh)
+    s = pd.add(a, b)
+    act = pd.sigmoid(s)
+    rnn.update_memory(h, act)
+    rnn.output(a, b)
o1, o2 = rnn()
-print o1, o2
```
-
has its equivalent C++ program as follows

```c++
int* x = {10, 20, 30};
-int m = 0;
-int W = some_value();
-int U = some_other_value();
+int* m = {0};
+int* W = {0.314};
+int* U = {0.375};

int mem[sizeof(x) / sizeof(x[0]) + 1];
int o1[sizeof(x) / sizeof(x[0]) + 1];
@@ -131,20 +135,16 @@ int o2[sizeof(x) / sizeof(x[0]) + 1];
for (int i = 1; i <= sizeof(x)/sizeof(x[0]); ++i) {
  int x = x[i-1];
  if (i == 1) mem[0] = m;
-  int fc_out = W * x;
-  int hidden_out = Y * mem[i-1];
-  int sum = fc_out + hidden_out;
+  int a = W * x;
+  int b = U * mem[i-1];
+  int s = a + b;
  int act = sigmoid(sum);
  mem[i] = act;
  o1[i] = act;
  o2[i] = hidden_out;
}
-
-print_array(o1);
-print_array(o2);
```
-
## Compilation and Execution

Like TensorFlow programs, a PaddlePaddle program is written in Python. The first part describes a neural network as a protobuf message, and the rest part executes the message for training or inference.
@@ -210,11 +210,11 @@ a = pd.Varaible(shape=[20, 20])
b = pd.fc(a, params=["fc.w", "fc.b"])

rnn = pd.create_rnn()
-with rnn.stepnet() as net:
-    x = net.set_inputs(a)
+with rnn.stepnet():
+    x = a.as_step_input()
    # reuse fc's parameter
    fc_without_b = pd.get_variable("fc.w")
-    net.set_outputs(fc_without_b)
+    rnn.output(fc_without_b)
out = rnn()
```
diff --git a/doc/design/if_else_op.md b/doc/design/if_else_op.md
index 954a19c0733358c235eae3cffe134c23dac94c95..26d140f06db4ecefa86be015eaa731ffddc6910c 100644
--- a/doc/design/if_else_op.md
+++ b/doc/design/if_else_op.md
@@ -1,41 +1,51 @@
-IfOp should have only one branch. An IfOp operator takes a `cond` variable whose value must be a vector of N boolean elements. Its return value has N instances. If cond[i] == True, input instance input[i] will go through true_block() and generate output[i]; otherwise it will produce output from false_bloack().
+# The `IfElse` Operator
-```python
-import paddle as pd
+PaddlePaddle's `IfElse` operator differs from TensorFlow's:
-x = var()
-y = var()
-cond = var()
-default_value = var()
-b = pd.create_ifelseop(inputs=[x], output_num=1)
-with b.true_block():
-    x = b.inputs(0)
-    z = operator.add(x, y)
-    b.set_output(0, operator.softmax(z))
-
-with b.false_block():
-    x = b.inputs(0)
-    z = layer.fc(x)
-    b.set_output(0, operator.softmax(z))
-
-out = b(cond)
-```
+- the TensorFlow version takes a scalar boolean value as the condition so that the whole mini-batch goes to either the true or the false branch, whereas
+- the PaddlePaddle version takes a vector of boolean values as the condition, and instances corresponding to true values go to the true branch, while those corresponding to false values go to the false branch.
+
+## Example
+
+The following PaddlePaddle program shows the usage of the IfElse operator:
-If only true_block is set in an IfElseOp, a special case is that we can have a default value for false as:
```python
import paddle as pd

-x = var()
-y = var()
-cond = var()
-default_value = var()
-b = pd.create_ifelseop(inputs=[x], output_num=1, default_value)
-
-with b.true_block():
-    x = b.inputs(0)
-    z = operator.add(x, y)
-    b.set_output(0, operator.softmax(z))
+x = minibatch([10, 20, 30]) # shape=[None, 1]
+y = var(1) # shape=[1], value=1
+z = minibatch([10, 20, 30]) # shape=[None, 1]
+cond = larger_than(x, 15) # [false, true, true]
+
+ie = pd.ifelse()
+with ie.true_block():
+    d = pd.layer.add(x, y)
+    ie.output(d, pd.layer.softmax(d))
+with ie.false_block():
+    d = pd.layer.fc(z)
+    ie.output(d, d+1)
+o1, o2 = ie(cond)
```
-out = b(cond)
+A challenge in implementing the `IfElse` operator is to infer which variables need to be split, that is, to identify the mini-batch variable and the variables derived from it.
+
+An equivalent C++ program is as follows:
+
+```c++
+namespace pd = paddle;
+
+int x = 10;
+int y = 1;
+int z = 10;
+bool cond = false;
+int o1, o2;
+if (cond) {
+  int d = x + y;
+  o1 = d;
+  o2 = pd::layer::softmax(d);
+} else {
+  int d = pd::layer::fc(z);
+  o1 = d;
+  o2 = d+1;
+}
```
-where default_value is a list of vars for `cond` == False.
diff --git a/doc/design/program.md b/doc/design/program.md
index fb8f86ac07af403c9fee015f2a3adbfaa3c6d631..bd2456787c4e336d357a65255a8274a7c9e465cc 100644
--- a/doc/design/program.md
+++ b/doc/design/program.md
@@ -1,8 +1,10 @@
-# Design Doc: ProgramDesc
+# Design Doc: PaddlePaddle Programs

-The basic structure of a PaddlePaddle program is some nested blocks, as a C++ or Java program.
+## Compilation and Execution
+
+A PaddlePaddle program consists of two parts -- the first generates a `ProgramDesc` protobuf message that describes the program, and the second runs this message using a C++ class `Executor`.

-As described in [graph.md](./graph.md), the first five lines of the following PaddlePaddle program
+A simple example PaddlePaddle program can be found in [graph.md](./graph.md):

```python
x = layer.data("images")
@@ -13,36 +15,112 @@ optimize(cost)
train(cost, reader=mnist.train())
```

-generates, or compiles, a PaddelPaddle program, which is represented by the following protobuf message:
+The first five lines of the above PaddlePaddle program generate, or compile, the `ProgramDesc` message. The last line runs it.

-```protobuf
-message ProgramDesc {
-  repeated BlockDesc blocks = 1;
+## Programs and Blocks
+
+The basic structure of a PaddlePaddle program is some nested blocks, as in a C++ or Java program.
+
+- program: some nested blocks
+- [block](./block.md):
+  - some local variable definitions, and
+  - a sequence of operators
+
+The concept of block comes from ordinary programs. For example, the following C++ program has three blocks:
+
+```c++
+int main() { // block 0
+  int i = 0;
+  if (i < 10) { // block 1
+    for (int j = 0; j < 10; j++) { // block 2
+    }
+  }
+  return 0;
+}
+```
+
+The following PaddlePaddle program has three blocks:
+
+```python
+import paddle as pd  # block 0
+
+x = minibatch([10, 20, 30]) # shape=[None, 1]
+y = var(1) # shape=[1], value=1
+z = minibatch([10, 20, 30]) # shape=[None, 1]
+cond = larger_than(x, 15) # [false, true, true]
+ie = pd.ifelse()
+with ie.true_block():  # block 1
+    d = pd.layer.add_scalar(x, y)
+    ie.output(d, pd.layer.softmax(d))
+with ie.false_block():  # block 2
+    d = pd.layer.fc(z)
+    ie.output(d, d+1)
+o1, o2 = ie(cond)
+```
+
+## `BlockDesc` and `ProgramDesc`
+
+All protobuf messages are defined in `framework.proto`.
+
+`BlockDesc` is straightforward -- it includes local variable definitions, `vars`, and a sequence of operators, `ops`.
+
+```protobuf
message BlockDesc {
  required int32 parent = 1;
  repeated VarDesc vars = 2;
  repeated OpDesc ops = 3;
}
+```
+
+The parent ID indicates the parent block so that operators in a block can refer to variables defined locally and also those defined in their ancestor blocks.
+
+All hierarchical blocks in a program are flattened and stored in an array. The block ID is the index of the block in this array.
+
+```protobuf
+message ProgramDesc {
+  repeated BlockDesc blocks = 1;
+}
+```
+
+
+### Global Block
+The global block is the first one in the above array.
+
+## Operators that Use Blocks
+
+In the above example, the operator `IfElseOp` has two blocks -- the true branch and the false branch.
+
+The definition of `OpDesc` shows that an operator could have some attributes:
+
+```protobuf
message OpDesc {
  AttrDesc attrs = 1;
  ...
}
+```
+
+and an attribute could be of type block, which is, in fact, a block ID as described above:
+
+```
message AttrDesc {
-  required AttrType type = 1;
+  required string name = 1;
-  // index into ProgramDesc::blocks when type==BLOCK
-  optional int32 block = 2;
+  enum AttrType {
+    INT = 1,
+    STRING = 2,
+    ...
+    BLOCK = ...
+  }
+  required AttrType type = 2;
+
+  optional int32 block = 10; // when type == BLOCK
  ...
}
```
-When each of the first five lines runs, related Python function, e.g., `layer.fc`, calls C++ InferShape functions. This InferShape function needs to access the properties of VarDesc's accessed by the current OpDesc. These VarDesc's might not be defined in the current block, but in some ancestor blocks. This requires that we can trace the parent of a block.
-
-A nested block is often an attribute of an operator, most likely, an IfElseOp or a WhileOp. In above solution, all blocks are in `ProgramDesc::blocks`, this implicitly assigns a zero-based ID to each block -- the index of the block in `ProgramDesc::blocks`. So that `AttrDesc::block` could be an integer block ID.
+## InferShape

With this design, the InferShape function should take the following parameters:
diff --git a/doc/design/tensor_array.md b/doc/design/tensor_array.md
index a0419ec002159893b035fae1300fce489e68936a..8378e97bf7cfaae54c36b1b92e202b16e4fe1e28 100644
--- a/doc/design/tensor_array.md
+++ b/doc/design/tensor_array.md
@@ -1,39 +1,250 @@
# Design for TensorArray
+This design doc presents the necessity of a new C++ class `TensorArray`.
+In addition to the very simple C++ implementation
+
+```c++
+class TensorArray {
+ public:
+  explicit TensorArray(const LoDTensor&);
+  explicit TensorArray(size_t size);
+
+ private:
+  vector<LoDTensor> values_;
+};
+```
+
+We also need to expose it to PaddlePaddle's Python API,
+because users would want to use it with our very flexible operators, such as `WhileLoop`.
+An example of an RNN based on dynamic operators is
+
+```python
+input = pd.data(...)
+num_steps = Var(12)
+
+TensorArray states(size=num_steps)
+TensorArray step_inputs(unstack_from=input)
+TensorArray step_outputs(size=num_steps)
+
+W = Tensor(...)
+U = Tensor(...)
+default_state = some_op()
+
+step = Var(1)
+
+wloop = paddle.create_whileloop(loop_vars=[step])
+with wloop.frame():
+    wloop.break_if(pd.equal(step, num_steps))
+    pre_state = states.read(step-1, default_state)
+    step_input = step_inputs.read(step)
+    state = pd.sigmoid(pd.matmul(U, pre_state) + pd.matmul(W, step_input))
+    states.write(step, state)
+    step_outputs.write(step, state) # output state
+    step.update(step+1)
+
+output = step_outputs.stack()
+```
+
+## Background
+Steps are one of the core concepts of RNN. In each time step of RNN, there should be several input segments, states, and output segments; all these components act like arrays; for example, calling `states[step_id]` will get the state of the `step_id`-th time step.
+
+An RNN can be implemented with the following pseudocode
+
+```c++
+Array states;
+Array input_segments;
+Array output_segments;
+Parameter W, U;
+
+step = 1
+seq_len = 12
+while_loop {
+   if (step == seq_len) break;
+   states[step] = sigmoid(W * states[step-1] + U * input_segments[step]);
+   output_segments[step] = states[step] // take state as output
+   step++;
+}
+```
+According to the [RNN roadmap](https://github.com/PaddlePaddle/Paddle/issues/4561), there are several different RNNs that PaddlePaddle will eventually support.
+
+Currently, the basic RNN implementation supported by PaddlePaddle is the `recurrent_op` which takes tensors as input and splits them into `input_segments`.
+
+
+Since a tensor cannot store variable-length sequences directly, PaddlePaddle implements the tensor with level of details (`LoDTensor` for short).
+Segmenting the `LoDTensor` is much more complicated than splitting a tensor, which makes it necessary to refactor the `recurrent_op` with `LoDTensor` segmenting support.
+
+As the next step in RNN support, `dynamic_recurrent_op` should be introduced to handle inputs with variable-length sequences.
+
+The implementation is similar to `recurrent_op`.
+The key difference is the way **the original input `LoDTensors` and outputs are split to get the `input_segments` and the `output_segments`.**
+
+
+Though it can't be built over `recurrent_op` or `dynamic_recurrent_op` directly,
+the logic behind splitting a tensor or a LoD tensor into `input_segments` remains the same.
+
+## Why `TensorArray`
+The logic behind splitting the inputs to segments, states and outputs is similar and can be shared in a separate module.
+
+The array of `states`, `input_segments` and `output_segments` would be exposed to users when writing a dynamic RNN model similar to the above pseudocode.
+
+So there should be an array-like container, which can store the segments of a tensor or LoD tensor.
+
+**This container can store an array of tensors and provides several methods to split a tensor or a LoD tensor**.
+This is where the notion of `TensorArray` comes from.
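Before turning to the operator-level interface below, a minimal, framework-free Python sketch may help make the container idea concrete; the class `SimpleTensorArray` and its methods are illustrative only and not the proposed C++ or Python API. It holds an array of per-step values that can be filled by splitting ("unstacking") a batch and merged back by stacking.

```python
# Illustrative sketch only: an array-like container of per-step values,
# with unstack/read/write/stack mirroring the TensorArray idea above.

class SimpleTensorArray:
    def __init__(self):
        self.values = []

    def unstack(self, tensor):
        # Split a batch (here: a plain list) into one value per time step.
        self.values = list(tensor)

    def read(self, index):
        return self.values[index]

    def write(self, index, value):
        # Grow the array on demand, then store the value at `index`.
        if index >= len(self.values):
            self.values.extend([None] * (index + 1 - len(self.values)))
        self.values[index] = value

    def stack(self):
        # Merge the per-step values back into a single batch.
        return list(self.values)

step_inputs = SimpleTensorArray()
step_inputs.unstack([10, 20, 30])
states = SimpleTensorArray()
for t in range(3):  # a toy "RNN" that accumulates its inputs
    prev = states.read(t - 1) if t > 0 else 0
    states.write(t, prev + step_inputs.read(t))
print(states.stack())  # [10, 30, 60]
```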
+
+## Introduce TensorArray to unify all the three RNNs
TensorArray as a new concept is borrowed from TensorFlow,
it is meant to be used with dynamic iteration primitives such as `while_loop` and `map_fn`.

This concept can be used to support our new design of dynamic operations, and help to refactor some existing variant-sentence-related layers,
-such as `RecurrentGradientMachine`.
+such as `recurrent_op`, `RecurrentGradientMachine`.

In [our design for dynamic RNN](https://github.com/PaddlePaddle/Paddle/pull/4401), `TensorArray` is used to segment inputs and store states in all time steps.

By providing some methods similar to a C++ array,
-the definition of some state-based dynamic models such as RNN could be more natural and highly flexible.
-
-## Dynamic-Related Methods
-Some basic methods should be proposed as follows:
-
-### stack()
-Pack the values in a `TensorArray` into a tensor with rank one higher than each tensor in `values`.
-### unstack(axis=0)
-Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
-### concat()
-Return the values in the `TensorArray` as a concatenated Tensor.
-### write(index, value, data_shared=true)
-Write value into index of the TensorArray.
-### read(index)
-Read the value at location `index` in the `TensorArray`.
-### size()
-Return the number of values.
+the definition of some state-based dynamic models such as RNN can be more natural and highly flexible.
+
+## Dynamic Operations on TensorArray
+
+`TensorArray` will be used directly when defining dynamic models, so some operators listed below should be implemented:
+
+```python
+# several helper operators for TensorArray
+def tensor_array_stack(ta, tensor):
+    '''
+    get a tensor array `ta`, return a packed `tensor`.
+    '''
+    pass
+
+def tensor_array_unstack(tensor, ta):
+    '''
+    get a `tensor`, unstack it and get a tensor array `ta`.
+    '''
+    pass
+
+def tensor_array_write(ta, index, tensor, data_shared):
+    '''
+    get a `tensor` and a scalar tensor `index`, write `tensor` into index-th
+    value of the tensor array `ta`.
+    `data_shared` is an attribute that specifies whether to copy or reference the tensors.
+    '''
+    pass
+
+def tensor_array_read(ta, index, tensor):
+    '''
+    get a tensor array `ta`, a scalar tensor `index`, read the index-th value of
+    `ta` and return as the `tensor`.
+    '''
+    pass
+
+def tensor_array_size(ta, tensor):
+    '''
+    get a tensor array `ta`, return the size of `ta` and return as the scalar `tensor`.
+    '''
+    pass
+```
+
+It is not trivial for users to use so many low-level operators directly, so some helper methods should be proposed in the Python wrapper to make `TensorArray` easier to use,
+for example
+
+```python
+class TensorArray:
+    def __init__(self, name):
+        self.name = name
+        self.desc = TensorArrayDesc()
+
+    def stack(self, name=None):
+        '''
+        Pack the values in a `TensorArray` into a tensor with rank one higher
+        than each tensor in `values`.
+        `stack` can be used to concatenate all the time steps for RNN or whileloop.
+
+        @name: str
+            the name of the variable to output.
+        '''
+        tensor = NewVar(name)
+        tensor_array_stack(self.name, tensor)
+        return tensor
+
+    def unstack(self, input):
+        '''
+        Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
+        `unstack` can be used to split a tensor into time steps for RNN or whileloop.
+
+        @input: str
+            the name of input tensor
+        '''
+        tensor_array_unstack(input, self.name)
+
+    def write(self, index, value, data_shared=True):
+        '''
+        Write value into index of the TensorArray.
+        If `data_shared` is set to True, then the index-th value in TensorArray will
+        be shared with the tensor passed in.
+
+        @index: str
+            name of a scalar tensor
+        @value: str
+            name of a tensor
+        @data_shared: bool
+        '''
+        tensor_array_write(self.name, index, value, data_shared)
+
+    def read(self, index, output):
+        '''
+        Read the value at location `index` in the `TensorArray`.
+
+        @index: str
+            name of a scalar tensor
+        @output:
+            name of an output variable
+        '''
+        tensor_array_read(self.name, index, output)
+
+
+    def size(self, output):
+        '''
+        Return the number of values.
+
+        @output: str
+            name of a scalar tensor
+        '''
+        tensor_array_size(self.name, output)
+```

## LoDTensor-related Supports
-The `RecurrentGradientMachine` in Paddle serves as a flexible RNN layer; it takes variant length sequences as input,
-because each step of RNN could only take a tensor-represented batch of data as input,
+The `RecurrentGradientMachine` in Paddle serves as a flexible RNN layer; it takes variable-length sequences as input,
and output sequences too.
+
+Since each step of RNN can only take a tensor-represented batch of data as input,
some preprocess should be taken on the inputs such as sorting the sentences by their length in descending order and cut each word and pack to new batches.

-Such cut-like operations can be embedded into `TensorArray` as general methods called `unpack` and `pack`.
+Such cut-like operations can be embedded into `TensorArray` as general methods called `unpack` and `pack`;
+these two operations are similar to `stack` and `unstack` except that they operate on variable-length sequences formatted as a LoD tensor rather than a tensor.
+
+Some definitions are as follows:
+
+```python
+def unpack(level, sort_by_length):
+    '''
+    Split the LoDTensor at some `level` and generate batches; if `sort_by_length` is set,
+    will sort by length.
-
-With these two methods, a variant-sentence-RNN can be implemented like
+
+    Returns:
+    - a new `TensorArray`, whose values are LoDTensors and represent batches
+      of data.
+    - an int32 Tensor, which stores the map from the new batch's indices to
+      original LoDTensor
+    '''
+    pass
+
+def pack(level, indices_map):
+    '''
+    Recover the original LoD-arranged LoDTensor with the values in a `TensorArray`
+    and `level` and `indices_map`.
+    '''
+    pass
+```
+
+With these two methods, an RNN that supports variable-length sentences can be implemented like

```c++
// input is the varient-length data
@@ -58,16 +269,3 @@ LoDTensor rnn_output = ta.pack(ta, indice_map);
```
the code above shows that by embedding the LoDTensor-related preprocess operations into `TensorArray`,
the implementation of a RNN that supports varient-length sentences is far more concise than `RecurrentGradientMachine` because the latter mixes all the codes together, hard to read and extend.
-
-
-some details are as follows.
-
-### unpack(level, sort_by_length)
-Split LodTensor in some `level` and generate batches, if set `sort_by_length`, will sort by length.
-
-Returns:
-
-- a new `TensorArray`, whose values are LodTensors and represents batches of data.
-- an int32 Tensor, which stores the map from the new batch's indices to original LoDTensor
-### pack(level, indices_map)
-Recover the original LoD-arranged LoDTensor with the values in a `TensorArray` and `level` and `indices_map`.
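The `unpack`/`pack` idea can also be illustrated with a small, framework-free Python sketch; plain nested lists stand in for a LoD tensor, and the two function names mirror the design above but are otherwise hypothetical. `unpack` regroups variable-length sequences into per-time-step batches (sorted by length in descending order) plus an index map, and `pack` uses that map to restore the original arrangement.

```python
# Illustrative sketch only: batching variable-length sequences by time step.

def unpack(sequences):
    # Sort sequence indices by length, longest first.
    order = sorted(range(len(sequences)), key=lambda i: -len(sequences[i]))
    max_len = len(sequences[order[0]])
    # batches[t] holds the t-th element of every sequence long enough.
    batches = [[sequences[i][t] for i in order if t < len(sequences[i])]
               for t in range(max_len)]
    return batches, order  # per-step batches + index map

def pack(batches, order):
    # Recover the original per-sequence arrangement from the batches.
    out = [[] for _ in order]
    for batch in batches:
        for pos, value in zip(order, batch):
            out[pos].append(value)
    return out

seqs = [[1, 2], [3, 4, 5], [6]]
batches, order = unpack(seqs)        # [[3, 1, 6], [4, 2], [5]], order = [1, 0, 2]
print(pack(batches, order) == seqs)  # True
```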
diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index b435de80a224571d16efdee168541aa301c3f73a..7d2becbdd772747d77890321fce6721d8d17fb30 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -1,27 +1,32 @@ add_subdirectory(cuda) add_subdirectory(function) add_subdirectory(utils) -add_subdirectory(testing) add_subdirectory(math) -add_subdirectory(parameter) add_subdirectory(gserver) -add_subdirectory(pserver) -add_subdirectory(trainer) -add_subdirectory(scripts) -add_subdirectory(string) - -if(Boost_FOUND) - add_subdirectory(memory) - add_subdirectory(platform) - add_subdirectory(framework) - add_subdirectory(operators) - add_subdirectory(pybind) -endif() +add_subdirectory(parameter) +add_subdirectory(testing) -if(WITH_C_API) +if(MOBILE_INFERENCE) add_subdirectory(capi) -endif() +else() + add_subdirectory(pserver) + add_subdirectory(trainer) + add_subdirectory(string) + add_subdirectory(scripts) + + if(WITH_C_API) + add_subdirectory(capi) + endif() + + if(Boost_FOUND) + add_subdirectory(memory) + add_subdirectory(platform) + add_subdirectory(framework) + add_subdirectory(operators) + add_subdirectory(pybind) + endif() -if(WITH_SWIG_PY) - add_subdirectory(api) + if(WITH_SWIG_PY) + add_subdirectory(api) + endif() endif() diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index b9bbe58951c643f1b1649858880fbd2ba3a2a7b7..2c458a78c598bf206b30c0c07599ce605af77701 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -37,9 +37,7 @@ set(PADDLE_CAPI_INFER_LIBS paddle_cuda paddle_function paddle_gserver - paddle_proto - paddle_pserver - paddle_network) + paddle_proto) cc_library(paddle_capi_whole DEPS paddle_capi ${PADDLE_CAPI_INFER_LIBS}) diff --git a/paddle/capi/tests/CMakeLists.txt b/paddle/capi/tests/CMakeLists.txt index 8208808b94f54f2ddaf4d426a65b8db562b36aca..bb38ace62808db5ce95a1a57ff465e8edc059213 100644 --- a/paddle/capi/tests/CMakeLists.txt +++ b/paddle/capi/tests/CMakeLists.txt @@ -4,11 +4,12 @@ add_unittest(capi_test_mats test_Vector.cpp target_include_directories(capi_test_mats PUBLIC ${PADDLE_CAPI_INC_PATH}) target_link_libraries(capi_test_mats paddle_capi) - -add_unittest_without_exec(capi_test_gradientMachine test_GradientMachine.cpp) -target_include_directories(capi_test_gradientMachine PUBLIC - ${PADDLE_CAPI_INC_PATH}) -target_link_libraries(capi_test_gradientMachine paddle_capi) -add_test(NAME capi_test_gradientMachine - COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/capi_test_gradientMachine - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/capi/tests) +if(NOT MOBILE_INFERENCE) + add_unittest_without_exec(capi_test_gradientMachine test_GradientMachine.cpp) + target_include_directories(capi_test_gradientMachine PUBLIC + ${PADDLE_CAPI_INC_PATH}) + target_link_libraries(capi_test_gradientMachine paddle_capi) + add_test(NAME capi_test_gradientMachine + COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/capi_test_gradientMachine + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/capi/tests) +endif() diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index a2efcdb55cfc75a4f961533d16d454ca6d431990..3e0e0f59038daa33cae1952ffbe5fc0bb1870485 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -26,10 +26,8 @@ cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto proto_desc) cc_library(operator SRCS operator.cc DEPS op_info 
device_context tensor scope proto_desc) cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry) -cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator proto_desc) -cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder op_proto_maker op_info) +cc_library(op_registry SRCS op_registry.cc DEPS op_proto_maker op_info operator) cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry) -cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry sum_op) py_proto_compile(framework_py_proto SRCS framework.proto) # Generate an empty __init__.py to make framework_py_proto as a valid python module. diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index c0188c0e55a6db04a86d80c216fd162ad5259593..c970e01dd19d80e9a47f315a05a920ba15585c90 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -13,10 +13,13 @@ limitations under the License. */ #include "paddle/framework/backward.h" +#include "paddle/operators/net_op.h" +#include #include #include +#include "paddle/framework/block_desc.h" #include "paddle/framework/op_registry.h" #include "paddle/operators/net_op.h" #include "paddle/operators/recurrent_op.h" @@ -24,6 +27,35 @@ namespace paddle { namespace framework { +static inline std::unique_ptr CreateGradOp( + const OperatorBase& op) { + OpDescBind op_desc; + op_desc.SetInputMap(op.Inputs()); + op_desc.SetOutputMap(op.Outputs()); + op_desc.SetType(op.Type()); + op_desc.SetAttrMap(op.Attrs()); + auto& info = OpInfoMap::Instance().Get(op.Type()); + auto grad_descs = info.GradOpMaker()(op_desc); + std::vector> grad_ops; + grad_ops.reserve(grad_descs.size()); + std::transform(grad_descs.begin(), grad_descs.end(), + std::back_inserter(grad_ops), + [](const std::unique_ptr& grad_desc) { + return OpRegistry::CreateOp(*grad_desc); + }); + PADDLE_ENFORCE(!grad_ops.empty()); + if (grad_ops.size() == 1) { + return std::move(grad_ops[0]); + } else { + auto net_op = new operators::NetOp(); + for (auto& grad_op : grad_ops) { + net_op->AppendOp(std::move(grad_op)); + } + net_op->CompleteAddOp(); + return std::unique_ptr(net_op); + } +} + template static void ForEachVarName(const Map& names, T callback) { for (auto& name : names) { @@ -171,7 +203,7 @@ static std::unique_ptr BackwardRecursive( net->InsertOp(pos.first + 1, std::move(pos.second)); } } else { - std::unique_ptr grad_op(OpRegistry::CreateGradOp(forwardOp)); + std::unique_ptr grad_op(CreateGradOp(forwardOp)); ForEachVarName(grad_op->Inputs(), [&no_grad_names, &net, &grad_op]( const std::string& grad_input) { @@ -240,5 +272,145 @@ std::unique_ptr Backward( return BackwardRecursive(forwardOp, no_grad_names, uid); } +// ==================================== // + +static bool AllGradInSet(const std::vector& names, + const std::unordered_set& set) { + for (const std::string& name : names) { + if (!set.count(GradVarName(name))) { + return false; + } + } + return true; +} + +std::vector> MakeOpGrad( + const std::unique_ptr& op_desc, + std::unordered_set& no_grad_vars) { + std::vector> grad_op_descs; + // All input gradients of forwarding operator do not need to calculat. + const std::vector& inputs = op_desc->InputArgumentNames(); + if (AllGradInSet(inputs, no_grad_vars)) { + return grad_op_descs; // empty vector + } + // All output gradients of forwarding operator do not need to calculate. 
+ const std::vector& outputs = op_desc->OutputArgumentNames(); + if (AllGradInSet(outputs, no_grad_vars)) { + for (const std::string& name : inputs) { + no_grad_vars.insert(GradVarName(name)); + } + return grad_op_descs; // empty vector + } + + grad_op_descs = OpRegistry::CreateGradOpDescs(*op_desc); + + std::list> pending_fill_zeros_ops; + for (auto& desc : grad_op_descs) { + for (const std::string& in_name : desc->InputArgumentNames()) { + if (no_grad_vars.count(in_name)) { + std::string prefix = in_name.substr( + 0, in_name.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1); + std::string new_name = prefix + kZeroVarSuffix; + desc->Rename(in_name, new_name); + std::unique_ptr fill_zeros_op(new OpDescBind( + "fill_zeros_like", {{"X", {prefix}}}, {{"Y", {new_name}}}, {})); + pending_fill_zeros_ops.push_back(std::move(fill_zeros_op)); + } + } + for (const std::string& out_name : desc->OutputArgumentNames()) { + if (no_grad_vars.count(out_name)) { + desc->Rename(out_name, kEmptyVarName); + } + } + } + + for (auto& p : pending_fill_zeros_ops) { + grad_op_descs.insert(grad_op_descs.begin(), std::move(p)); + } + return grad_op_descs; +} + +std::vector> MakeBlockBackward( + ProgramDescBind& program_desc, int block_idx, + std::unordered_set& no_grad_vars) { + BlockDescBind* cur_block = program_desc.Block(block_idx); + std::deque>& op_descs = cur_block->ops_; + std::unordered_map> dup_out_ops; + size_t grad_desc_idx = 0; + std::vector> backward_descs; + for (auto it = op_descs.rbegin(); it != op_descs.rend(); ++it) { + std::vector> op_grads = + MakeOpGrad(*it, no_grad_vars); + + if ((*it)->Type() == "recurrent") { + PADDLE_ENFORCE_EQ( + op_grads.size(), size_t(1), + "rnn_op's gradient process should contain only one op."); + int step_block_idx = (*it)->GetBlockAttr("stop_block"); + auto backward_block_op_descs = + MakeBlockBackward(program_desc, step_block_idx, no_grad_vars); + BlockDescBind* backward_block = program_desc.AppendBlock(*cur_block); + for (auto& ptr : backward_block_op_descs) { + backward_block->ops_.push_back(std::move(ptr)); + } + op_grads[0]->SetBlockAttr("step_block", *backward_block); + } + + for (const auto& desc : op_grads) { + for (const std::string& out_name : desc->OutputArgumentNames()) { + dup_out_ops[out_name].emplace_back(grad_desc_idx); + } + ++grad_desc_idx; + } + std::transform( + op_grads.begin(), op_grads.end(), std::back_inserter(backward_descs), + [](std::unique_ptr& ptr) { return std::move(ptr); }); + } + // Check whether some variables are written more than once + std::list>> pending_sum_ops; + for (const auto& dup : dup_out_ops) { + const std::string& out_name = dup.first; + const std::vector dup_op = dup.second; + if (out_name != kEmptyVarName && dup_op.size() > 1) { + std::vector sum_op_inputs; + for (size_t i = 0; i < dup_op.size(); ++i) { + std::string new_name = out_name + "@RENAME@" + std::to_string(i); + backward_descs[dup_op[i]]->Rename(out_name, new_name); + sum_op_inputs.emplace_back(new_name); + } + std::unique_ptr sum_op(new OpDescBind( + "sum", {{"X", sum_op_inputs}}, {{"Out", {out_name}}}, {})); + pending_sum_ops.push_back({dup_op.back(), std::move(sum_op)}); + } + } + pending_sum_ops.sort( + [](const std::pair>& a, + const std::pair>& b) { + return a.first > b.first; + }); + for (auto& p : pending_sum_ops) { + backward_descs.insert(backward_descs.begin() + p.first + 1, + std::move(p.second)); + } + return backward_descs; +} + +void AppendBackward(ProgramDescBind& program_desc, + const std::unordered_set& no_grad_vars) { + std::unordered_set 
no_grad_var_names; + no_grad_var_names.reserve(no_grad_vars.size() + 1); + no_grad_var_names.insert(std::string(kEmptyVarName) + kGradVarSuffix); + for (auto& name : no_grad_vars) { + no_grad_var_names.insert(GradVarName(name)); + } + const int root_block_idx = 0; + auto backward_op_descs = + MakeBlockBackward(program_desc, root_block_idx, no_grad_var_names); + auto& forw_op_descs = program_desc.Block(root_block_idx)->ops_; + for (auto& ptr : backward_op_descs) { + forw_op_descs.push_back(std::move(ptr)); + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/backward.h b/paddle/framework/backward.h index 1ecf69881b3126c2904920b9f4b77bfcccc9cf86..7ffe4c28103f9d6a9f179422d1beb86106ef786e 100644 --- a/paddle/framework/backward.h +++ b/paddle/framework/backward.h @@ -13,8 +13,11 @@ limitations under the License. */ #pragma once + #include -#include "operator.h" +#include "paddle/framework/operator.h" +#include "paddle/framework/program_desc.h" + namespace paddle { namespace framework { @@ -23,5 +26,9 @@ namespace framework { extern std::unique_ptr Backward( const OperatorBase& forwardOp, const std::unordered_set& no_grad_vars); + +void AppendBackward(ProgramDescBind& program_desc, + const std::unordered_set& no_grad_vars); + } // namespace framework } // namespace paddle diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index a36e7bde8c61c6a8bfc6eefd46ce5e09907476ef..30225a4a99d993c4f12a5e0d276bda18acbc360e 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -15,30 +15,42 @@ #include "paddle/framework/backward.h" #include +#include "paddle/framework/block_desc.h" +#include "paddle/framework/op_desc.h" #include "paddle/framework/op_registry.h" #include "paddle/operators/net_op.h" namespace paddle { namespace framework { -using OperatorBase = framework::OperatorBase; -using OpProtoAndCheckerMaker = framework::OpProtoAndCheckerMaker; -using OpProto = framework::OpProto; -using OpAttrChecker = framework::OpAttrChecker; -using Scope = framework::Scope; using DeviceContext = platform::DeviceContext; class RowWiseAddOpMaker : public OpProtoAndCheckerMaker { public: RowWiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "Input X of Add").NotInGradient(); - AddInput("b", "Bias of Add").NotInGradient(); - AddOutput("Out", "Out of Add").NotInGradient(); + AddInput("X", "Input X of Add"); + AddInput("b", "Bias of Add"); + AddOutput("Out", "Out of Add"); AddComment("Add Op"); } }; +class RowWiseAddGradMaker : public SingleGradOpDescMaker { + public: + using SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto grad_op = new OpDescBind(); + grad_op->SetInput(GradVarName("Out"), OutputGrad("Out")); + grad_op->SetOutput(GradVarName("X"), InputGrad("X")); + grad_op->SetOutput(GradVarName("b"), InputGrad("b")); + grad_op->SetType("rowwise_add_grad"); + return std::unique_ptr(grad_op); + } +}; + class MulOpMaker : public OpProtoAndCheckerMaker { public: MulOpMaker(OpProto *proto, OpAttrChecker *op_checker) @@ -137,10 +149,20 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker { public: SumOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "the input tensors of sum operator.") - .AsDuplicable() - .NotInGradient(); - AddOutput("Out", "the output tensor of sum operator.").NotInGradient(); + 
AddInput("X", "the input tensors of sum operator.").AsDuplicable(); + AddOutput("Out", "the output tensor of sum operator."); + AddComment(""); + } +}; + +class MultInOutOpMaker : public OpProtoAndCheckerMaker { + public: + MultInOutOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "x"); + AddInput("H", "h"); + AddOutput("Y", "y"); + AddOutput("Z", "z"); AddComment(""); } }; @@ -151,8 +173,9 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker { namespace f = paddle::framework; namespace ops = paddle::operators; using EnforceNotMet = paddle::platform::EnforceNotMet; -REGISTER_OP(rowwise_add, f::NOP, f::RowWiseAddOpMaker, rowwise_add_grad, - f::NOP); +REGISTER_OPERATOR(rowwise_add, f::NOP, f::RowWiseAddOpMaker, + f::RowWiseAddGradMaker); +REGISTER_OPERATOR(rowwise_add_grad, f::NOP); REGISTER_OP(mul, f::NOP, f::MulOpMaker, mul_grad, f::NOP); REGISTER_OP(sigmoid, f::NOP, f::SigmoidOpMaker, sigmoid_grad, f::NOP); REGISTER_OP_WITHOUT_GRADIENT(nograd, f::NOP, f::NoGradOpMaker); @@ -161,17 +184,7 @@ REGISTER_OP(sum, f::NOP, f::SumOpMaker, sum_grad, f::NOP); REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker); REGISTER_OP(many_output_op, f::NOP, f::ManyOutputOpMaker, many_output_op_grad, f::NOP); - -TEST(Backward, simple_op_grad) { - auto fwd = f::OpRegistry::CreateOp( - "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {}); - ASSERT_NE(fwd, nullptr); - auto gop = f::OpRegistry::CreateGradOp(*fwd); - ASSERT_EQ(1UL, gop->Inputs().size()); - ASSERT_EQ("rowwise_add_grad", gop->Type()); - ASSERT_EQ(f::GradVarName("x"), gop->Output(f::GradVarName("X"))); - ASSERT_EQ(f::GradVarName("b"), gop->Output(f::GradVarName("b"))); -} +REGISTER_OP(mult_in_out, f::NOP, f::MultInOutOpMaker, mult_in_out_grad, f::NOP); TEST(Backward, simple_op_not_need_grad) { auto fwd = f::OpRegistry::CreateOp( @@ -289,17 +302,6 @@ TEST(Backward, net_shared_weight) { ASSERT_EQ("sum", bwd_net->ops_[2]->Type()); } -TEST(Backward, op_register_grad_not_for_network) { - auto fwd = - f::OpRegistry::CreateOp("fc", {{"X", {"x"}}, {"W", {"w"}}, {"b", {"b"}}}, - {{"mul_result", {"mul_out"}}, - {"add_result", {"add_out"}}, - {"Out", {"out1"}}}, - {{"temporary_index", std::vector{0, 1}}}); - - ASSERT_THROW(f::OpRegistry::CreateGradOp(*fwd), EnforceNotMet); -} - TEST(Backward, op_all_input_are_not_need) { auto fwd = f::OpRegistry::CreateOp( "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {}); @@ -402,3 +404,293 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { EXPECT_EQ(bwd_net->ops_[2]->Inputs(all).size(), 0UL); EXPECT_EQ(bwd_net->ops_[2]->Outputs(all).size(), 0UL); } + +// =================================== // + +f::ProgramDesc *GetNewProgramDesc() { + auto *program_desc = new f::ProgramDesc(); + auto *root_block = program_desc->add_blocks(); + root_block->set_idx(0); + root_block->set_parent_idx(-1); + return program_desc; +} + +TEST(Backward, simple_single_op) { + f::ProgramDesc *program_desc = GetNewProgramDesc(); + f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::BlockDescBind *block = program.Block(0); + f::OpDescBind *op = block->AppendOp(); + op->SetType("rowwise_add"); + op->SetInput("X", {"x"}); + op->SetInput("b", {"b"}); + op->SetOutput("Out", {"out"}); + + AppendBackward(program, {}); + + ASSERT_EQ(block->AllOps().size(), 2UL); + f::OpDescBind *grad_op = block->AllOps()[1]; + EXPECT_EQ(grad_op->Type(), "rowwise_add_grad"); + ASSERT_EQ(grad_op->InputNames().size(), 1UL); + 
ASSERT_EQ(grad_op->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out")})); + EXPECT_EQ(grad_op->Output(f::GradVarName("X")), + std::vector({f::GradVarName("x")})); + EXPECT_EQ(grad_op->Output(f::GradVarName("b")), + std::vector({f::GradVarName("b")})); +} + +TEST(Backward, simple_mult_op) { + f::ProgramDesc *program_desc = GetNewProgramDesc(); + f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::BlockDescBind *block = program.Block(0); + f::OpDescBind *op1 = block->AppendOp(); + op1->SetType("rowwise_add"); + op1->SetInput("X", {"x1"}); + op1->SetInput("b", {"b1"}); + op1->SetOutput("Out", {"out1"}); + + f::OpDescBind *op2 = block->AppendOp(); + op2->SetType("mul"); + op2->SetInput("X", {"out1"}); + op2->SetInput("Y", {"y2"}); + op2->SetOutput("Out", {"out2"}); + + f::OpDescBind *op3 = block->AppendOp(); + op3->SetType("rowwise_add"); + op3->SetInput("X", {"out2"}); + op3->SetInput("b", {"b3"}); + op3->SetOutput("Out", {"out3"}); + + AppendBackward(program, {}); + + ASSERT_EQ(block->AllOps().size(), 6UL); + f::OpDescBind *grad_op1 = block->AllOps()[5]; + EXPECT_EQ(grad_op1->Type(), "rowwise_add_grad"); + ASSERT_EQ(grad_op1->InputNames().size(), 1UL); + ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op1->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out1")})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), + std::vector({f::GradVarName("x1")})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("b")), + std::vector({f::GradVarName("b1")})); + + f::OpDescBind *grad_op2 = block->AllOps()[4]; + EXPECT_EQ(grad_op2->Type(), "mul_grad"); + ASSERT_EQ(grad_op2->InputNames().size(), 4UL); + ASSERT_EQ(grad_op2->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op2->Input("X"), std::vector({"out1"})); + EXPECT_EQ(grad_op2->Input("Y"), std::vector({"y2"})); + EXPECT_EQ(grad_op2->Input("Out"), std::vector({"out2"})); + EXPECT_EQ(grad_op2->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out2")})); + EXPECT_EQ(grad_op2->Output(f::GradVarName("X")), + std::vector({f::GradVarName("out1")})); + EXPECT_EQ(grad_op2->Output(f::GradVarName("Y")), + std::vector({f::GradVarName("y2")})); + + f::OpDescBind *grad_op3 = block->AllOps()[3]; + EXPECT_EQ(grad_op3->Type(), "rowwise_add_grad"); + ASSERT_EQ(grad_op3->InputNames().size(), 1UL); + ASSERT_EQ(grad_op3->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op3->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out3")})); + EXPECT_EQ(grad_op3->Output(f::GradVarName("X")), + std::vector({f::GradVarName("out2")})); + EXPECT_EQ(grad_op3->Output(f::GradVarName("b")), + std::vector({f::GradVarName("b3")})); +} + +TEST(Backward, intermedia_var_no_grad) { + f::ProgramDesc *program_desc = GetNewProgramDesc(); + f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::BlockDescBind *block = program.Block(0); + f::OpDescBind *op1 = block->AppendOp(); + op1->SetType("rowwise_add"); + op1->SetInput("X", {"x1"}); + op1->SetInput("b", {"b1"}); + op1->SetOutput("Out", {"out1"}); + + f::OpDescBind *op2 = block->AppendOp(); + op2->SetType("mul"); + op2->SetInput("X", {"x2"}); + op2->SetInput("Y", {"y2"}); + op2->SetOutput("Out", {"out2"}); + + f::OpDescBind *op3 = block->AppendOp(); + op3->SetType("rowwise_add"); + op3->SetInput("X", {"out2"}); + op3->SetInput("b", {"b3"}); + op3->SetOutput("Out", {"out3"}); + + f::OpDescBind *op4 = block->AppendOp(); + op4->SetType("mul"); + op4->SetInput("X", {"out1"}); + op4->SetInput("Y", 
{"out3"}); + op4->SetOutput("Out", {"out4"}); + + AppendBackward(program, {"out3"}); + + ASSERT_EQ(block->AllOps().size(), 6UL); + f::OpDescBind *grad_op1 = block->AllOps()[5]; + EXPECT_EQ(grad_op1->Type(), "rowwise_add_grad"); + ASSERT_EQ(grad_op1->InputNames().size(), 1UL); + ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op1->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out1")})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), + std::vector({f::GradVarName("x1")})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("b")), + std::vector({f::GradVarName("b1")})); + + f::OpDescBind *grad_op4 = block->AllOps()[4]; + EXPECT_EQ(grad_op4->Type(), "mul_grad"); + ASSERT_EQ(grad_op4->InputNames().size(), 4UL); + ASSERT_EQ(grad_op4->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op4->Input("X"), std::vector({"out1"})); + EXPECT_EQ(grad_op4->Input("Y"), std::vector({"out3"})); + EXPECT_EQ(grad_op4->Input("Out"), std::vector({"out4"})); + EXPECT_EQ(grad_op4->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out4")})); + EXPECT_EQ(grad_op4->Output(f::GradVarName("X")), + std::vector({f::GradVarName("out1")})); + EXPECT_EQ(grad_op4->Output(f::GradVarName("Y")), + std::vector({f::kEmptyVarName})); +} + +TEST(Backward, var_no_grad) { + f::ProgramDesc *program_desc = GetNewProgramDesc(); + f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::BlockDescBind *block = program.Block(0); + f::OpDescBind *op1 = block->AppendOp(); + op1->SetType("mult_in_out"); + op1->SetInput("X", {"x1"}); + op1->SetInput("H", {"h1"}); + op1->SetOutput("Y", {"y1"}); + op1->SetOutput("Z", {"z1"}); + + f::OpDescBind *op2 = block->AppendOp(); + op2->SetType("mult_in_out"); + op2->SetInput("X", {"y1"}); + op2->SetInput("H", {"z1"}); + op2->SetOutput("Y", {"y2"}); + op2->SetOutput("Z", {"z2"}); + + AppendBackward(program, {"z1"}); + + ASSERT_EQ(block->AllOps().size(), 5UL); + f::OpDescBind *grad_op2 = block->AllOps()[2]; + ASSERT_EQ(grad_op2->Type(), "mult_in_out_grad"); + ASSERT_EQ(grad_op2->InputNames().size(), 6UL); + ASSERT_EQ(grad_op2->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op2->Input("X"), std::vector({"y1"})); + EXPECT_EQ(grad_op2->Input("H"), std::vector({"z1"})); + EXPECT_EQ(grad_op2->Input("Y"), std::vector({"y2"})); + EXPECT_EQ(grad_op2->Input("Z"), std::vector({"z2"})); + EXPECT_EQ(grad_op2->Input(f::GradVarName("Y")), + std::vector({f::GradVarName("y2")})); + EXPECT_EQ(grad_op2->Input(f::GradVarName("Z")), + std::vector({f::GradVarName("z2")})); + EXPECT_EQ(grad_op2->Output(f::GradVarName("X")), + std::vector({f::GradVarName("y1")})); + EXPECT_EQ(grad_op2->Output(f::GradVarName("H")), + std::vector({f::kEmptyVarName})); + + f::OpDescBind *fill_zero_op = block->AllOps()[3]; + ASSERT_EQ(fill_zero_op->Type(), "fill_zeros_like"); + ASSERT_EQ(fill_zero_op->InputNames().size(), 1UL); + ASSERT_EQ(fill_zero_op->OutputNames().size(), 1UL); + EXPECT_EQ(fill_zero_op->Input("X"), std::vector({"z1"})); + EXPECT_EQ(fill_zero_op->Output("Y"), + std::vector({std::string("z1") + f::kZeroVarSuffix})); + + f::OpDescBind *grad_op1 = block->AllOps()[4]; + ASSERT_EQ(grad_op1->Type(), "mult_in_out_grad"); + ASSERT_EQ(grad_op1->InputNames().size(), 6UL); + ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op1->Input("X"), std::vector({"x1"})); + EXPECT_EQ(grad_op1->Input("H"), std::vector({"h1"})); + EXPECT_EQ(grad_op1->Input("Y"), std::vector({"y1"})); + EXPECT_EQ(grad_op1->Input("Z"), std::vector({"z1"})); + EXPECT_EQ(grad_op1->Input(f::GradVarName("Y")), 
+ std::vector({f::GradVarName("y1")})); + EXPECT_EQ(grad_op1->Input(f::GradVarName("Z")), + std::vector({std::string("z1") + f::kZeroVarSuffix})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), + std::vector({f::GradVarName("x1")})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("H")), + std::vector({f::GradVarName("h1")})); +} + +TEST(Backward, shared_var) { + f::ProgramDesc *program_desc = GetNewProgramDesc(); + f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::BlockDescBind *block = program.Block(0); + f::OpDescBind *op1 = block->AppendOp(); + op1->SetType("rowwise_add"); + op1->SetInput("X", {"x1"}); + op1->SetInput("b", {"b1"}); + op1->SetOutput("Out", {"out1"}); + + f::OpDescBind *op2 = block->AppendOp(); + op2->SetType("mul"); + op2->SetInput("X", {"out1"}); + op2->SetInput("Y", {"y2"}); + op2->SetOutput("Out", {"out2"}); + + f::OpDescBind *op3 = block->AppendOp(); + op3->SetType("rowwise_add"); + op3->SetInput("X", {"out1"}); + op3->SetInput("b", {"b3"}); + op3->SetOutput("Out", {"out3"}); + + AppendBackward(program, {}); + + ASSERT_EQ(block->AllOps().size(), 7UL); + f::OpDescBind *grad_op3 = block->AllOps()[3]; + ASSERT_EQ(grad_op3->Type(), "rowwise_add_grad"); + ASSERT_EQ(grad_op3->InputNames().size(), 1UL); + ASSERT_EQ(grad_op3->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op3->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out3")})); + EXPECT_EQ(grad_op3->Output(f::GradVarName("X")), + std::vector({f::GradVarName("out1") + "@RENAME@0"})); + EXPECT_EQ(grad_op3->Output(f::GradVarName("b")), + std::vector({f::GradVarName("b3")})); + + f::OpDescBind *grad_op4 = block->AllOps()[4]; + ASSERT_EQ(grad_op4->Type(), "mul_grad"); + ASSERT_EQ(grad_op4->InputNames().size(), 4UL); + ASSERT_EQ(grad_op4->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op4->Input("X"), std::vector({"out1"})); + EXPECT_EQ(grad_op4->Input("Y"), std::vector({"y2"})); + EXPECT_EQ(grad_op4->Input("Out"), std::vector({"out2"})); + EXPECT_EQ(grad_op4->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out2")})); + EXPECT_EQ(grad_op4->Output(f::GradVarName("X")), + std::vector({f::GradVarName("out1") + "@RENAME@1"})); + EXPECT_EQ(grad_op4->Output(f::GradVarName("Y")), + std::vector({f::GradVarName("y2")})); + + f::OpDescBind *sum_op = block->AllOps()[5]; + ASSERT_EQ(sum_op->Type(), "sum"); + ASSERT_EQ(sum_op->InputNames().size(), 1UL); + ASSERT_EQ(sum_op->OutputNames().size(), 1UL); + EXPECT_EQ(sum_op->Input("X"), + std::vector({f::GradVarName("out1") + "@RENAME@0", + f::GradVarName("out1") + "@RENAME@1"})); + EXPECT_EQ(sum_op->Output("Out"), + std::vector({f::GradVarName("out1")})); + + f::OpDescBind *grad_op1 = block->AllOps()[6]; + ASSERT_EQ(grad_op1->Type(), "rowwise_add_grad"); + ASSERT_EQ(grad_op1->InputNames().size(), 1UL); + ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op1->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out1")})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), + std::vector({f::GradVarName("x1")})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("b")), + std::vector({f::GradVarName("b1")})); +} \ No newline at end of file diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h index b646ad5f3b67e22b933a56d88e4a1bf6e74a124e..2de270f60ec2ae981335f1adb204cfc3bf78c622 100644 --- a/paddle/framework/block_desc.h +++ b/paddle/framework/block_desc.h @@ -32,6 +32,14 @@ class ProgramDescBind; class BlockDescBind { public: + friend std::vector> MakeBlockBackward( + ProgramDescBind &program_desc, int 
block_idx, + std::unordered_set &no_grad_vars); + + friend void AppendBackward( + ProgramDescBind &program_desc, + const std::unordered_set &no_grad_vars); + BlockDescBind(ProgramDescBind *prog, BlockDesc *desc) : prog_(prog), desc_(desc), need_update_(false) {} diff --git a/paddle/framework/framework.proto b/paddle/framework/framework.proto index d696cdf5ad68118c98577fcaf1aed1cf013df296..ac2827e54773f811eb855c092e3c0ed2fab06dd3 100644 --- a/paddle/framework/framework.proto +++ b/paddle/framework/framework.proto @@ -66,7 +66,6 @@ message OpProto { optional bool duplicable = 3 [ default = false ]; optional bool intermediate = 4 [ default = false ]; - optional bool not_in_gradient = 5 [ default = false ]; } // AttrProto describes the C++ type Attribute. @@ -116,4 +115,7 @@ message BlockDesc { repeated OpDesc ops = 4; } +// Please refer to +// https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/program.md +// for more details. message ProgramDesc { repeated BlockDesc blocks = 1; } diff --git a/paddle/framework/grad_op_builder.cc b/paddle/framework/grad_op_builder.cc deleted file mode 100644 index 3661ce41beba1328d1b1cdd9f0f913e693af9cff..0000000000000000000000000000000000000000 --- a/paddle/framework/grad_op_builder.cc +++ /dev/null @@ -1,97 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOpArgType::OUT WARRANTIES OR CONDITIONS OF ANY KOpArgType::IND, either -express or implied. See the License for the specific language governing -permissions and limitations under the License. */ - -#include "paddle/framework/grad_op_builder.h" -#include "paddle/framework/op_registry.h" - -namespace paddle { -namespace framework { -enum class OpArgType { IN, OUT }; - -static void TransOpArg(const OperatorBase* src_op, const OpArgType& src_type, - bool is_grad, VariableNameMap* vars) { - const auto& src_inout = - src_type == OpArgType::IN ? src_op->Inputs() : src_op->Outputs(); - auto& dst_inout = *vars; - auto& proto = OpInfoMap::Instance().Get(src_op->Type()).Proto(); - const auto& src_arg_list = - src_type == OpArgType::IN ? proto.inputs() : proto.outputs(); - for (const auto& arg : src_arg_list) { - if (arg.not_in_gradient() && !is_grad) continue; - const std::string src_name = arg.name(); - std::string dst_name = is_grad ? GradVarName(src_name) : src_name; - dst_inout[dst_name].reserve(src_inout.at(src_name).size()); - for (auto& var_name : src_inout.at(src_name)) { - std::string s = is_grad ? 
GradVarName(var_name) : var_name; - dst_inout[dst_name].emplace_back(s); - } - } -} - -OperatorBase* BuildGradOp(const OperatorBase* op) { - auto& info = OpInfoMap::Instance().Get(op->Type()); - PADDLE_ENFORCE(info.HasGradientOp()); - - VariableNameMap inputs; - VariableNameMap outputs; - TransOpArg(op, OpArgType::IN, false, &inputs); // I - TransOpArg(op, OpArgType::OUT, false, &inputs); // O - TransOpArg(op, OpArgType::OUT, true, &inputs); // OG - TransOpArg(op, OpArgType::IN, true, &outputs); // IG - - auto& grad_info = OpInfoMap::Instance().Get(info.grad_op_type_); - return grad_info.Creator()(info.grad_op_type_, inputs, outputs, op->Attrs()); -} - -static void TransOpDescArg(const OpDescBind* src_op, const OpArgType& src_type, - bool is_grad, OpDescBind* dst_op, - const OpArgType& dst_type) { - PADDLE_ENFORCE(dst_op != nullptr, - "Protobuf desc of gradient op must be initialized first."); - const auto& proto = OpInfoMap::Instance().Get(src_op->Type()).Proto(); - const auto& src_arg_list = - src_type == OpArgType::IN ? proto.inputs() : proto.outputs(); - for (const auto& arg : src_arg_list) { - if (arg.not_in_gradient() && !is_grad) continue; - const std::string src_name = arg.name(); - std::vector vars = src_type == OpArgType::IN - ? src_op->Input(src_name) - : src_op->Output(src_name); - if (is_grad) { - for (std::string& var : vars) { - var = GradVarName(var); - } - } - std::string dst_name = is_grad ? GradVarName(src_name) : src_name; - dst_type == OpArgType::IN ? dst_op->SetInput(dst_name, vars) - : dst_op->SetOutput(dst_name, vars); - } -} - -void CompleteGradOpDesc(const OpDescBind* forw_op, OpDescBind* grad_op) { - auto& info = OpInfoMap::Instance().Get(forw_op->Type()); - PADDLE_ENFORCE(info.HasGradientOp()); - - grad_op->SetType(info.grad_op_type_); - - TransOpDescArg(forw_op, OpArgType::IN, false, grad_op, OpArgType::IN); - TransOpDescArg(forw_op, OpArgType::OUT, false, grad_op, OpArgType::IN); - TransOpDescArg(forw_op, OpArgType::OUT, true, grad_op, OpArgType::IN); - TransOpDescArg(forw_op, OpArgType::IN, true, grad_op, OpArgType::OUT); - - grad_op->SetAttrMap(forw_op->GetAttrMap()); -} - -} // namespace framework -} // namespace paddle diff --git a/paddle/framework/grad_op_builder.h b/paddle/framework/grad_op_builder.h deleted file mode 100644 index b601406061f9f8f24302251c2144b07b6e65717f..0000000000000000000000000000000000000000 --- a/paddle/framework/grad_op_builder.h +++ /dev/null @@ -1,28 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#pragma once - -#include "paddle/framework/op_desc.h" -#include "paddle/framework/operator.h" - -namespace paddle { -namespace framework { - -OperatorBase* BuildGradOp(const OperatorBase* op); - -void CompleteGradOpDesc(const OpDescBind* forw_op, OpDescBind* grad_op); - -} // namespace framework -} // namespace paddle diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc deleted file mode 100644 index 793780ea44b80a0d750d35cbea601bfbfd5ccda4..0000000000000000000000000000000000000000 --- a/paddle/framework/grad_op_builder_test.cc +++ /dev/null @@ -1,186 +0,0 @@ -#include "paddle/framework/grad_op_builder.h" -#include -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" - -USE_OP(sum); - -namespace paddle { -namespace framework { - -class MutiInOutOpMaker : public OpProtoAndCheckerMaker { - public: - MutiInOutOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("In1", "a single input"); - AddInput("In2_mult", "a multiple input").AsDuplicable(); - AddInput("In3", "another single input"); - AddOutput("Out1", "a single output"); - AddOutput("Out2_mult", "a multiple output").AsDuplicable(); - AddComment("test op with multiple inputs and outputs"); - } -}; - -class IOIgnoredOpMaker : public OpProtoAndCheckerMaker { - public: - IOIgnoredOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("In1", "a single input"); - AddInput("In2_mult", "a multiple input").AsDuplicable().NotInGradient(); - AddInput("In3_mult", "another multiple input").AsDuplicable(); - AddOutput("Out1_mult", "a multiple output").AsDuplicable(); - AddOutput("Out2", "a single output").NotInGradient(); - AddComment("op with inputs and outputs ignored in gradient calculating"); - } -}; - -} // namespace framework -} // namespace paddle - -namespace f = paddle::framework; - -REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker, mult_io_grad, f::NOP); -REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker, io_ignored_grad, f::NOP); - -TEST(GradOpBuilder, MutiInOut) { - std::shared_ptr test_op(f::OpRegistry::CreateOp( - "mult_io", {{"In1", {"in1"}}, - {"In2_mult", {"in2_1", "in2_2", "in2_3"}}, - {"In3", {"in3"}}}, - {{"Out1", {"out1"}}, {"Out2_mult", {"out2_1", "out2_2"}}}, {})); - std::shared_ptr grad_test_op = - f::OpRegistry::CreateGradOp(*test_op); - - ASSERT_EQ(grad_test_op->Inputs().size(), 3UL + 2UL + 2UL); - EXPECT_EQ(grad_test_op->Input("In1"), "in1"); - EXPECT_EQ(grad_test_op->Inputs("In2_mult"), - std::vector({"in2_1", "in2_2", "in2_3"})); - EXPECT_EQ(grad_test_op->Input("In3"), "in3"); - EXPECT_EQ(grad_test_op->Input("Out1"), "out1"); - EXPECT_EQ(grad_test_op->Inputs("Out2_mult"), - std::vector({"out2_1", "out2_2"})); - EXPECT_EQ(grad_test_op->Input(f::GradVarName("Out1")), - f::GradVarName("out1")); - EXPECT_EQ(grad_test_op->Inputs(f::GradVarName("Out2_mult")), - std::vector( - {f::GradVarName("out2_1"), f::GradVarName("out2_2")})); - - ASSERT_EQ(grad_test_op->Outputs().size(), 3UL); - EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1")); - EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")), - std::vector({f::GradVarName("in2_1"), - f::GradVarName("in2_2"), - f::GradVarName("in2_3")})); - EXPECT_EQ(grad_test_op->Output(f::GradVarName("In3")), f::GradVarName("in3")); -} - -TEST(GradOpBuilder, IOIgnoredInGradient) { - std::shared_ptr test_op(f::OpRegistry::CreateOp( - "io_ignored", {{"In1", {"in1"}}, - 
{"In2_mult", {"in2_1", "in2_2"}}, - {"In3_mult", {"in3_1", "in3_2"}}}, - {{"Out1_mult", {"out1_1", "out1_2"}}, {"Out2", {"out2"}}}, {})); - std::shared_ptr grad_test_op = - f::OpRegistry::CreateGradOp(*test_op); - - // 'In2' and 'Out2' are ignored in gradient calculating - ASSERT_EQ(grad_test_op->Inputs().size(), 2UL + 1UL + 2UL); - EXPECT_EQ(grad_test_op->Input("In1"), "in1"); - EXPECT_EQ(grad_test_op->Inputs("In3_mult"), - std::vector({"in3_1", "in3_2"})); - EXPECT_EQ(grad_test_op->Inputs("Out1_mult"), - std::vector({"out1_1", "out1_2"})); - EXPECT_EQ(grad_test_op->Inputs(f::GradVarName("Out1_mult")), - std::vector( - {f::GradVarName("out1_1"), f::GradVarName("out1_2")})); - EXPECT_EQ(grad_test_op->Input(f::GradVarName("Out2")), - f::GradVarName("out2")); - - ASSERT_EQ(grad_test_op->Outputs().size(), 3UL); - EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1")); - EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")), - std::vector( - {f::GradVarName("in2_1"), f::GradVarName("in2_2")})); - EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In3_mult")), - std::vector( - {f::GradVarName("in3_1"), f::GradVarName("in3_2")})); -} - -TEST(GradOpDescBuilder, MutiInOut) { - f::OpDescBind *forw_op = new f::OpDescBind(); - forw_op->SetType("mult_io"); - forw_op->SetInput("In1", {"in1"}); - forw_op->SetInput("In2_mult", {"in2_1", "in2_2", "in2_3"}); - forw_op->SetInput("In3", {"in3"}); - forw_op->SetOutput("Out1", {"out1"}); - forw_op->SetOutput("Out2_mult", {"out2_1", "out2_2"}); - - f::OpDescBind *grad_op = new f::OpDescBind(); - f::CompleteGradOpDesc(forw_op, grad_op); - - EXPECT_EQ(grad_op->Type(), "mult_io_grad"); - ASSERT_EQ(grad_op->InputNames().size(), 3UL + 2UL + 2UL); - EXPECT_EQ(grad_op->Input("In1"), std::vector({"in1"})); - EXPECT_EQ(grad_op->Input("In2_mult"), - std::vector({"in2_1", "in2_2", "in2_3"})); - EXPECT_EQ(grad_op->Input("In3"), std::vector({"in3"})); - EXPECT_EQ(grad_op->Input("Out1"), std::vector({"out1"})); - EXPECT_EQ(grad_op->Input("Out2_mult"), - std::vector({"out2_1", "out2_2"})); - EXPECT_EQ(grad_op->Input(f::GradVarName("Out1")), - std::vector({f::GradVarName("out1")})); - EXPECT_EQ(grad_op->Input(f::GradVarName("Out2_mult")), - std::vector( - {f::GradVarName("out2_1"), f::GradVarName("out2_2")})); - - ASSERT_EQ(grad_op->OutputNames().size(), 3UL); - EXPECT_EQ(grad_op->Output(f::GradVarName("In1")), - std::vector({f::GradVarName("in1")})); - EXPECT_EQ(grad_op->Output(f::GradVarName("In2_mult")), - std::vector({f::GradVarName("in2_1"), - f::GradVarName("in2_2"), - f::GradVarName("in2_3")})); - EXPECT_EQ(grad_op->Output(f::GradVarName("In3")), - std::vector({f::GradVarName("in3")})); - delete forw_op; - delete grad_op; -} - -TEST(GradOpDescBuilder, IOIgnoredInGradient) { - f::OpDescBind *forw_op = new f::OpDescBind(); - forw_op->SetType("io_ignored"); - forw_op->SetInput("In1", {"in1"}); - forw_op->SetInput("In2_mult", {"in2_1", "in2_2"}); - forw_op->SetInput("In3_mult", {"in3_1", "in3_2"}); - forw_op->SetOutput("Out1_mult", {"out1_1", "out1_2"}); - forw_op->SetOutput("Out2", {"out2"}); - - f::OpDescBind *grad_op = new f::OpDescBind(); - f::CompleteGradOpDesc(forw_op, grad_op); - - EXPECT_EQ(grad_op->Type(), "io_ignored_grad"); - // 'In2' and 'Out2' are ignored in gradient calculating - ASSERT_EQ(grad_op->InputNames().size(), 2UL + 1UL + 2UL); - EXPECT_EQ(grad_op->Input("In1"), std::vector({"in1"})); - EXPECT_EQ(grad_op->Input("In3_mult"), - std::vector({"in3_1", "in3_2"})); - EXPECT_EQ(grad_op->Input("Out1_mult"), - std::vector({"out1_1", 
"out1_2"})); - EXPECT_EQ(grad_op->Input(f::GradVarName("Out1_mult")), - std::vector( - {f::GradVarName("out1_1"), f::GradVarName("out1_2")})); - EXPECT_EQ(grad_op->Input(f::GradVarName("Out2")), - std::vector({f::GradVarName("out2")})); - - ASSERT_EQ(grad_op->OutputNames().size(), 3UL); - EXPECT_EQ(grad_op->Output(f::GradVarName("In1")), - std::vector({f::GradVarName("in1")})); - EXPECT_EQ(grad_op->Output(f::GradVarName("In2_mult")), - std::vector( - {f::GradVarName("in2_1"), f::GradVarName("in2_2")})); - EXPECT_EQ(grad_op->Output(f::GradVarName("In3_mult")), - std::vector( - {f::GradVarName("in3_1"), f::GradVarName("in3_2")})); - delete forw_op; - delete grad_op; -} diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index 852f0f1eb8ea09b336d656afea4f5b146c8d45de..02aa74a8420a5c685c88d7cb0b487284814b3690 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -18,6 +18,15 @@ limitations under the License. */ namespace paddle { namespace framework { +OpDescBind::OpDescBind(const std::string &type, const VariableNameMap &inputs, + const VariableNameMap &outputs, + const AttributeMap &attrs) { + op_desc_.set_type(type); + inputs_ = inputs; + outputs_ = outputs; + attrs_ = attrs; +} + OpDesc *OpDescBind::Proto() { Sync(); return &op_desc_; @@ -31,6 +40,14 @@ const std::vector &OpDescBind::Input( return it->second; } +std::vector OpDescBind::InputArgumentNames() const { + std::vector retv; + for (auto &ipt : this->inputs_) { + retv.insert(retv.end(), ipt.second.begin(), ipt.second.end()); + } + return retv; +} + void OpDescBind::SetInput(const std::string ¶m_name, const std::vector &args) { need_update_ = true; @@ -45,6 +62,14 @@ const std::vector &OpDescBind::Output( return it->second; } +std::vector OpDescBind::OutputArgumentNames() const { + std::vector retv; + for (auto &ipt : this->outputs_) { + retv.insert(retv.end(), ipt.second.begin(), ipt.second.end()); + } + return retv; +} + void OpDescBind::SetOutput(const std::string ¶m_name, const std::vector &args) { need_update_ = true; @@ -94,6 +119,18 @@ const std::unordered_map &OpDescBind::GetAttrMap() return attrs_; } +void OpDescBind::Rename(const std::string &old_name, + const std::string &new_name) { + for (auto &input : inputs_) { + std::replace(input.second.begin(), input.second.end(), old_name, new_name); + } + for (auto &output : outputs_) { + std::replace(output.second.begin(), output.second.end(), old_name, + new_name); + } + need_update_ = true; +} + struct SetAttrDescVisitor : public boost::static_visitor { explicit SetAttrDescVisitor(OpDesc::Attr *attr) : attr_(attr) {} mutable OpDesc::Attr *attr_; diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index 397393f796ed11312578859495529f4aedd17e78..b39808dad1de061e896936ec84169cd62e29856d 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -27,6 +27,11 @@ class BlockDescBind; class OpDescBind { public: + OpDescBind() {} + + OpDescBind(const std::string &type, const VariableNameMap &inputs, + const VariableNameMap &outputs, const AttributeMap &attrs); + OpDesc *Proto(); std::string Type() const { return op_desc_.type(); } @@ -35,11 +40,15 @@ class OpDescBind { const std::vector &Input(const std::string &name) const; + std::vector InputArgumentNames() const; + void SetInput(const std::string ¶m_name, const std::vector &args); const std::vector &Output(const std::string &name) const; + std::vector OutputArgumentNames() const; + void SetOutput(const std::string ¶m_name, const std::vector &args); @@ 
-61,6 +70,8 @@ class OpDescBind { int GetBlockAttr(const std::string &name) const; + void Rename(const std::string &old_name, const std::string &new_name); + // Only be used in C++ const AttributeMap &GetAttrMap() const; @@ -70,6 +81,22 @@ class OpDescBind { std::vector InputNames() const { return MapKeys(inputs_); } std::vector OutputNames() const { return MapKeys(outputs_); } + void SetInputMap(const VariableNameMap &input) { + this->inputs_ = input; + this->need_update_ = true; + } + + void SetOutputMap(const VariableNameMap &output) { + this->outputs_ = output; + this->need_update_ = true; + } + + void Sync(); + + const VariableNameMap &Inputs() const { return inputs_; } + + const VariableNameMap &Outputs() const { return outputs_; } + private: template static std::vector MapKeys(const MapType &map) { @@ -81,8 +108,6 @@ class OpDescBind { return ret_val; } - void Sync(); - OpDesc op_desc_; VariableNameMap inputs_; VariableNameMap outputs_; diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h index 8b7882485ff82860b884d3a8bc3d5543e2426898..c504f69e30bb899c183bd4281d2eadb50fd3b376 100644 --- a/paddle/framework/op_info.h +++ b/paddle/framework/op_info.h @@ -17,6 +17,7 @@ #include #include #include + #include "paddle/framework/attribute.h" #include "paddle/framework/op_desc.h" #include "paddle/framework/type_defs.h" @@ -27,7 +28,6 @@ namespace framework { struct OpInfo { OpCreator creator_; - std::string grad_op_type_; GradOpMakerFN grad_op_maker_; OpProto* proto_{nullptr}; OpAttrChecker* checker_{nullptr}; @@ -43,19 +43,19 @@ struct OpInfo { return *proto_; } - const OpAttrChecker& Checker() const { - PADDLE_ENFORCE_NOT_NULL(checker_, - "Operator Checker has not been registered"); - return *checker_; - } - const OpCreator& Creator() const { PADDLE_ENFORCE_NOT_NULL(creator_, "Operator Creator has not been registered"); return creator_; } - bool HasGradientOp() const { return !grad_op_type_.empty(); } + const GradOpMakerFN& GradOpMaker() const { + PADDLE_ENFORCE_NOT_NULL(grad_op_maker_, + "Operator GradOpMaker has not been registered."); + return grad_op_maker_; + } + + const OpAttrChecker* Checker() const { return checker_; } }; class OpInfoMap { diff --git a/paddle/framework/op_proto_maker.h b/paddle/framework/op_proto_maker.h index 4d55a37db9f0a3deac7b3489c8bc288ea41f4799..a134befd90a1eaeff6f6ea62f11412df63cdc394 100644 --- a/paddle/framework/op_proto_maker.h +++ b/paddle/framework/op_proto_maker.h @@ -44,11 +44,6 @@ class OpProtoAndCheckerMaker { var_->set_intermediate(true); return *this; } - - VariableBuilder& NotInGradient() { - var_->set_not_in_gradient(true); - return *this; - } }; VariableBuilder AddInput(const std::string& name, const std::string& comment); diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index b0e85dd49f97da4a7f889fde0b5f060954947be8..66043f6e04fdb63b5d11a15c66abc84339e13c9a 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -23,7 +23,9 @@ std::unique_ptr OpRegistry::CreateOp( const std::string& type, const VariableNameMap& inputs, const VariableNameMap& outputs, AttributeMap attrs) { auto& info = OpInfoMap::Instance().Get(type); - info.Checker().Check(attrs); + if (info.Checker() != nullptr) { + info.Checker()->Check(attrs); + } auto op = info.Creator()(type, inputs, outputs, attrs); return std::unique_ptr(op); } @@ -52,9 +54,15 @@ std::unique_ptr OpRegistry::CreateOp(const OpDesc& op_desc) { return CreateOp(op_desc.type(), inputs, outputs, attrs); } -std::unique_ptr 
OpRegistry::CreateGradOp(const OperatorBase& op) { - PADDLE_ENFORCE(!op.IsNetOp(), "Use framework::Backward to get backward ops"); - return std::unique_ptr(BuildGradOp(&op)); +std::unique_ptr OpRegistry::CreateOp(const OpDescBind& op_desc) { + return CreateOp(op_desc.Type(), op_desc.Inputs(), op_desc.Outputs(), + op_desc.GetAttrMap()); +} + +std::vector> OpRegistry::CreateGradOpDescs( + const OpDescBind& op_desc) { + auto& info = OpInfoMap::Instance().Get(op_desc.Type()); + return info.grad_op_maker_(op_desc); } } // namespace framework diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 958cf581f53b2bb0252b655267f657c8d9ab371c..cce3605fd480c1d79a5969f6a4cb170ea4d879f2 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -23,25 +23,37 @@ limitations under the License. */ #include "paddle/framework/attribute.h" #include "paddle/framework/details/op_registry.h" #include "paddle/framework/framework.pb.h" -#include "paddle/framework/grad_op_builder.h" +#include "paddle/framework/grad_op_desc_maker.h" +#include "paddle/framework/op_desc.h" #include "paddle/framework/operator.h" #include "paddle/framework/scope.h" namespace paddle { namespace framework { +class Registrar { + public: + // In our design, various kinds of classes, e.g., operators and kernels, + // have their corresponding registry and registrar. The action of + // registration is in the constructor of a global registrar variable, which, + // however, are not used in the code that calls package framework, and would + // be removed from the generated binary file by the linker. To avoid such + // removal, we add Touch to all registrar classes and make USE_OP macros to + // call this method. So, as long as the callee code calls USE_OP, the global + // registrar variable won't be removed by the linker. + void Touch() {} +}; template -struct OperatorRegistrar { +struct OperatorRegistrar : public Registrar { explicit OperatorRegistrar(const char* op_type) : op_type(op_type) { PADDLE_ENFORCE(!OpInfoMap::Instance().Has(op_type), "'%s' is registered more than once.", op_type); static_assert(sizeof...(ARGS) != 0, "OperatorRegistrar should be invoked at least by OpClass"); details::OperatorRegistrarRecursive<0, false, ARGS...>(op_type, &info); + OpInfoMap::Instance().Insert(op_type, info); } - ~OperatorRegistrar() { OpInfoMap::Instance().Insert(op_type, info); } - const char* op_type; OpInfo info; @@ -67,20 +79,10 @@ class OpRegistry { static std::unique_ptr CreateOp(const OpDesc& op_desc); - static std::unique_ptr CreateGradOp(const OperatorBase& op); -}; + static std::vector> CreateGradOpDescs( + const OpDescBind& op_desc); -class Registrar { - public: - // In our design, various kinds of classes, e.g., operators and kernels, - // have their corresponding registry and registrar. The action of - // registration is in the constructor of a global registrar variable, which, - // however, are not used in the code that calls package framework, and would - // be removed from the generated binary file by the linker. To avoid such - // removal, we add Touch to all registrar classes and make USE_OP macros to - // call this method. So, as long as the callee code calls USE_OP, the global - // registrar variable won't be removed by the linker. 
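Note: the comment on `Registrar::Touch()` above describes a linker-retention trick. Below is a minimal, self-contained sketch of that pattern under assumed names (`DemoRegistrar`, `DEMO_REGISTER_OP`, and `DEMO_USE_OP` are hypothetical, not the real PaddlePaddle macros): a global registrar object fills a registry in its constructor, and a `Touch`-style function referenced from a `USE_OP`-like macro keeps that global from being stripped when nothing else in the linking binary refers to it.

```c++
// Sketch of the registrar/Touch pattern; none of these names are Paddle APIs.
#include <functional>
#include <iostream>
#include <map>
#include <string>

using OpCreator = std::function<void()>;

std::map<std::string, OpCreator>& Registry() {
  static std::map<std::string, OpCreator> registry;
  return registry;
}

struct DemoRegistrar {
  DemoRegistrar(const std::string& type, OpCreator creator) {
    Registry()[type] = std::move(creator);
  }
  // Touch() does nothing; calling it from user code creates a reference to the
  // global registrar variable so the linker cannot drop it as unused.
  void Touch() {}
};

#define DEMO_REGISTER_OP(op_type, body)                                 \
  static DemoRegistrar __registrar_##op_type##__(#op_type, [] body);    \
  int TouchOpRegistrar_##op_type() {                                    \
    __registrar_##op_type##__.Touch();                                  \
    return 0;                                                           \
  }

#define DEMO_USE_OP(op_type)                                            \
  extern int TouchOpRegistrar_##op_type();                              \
  [[maybe_unused]] static int __use_op_##op_type##__ =                  \
      TouchOpRegistrar_##op_type()

DEMO_REGISTER_OP(mul, { std::cout << "running mul\n"; })
DEMO_USE_OP(mul);  // in real code this lives in the translation unit using the op

int main() {
  Registry().at("mul")();  // the creator registered during static init runs here
  return 0;
}
```

In the real code the register and use halves sit in different translation units, which is exactly when the explicit `Touch` reference matters.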
- void Touch() {} + static std::unique_ptr CreateOp(const OpDescBind& op_desc); }; template @@ -138,33 +140,41 @@ class OpKernelRegistrar : public Registrar { __test_global_namespace_##uniq_name##__>::value, \ msg) +#define REGISTER_OPERATOR(op_type, op_class, ...) \ + STATIC_ASSERT_GLOBAL_NAMESPACE( \ + __reg_op__##op_type, \ + "REGISTER_OPERATOR must be called in global namespace"); \ + class _OpClass_##op_type##_ : public op_class { \ + public: \ + DEFINE_OP_CLONE_METHOD(_OpClass_##op_type##_); \ + DEFINE_OP_CONSTRUCTOR(_OpClass_##op_type##_, op_class); \ + }; \ + static ::paddle::framework::OperatorRegistrar<_OpClass_##op_type##_, \ + ##__VA_ARGS__> \ + __op_registrar_##op_type##__(#op_type); \ + int TouchOpRegistrar_##op_type() { \ + __op_registrar_##op_type##__.Touch(); \ + return 0; \ + } + /** * Macro to register Operator. */ -#define REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, \ - grad_op_class) \ - STATIC_ASSERT_GLOBAL_NAMESPACE( \ - __reg_op__##op_type, "REGISTER_OP must be called in global namespace"); \ - class _OpClass_##op_type##_ : public op_class { \ - public: \ - DEFINE_OP_CLONE_METHOD(_OpClass_##op_type##_); \ - DEFINE_OP_CONSTRUCTOR(_OpClass_##op_type##_, op_class); \ - }; \ - class _OpGradClass_##op_type##_ : public grad_op_class { \ - public: \ - DEFINE_OP_CLONE_METHOD(_OpGradClass_##op_type##_); \ - DEFINE_OP_CONSTRUCTOR(_OpGradClass_##op_type##_, grad_op_class); \ - }; \ - static ::paddle::framework::OpRegistrar< \ - _OpClass_##op_type##_, op_maker_class, _OpGradClass_##op_type##_> \ - __op_registrar_##op_type##__(#op_type, #grad_op_type); \ - int TouchOpRegistrar_##op_type() { \ - __op_registrar_##op_type##__.Touch(); \ - return 0; \ - } +#define REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, \ + grad_op_class) \ + REGISTER_OPERATOR(grad_op_type, grad_op_class); \ + class _GradOpDescMaker_##grad_op_type##_ \ + : public ::paddle::framework::DefaultGradOpDescMaker { \ + using ::paddle::framework::DefaultGradOpDescMaker::DefaultGradOpDescMaker; \ + \ + protected: \ + virtual std::string GradOpType() const { return #grad_op_type; } \ + }; \ + REGISTER_OPERATOR(op_type, op_class, _GradOpDescMaker_##grad_op_type##_, \ + op_maker_class); #define REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class) \ - REGISTER_OP(op_type, op_class, op_maker_class, , ::paddle::framework::NOP) + REGISTER_OPERATOR(op_type, op_class, op_maker_class) /** * Macro to register OperatorKernel. diff --git a/paddle/framework/tensor_array.h b/paddle/framework/tensor_array.h index 22ae6a966f90c47fe8b4bbaaf5eb227c39d84173..94a14c2df492b175cf6a643800937878e95c5f37 100644 --- a/paddle/framework/tensor_array.h +++ b/paddle/framework/tensor_array.h @@ -26,6 +26,9 @@ namespace framework { * in original lod-tensor. 
*/ struct DySeqMeta { + DySeqMeta(size_t begin, size_t end, size_t ori_idx) + : begin(begin), end(end), ori_idx(ori_idx) {} + size_t begin; size_t end; // not included size_t ori_idx; diff --git a/paddle/gserver/CMakeLists.txt b/paddle/gserver/CMakeLists.txt index 62cff9361ccba3ae3b9359ddb932f5b26146eb97..5f39167afc34affbea7858fa0794ef52b786a383 100644 --- a/paddle/gserver/CMakeLists.txt +++ b/paddle/gserver/CMakeLists.txt @@ -60,6 +60,36 @@ if(NOT WITH_PYTHON) dataproviders/PyDataProvider.h) endif() +if(MOBILE_INFERENCE) + # Remove evaluators + list(REMOVE_ITEM GSERVER_SOURCES + layers/ValidationLayer.cpp + evaluators/Evaluator.cpp + evaluators/DetectionMAPEvaluator.cpp + evaluators/CTCErrorEvaluator.cpp + evaluators/ChunkEvaluator.cpp) + + # Remove dataproviders + list(REMOVE_ITEM GSERVER_SOURCES + dataproviders/DataProvider.cpp + dataproviders/MultiDataProvider.cpp + dataproviders/ProtoDataProvider.cpp + dataproviders/PyDataProvider2.cpp + dataproviders/PyDataProvider.cpp) + + # Remove useless gradientmachines + list(REMOVE_ITEM GSERVER_SOURCES + gradientmachines/MultiNetwork.cpp + gradientmachines/RecurrentGradientMachine.cpp + gradientmachines/ParallelNeuralNetwork.cpp + gradientmachines/GradientMachineMode.cpp + gradientmachines/MultiGradientMachine.cpp) + + # Remove useless layers + list(REMOVE_ITEM GSERVER_SOURCES + layers/RecurrentLayerGroup.cpp) +endif() + if(WITH_GPU) cuda_add_library(paddle_gserver ${GSERVER_SOURCES}) else() diff --git a/paddle/gserver/gradientmachines/GradientMachine.cpp b/paddle/gserver/gradientmachines/GradientMachine.cpp index b44e4dc202f01956ed21c175aa897ced8e92546b..de5faf5e1e2b3e73bc07fe7f1635110f4efd7eec 100644 --- a/paddle/gserver/gradientmachines/GradientMachine.cpp +++ b/paddle/gserver/gradientmachines/GradientMachine.cpp @@ -17,12 +17,15 @@ limitations under the License. */ #include #include "paddle/utils/Logging.h" +#include "NeuralNetwork.h" +#include "hl_gpu.h" + +#ifndef PADDLE_MOBILE_INFERENCE #include "GradientMachineMode.h" #include "MultiGradientMachine.h" #include "MultiNetwork.h" -#include "NeuralNetwork.h" #include "ParallelNeuralNetwork.h" -#include "hl_gpu.h" +#endif namespace paddle { @@ -30,13 +33,16 @@ GradientMachine* GradientMachine::create( const ModelConfig& config, int mode, const std::vector& parameterTypes) { +#ifndef PADDLE_MOBILE_INFERENCE if (auto gm = IGradientMachineMode::tryCreateGradientMachine(mode, config)) { return gm; } if (FLAGS_trainer_count > 1) { return new MultiGradientMachine(config, FLAGS_use_gpu); } +#endif if (FLAGS_trainer_count == 1) { // single +#ifndef PADDLE_MOBILE_INFERENCE NeuralNetwork* nn; if (config.type() == "multi_nn") { /* multi submodel calculate, thread(s) will be initialized inside */ @@ -48,6 +54,9 @@ GradientMachine* GradientMachine::create( /* single thread calculate */ nn = NeuralNetwork::create(config); } +#else + NeuralNetwork* nn = NeuralNetwork::create(config); +#endif ParamInitCallback testParamInitCb = [](int paramId, Parameter* para) { para->enableType(PARAMETER_VALUE); }; diff --git a/paddle/gserver/gradientmachines/GradientMachine.h b/paddle/gserver/gradientmachines/GradientMachine.h index f9c82a2bef82b4e6bcbf0c73583505d2692f3926..ebfe0573cfdbfb2ef54a29b038e8b85356cc6c27 100644 --- a/paddle/gserver/gradientmachines/GradientMachine.h +++ b/paddle/gserver/gradientmachines/GradientMachine.h @@ -20,13 +20,16 @@ limitations under the License. 
*/ #include "ModelConfig.pb.h" #include "TrainerConfig.pb.h" #include "paddle/gserver/dataproviders/DataProvider.h" -#include "paddle/gserver/evaluators/Evaluator.h" #include "paddle/gserver/layers/Layer.h" #include "paddle/math/Matrix.h" #include "paddle/parameter/Parameter.h" #include "paddle/parameter/ParameterUpdaterBase.h" #include "paddle/utils/Thread.h" +#ifndef PADDLE_MOBILE_INFERENCE +#include "paddle/gserver/evaluators/Evaluator.h" +#endif + namespace paddle { /** * @brief A gradient machine is capable of calculating some outputs given @@ -147,6 +150,7 @@ public: virtual void onPassEnd() = 0; +#ifndef PADDLE_MOBILE_INFERENCE /** * Create an evaluator which can be used for eval() */ @@ -156,6 +160,7 @@ public: * evaluate using the given evaluator */ virtual void eval(Evaluator* evaluator) const = 0; +#endif std::vector& getParameters() { return parameters_; } diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.cpp b/paddle/gserver/gradientmachines/NeuralNetwork.cpp index 26cff3e67710b2f38d93572c5d58849aa94a5135..dcf0acb5a2cc7698625a2e254d4c12a32bc9631d 100644 --- a/paddle/gserver/gradientmachines/NeuralNetwork.cpp +++ b/paddle/gserver/gradientmachines/NeuralNetwork.cpp @@ -14,15 +14,17 @@ limitations under the License. */ #include "paddle/utils/Util.h" +#include "NeuralNetwork.h" +#include "hl_gpu.h" +#include "paddle/gserver/layers/AgentLayer.h" #include "paddle/utils/CustomStackTrace.h" #include "paddle/utils/Logging.h" +#include "paddle/utils/Stat.h" +#ifndef PADDLE_MOBILE_INFERENCE #include "MultiNetwork.h" -#include "NeuralNetwork.h" #include "RecurrentGradientMachine.h" -#include "hl_gpu.h" -#include "paddle/gserver/layers/AgentLayer.h" -#include "paddle/utils/Stat.h" +#endif namespace paddle { void parameterInitNN(int paramId, @@ -54,6 +56,7 @@ void parameterInitNN(int paramId, } NeuralNetwork* NeuralNetwork::create(const ModelConfig& config) { +#ifndef PADDLE_MOBILE_INFERENCE if (config.type() == "recurrent_nn") { return newNeuralNetwork("root"); } else if (config.type() == "multi_nn") { @@ -61,6 +64,9 @@ NeuralNetwork* NeuralNetwork::create(const ModelConfig& config) { } else { return newNeuralNetwork(); } +#else + return new NeuralNetwork(); +#endif } std::map NeuralNetwork::dllInitMap; @@ -304,6 +310,8 @@ void NeuralNetwork::onPassEnd() { } } +#ifndef PADDLE_MOBILE_INFERENCE + class CombinedEvaluator : public Evaluator { public: void addEvaluator(std::unique_ptr&& evaluator) { @@ -466,6 +474,8 @@ Evaluator* NeuralNetwork::makeEvaluator() const { void NeuralNetwork::eval(Evaluator* evaluator) const { evaluator->eval(*this); } +#endif + void NeuralNetwork::setOutputGrad(const std::vector& args) { CHECK_GE(outputLayers_.size(), args.size()); for (size_t i = 0; i < args.size(); ++i) { diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.h b/paddle/gserver/gradientmachines/NeuralNetwork.h index 12810f642519b7965fc1b7d751290445e3350dd5..56a1ec78460731554c9b47cf3f517f7654dc314f 100644 --- a/paddle/gserver/gradientmachines/NeuralNetwork.h +++ b/paddle/gserver/gradientmachines/NeuralNetwork.h @@ -97,9 +97,12 @@ public: virtual void onPassEnd(); +#ifndef PADDLE_MOBILE_INFERENCE virtual Evaluator* makeEvaluator() const; virtual void eval(Evaluator* evaluator) const; +#endif + virtual void resetState(); virtual void setOutputGrad(const std::vector& args); diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp index e95f42c863b3733ca66055e1b3939e734cae8ad1..01f2aae6cf88d47296da804061b9b039cca593db 100644 --- a/paddle/gserver/layers/Layer.cpp +++ 
b/paddle/gserver/layers/Layer.cpp @@ -15,11 +15,14 @@ limitations under the License. */ #include "paddle/utils/Util.h" #include "CostLayer.h" -#include "ValidationLayer.h" #include "paddle/math/SparseMatrix.h" #include "paddle/utils/Error.h" #include "paddle/utils/Logging.h" +#ifndef PADDLE_MOBILE_INFERENCE +#include "ValidationLayer.h" +#endif + DEFINE_bool(log_error_clipping, false, "enable log error clipping or not"); namespace paddle { @@ -103,10 +106,12 @@ LayerPtr Layer::create(const LayerConfig& config) { return LayerPtr(new MultiClassCrossEntropy(config)); else if (type == "rank-cost") return LayerPtr(new RankingCost(config)); +#ifndef PADDLE_MOBILE_INFERENCE else if (type == "auc-validation") return LayerPtr(new AucValidation(config)); else if (type == "pnpair-validation") return LayerPtr(new PnpairValidation(config)); +#endif return LayerPtr(registrar_.createByType(config.type(), config)); } diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt index de9b8e63dfc4291f8f42ca8c57cb5eb6baed8d8e..fcee19415c13e9731bd47eb53bbff9b52cf6450b 100644 --- a/paddle/gserver/tests/CMakeLists.txt +++ b/paddle/gserver/tests/CMakeLists.txt @@ -1,15 +1,17 @@ # gserver pacakge unittests +if(NOT MOBILE_INFERENCE) ################### test_ProtoDataProvider ############ -add_unittest_without_exec(test_ProtoDataProvider - test_ProtoDataProvider.cpp) - -# test_ProtoDataProvider will mkdir as same name, -# so if WORKING_DIRECTORY is default directory, then -# mkdir will get error. -add_test(NAME test_ProtoDataProvider - COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_ProtoDataProvider - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + add_unittest_without_exec(test_ProtoDataProvider + test_ProtoDataProvider.cpp) + + # test_ProtoDataProvider will mkdir as same name, + # so if WORKING_DIRECTORY is default directory, then + # mkdir will get error. + add_test(NAME test_ProtoDataProvider + COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_ProtoDataProvider + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) +endif() ################# test_LayerGrad ####################### add_unittest_without_exec(test_LayerGrad @@ -98,9 +100,11 @@ add_unittest_without_exec(test_KmaxSeqScore add_test(NAME test_KmaxSeqScore COMMAND test_KmaxSeqScore) +if(NOT MOBILE_INFERENCE) ################## test_Evaluator ####################### -add_unittest(test_Evaluator - test_Evaluator.cpp) + add_unittest(test_Evaluator + test_Evaluator.cpp) +endif() ################ test_LinearChainCRF #################### add_simple_unittest(test_LinearChainCRF) @@ -131,27 +135,31 @@ if(NOT WITH_DOUBLE) WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) endif() +if(NOT MOBILE_INFERENCE) ############### test_RecurrentGradientMachine ############### -# TODO(yuyang18): There is some bug in test_RecurrentGradientMachine -# I will fix it. 
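Note: the `MOBILE_INFERENCE` / `PADDLE_MOBILE_INFERENCE` guards introduced above exclude evaluators, data providers, and multi-machine code from the mobile build both at the CMake level and inside the gserver sources. Below is a minimal sketch of the source-level half of that pattern, using a hypothetical `Machine`/`Evaluator` pair rather than the real gserver classes:

```c++
// Sketch of compile-time exclusion via PADDLE_MOBILE_INFERENCE; hypothetical classes.
#include <iostream>

#ifndef PADDLE_MOBILE_INFERENCE
class Evaluator {
 public:
  void eval() { std::cout << "evaluating (full build only)\n"; }
};
#endif

class Machine {
 public:
  void forward() { std::cout << "forward pass (always compiled)\n"; }

#ifndef PADDLE_MOBILE_INFERENCE
  // Evaluation entry points exist only in the non-mobile build, mirroring how
  // makeEvaluator()/eval() are guarded in GradientMachine.
  Evaluator* makeEvaluator() { return new Evaluator(); }
#endif
};

int main() {
  Machine m;
  m.forward();
#ifndef PADDLE_MOBILE_INFERENCE
  Evaluator* e = m.makeEvaluator();
  e->eval();
  delete e;
#endif
  return 0;
}
```

Compiling with `-DPADDLE_MOBILE_INFERENCE` yields the inference-only variant; without it, the evaluator path is compiled in.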
-add_unittest_without_exec(test_RecurrentGradientMachine - test_RecurrentGradientMachine.cpp) -add_test(NAME test_RecurrentGradientMachine - COMMAND .set_python_path.sh -d - ${PADDLE_SOURCE_DIR}/python:${PADDLE_SOURCE_DIR}/paddle/gserver/tests - ${CMAKE_CURRENT_BINARY_DIR}/test_RecurrentGradientMachine - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) - -add_unittest_without_exec(test_NetworkCompare - test_NetworkCompare.cpp) -if(WITH_GPU) - add_test(NAME test_NetworkCompare - COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=true - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) -else() - add_test(NAME test_NetworkCompare - COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=false - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + # TODO(yuyang18): There is some bug in test_RecurrentGradientMachine + # I will fix it. + add_unittest_without_exec(test_RecurrentGradientMachine + test_RecurrentGradientMachine.cpp) + add_test(NAME test_RecurrentGradientMachine + COMMAND .set_python_path.sh -d + ${PADDLE_SOURCE_DIR}/python:${PADDLE_SOURCE_DIR}/paddle/gserver/tests + ${CMAKE_CURRENT_BINARY_DIR}/test_RecurrentGradientMachine + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) +endif() + +if(NOT MOBILE_INFERENCE) + add_unittest_without_exec(test_NetworkCompare + test_NetworkCompare.cpp) + if(WITH_GPU) + add_test(NAME test_NetworkCompare + COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=true + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + else() + add_test(NAME test_NetworkCompare + COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=false + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + endif() endif() diff --git a/paddle/gserver/tests/LayerGradUtil.h b/paddle/gserver/tests/LayerGradUtil.h index 88e831f78bd165f63806df6c081d84411be51502..e10a27eedfa3d207d77a9bf1c5bfb23480dcca69 100644 --- a/paddle/gserver/tests/LayerGradUtil.h +++ b/paddle/gserver/tests/LayerGradUtil.h @@ -15,7 +15,6 @@ limitations under the License. */ #pragma once #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "paddle/testing/TestUtil.h" using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_ActivationGrad.cpp b/paddle/gserver/tests/test_ActivationGrad.cpp index de93972a5880518dfbfb9f8582e17c594e54b9b8..f4c2a07c4426da36ff0b0570339a3a972dadec1f 100644 --- a/paddle/gserver/tests/test_ActivationGrad.cpp +++ b/paddle/gserver/tests/test_ActivationGrad.cpp @@ -17,7 +17,6 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/gserver/tests/test_BatchNorm.cpp index 050fde9d0af54e6d3fd46e19903b926019ddd229..41116f480957153eca33d211d09095903d6a00d9 100644 --- a/paddle/gserver/tests/test_BatchNorm.cpp +++ b/paddle/gserver/tests/test_BatchNorm.cpp @@ -17,7 +17,6 @@ limitations under the License. 
*/ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" diff --git a/paddle/gserver/tests/test_CRFLayerGrad.cpp b/paddle/gserver/tests/test_CRFLayerGrad.cpp index df14449291e9ec08f45718de07bbb101f6dbea58..f010066ebc6c33eff17715ba20b4e238583f1966 100644 --- a/paddle/gserver/tests/test_CRFLayerGrad.cpp +++ b/paddle/gserver/tests/test_CRFLayerGrad.cpp @@ -16,7 +16,6 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" #include "paddle/gserver/layers/LinearChainCRF.h" -#include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" diff --git a/paddle/gserver/tests/test_ConvTrans.cpp b/paddle/gserver/tests/test_ConvTrans.cpp index 6035a866b4eee4c6a61fa93f3adbf5e1d2d549f7..5f2f9665478ad4bdfb00421ec57b3ecc1b41b417 100644 --- a/paddle/gserver/tests/test_ConvTrans.cpp +++ b/paddle/gserver/tests/test_ConvTrans.cpp @@ -18,7 +18,6 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" #include "paddle/math/MathUtils.h" -#include "paddle/trainer/Trainer.h" #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" diff --git a/paddle/gserver/tests/test_ConvUnify.cpp b/paddle/gserver/tests/test_ConvUnify.cpp index ffcc47e2a84356807b6591ba11e670d6d3f336ee..8634355b5206f5cde0aa0717df50ade39e173ae7 100644 --- a/paddle/gserver/tests/test_ConvUnify.cpp +++ b/paddle/gserver/tests/test_ConvUnify.cpp @@ -18,7 +18,6 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" #include "paddle/math/MathUtils.h" -#include "paddle/trainer/Trainer.h" #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" diff --git a/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp b/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp index c922237d33da5de0ece61df732334bee5592249d..477638426fe91f2c5b1f4d5011496385f07c2e90 100644 --- a/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp +++ b/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp @@ -18,7 +18,6 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" diff --git a/paddle/gserver/tests/test_KmaxSeqScore.cpp b/paddle/gserver/tests/test_KmaxSeqScore.cpp index 6386259882f8c70da59e22574c60fadda636aa42..ffe5cfb8dbb55d0b70a5699969abaa101f05f9ce 100644 --- a/paddle/gserver/tests/test_KmaxSeqScore.cpp +++ b/paddle/gserver/tests/test_KmaxSeqScore.cpp @@ -18,7 +18,6 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 90a3352898863a66819e23dd9ecba375ae104a60..1a46fb49153a0aa4228f58db481b950bc2d6de83 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -21,7 +21,6 @@ limitations under the License. 
*/ #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" #include "paddle/math/MathUtils.h" -#include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" diff --git a/paddle/gserver/tests/test_SelectiveFCLayer.cpp b/paddle/gserver/tests/test_SelectiveFCLayer.cpp index 4c87fe1bba1eff3c081754b3b255a0d7d1b3dfbe..d164e382c4a804aef2417135b64cf709474d12f1 100644 --- a/paddle/gserver/tests/test_SelectiveFCLayer.cpp +++ b/paddle/gserver/tests/test_SelectiveFCLayer.cpp @@ -24,7 +24,6 @@ limitations under the License. */ #include "paddle/gserver/layers/Layer.h" #include "paddle/gserver/layers/SelectiveFullyConnectedLayer.h" #include "paddle/math/CpuSparseMatrix.h" -#include "paddle/trainer/Trainer.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp b/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp index 3366002ca113a3fd1c8a8c597e619a50f1e29931..3dbffc563462973bdc1da529d486b2a2d5a677d3 100644 --- a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp +++ b/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp @@ -15,7 +15,6 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index 5f2ecc2673a5bf87d07e99faf60d585fb02621d0..2afa8a68b005f87dab952b59cf6d2e962049e0d9 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -97,6 +97,17 @@ class TanhOpMaker : public framework::OpProtoAndCheckerMaker { } }; +class TanhShrinkOpMaker : public framework::OpProtoAndCheckerMaker { + public: + TanhShrinkOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input of TanhShrink operator"); + AddOutput("Y", "Output of TanhShrink operator"); + AddComment("TanhShrink activation operator, tanhshrink(x) = x - tanh(x)"); + } +}; + class SqrtOpMaker : public framework::OpProtoAndCheckerMaker { public: SqrtOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) @@ -235,6 +246,9 @@ REGISTER_OP(relu, ops::ActivationOp, ops::ReluOpMaker, relu_grad, REGISTER_OP(tanh, ops::ActivationOp, ops::TanhOpMaker, tanh_grad, ops::ActivationOpGrad); +REGISTER_OP(tanh_shrink, ops::ActivationOp, ops::TanhShrinkOpMaker, + tanh_shrink_grad, ops::ActivationOpGrad); + REGISTER_OP(sqrt, ops::ActivationOp, ops::SqrtOpMaker, sqrt_grad, ops::ActivationOpGrad); @@ -271,11 +285,9 @@ REGISTER_OP(stanh, ops::ActivationOp, ops::STanhOpMaker, stanh_grad, #define REGISTER_ACTIVATION_CPU_KERNEL(act_type, functor, grad_functor) \ REGISTER_OP_CPU_KERNEL( \ act_type, \ - paddle::operators::ActivationKernel>); \ + ops::ActivationKernel>); \ REGISTER_OP_CPU_KERNEL(act_type##_grad, \ - paddle::operators::ActivationGradKernel< \ - paddle::platform::CPUPlace, \ - paddle::operators::grad_functor>); + ops::ActivationGradKernel>); FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_CPU_KERNEL); diff --git a/paddle/operators/activation_op.cu b/paddle/operators/activation_op.cu index 93e9f1c694bacba48c4f8c46f90fb5b512bead99..7b7644519d4e9cadcc4ca62ccb599262feffa660 100644 --- a/paddle/operators/activation_op.cu +++ b/paddle/operators/activation_op.cu @@ -15,14 +15,14 @@ #define EIGEN_USE_GPU #include "paddle/operators/activation_op.h" +namespace ops = paddle::operators; + 
#define REGISTER_ACTIVATION_GPU_KERNEL(act_type, functor, grad_functor) \ REGISTER_OP_GPU_KERNEL( \ act_type, \ - paddle::operators::ActivationKernel>); \ + ops::ActivationKernel>); \ REGISTER_OP_GPU_KERNEL(act_type##_grad, \ - paddle::operators::ActivationGradKernel< \ - paddle::platform::GPUPlace, \ - paddle::operators::grad_functor>); + ops::ActivationGradKernel>); FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_GPU_KERNEL); diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h index dae66cc77d9103a6a2b13e69b3f014f8de313209..245060174224c5e24f75adf4ddc9a6db29101d74 100644 --- a/paddle/operators/activation_op.h +++ b/paddle/operators/activation_op.h @@ -146,6 +146,24 @@ struct TanhGradFunctor : public BaseActivationFunctor { } }; +// tanhshrink(x) = x - tanh(x) +// where tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)) +template +struct TanhShrinkFunctor : public BaseActivationFunctor { + template + void operator()(Device d, X x, Y y) const { + y.device(d) = x - x.tanh(); + } +}; + +template +struct TanhShrinkGradFunctor : public BaseActivationFunctor { + template + void operator()(Device d, X x, Y y, dY dy, dX dx) const { + dx.device(d) = dy * (x.tanh() * x.tanh()); + } +}; + // sqrt(x) = x^(1/2) template struct SqrtFunctor : public BaseActivationFunctor { @@ -407,4 +425,5 @@ struct STanhGradFunctor : public BaseActivationFunctor { __macro(pow, PowFunctor, PowGradFunctor); \ __macro(stanh, STanhFunctor, STanhGradFunctor); \ __macro(softsign, SoftsignFunctor, SoftsignGradFunctor); \ - __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor) + __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor); \ + __macro(tanh_shrink, TanhShrinkFunctor, TanhShrinkGradFunctor) diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc index d799239d4ed6d230578c77921a1a454b476b63fa..2332c9546b037c94a5a6d30319abda8e23c2b3bb 100644 --- a/paddle/operators/mean_op.cc +++ b/paddle/operators/mean_op.cc @@ -36,7 +36,7 @@ class MeanOpMaker : public framework::OpProtoAndCheckerMaker { MeanOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of mean op"); - AddOutput("Out", "The output of mean op").NotInGradient(); + AddOutput("Out", "The output of mean op"); AddComment(R"DOC( Mean Operator )DOC"); } @@ -52,11 +52,27 @@ class MeanGradOp : public framework::OperatorWithKernel { } }; +class MeanGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto* grad_op = new framework::OpDescBind(); + grad_op->SetType("mean_grad"); + grad_op->SetInput("X", Input("X")); + grad_op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + grad_op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + return std::unique_ptr(grad_op); + } +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(mean, ops::MeanOp, ops::MeanOpMaker, mean_grad, ops::MeanGradOp); +REGISTER_OPERATOR(mean, ops::MeanOp, ops::MeanOpMaker, ops::MeanGradMaker); +REGISTER_OPERATOR(mean_grad, ops::MeanGradOp); REGISTER_OP_CPU_KERNEL(mean, ops::MeanKernel); REGISTER_OP_CPU_KERNEL(mean_grad, diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index ce049d4d7bd96a6758d71b381e6e6b4edbcc8b5c..7057dcbd6e375adef57d17a13afdfade67e938b6 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -49,9 +49,9 
@@ class MinusOpMaker : public framework::OpProtoAndCheckerMaker { public: MinusOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The left tensor of minus operator.").NotInGradient(); - AddInput("Y", "The right tensor of minus operator.").NotInGradient(); - AddOutput("Out", "The output tensor of minus operator.").NotInGradient(); + AddInput("X", "The left tensor of minus operator."); + AddInput("Y", "The right tensor of minus operator."); + AddOutput("Out", "The output tensor of minus operator."); AddComment(R"DOC(Minus Operator @@ -64,26 +64,35 @@ or not. But the output only shares the LoD with input `X`. )DOC"); } }; -template -class MinusGradOp : public NetOp { + +class MinusGradMaker : public framework::GradOpDescMakerBase { public: - MinusGradOp(const std::string &type, const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs) - : NetOp(type, inputs, outputs, attrs) { - auto out_grad = Input(framework::GradVarName("Out")); - auto x_grad = Output(framework::GradVarName("X")); - auto y_grad = Output(framework::GradVarName("Y")); - - // x_grad = out_grad - AppendOp(framework::OpRegistry::CreateOp("identity", {{"X", {out_grad}}}, - {{"Y", {x_grad}}}, {})); - - framework::AttributeMap scale_attr; - scale_attr["scale"] = static_cast(-1); - AppendOp(framework::OpRegistry::CreateOp("scale", {{"X", {out_grad}}}, - {{"Out", {y_grad}}}, scale_attr)); - CompleteAddOp(false); + using framework::GradOpDescMakerBase::GradOpDescMakerBase; + + std::vector> operator()() + const override { + std::vector> ops; + auto x_g = InputGrad("X"); + if (!x_g.empty()) { + auto *x_g_op = new framework::OpDescBind(); + x_g_op->SetType("scale"); + x_g_op->SetInput("X", OutputGrad("Out")); + x_g_op->SetOutput("Out", x_g); + x_g_op->SetAttr("scale", 1.0f); + ops.emplace_back(x_g_op); + } + + auto y_g = InputGrad("Y"); + if (!y_g.empty()) { + auto *y_g_op = new framework::OpDescBind(); + y_g_op->SetType("scale"); + y_g_op->SetInput("X", OutputGrad("Out")); + y_g_op->SetOutput("Out", y_g); + y_g_op->SetAttr("scale", -1.0f); + ops.emplace_back(y_g_op); + } + + return ops; } }; @@ -91,7 +100,6 @@ class MinusGradOp : public NetOp { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(minus, ops::MinusOp, ops::MinusOpMaker, minus_grad, - ops::MinusGradOp); +REGISTER_OPERATOR(minus, ops::MinusOp, ops::MinusOpMaker, ops::MinusGradMaker); REGISTER_OP_CPU_KERNEL(minus, ops::MinusKernel); diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 04ebb14f6ee6c73f48aa2f75811a22f9b8a25006..15aa05f26610be14e4c37be35137a259e00eb947 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -56,8 +56,7 @@ class PadOpMaker : public framework::OpProtoAndCheckerMaker { "The input should be a k-D tensor(k > 0 and k < 7)"); AddOutput("Out", "The output of pad op." - "A tensor with the same shape as X.") - .NotInGradient(); + "A tensor with the same shape as X."); AddComment(R"DOC( Pad input into output, as specified by paddings and pad_value. The input should be a k-D tensor(k > 0 and k < 7). 
As an example: @@ -111,11 +110,29 @@ class PadOpGrad : public framework::OperatorWithKernel { } }; +class PadOpGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto* bind = new framework::OpDescBind(); + bind->SetInput("X", Input("X")); + bind->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + bind->SetOutput(framework::GradVarName("X"), InputGrad("X")); + bind->SetAttrMap(Attrs()); + bind->SetType("pad_grad"); + return std::unique_ptr(bind); + } +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(pad, ops::PadOp, ops::PadOpMaker, pad_grad, ops::PadOpGrad); + +REGISTER_OPERATOR(pad, ops::PadOp, ops::PadOpMaker, ops::PadOpGradMaker); +REGISTER_OPERATOR(pad_grad, ops::PadOpGrad); REGISTER_OP_CPU_KERNEL(pad, ops::PadKernel); REGISTER_OP_CPU_KERNEL(pad_grad, ops::PadGradKernel); diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc index 3ef443d1c7f475cbd578078db02fb5e0d500d060..55f294a9be6f441e2d0b3bd6f7854a9d07a4ee24 100644 --- a/paddle/operators/reduce_op.cc +++ b/paddle/operators/reduce_op.cc @@ -168,36 +168,22 @@ namespace ops = paddle::operators; REGISTER_OP(reduce_sum, ops::ReduceOp, ops::ReduceSumOpMaker, reduce_sum_grad, ops::ReduceGradOp); -REGISTER_OP_CPU_KERNEL( - reduce_sum, - ops::ReduceKernel); -REGISTER_OP_CPU_KERNEL(reduce_sum_grad, - ops::ReduceGradKernel); REGISTER_OP(reduce_mean, ops::ReduceOp, ops::ReduceMeanOpMaker, reduce_mean_grad, ops::ReduceGradOp); -REGISTER_OP_CPU_KERNEL( - reduce_mean, - ops::ReduceKernel); -REGISTER_OP_CPU_KERNEL(reduce_mean_grad, - ops::ReduceGradKernel); REGISTER_OP(reduce_max, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_max_grad, ops::ReduceGradOp); -REGISTER_OP_CPU_KERNEL( - reduce_max, - ops::ReduceKernel); -REGISTER_OP_CPU_KERNEL(reduce_max_grad, - ops::ReduceGradKernel); - -REGISTER_OP(reduce_min, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_min_grad, + +REGISTER_OP(reduce_min, ops::ReduceOp, ops::ReduceMinOpMaker, reduce_min_grad, ops::ReduceGradOp); -REGISTER_OP_CPU_KERNEL( - reduce_min, - ops::ReduceKernel); -REGISTER_OP_CPU_KERNEL(reduce_min_grad, - ops::ReduceGradKernel); + +#define REGISTER_REDUCE_CPU_KERNEL(reduce_type, functor, grad_functor) \ + REGISTER_OP_CPU_KERNEL( \ + reduce_type, \ + ops::ReduceKernel); \ + REGISTER_OP_CPU_KERNEL(reduce_type##_grad, \ + ops::ReduceGradKernel); + +FOR_EACH_KERNEL_FUNCTOR(REGISTER_REDUCE_CPU_KERNEL); diff --git a/paddle/operators/reduce_op.cu b/paddle/operators/reduce_op.cu index 595127b858ea8eb41281f92e92c6467e4d90ff1a..d306e1a24096d737438d71d4d4abc35328d160cb 100644 --- a/paddle/operators/reduce_op.cu +++ b/paddle/operators/reduce_op.cu @@ -17,30 +17,12 @@ namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( - reduce_sum, - ops::ReduceKernel); -REGISTER_OP_GPU_KERNEL(reduce_sum_grad, - ops::ReduceGradKernel); - -REGISTER_OP_GPU_KERNEL( - reduce_mean, - ops::ReduceKernel); -REGISTER_OP_GPU_KERNEL(reduce_mean_grad, - ops::ReduceGradKernel); - -REGISTER_OP_GPU_KERNEL( - reduce_max, - ops::ReduceKernel); -REGISTER_OP_GPU_KERNEL(reduce_max_grad, - ops::ReduceGradKernel); - -REGISTER_OP_GPU_KERNEL( - reduce_min, - ops::ReduceKernel); -REGISTER_OP_GPU_KERNEL(reduce_min_grad, - ops::ReduceGradKernel); +#define REGISTER_REDUCE_GPU_KERNEL(reduce_type, functor, grad_functor) \ + REGISTER_OP_GPU_KERNEL( \ + reduce_type, \ + ops::ReduceKernel); \ + 
REGISTER_OP_GPU_KERNEL(reduce_type##_grad, \ + ops::ReduceGradKernel); + +FOR_EACH_KERNEL_FUNCTOR(REGISTER_REDUCE_GPU_KERNEL); diff --git a/paddle/operators/reduce_op.h b/paddle/operators/reduce_op.h index ba3f3db81dc6251a063d27e597fd7e486e7b6c14..45043c440bc8017e97f8be00d08f1cb60d201e20 100644 --- a/paddle/operators/reduce_op.h +++ b/paddle/operators/reduce_op.h @@ -198,3 +198,9 @@ class ReduceGradKernel : public framework::OpKernel { } // namespace operators } // namespace paddle + +#define FOR_EACH_KERNEL_FUNCTOR(__macro) \ + __macro(reduce_sum, SumFunctor, SumGradFunctor); \ + __macro(reduce_mean, MeanFunctor, MeanGradFunctor); \ + __macro(reduce_max, MaxFunctor, MaxOrMinGradFunctor); \ + __macro(reduce_min, MinFunctor, MaxOrMinGradFunctor); diff --git a/paddle/operators/scale_op.cc b/paddle/operators/scale_op.cc index e92501e12834b92875f494de401672344f50e3b5..e225aecc270bc17c535c10253c970b888c42e5d3 100644 --- a/paddle/operators/scale_op.cc +++ b/paddle/operators/scale_op.cc @@ -41,8 +41,8 @@ class ScaleOpMaker : public framework::OpProtoAndCheckerMaker { public: ScaleOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The input tensor of scale operator.").NotInGradient(); - AddOutput("Out", "The output tensor of scale operator.").NotInGradient(); + AddInput("X", "The input tensor of scale operator."); + AddOutput("Out", "The output tensor of scale operator."); AddComment(R"DOC(Scale operator The equation is: Out = scale*X @@ -52,21 +52,18 @@ The equation is: Out = scale*X } }; -// The operator to calculate gradients of a scale operator is just the scale -// operator itself. -// Grad(Out=scale(X)) => Grad(X) = scale(Grad(Out)) -template -class ScaleGradOp : public NetOp { +class ScaleGradMaker : public framework::SingleGradOpDescMaker { public: - ScaleGradOp(const std::string &type, const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs) - : NetOp(type, inputs, outputs, attrs) { - AppendOp(framework::OpRegistry::CreateOp( - "scale", {{"X", {Input(framework::GradVarName("Out"))}}}, - {{"Out", {Output(framework::GradVarName("X"))}}}, - {{"scale", Attr("scale")}})); - CompleteAddOp(false); + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDescBind(); + grad_op->SetType("scale"); + grad_op->SetInput("X", OutputGrad("Out")); + grad_op->SetOutput("Out", InputGrad("X")); + grad_op->SetAttr("scale", GetAttr("scale")); + return std::unique_ptr(grad_op); } }; @@ -75,7 +72,7 @@ class ScaleGradOp : public NetOp { namespace ops = paddle::operators; -REGISTER_OP(scale, ops::ScaleOp, ops::ScaleOpMaker, scale_grad, - ops::ScaleGradOp); +REGISTER_OPERATOR(scale, ops::ScaleOp, ops::ScaleOpMaker, + ops::ScaleGradMaker); REGISTER_OP_CPU_KERNEL(scale, ops::ScaleKernel); diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc index a76489871f30dc8d852b6a783efeff41704fd4a4..42c1ba6fdf1351c43ef78efaaf05c54acb54ce94 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/operators/softmax_with_cross_entropy_op.cc @@ -14,6 +14,7 @@ #include "paddle/operators/softmax_with_cross_entropy_op.h" #include +#include namespace paddle { namespace operators { @@ -27,15 +28,14 @@ class SoftmaxWithCrossEntropyOpMaker AddInput("Logits", "(Tensor, default: Tensor), The unscaled log 
probabilities " "which is a 2-D tensor with shape [N x K]. N is the batch_size, " - "and K is the class number.") - .NotInGradient(); - AddInput( - "Label", - "(Tensor, default: Tensor), The ground truth which is a 2-D " - "tensor. " - "If softLable is set to 0, Label is a Tensor with shape [N x 1]. " - "If softLable is set to 1, Label is a Tensor " - "with shape [N x K]."); + "and K is the class number."); + AddInput("Label", + "(Tensor, default: Tensor), The ground truth which is a 2-D " + "tensor. " + "If softLable is set to 0, Label is a Tensor with shape [N x " + "1]. " + "If softLable is set to 1, Label is a Tensor " + "with shape [N x K]."); AddOutput( "Softmax", "(Tensor, default: Tensor), A 2-D tensor with shape [N x K]. " @@ -163,15 +163,34 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { } }; +class SoftmaxGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto* grad_op = new framework::OpDescBind(); + grad_op->SetType("softmax_with_cross_entropy_grad"); + grad_op->SetInput("Label", Input("Label")); + grad_op->SetInput("Softmax", Output("Softmax")); + grad_op->SetInput("Loss", Output("Loss")); + grad_op->SetInput(framework::GradVarName("Softmax"), OutputGrad("Softmax")); + grad_op->SetInput(framework::GradVarName("Loss"), OutputGrad("Loss")); + grad_op->SetOutput(framework::GradVarName("Logits"), InputGrad("Logits")); + grad_op->SetAttrMap(Attrs()); + return std::unique_ptr(grad_op); + } +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyOp, - ops::SoftmaxWithCrossEntropyOpMaker, - softmax_with_cross_entropy_grad, - ops::SoftmaxWithCrossEntropyOpGrad); +REGISTER_OPERATOR(softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyOp, + ops::SoftmaxWithCrossEntropyOpMaker, ops::SoftmaxGradMaker); +REGISTER_OPERATOR(softmax_with_cross_entropy_grad, + ops::SoftmaxWithCrossEntropyOpGrad); REGISTER_OP_CPU_KERNEL(softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyKernel); REGISTER_OP_CPU_KERNEL(softmax_with_cross_entropy_grad, diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc index 7c422b477083fba4661aeb427422abb623b172bb..c701ee8dde26f0fd50fae227d3f345df2d7a119d 100644 --- a/paddle/operators/sum_op.cc +++ b/paddle/operators/sum_op.cc @@ -45,10 +45,8 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker { public: SumOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "the input tensors of sum operator.") - .AsDuplicable() - .NotInGradient(); - AddOutput("Out", "the output tensor of sum operator.").NotInGradient(); + AddInput("X", "the input tensors of sum operator.").AsDuplicable(); + AddOutput("Out", "the output tensor of sum operator."); AddComment(R"DOC( Sum the input tensors. @@ -58,23 +56,26 @@ or not. But the output only shares the LoD with the first input. 
diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc
index 7c422b477083fba4661aeb427422abb623b172bb..c701ee8dde26f0fd50fae227d3f345df2d7a119d 100644
--- a/paddle/operators/sum_op.cc
+++ b/paddle/operators/sum_op.cc
@@ -45,10 +45,8 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   SumOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "the input tensors of sum operator.")
-        .AsDuplicable()
-        .NotInGradient();
-    AddOutput("Out", "the output tensor of sum operator.").NotInGradient();
+    AddInput("X", "the input tensors of sum operator.").AsDuplicable();
+    AddOutput("Out", "the output tensor of sum operator.");
     AddComment(R"DOC(
 Sum the input tensors.
 
@@ -58,23 +56,26 @@ or not. But the output only shares the LoD with the first input.
   }
 };
 
-class SumGradOp : public NetOp {
+class SumGradMaker : public framework::GradOpDescMakerBase {
  public:
-  SumGradOp(const std::string& type, const framework::VariableNameMap& inputs,
-            const framework::VariableNameMap& outputs,
-            const framework::AttributeMap& attrs)
-      : NetOp(type, inputs, outputs, attrs) {
-    auto& x_grad_names = Outputs(framework::GradVarName("X"));
-    auto out_grad_name = this->Input(framework::GradVarName("Out"));
+  using framework::GradOpDescMakerBase::GradOpDescMakerBase;
 
-    framework::AttributeMap grad_attrs;
-    grad_attrs["scale"] = 1.0f;
-    for (auto& x_grad_name : x_grad_names) {
-      AppendOp(framework::OpRegistry::CreateOp(
-          "scale", {{"X", {out_grad_name}}}, {{"Out", {x_grad_name}}},
-          grad_attrs));
-    }
-    CompleteAddOp(false);
+  std::vector<std::unique_ptr<framework::OpDescBind>> operator()()
+      const override {
+    auto x_grads = InputGrad("X");
+    std::vector<std::unique_ptr<framework::OpDescBind>> grad_ops;
+    grad_ops.reserve(x_grads.size());
+    auto og = OutputGrad("Out");
+    std::transform(x_grads.begin(), x_grads.end(), std::back_inserter(grad_ops),
+                   [&og](const std::string& x_grad) {
+                     auto* grad_op = new framework::OpDescBind();
+                     grad_op->SetType("scale");
+                     grad_op->SetInput("X", og);
+                     grad_op->SetOutput("Out", {x_grad});
+                     grad_op->SetAttr("scale", 1.0f);
+                     return std::unique_ptr<framework::OpDescBind>(grad_op);
+                   });
+    return grad_ops;
   }
 };
 
@@ -82,5 +83,6 @@ class SumGradOp : public NetOp {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(sum, ops::SumOp, ops::SumOpMaker, sum_grad, ops::SumGradOp);
+
+REGISTER_OPERATOR(sum, ops::SumOp, ops::SumOpMaker, ops::SumGradMaker);
 REGISTER_OP_CPU_KERNEL(sum, ops::SumKernel);
diff --git a/paddle/pybind/CMakeLists.txt b/paddle/pybind/CMakeLists.txt
index 18ecbd1aa34c82d63ae7f8ec1bd8f81b35eee30b..97364f2db9523c0629616692631d8372657a2128 100644
--- a/paddle/pybind/CMakeLists.txt
+++ b/paddle/pybind/CMakeLists.txt
@@ -1,6 +1,6 @@
 if(WITH_PYTHON)
   cc_library(paddle_pybind SHARED
     SRCS pybind.cc exception.cc protobuf.cc
-    DEPS pybind python backward proto_desc
+    DEPS pybind python backward proto_desc tensor_array
          ${GLOB_OP_LIB})
 endif(WITH_PYTHON)
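sum_op.cc derives from GradOpDescMakerBase rather than SingleGradOpDescMaker because sum takes a variable number of inputs and its backward pass is one op per input: with Out = sum(X_i), each X_i@GRAD is just a copy of Out@GRAD, expressed in the patch as a `scale` op with `scale = 1`. Below is a standalone sketch of that fan-out, again using a simplified `OpDesc` stand-in rather than the framework's OpDescBind:

```c++
#include <algorithm>
#include <iostream>
#include <iterator>
#include <memory>
#include <string>
#include <vector>

struct OpDesc {  // simplified stand-in for an operator description
  std::string type, input, output;
  float scale;
};

// For Out = sum(X_1..X_n), emit one identity-like "scale" op per input gradient.
std::vector<std::unique_ptr<OpDesc>> MakeSumGrad(
    const std::vector<std::string>& x_grads, const std::string& out_grad) {
  std::vector<std::unique_ptr<OpDesc>> ops;
  ops.reserve(x_grads.size());
  std::transform(x_grads.begin(), x_grads.end(), std::back_inserter(ops),
                 [&out_grad](const std::string& x_grad) {
                   auto op = std::unique_ptr<OpDesc>(new OpDesc);
                   op->type = "scale";
                   op->input = out_grad;
                   op->output = x_grad;
                   op->scale = 1.0f;  // d(sum)/dX_i is the identity
                   return op;
                 });
  return ops;
}

int main() {
  auto ops = MakeSumGrad({"x0@GRAD", "x1@GRAD", "x2@GRAD"}, "out@GRAD");
  for (const auto& op : ops)
    std::cout << op->type << ": " << op->input << " -> " << op->output << "\n";
}
```

The structure mirrors the patched SumGradMaker: the number of emitted descriptions is decided at the time the gradient is requested, which the old fixed-arity NetOp construction could only achieve by building the network eagerly.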
diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index 38ba450447386b44ee8abe71c3c8b6427bbc398c..356c4986e2e182e904215f7ebb8cac5146364f8b 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -16,6 +16,7 @@ limitations under the License. */
 
 #include "paddle/framework/backward.h"
 #include "paddle/framework/lod_tensor.h"
+#include "paddle/framework/tensor_array.h"
 #include "paddle/operators/cond_op.h"
 #include "paddle/operators/net_op.h"
 #include "paddle/operators/recurrent_op.h"
@@ -286,6 +287,56 @@ All parameter, weight, gradient are variables in Paddle.
                       self->CompleteAddOp();
                     });
 
+  py::class_<TensorArray>(m, "TensorArray")
+      .def("__init__",
+           [](TensorArray &instance) { new (&instance) TensorArray(); })
+      .def("read",
+           [](TensorArray &self, size_t index) { return self.Read(index); })
+      .def("write", [](TensorArray &self, size_t index,
+                       LoDTensor &value) { self.Write(index, value); })
+      .def("write_shared",
+           [](TensorArray &self, size_t index, const LoDTensor &value) {
+             self.WriteShared(index, value);
+           })
+      .def("size", [](TensorArray &self) { return self.size(); })
+      .def("pack",
+           [](TensorArray &self, size_t level,
+              const std::vector<std::vector<size_t>> &meta_info,
+              const std::vector<std::vector<size_t>> &lod) {
+             std::vector<DySeqMeta> meta;
+             for (auto &info : meta_info) {
+               PADDLE_ENFORCE_EQ(info.size(), 3UL);
+               meta.emplace_back(info[0], info[1], info[2]);
+             }
+#ifndef PADDLE_WITH_CUDA
+             return self.Pack(level, meta, lod);
+#else
+             LoD new_lod;
+             new_lod.reserve(lod.size());
+             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
+             return self.Pack(level, meta, new_lod);
+#endif
+           })
+      .def("unpack",
+           [](TensorArray &self, const LoDTensor &source, int level,
+              bool length_descend) {
+             auto metas = self.Unpack(source, level, length_descend);
+             std::vector<std::vector<size_t>> meta_info;
+             for (auto meta : metas) {
+               meta_info.emplace_back(
+                   std::vector<size_t>({meta.begin, meta.end, meta.ori_idx}));
+             }
+             return meta_info;
+           })
+      .def("stack", [](TensorArray &self) { return self.Stack(); })
+      .def("unstack",
+           [](TensorArray &self, const LoDTensor &source) {
+             return self.Unstack(source);
+           })
+      .def("unstack_shared", [](TensorArray &self, const LoDTensor &source) {
+        return self.UnstackShared(source);
+      });
+
   // recurrent_op
   py::class_(m, "RecurrentOp")
       .def_static(
diff --git a/python/paddle/v2/event.py b/python/paddle/v2/event.py
index e66bf67d7949057486eb54c46f39128fad5dae55..a0ffd31c545eb10dd8c2f14746ee90df58700e61 100644
--- a/python/paddle/v2/event.py
+++ b/python/paddle/v2/event.py
@@ -10,7 +10,8 @@ There are:
 * EndPass
 """
 __all__ = [
-    'EndIteration', 'BeginIteration', 'BeginPass', 'EndPass', 'TestResult'
+    'EndIteration', 'BeginIteration', 'BeginPass', 'EndPass', 'TestResult',
+    'EndForwardBackward'
 ]
 
 
@@ -73,6 +74,17 @@ class BeginIteration(object):
         self.batch_id = batch_id
 
 
+class EndForwardBackward(object):
+    """
+    Event On One Batch ForwardBackward Complete.
+    """
+
+    def __init__(self, pass_id, batch_id, gm):
+        self.pass_id = pass_id
+        self.batch_id = batch_id
+        self.gm = gm
+
+
 class EndIteration(WithMetric):
     """
     Event On One Batch Training Complete.
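The pybind.cc hunk exposes TensorArray through the usual pybind11 lambda idiom: each Python method is a small C++ lambda that takes the bound object by reference, so the Python names (`read`, `write`, `pack`, ...) stay decoupled from the C++ method names (`Read`, `Write`, `Pack`, ...) and conversions can be done inline. A minimal self-contained illustration of that idiom with a toy class; the module name `example` and the `Stack` type are made up for this sketch and are not part of Paddle:

```c++
#include <pybind11/pybind11.h>

#include <algorithm>
#include <vector>

namespace py = pybind11;

struct Stack {  // toy class standing in for something like TensorArray
  std::vector<double> data;
  void Write(size_t i, double v) {
    data.resize(std::max(data.size(), i + 1));
    data[i] = v;
  }
  double Read(size_t i) const { return data.at(i); }
  size_t size() const { return data.size(); }
};

PYBIND11_MODULE(example, m) {
  py::class_<Stack>(m, "Stack")
      .def(py::init<>())
      // Each Python method is a lambda over the bound object, as in the patch.
      .def("write", [](Stack &self, size_t i, double v) { self.Write(i, v); })
      .def("read", [](Stack &self, size_t i) { return self.Read(i); })
      .def("size", [](Stack &self) { return self.size(); });
}
```

The `__init__` binding in the patch uses the older placement-new style; with pybind11 2.2 or later the equivalent is `.def(py::init<>())`, as in the sketch.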
diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/framework/tests/test_activation_op.py
index f232996a55da86399140ea2aad27428926c1a055..701e1a1aeec2746643fbd5432dadfd6bc46f358f 100644
--- a/python/paddle/v2/framework/tests/test_activation_op.py
+++ b/python/paddle/v2/framework/tests/test_activation_op.py
@@ -48,6 +48,21 @@ class TestTanh(OpTest):
         self.check_grad(['X'], 'Y', max_relative_error=0.007)
 
 
+class TestTanhShrink(OpTest):
+    def setUp(self):
+        self.op_type = "tanh_shrink"
+        self.inputs = {
+            'X': np.random.uniform(0.1, 1, [10, 17]).astype("float32")
+        }
+        self.outputs = {'Y': self.inputs['X'] - np.tanh(self.inputs['X'])}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Y', max_relative_error=0.008)
+
+
 class TestSqrt(OpTest):
     def setUp(self):
         self.op_type = "sqrt"
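TestTanhShrink pins down the forward definition Y = X - tanh(X); the matching analytic gradient a kernel would implement is dY/dX = 1 - (1 - tanh^2(X)) = tanh^2(X), and `check_grad` compares that against a numeric gradient, which is what the small `max_relative_error` bound accounts for. A quick standalone check of the identity against a central difference:

```c++
#include <cmath>
#include <cstdio>

double tanh_shrink(double x) { return x - std::tanh(x); }

int main() {
  const double x = 0.37, eps = 1e-6;
  // d/dx (x - tanh x) = 1 - sech^2(x) = tanh^2(x)
  double analytic = std::tanh(x) * std::tanh(x);
  double numeric = (tanh_shrink(x + eps) - tanh_shrink(x - eps)) / (2 * eps);
  std::printf("analytic=%.8f numeric=%.8f rel.err=%.2e\n", analytic, numeric,
              std::fabs(analytic - numeric) / std::fabs(analytic));
}
```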
diff --git a/python/paddle/v2/framework/tests/test_tensor_array.py b/python/paddle/v2/framework/tests/test_tensor_array.py
new file mode 100644
index 0000000000000000000000000000000000000000..11f8a01f9224fcbd6dd6cbc8c37cc81036ad3e07
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_tensor_array.py
@@ -0,0 +1,106 @@
+import logging
+import paddle.v2.framework.core as core
+import unittest
+import numpy as np
+
+
+class TestTensorArray(unittest.TestCase):
+    def setUp(self):
+        self.ta = core.TensorArray()
+
+        self.batch_size = 10
+        self.dim = 2
+
+        # create a LoDTensor
+        self.scope = core.Scope()
+        var = self.scope.new_var("test_tensor")
+        self.place = core.CPUPlace()
+        tensor = var.get_tensor()
+        tensor.set_dims([self.batch_size, self.dim])
+        tensor.alloc_float(self.place)
+        tensor_array = np.array(tensor)
+        tensor_array[0, 0] = 0
+        tensor_array[1, 0] = 1
+        tensor_array[2, 0] = 2
+        tensor_array[3, 0] = 3
+        tensor_array[4, 0] = 4
+        tensor_array[5, 0] = 5
+        tensor_array[6, 0] = 6
+        tensor_array[7, 0] = 7
+        tensor_array[8, 0] = 8
+        tensor_array[9, 0] = 9
+
+        lod_py = [[0, 2, 5, 10]]
+        lod_tensor = core.LoDTensor(lod_py)
+        lod_tensor.set(tensor_array, self.place)
+
+        self.py_seq_meta = [[5, 10, 2], [2, 5, 1], [0, 2, 0]]
+
+        self.tensor = lod_tensor
+
+    def test_unstack(self):
+        self.ta.unstack(self.tensor)
+        self.assertEqual(self.tensor.get_dims()[0], self.ta.size())
+
+    def test_read(self):
+        self.ta.unstack(self.tensor)
+        for i in range(self.batch_size):
+            tensor = self.ta.read(i)
+
+    def test_write(self):
+        self.ta.unstack(self.tensor)
+
+        # create a tensor with shape of [1, self.dim]
+        var = self.scope.new_var("hell")
+        tensor = var.get_tensor()
+        tensor.set_dims([1, self.dim])
+        tensor.alloc_float(self.place)
+        tensor_array = np.array(tensor)
+        for i in range(self.dim):
+            tensor_array[0, i] = i
+        tensor.set(tensor_array, self.place)
+
+        self.ta.write(2, tensor)
+
+        ta_tensor = self.ta.read(2)
+        ta_tensor_array = np.array(ta_tensor)
+        self.assertEqual(ta_tensor.get_dims(), [1, self.dim])
+        self.assertTrue((tensor_array == ta_tensor_array).all())
+
+    def test_write_shared(self):
+        self.ta.unstack(self.tensor)
+
+        # create a tensor with shape of [1, self.dim]
+        var = self.scope.new_var("hell")
+        tensor = var.get_tensor()
+        tensor.set_dims([1, self.dim])
+        tensor.alloc_float(self.place)
+        tensor_array = np.array(tensor)
+        for i in range(self.dim):
+            tensor_array[0, i] = i
+        tensor.set(tensor_array, self.place)
+
+        self.ta.write_shared(2, tensor)
+
+        ta_tensor = self.ta.read(2)
+        ta_tensor_array = np.array(ta_tensor)
+        self.assertEqual(ta_tensor.get_dims(), [1, self.dim])
+        self.assertTrue((tensor_array == ta_tensor_array).all())
+
+    def test_unpack(self):
+        meta = self.ta.unpack(self.tensor, 0, True)
+        self.assertEqual(self.ta.size(), 5)
+        self.assertEqual(meta, self.py_seq_meta)
+
+    def test_pack(self):
+        meta = self.ta.unpack(self.tensor, 0, True)
+        print "meta", meta
+        tensor = self.ta.pack(0, meta, self.tensor.lod())
+        print np.array(self.tensor)
+        print np.array(tensor)
+        self.assertTrue((np.array(self.tensor) == np.array(tensor)).all())
+        self.assertTrue(tensor.lod(), self.tensor.lod())
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py
index ca95ef13bd440ac0ba3d46f6e4680d4d7aa94c42..076e75593991415bc3fbcbd36a108c8c7de66932 100644
--- a/python/paddle/v2/trainer.py
+++ b/python/paddle/v2/trainer.py
@@ -164,11 +164,18 @@ class SGD(object):
                                                  pass_type)
                 self.__gradient_machine__.eval(pass_evaluator)
                 self.__gradient_machine__.eval(batch_evaluator)
+                event_handler(
+                    v2_event.EndForwardBackward(
+                        pass_id=pass_id,
+                        batch_id=batch_id,
+                        gm=self.__gradient_machine__))
                 for each_param in self.__gradient_machine__.getNonStaticParameters(
                 ):
                     self.__parameter_updater__.update(each_param)
                 cost_sum = out_args.sum()
                 cost = cost_sum / len(data_batch)
+                self.__parameter_updater__.finishBatch(cost)
+                batch_evaluator.finish()
                 event_handler(
                     v2_event.EndIteration(
                         pass_id=pass_id,
@@ -176,8 +183,6 @@ class SGD(object):
                         cost=cost,
                         evaluator=batch_evaluator,
                         gm=self.__gradient_machine__))
-                self.__parameter_updater__.finishBatch(cost)
-                batch_evaluator.finish()
             self.__parameter_updater__.finishPass()
         pass_evaluator.finish()
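In the TensorArray test, `lod_py = [[0, 2, 5, 10]]` describes three sequences occupying rows [0, 2), [2, 5) and [5, 10). Judging from `py_seq_meta`, `unpack(tensor, 0, True)` returns one (begin, end, original_index) triple per sequence, sorted by decreasing length. The sketch below reproduces that bookkeeping under those assumptions; it is an illustration of the expected metadata, not the TensorArray implementation itself.

```c++
#include <algorithm>
#include <cstdio>
#include <vector>

struct Meta {  // mirrors the (begin, end, ori_idx) triples in the test
  size_t begin, end, ori_idx;
};

// Build one entry per sequence from level-0 LoD offsets and, when
// length_descend is true, sort by decreasing sequence length.
std::vector<Meta> UnpackMeta(const std::vector<size_t>& lod,
                             bool length_descend) {
  std::vector<Meta> meta;
  for (size_t i = 0; i + 1 < lod.size(); ++i)
    meta.push_back(Meta{lod[i], lod[i + 1], i});
  if (length_descend)
    std::sort(meta.begin(), meta.end(), [](const Meta& a, const Meta& b) {
      return (a.end - a.begin) > (b.end - b.begin);
    });
  return meta;
}

int main() {
  for (const auto& m : UnpackMeta({0, 2, 5, 10}, true))
    std::printf("[%zu, %zu, %zu]\n", m.begin, m.end, m.ori_idx);
  // Prints [5, 10, 2], [2, 5, 1], [0, 2, 0] -- the test's py_seq_meta.
}
```

On the trainer side, the reordering in trainer.py means EndIteration now observes a batch whose parameter update and evaluator are already finished, while the new EndForwardBackward event fires right after the forward/backward pass and before the parameter update.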