diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 83fe9af768964003130d02b7d913ad1c2102dd1d..59661c9c1da53a2ddac0127ed1827fedde811a1d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -31,6 +31,3 @@ - id: go-fmt types: - go - - id: gometalinter - types: - - go diff --git a/Dockerfile b/Dockerfile index 136db772cc6a24b8084120fa6bab666bc1eda78e..150344a8116e2be9b5bab8e5fdcc9c37f4025020 100644 --- a/Dockerfile +++ b/Dockerfile @@ -22,7 +22,7 @@ COPY ./paddle/scripts/docker/root/ /root/ RUN apt-get update && \ apt-get install -y \ - git python-pip python-dev openssh-server bison \ + git python-pip python-dev openssh-server bison libnccl-dev \ wget unzip unrar tar xz-utils bzip2 gzip coreutils ntp \ curl sed grep graphviz libjpeg-dev zlib1g-dev \ python-matplotlib gcc-4.8 g++-4.8 \ diff --git a/doc/api/v2/config/networks.rst b/doc/api/v2/config/networks.rst index 6e813ab1a820d068ea3e54cad6178f1cf928eadc..048379cf01f4aec5e73e2fe3ddfa728f3c17a5d1 100644 --- a/doc/api/v2/config/networks.rst +++ b/doc/api/v2/config/networks.rst @@ -125,3 +125,8 @@ simple_attention :members: simple_attention :noindex: +dot_product_attention +--------------------- +.. automodule:: paddle.v2.networks + :members: dot_product_attention + :noindex: diff --git a/doc/design/block.md b/doc/design/block.md index 7cbf0d55b1faeb2093ee7cf234d1c2ad1905885b..4066122c0e8dfa33776796c3d205ba5aec9e0f52 100644 --- a/doc/design/block.md +++ b/doc/design/block.md @@ -189,7 +189,7 @@ OpDesc { inputs = {0} // the index of x in vars of BlockDesc above outputs = {5, 3} // indices of act and hidden_out in vars of BlockDesc above attrs { - "memories" : {1} // the index of h + "states" : {1} // the index of h "step_net" : } }; diff --git a/doc/design/cluster_train/src/trainer.graffle b/doc/design/cluster_train/src/trainer.graffle index 42384a3f059966e22e22f5fa4295cc9ead5cef83..43415ed8cf61a5acfa34f8e56b9577f338dbf254 100644 Binary files a/doc/design/cluster_train/src/trainer.graffle and b/doc/design/cluster_train/src/trainer.graffle differ diff --git a/doc/design/executor.md b/doc/design/executor.md new file mode 100644 index 0000000000000000000000000000000000000000..b5fb6c5c3c1da3c112ce63878322083dd5c42b70 --- /dev/null +++ b/doc/design/executor.md @@ -0,0 +1,23 @@ +# Executor Design Doc + +## Motivation + +We use an executor to do the runtime evaluation of a `ProgramDesc`. + +## Overview + +An executor takes a `ProgramDesc`, a `block_id` and a `Scope`. The `ProgramDesc` is a list of blocks and each block contains the protobuf definition of all the parameters and operators. The `block_id` specifies the entrance block. And the `Scope` is the container of all the variable instances; it is persistent across different runs. + +### What does the executor do? + +It evaluates all the operators in the `block_id`th block of a `ProgramDesc`. + +### What does the executor NOT do? + +It does not do runtime optimization, meaning it does not intelligently parse the dependencies of each op, choose which ones to run, or decide the order in which they should be run. + +It does not do graph partitioning, meaning it does not divide the `ProgramDesc` into several small pieces and execute them on different devices. + +## Implementation + +`Executor` evaluates a `ProgramDesc`. Essentially, it instantiates Variables and Operators, then runs all the operators in sequence.
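To make the evaluation loop concrete, here is a minimal, self-contained sketch of the idea. The types below (`Variable`, `Op`, `BlockDesc`, `ProgramDesc`) are simplified stand-ins, not the framework API; in the real design an `OpDesc` is a protobuf message from which an operator is instantiated, whereas here an op is already runnable. The actual implementation is linked right after.

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Simplified stand-ins for the framework types (assumptions for this sketch).
using Variable = float;                         // a real Variable holds a tensor
using Scope = std::map<std::string, Variable>;  // named variable instances, persistent across runs
using Op = std::function<void(Scope*)>;         // a runnable operator
struct BlockDesc { std::vector<Op> ops; };
struct ProgramDesc { std::vector<BlockDesc> blocks; };

// The executor walks the chosen block and runs every op in order;
// there is no dependency analysis and no graph partitioning.
void Run(const ProgramDesc& program, int block_id, Scope* scope) {
  for (const Op& op : program.blocks[block_id].ops) {
    op(scope);  // results are written into the scope, so they survive this call
  }
}

int main() {
  // A tiny one-block "program": y = x * 2; z = y + 1.
  ProgramDesc program;
  program.blocks.push_back({{
      [](Scope* s) { (*s)["y"] = (*s)["x"] * 2; },
      [](Scope* s) { (*s)["z"] = (*s)["y"] + 1; },
  }});

  Scope scope;
  scope["x"] = 3;
  Run(program, /*block_id=*/0, &scope);
  std::cout << scope["z"] << "\n";  // prints 7
}
```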
[[code]](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/executor.cc) diff --git a/doc/design/images/feed_forward.png b/doc/design/images/feed_forward.png new file mode 100644 index 0000000000000000000000000000000000000000..d312371a04c26aa6cd196e0bd1f51becb425180b Binary files /dev/null and b/doc/design/images/feed_forward.png differ diff --git a/doc/design/images/feed_forward_regularized.png b/doc/design/images/feed_forward_regularized.png new file mode 100644 index 0000000000000000000000000000000000000000..677e99bfd9f8e72ed9fe4b27127af2ced202f447 Binary files /dev/null and b/doc/design/images/feed_forward_regularized.png differ diff --git a/doc/design/images/l1_regularization.png b/doc/design/images/l1_regularization.png new file mode 100644 index 0000000000000000000000000000000000000000..e1b9c7a44f94dc027598a98da93ddb8133190972 Binary files /dev/null and b/doc/design/images/l1_regularization.png differ diff --git a/doc/design/images/l2_regularization.png b/doc/design/images/l2_regularization.png new file mode 100644 index 0000000000000000000000000000000000000000..d5c2fcbc2ccae75ad083162e5a2dceb0210be298 Binary files /dev/null and b/doc/design/images/l2_regularization.png differ diff --git a/doc/design/images/loss_equation.png b/doc/design/images/loss_equation.png new file mode 100644 index 0000000000000000000000000000000000000000..14212ec8d36c803de96bde8a9a4b5591bd20434e Binary files /dev/null and b/doc/design/images/loss_equation.png differ diff --git a/doc/design/infer_var_type.md b/doc/design/infer_var_type.md new file mode 100644 index 0000000000000000000000000000000000000000..d9d5397becba2ef1806d9341cd49cd9aabbf4a6a --- /dev/null +++ b/doc/design/infer_var_type.md @@ -0,0 +1,78 @@ +# Design Doc: InferVarType + +## The Problem Posed + +The variable in our design can hold variant types. Such as `LoDTensor` and `SelectedRows`. An operator should be able to inference the variable types of its output. + +For example, a `lookup table` operator takes two `LoDTensor`; one is a float tensor as the embedding table, the other is an int tensor as word ID. The gradient operator of `lookup table` will generate a `SelectedRows` as its output. A `sum` operator can take both `LoDTensor` and `SelectedRows` as its inputs and will generate a `LoDTensor` if any of its inputs is `LoDTensor`, otherwise, the `sum` operator will generate `SelectedRows` as its output. + +The variable type will be constant at runtime. Every variable's type can either be set by the user (input data and parameter) or be inferred by the operator in compile time. + +## Proposed Solution + +The `InferVarType` is a compile-time function which is registered to each operator. The inferface of that function is: + + +```c++ +using InferVarTypeFN = std::function< + void (const OpDescBind& /*op_desc*/, BlockDescBind* /*block*/)>; +``` + +It takes an operator description as its input and will write the output variable type and store them in block description. + +The `InferVarTypeFN` will be registered in `OpInfo`, to replace `infer_var_type_` field. The `OpInfo` should be + +```cpp +struct OpInfo { + InferVarTypeFN infer_var_type_; + ... +}; +``` + +The default `InferVarType` will set output type as `LoDTensor`. It can be done by `GetInferVarType()`. + +```cpp +void DefaultInferVarType(const OpDescBind& op_desc, BlockDescBind* block) { + // set the output type of variable as `LoDTensor`. + // ... 
+} + +struct OpInfo { + InferVarTypeFN infer_var_type_; + InferVarTypeFN GetInferVarType() const { + if (infer_var_type_) { + return infer_var_type_; + } else { + return DefaultInferVarType; + } + } +}; +``` + +## Register InferVarType + +We provide a thin base class for registering an `InferVarTypeFN`. Using a base class eases the implementation of the registry, since we can detect whether a registry entry is an `InferVarTypeFN` or not. + +```cpp +class VarTypeInferer { +public: + virtual void operator()(const OpDescBind& op_desc, BlockDescBind* block) const = 0; +}; +``` + +Operator developers can write a specialized `VarTypeInferer` as follows. + +```cpp +class SpecialVarTypeInferer : public VarTypeInferer { +public: + virtual void operator()(const OpDescBind& op_desc, BlockDescBind* block) const { + // .. own logic + } +}; +``` + +Then users can register the `InferVarType` just like `GradOpDescMaker` and `OpInfoMaker`. + +``` +REGISTER_OPERATOR(some_op, OpType, SpecialVarTypeInferer, ...); +``` diff --git a/doc/design/prune.md b/doc/design/prune.md new file mode 100644 index 0000000000000000000000000000000000000000..4a5cf10c79a554779137f0cce5494fdd96ef6b7a --- /dev/null +++ b/doc/design/prune.md @@ -0,0 +1,63 @@ +# Prune + +## Motivation + +We want to support running inference, training and checkpointing in one `ProgramDesc`. We implement +the `void Prune(const ProgramDesc* input, ProgramDesc* output)` function, which takes a `ProgramDesc` +and generates a pruned `ProgramDesc`. + +## Challenge + +Pruning needs to support both variables and operators being evaluation targets. Consider the following +different situations. + +```python +# Case 1: run forward pass. +cost_np = session.run(target=cost) +# Case 2: run backward pass. +opts_np, _ = session.run(target=[cost, opt]) +# Case 3: run checkpointing +_ = session.run(target=checkpoint) +``` + +## Solution + +To support evaluation of operators, we add an `is_target` field to the `OpDesc`. + +```c++ +message OpDesc { + required string type = 3; + repeated Var inputs = 1; + repeated Var outputs = 2; + repeated Attr attrs = 4; + optional bool is_target = 5 [ default = false ]; +}; +``` + +To support evaluation of variables, we add [fetch_op](https://github.com/PaddlePaddle/Paddle/pull/4599). +For each variable in the `target`, we insert a `fetch_op` into the `ProgramDesc` with `variable` being +`fetch_op`'s input. Then we also mark the `fetch_op` as a target. + +### Algorithm + +If an operator needs to be run, it must fall into one of the following cases: + +1. It is the target. +2. It is depended on by some other ops, meaning its output is some other op's input. + +The first case can be checked by `op_desc.is_target()`. The second case can be implemented as + +```c++ +bool HasDependentVar(const OpDesc& op_desc, const std::set<std::string>& dependent_vars) { + for (auto& var : op_desc.outputs()) { + for (auto& argu : var.arguments()) { + if (dependent_vars.count(argu) != 0) { + return true; + } + } + } + return false; +} +``` + +Then the whole algorithm can be implemented as the following [code](https://github.com/tonyyang-svail/Paddle/blob/prune_impl/paddle/framework/prune.cc).
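As a rough illustration of that algorithm, the sketch below sweeps a block backwards, keeping an op if it is a target or if one of its outputs is already needed, and then adding the op's inputs to the dependency set. The `OpDesc` here is a simplified stand-in (flat lists of argument names instead of the protobuf `Var` messages), so this is only a sketch of the linked implementation, not the framework code.

```cpp
#include <set>
#include <string>
#include <vector>

// Simplified stand-in for the protobuf OpDesc (an assumption for this sketch).
struct OpDesc {
  bool is_target = false;
  std::vector<std::string> inputs;   // flattened input argument names
  std::vector<std::string> outputs;  // flattened output argument names
};

// An op is needed if any of its outputs feeds an op we already decided to keep.
bool HasDependentVar(const OpDesc& op, const std::set<std::string>& dependent_vars) {
  for (const std::string& out : op.outputs) {
    if (dependent_vars.count(out) != 0) return true;
  }
  return false;
}

// Sweep the block backwards: once an op is kept, its inputs become dependencies,
// which in turn may force earlier ops to be kept as well.
std::vector<OpDesc> Prune(const std::vector<OpDesc>& block) {
  std::set<std::string> dependent_vars;
  std::vector<OpDesc> kept;
  for (auto it = block.rbegin(); it != block.rend(); ++it) {
    if (it->is_target || HasDependentVar(*it, dependent_vars)) {
      dependent_vars.insert(it->inputs.begin(), it->inputs.end());
      kept.push_back(*it);
    }
  }
  return std::vector<OpDesc>(kept.rbegin(), kept.rend());  // restore forward order
}
```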
diff --git a/doc/design/python_api.md b/doc/design/python_api.md index 56ae1d925a96622b5576013f38e33e5f92cbbb90..cb5fdc765b7126fc66a1c8978d4b96c0dc5a9f2c 100644 --- a/doc/design/python_api.md +++ b/doc/design/python_api.md @@ -179,40 +179,104 @@ init_attr={ `optimize_op_attrs` is not in the `VarDesc` message, but kept in the Python instance, as it will be used in the Python space when creating the optimize operator's `OpDesc`, and will be in the `OpDesc` message. -## Layer Functions +## Layer Function -A layer is a Python function that creates some operators and variables. Layers simplify the work of application programmers. +A layer is a Python function that creates some operators and variables. Layers simplify the work of application programmers. -### Data Layer +Layer functions take `Variable` and configuration parameters as its input and return the output variable(s). + +For example, `FullyConnected` take one or more variable as its input. The input could be input data or another layer's output. There are many configuration options for a `FullyConnected` layer, such as layer size, activation, parameter names, initialization strategies of parameters, and so on. The `FullyConnected` layer will return an output variable. + + +### Necessity for reusing code between layer functions + +There are a lot of code that can be reused. Such as + +* Give the default value of configuration. e.g., default initialize strategy for parameters is uniform random with `min = -1.0`, `max = 1.0`. and default initialize strategy for bias is to fill zero. +* Append the activation operator. +* Create a temporary variable. +* Create parameter. +* Generate a unique name. +* Add a bias. +* ... + +A mechanism to reuse code between layer functions is necessary. It will be around [150 lines of code](https://github.com/PaddlePaddle/Paddle/pull/4724/files#diff-823b27e07e93914ada859232ae23f846R12) if we write a `FullyConnected` layer without any helper functions. + + + +### Comparision between global functions and helper class + +The `FullyConnected` layer will be as follow when we provide global functions: ```python -def data_layer(name, type, column_name): - block = the_current_program.glolal_block() - var = block.create_global_var( - name=name, - shape=[None] + type.dims(), - dtype=type.dtype) - block.prepend_operator(block, - type="Feed", - inputs = None, - outputs = [var], - {column_name: column_name}) - return var +def fc_layer(input, size, param_attr=None, bias_attr=None, act=None, name=None): + if name is None: + name = unique_name("fc") + input = multiple_input(input) + param_attr = default_param_attr(param_attr) + param_attr = multiple_param_attr(param_attr, len(input)) + + # mul + mul_results = [] + for ipt, attr in zip(input, param_attr): + shape = ipt.shape[1:] + [size] + w = g_program.global_block().create_parameter(shape, ipt.dtype, name, attr) + tmp = create_tmp_var(name) + g_program.current_block().append_op("mul", {ipt, w}, {tmp}) + mul_results.append(tmp) + + # add sum + ... + # add bias + ... + # add activation + ... + return out ``` -The input to the feed operator is a special variable in the global scope, which is the output of [Python readers](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/reader/README.md). +We can provide many helpers functions for layer developers. However, there are several disadvantages for global helper functions: + +1. We need a namespace for these methods, then layer developers can quickly figure out what method they can use. +2. 
Global functions will force layer developers to pass its parameter time by time. + +So we provide a helper class, `LayerHelper`, to share code between layer functions. The `FullyConnected` Layer will be as follow. + +```python +def fc_layer(input, size, param_attr=None, bias_attr=None, act=None, name=None): + helper = LayerHelper(locals()) # pass all parameter to LayerHelper + + mul_results = [] + for ipt, param in helper.iter_multiple_input_and_param(): + w = helper.create_parameter(shape=ipt.shape[1:] + [size], dtype = ipt.dtype) + tmp = helper.create_tmp_variable() + helper.append_op('mul', {ipt, w}, {tmp}) + mul_results.append(tmp) + + pre_bias = helper.add_sum(mul_results) + pre_activation = helper.add_bias(pre_bias) + return helper.add_activation(pre_activation) +``` + +We not only use the fewer lines of code to write `fc_layer` but also make the code clearer to understand. At the same time, layer developers can figure out what function they can invoke by typing `helper.` in a python editor. + + +### Implementation of layer helper -### FC Layer +We just keep all parameters of a layer function as a dictionary in layer helper as a private data member. Every method of layer helper will look up the dictionary after it is invoked. In that way, we can implement a layer helper for all layer functions even some layer does not contain some operator. For example, The `activation` is used by the FullyConnected layer or convolution layers, but a cross-entropy layer does not use it. The example code of `add_activation` are: ```python -def fc_layer(input, size, ...): - block = program.current_block() - w = block.create_parameter(...) - b = block.create_parameter(...) - out = block.create_var() - op = block.append_operator("FC", X=input, W=w, b=b, out=out) - out.writer = op - return out +class LayerHelper(object): + def __init__(self, **kwargs): # kwargs is short for `keyword arguments` + self.kwargs = kwargs + + def add_activation(self, input_var): + act = self.kwargs.get("act", None) # default value is None + if act is None: # do nothing if no act + return input_var + + tmp = self.create_tmp_var(self) + self.append_op(type=act, input=input_var, output=tmp) + return tmp ``` ## Optimizer diff --git a/doc/design/refactorization.md b/doc/design/refactorization.md index ec51aa1a0ec667175ff7215dcd359023e296769f..f93d6155e1764386b01d2f0df3f141ab75cd55d4 100644 --- a/doc/design/refactorization.md +++ b/doc/design/refactorization.md @@ -177,9 +177,6 @@ REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, grad_op_class) REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class) ``` -### USE Macros -Make sure the registration process is executed and linked. - --- # Registration Process 1. Write an Op class and its gradient Op class, if required. @@ -188,8 +185,6 @@ Make sure the registration process is executed and linked. 1. Call maker class to complete `proto` and `checker` 2. Using the completed `proto` and `checker`, it will add a new key-value pair to the `OpInfoMap` -4. Invoke the `USE` macro in which the Op is used to make sure that it is linked. 
- --- # Backward Module (1/2) ### Create Backward Operator diff --git a/doc/design/register_grad_op.md b/doc/design/register_grad_op.md index 9f1ce4bae7b393cb9f04909e5e4917b8d660771c..8d973eb53178c3e889c845144553a453e11f067c 100644 --- a/doc/design/register_grad_op.md +++ b/doc/design/register_grad_op.md @@ -3,17 +3,17 @@ ## The Problem Posed -Currently, for each C++ operator class definition, there registers a *gradient operator creator* function, which takes a C++ operator instance and returns the corresponding gradient operator instance. +Currently, for each C++ operator class definition, a *gradient operator creator* function is registered, which takes as input a C++ operator instance and returns the corresponding gradient operator instance. -However, we noticed two problems with the current deisgn: +However, we noticed two problems with the current design: -1. As we decided to separate the *compilation* and *execution* phases, we need to change the creator to take an `OpDesc` protobuf message in a `ProgramDesc` and inserts corresponding `OpDesc` messages into the `ProgramDesc` message. +1. As we decided to separate the *compilation* and the *execution* phases, we need to change the creator to take an `OpDesc` protobuf message in a `ProgramDesc` and insert corresponding `OpDesc` messages into the `ProgramDesc` message. -1. Some operator's gradient computation requires more than one gradient operators. For example, the gradient of *minus* consists of two operators -- an identity operaotr and a scale operator. So we need to make the registration mechanism to support the mapping from an operator to a set of operators for gradient computation. +1. For some operators, the gradient computation can be written in terms of existing operators. For example, the gradient of *minus* operator consists of two operators -- an *identity* operator followed by a *scale* operator. Hence the registration mechanism needs to support mapping from an operator to a set of operators for the gradient computation. ## The Current Implementation -The C++ class `OpInfos` store in a association map which key is the operator type. The `grad_op_type` indicate associated gradient operator type. Operator can create gradient operator by `OpInfo::creator_` of gradient. The pseudo code is +Instances of the C++ class `OpInfo` are stored in an associative map whose key is the operator type. The `grad_op_type` indicates the associated gradient operator type. An operator can create the gradient operator by invoking `OpInfo::creator_` of the gradient operator. The pseudo code is as follows ```cpp struct OpInfo { @@ -31,16 +31,16 @@ OperatorBase* CreateGradientOperator(const OperatorBase& op) { ## Proposed Solution -The mapping relationship between an operator and its gradient operators is a function. The interface of that function is: +The mapping relationship between an operator and its gradient operators is a function. The interface of this function is: ```cpp // (OpDesc) --> vector<OpDesc> std::function<std::vector<OpDescBind>(const OpDescBind&)>; ``` -The function takes an `OpDescBind` of the forward operator and returns one or many gradient operator descriptions. `OpDescBind` is a C++ wrapper for protobuf message `OpDesc` to manipulate `OpDesc` fast. +The function takes an `OpDescBind` of the forward operator and returns one or many gradient operator descriptions. `OpDescBind` is a C++ wrapper for the protobuf message `OpDesc` for rapid manipulation of `OpDesc`. -The `GradOpDescMaker` will be registered in `OpInfo`, to replace `grad_op_type_` field.
The `OpInfo` should be +The `GradOpDescMaker` will be registered in `OpInfo` and will replace the `grad_op_type_` field. The `OpInfo` should look like ```cpp struct OpInfo { @@ -49,7 +49,7 @@ struct OpInfo { }; ``` -The `grad_op_maker_ ` is `nullptr` if the operator does not have associated gradient operators. +The `grad_op_maker_ ` is a `nullptr` if the operator does not have any associated gradient operators. We propose a base class called `GradOpDescMakerBase` to let operator developers generate `Gradient Operators` easily. The public interface of that class is @@ -74,7 +74,7 @@ func = [] (const OpDescBind& fwd_op) { We can write many helper functions since the `GradOpDescMakerBase` is a class now. The basic helper functions get the variables of `Input`, `Output`, `InputGradient` and `OutputGradient` in the forwarding operator. -We should chagne register macros at the same time. In the current solution, there is no difference between forwarding operators and backward operators. So `REGISTER_OP` just register one operator. If the `REGISTER_OPERATOR ` contains `OpProtoAndCheckerMaker` and `GradOpDescMaker`, we just list them in the same macro. It can be done by a macro contains `__VA_ARGS__`. +We should change register macros at the same time. In the current solution, there is no difference between forwarding operators and backward operators. So `REGISTER_OP` just register one operator. If the `REGISTER_OPERATOR ` contains `OpProtoAndCheckerMaker` and `GradOpDescMaker`, we just list them in the same macro. It can be done by a macro contains `__VA_ARGS__`. The user interface should be diff --git a/doc/design/regularization.md b/doc/design/regularization.md new file mode 100644 index 0000000000000000000000000000000000000000..703a9fbdd4392aa7f44733cce2da19caa1b51e4a --- /dev/null +++ b/doc/design/regularization.md @@ -0,0 +1,103 @@ +# Regularization in PaddlePaddle + +## Introduction to Regularization +A central problem in machine learning is how to design an algorithm that will perform well not just on the training data, but also on new data. Many strategies are used by machine learning practitioners to reduce the test error, possibly at the expense of increased training error. These strategies are collectively known as **regularization**. + +### Parameter Norm Penalties +Most common regularization approaches in deep learning are based on limiting the capacity of the models by adding a parameter norm penalty to the objective function `J`. This is given as follows: + +
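For readers of the raw Markdown, the penalized objective in the figure above has the standard form below (a LaTeX transcription based on the `J`, `alpha` and `omega` notation used in this document, not copied from the figure itself):

$$\hat{J}(\theta; X, y) = J(\theta; X, y) + \alpha \, \Omega(\theta)$$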
+ +The parameter `alpha` is a hyperparameter that weights the relative contribution of the norm penalty term, `omega`, relative to the standard objective function `J`. + +The most commonly used norm penalties are the L2 norm penalty and the L1 norm penalty. These are given as follows: + +##### L2 Regularization: +
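In LaTeX form, the L2 penalty shown above is conventionally the (halved) squared L2 norm of the weights; again this is a transcription of the standard form rather than of the figure:

$$\Omega(\theta) = \frac{1}{2} \lVert w \rVert_2^2$$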
+ +##### L1 Regularization +
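Similarly, the L1 penalty shown above is conventionally the sum of the absolute values of the weights (standard form, transcribed for reference):

$$\Omega(\theta) = \lVert w \rVert_1 = \sum_i \lvert w_i \rvert$$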
+ +A much more detailed mathematical background of regularization can be found [here](http://www.deeplearningbook.org/contents/regularization.html). + + +## How to do Regularization in PaddlePaddle + +On surveying existing frameworks like Tensorflow, PyTorch, Caffe, etc., it can be seen that there are two common approaches to doing regularization: + +1. Making regularization a part of the optimizer using an attribute like `weight_decay` that is used to control the scale of the L2 Penalty. This approach is used in PyTorch as follows: + ```python + opt = torch.optim.SGD(params, lr=0.2, weight_decay=0.2) + ``` + At every optimization step, this code will add the gradient of the L2 Norm of the params to the gradient of the params with respect to the loss function. This can be seen in the following code snippet: + ```python + if weight_decay != 0: + d_p.add_(weight_decay, p.data) + ``` + This is a very restrictive way of doing regularization and does not give the users enough flexibility. + + **Advantages**: + - It is easy to implement for us. + - Faster execution of backward. However, it can be done manually by advanced users too. + + **Disadvantages**: + - Not flexible for other regularizations such as L1/L0 regularization. + - Does not allow for different regularization coefficients for different parameters. For example, in most models, only the weight matrices are regularized and the bias vectors are unregularized. + - Tightly coupled optimizer and regularization implementation. + + +2. Adding regularization ops to the graph through the Python API. This approach is used by Tensorflow and Caffe. Using this approach, we manually add regularization ops to the graph and then add the regularization loss to the final loss function before sending them to the optimizer. + + **Advantages**: + - Allows for greater flexibility to the users of Paddle. Using this approach, the users can apply different regularization to different parameters and also choose parameters that are not a part of regularization. + - Makes it easy for the users to customize and extend the framework. + + **Disadvantages**: + - Implementation requires comprehensive design and time. + +## Proposal for Regularization in PaddlePaddle + +### Low-Level implementation + +In the new design, we propose to create new operations for regularization. For now, we can add two ops that correspond to the most frequently used regularizations: +- L2_regularization_op +- L1_regularization_op + +These ops can be like any other ops with their own CPU/GPU implementations either using Eigen or separate CPU and GPU kernels. As the initial implementation, we can implement their kernels using Eigen following the abstraction pattern implemented for [Activation Ops](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/accuracy_op.h). This abstraction pattern can make it very easy to implement new regularization schemes other than L1 and L2 norm penalties. + +The idea of building ops for regularization is in sync with the refactored Paddle philosophy of using operators to represent any computation unit. The way these ops will be added to the computation graph will be decided by the [layer functions](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/python_api.md#layer-function) in the Python API. + +### Computation Graph + +Below is an example of a really simple feed forward neural network. + +
+ +The Python API will modify this computation graph to add regularization operators. The modified computation graph will look as follows: + +
+    +### Python API implementation for Regularization + +Using the low level ops, `L2_regularization_op` and `L1_regularization_op`, any user can add regularization to their computation graphs. However, this will require a lot of lines of code and we should design Python APIs that support regularization. An example of such an API can be seen in [Keras](https://keras.io/regularizers/). As per the PaddlePaddle [Python API design](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/python_api.md), the layer functions are responsible for creating operators, operator parameters and variables. Since regularization is a property of parameters, it makes sense to create these in the layer functions. + +#### Creation of Regularization ops +There are two possibilities for creating the regularization ops: +1. We create these ops immediately while building the computation graph. +2. We add these ops in a lazy manner, just before the backward, similar to the way the optimization ops are added. + +The proposal is to add these ops in a lazy manner just before the backward pass. + +#### Storage of Regularization attributes + +Since we want to create the regularization ops in a lazy manner, the regularization attributes (type of regularization and weight of regularization penalty) can be stored as attributes of the [`Parameter`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/framework.py#L421) class. This is because regularization is a property of the parameters and storing regularization properties with Parameters also allows for shared parameters. + +#### High-level API + +In PaddlePaddle Python API, users will primarily rely on [layer functions](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/python_api.md#layer-function) to create neural network layers. Hence, we lso need to provide regularization functionality in layer functions. The design of these APIs can be postponed for later right now. A good reference for these APIs can be found in [Keras](https://keras.io/regularizers/) and also by looking at Tensorflow in [`tf.contrib.layers`](https://www.tensorflow.org/api_guides/python/contrib.layers). + + + + + + diff --git a/doc/design/selected_rows.md b/doc/design/selected_rows.md index 9e6f3b20cbcdc55e481fbe7bf5fa555d8b3c3d45..1a98839a957612b91b2276b58818623ecc62d1d5 100644 --- a/doc/design/selected_rows.md +++ b/doc/design/selected_rows.md @@ -1,6 +1,6 @@ # Design Doc: Selected Rows -`SelectedRows` is a kind of sparse tensor data type, which is designed to support `embedding` operators. The gradient of embedding table is a sparse tensor. Only a few rows are non-zero values in that tensor. It is straightforward to represent the sparse tensor by the following sparse tensor data structure: +`SelectedRows` is a type of sparse tensor data type, which is designed to support `embedding` operators. The gradient of embedding table is a sparse tensor. Only a few rows are non-zero values in this tensor. It is straight-forward to represent a sparse tensor by the following sparse tensor data structure: ```cpp class SelectedRows { @@ -11,7 +11,7 @@ class SelectedRows { }; ``` -The field `height_` shows the first dimension of `SelectedRows`. The `rows` are the indices of which rows of `SelectedRows` are non-zeros. The `value_` field is an N-dim tensor and shape is `[rows.size() /* NUM_ROWS */, ...]`, which supplies values for each row. The dimension of `SelectedRows` satisfies `[height_] + value_.shape[1:]`. 
+The field `height_` is the first dimension of `SelectedRows`. The `rows` are the indices of the non-zero rows of `SelectedRows`. The `value_` field is an N-dim tensor of shape `[rows.size() /* NUM_ROWS */, ...]`, which supplies values for each row. The dimension of `SelectedRows` satisfies `[height_] + value_.shape[1:]`. Suppose that a SelectedRows-typed variable `x` has many rows, but only two of them have values -- row 73 is `[1, 2]` and row 84 is `[3, 4]`, the `SelectedRows` representation would be: @@ -25,7 +25,7 @@ x = SelectedRow { ## SelectedRows in Protobuf -`SelectedRows` is a kind of `Variable`. `VarDesc` in protobuf should describe the `SelectedRows` information. Only the tensor dimension of a `SelectedRows` will be described in compile-time since the `rows_` and `value_` are related to training data. +`SelectedRows` is a type of `Variable`. `VarDesc` in protobuf should describe the `SelectedRows` information. Only the tensor dimension of a `SelectedRows` will be described in compile-time because the `rows_` and `value_` are dependent on the training data. So we use `TensorDesc` to unify `data_type` and `dims`. A LodTensorDesc contains a `TensorDesc` and `lod_level`. The description of `SelectedRows` is a Tensor description. ```proto @@ -54,7 +54,7 @@ message VarDesc { ## InferShape for Selected Rows -Just like `LoD` information, `InferShape` method will inference output tensor type as well. The operator should decide whether its output is a `SelectedRows` or `Dense` tensor. +Just like `LoD` information, `InferShape` method will infer the output tensor type as well. The operator should decide whether its output is a `SelectedRows` or `Dense` tensor. For example, the gradient operator of `TableLookup` will always generate `SelectedRows`. Its `InferShape` method should be like following @@ -68,7 +68,7 @@ void TableLookupGrad::InferShape(context) { ## Sparse Operators -There are several operators should be written to support `SelectedRows`. They are: +There are several operators that need to be written to support `SelectedRows`. These are: -1. Operators which generates `SelectedRows` gradient. e.g. Gradient of `TableLookupOp`. +1. Operators which generate `SelectedRows` gradient. e.g. Gradient of `TableLookupOp`. 2. Optimize operators which support `SelectedRows` gradient. e.g. `SGD` or `AdaGrad` for `SelectedRows`. However, there should be only one `SGD` operator. `OpWithKernel::Run` should select a suitable kernel for both `dense` tensor or `SelectedRows`. diff --git a/doc/faq/local/index_cn.rst b/doc/faq/local/index_cn.rst index 75c4ba028e497e29e9030a86514348726d9c0a80..0e939a2671ace8682c90cdc1c1bb2da1dda0d568 100644 --- a/doc/faq/local/index_cn.rst +++ b/doc/faq/local/index_cn.rst @@ -174,7 +174,7 @@ decoder_inputs = paddle.layer.fc( 1. 两者都是对梯度的截断,但截断时机不同,前者在 :code:`optimzier` 更新网络参数时应用;后者在激活函数反向计算时被调用; 2. 截断对象不同:前者截断可学习参数的梯度,后者截断回传给前层的梯度; -除此之外,还可以通过减小学习律或者对数据进行归一化处理来解决这类问题。 +除此之外,还可以通过减小学习率或者对数据进行归一化处理来解决这类问题。 5. 
如何调用 infer 接口输出多个layer的预测结果 ----------------------------------------------- diff --git a/doc/howto/cross_compiling/cross_compiling_for_android_cn.md b/doc/howto/cross_compiling/cross_compiling_for_android_cn.md index 90dc84718c9ce1374cda6022de177afeeb60279d..1fc58c37cc9151d5e4d99b939e30c29aa99e04f1 100644 --- a/doc/howto/cross_compiling/cross_compiling_for_android_cn.md +++ b/doc/howto/cross_compiling/cross_compiling_for_android_cn.md @@ -1,9 +1,46 @@ # 构建Android平台上的PaddlePaddle库 -用户可通过交叉编译的方式,在用户熟悉的开发平台(Linux,Mac OS X和Windows)上编译Android平台上适用的PaddlePaddle库。 +用户可通过如下两种方式,交叉编译Android平台上适用的PaddlePaddle库: +- 基于Docker容器的编译方式 +- 基于Linux交叉编译环境的编译方式 + +## 基于Docker容器的编译方式 +Docker能在所有主要操作系统(包括Linux,Mac OS X和Windows)上运行,因此,使用基于Docker容器的编译方式,用户可在自己熟悉的开发平台上编译Android平台上适用的PaddlePaddle库。 + +### 构建PaddlePaddle的Android开发镜像 +我们把PaddlePaddle的交叉编译环境打包成一个镜像,称为开发镜像,里面涵盖了交叉编译Android版PaddlePaddle库需要的所有编译工具。 + +```bash +$ git clone https://github.com/PaddlePaddle/Paddle.git +$ cd Paddle +$ docker build -t username/paddle-android:dev . -f Dockerfile.android +``` + +### 编译PaddlePaddle C-API库 +构建好开发镜像后,即可使用开发镜像来编译Android版PaddlePaddle C-API库。 +Android的Docker开发镜像向用户提供两个可配置的参数: + +| Argument | Optional Values | Default | +|-----------------|-------------------------|---------| +|`ANDROID_ABI` |`armeabi-v7a, arm64-v8a` | `armeabi-v7a` | +|`ANDROID_API` |`>= 21` | `21` | + +- 编译`armeabi-v7a`,`Android API 21`的PaddlePaddle库 +```bash +$ docker run -it --rm -v $PWD:/paddle -e "ANDROID_ABI=armeabi-v7a" -e "ANDROID_API=21" username/paddle-android:dev +``` + +- 编译`arm64-v8a`,`Android API 21`的PaddlePaddle库 +```bash +$ docker run -it --rm -v $PWD:/paddle -e "ANDROID_ABI=arm64-v8a" -e "ANDROID_API=21" username/paddle-android:dev +``` + +执行上述`docker run`命令时,容器默认执行[paddle/scripts/docker/build_android.sh](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/docker/build_android.sh)脚本。该脚本中记录了交叉编译Android版PaddlePaddle库常用的CMake配置,并且会根据`ANDROID_ABI`和`ANDROID_API`自动构建独立工具链、进行编译和安装。由于arm64架构要求Android API不小于21。因此当`ANDROID_ABI=arm64-v8a`,`ANDROID_API<21`时,Docker容器中将默认使用`Android API 21`的编译工具链。用户可以参考下文**配置交叉编译参数**章节,根据个人的需求修改定制Docker容器所执行的脚本。编译安装结束之后,PaddlePaddle的C-API库将被安装到`$PWD/install_android`目录,所依赖的第三方库同时也被安装到`$PWD/install_android/third_party`目录。 + +## 基于Linux交叉编译环境的编译方式 本文档将以Linux x86-64平台为例,介绍交叉编译Android平台上适用的PaddlePaddle库的方法和步骤。 -## 准备交叉编译环境 +### 准备交叉编译环境 从源码交叉编译PaddlePaddle,用户需要提前准备好交叉编译环境。Android平台上使用的C/C++交叉编译工具链为[Android NDK](https://developer.android.com/ndk/downloads/index.html?hl=zh-cn),用户可自行前往下载预编译好的版本,也可通过以下命令获取: @@ -13,18 +50,27 @@ unzip -q android-ndk-r14b-linux-x86_64.zip ``` Android NDK中包含了所有Android API级别、所有架构(arm/arm64/x86/mips)需要用到的编译工具和系统库。用户可根据自己的编译目标架构、所需支持的最低Android API级别,构建[独立工具链](https://developer.android.google.cn/ndk/guides/standalone_toolchain.html?hl=zh-cn)。 -比如: + +- 构建`armeabi-v7a`、 `Android API 21`的独立工具链: ```bash your/path/to/android-ndk-r14b-linux-x86_64/build/tools/make-standalone-toolchain.sh \ - --arch=arm --platform=android-21 --install-dir=your/path/to/my_standalone_toolchain + --arch=arm --platform=android-21 --install-dir=your/path/to/arm_standalone_toolchain ``` -此命令将在your/path/to/my_standalone_toolchain目录生成一套编译工具链,面向架构为32位ARM架构,支持的最小的Android API级别为21,使用的编译器为arm-linux-androideabi-gcc (GCC) 4.9。 +此命令将在`your/path/to/arm_standalone_toolchain`目录生成一套独立编译工具链,面向架构为32位ARM架构,支持的最小的Android API级别为21,支持编译器`arm-linux-androideabi-gcc (GCC) 4.9`和`clang 3.8`。 -注意:**PaddlePaddle要求使用的编译工具链所支持的Andoid API级别不小于21**。 +- 构建`arm64-v8a`、 `Android API 21`的独立工具链: +```bash 
+your/path/to/android-ndk-r14b-linux-x86_64/build/tools/make-standalone-toolchain.sh \ + --arch=arm64 --platform=android-21 --install-dir=your/path/to/arm64_standalone_toolchain +``` -## 配置交叉编译参数 +此命令将在`your/path/to/arm64_standalone_toolchain`目录生成一套独立编译工具链,面向架构为64位ARM64架构,支持的最小Android API级别为21,支持编译器`arm-linux-androideabi-gcc (GCC) 4.9`和`clang 3.8`。 + +注意:**PaddlePaddle要求使用的编译工具链所支持的Android API级别不小于21**。 + +### 配置交叉编译参数 CMake系统对交叉编译提供了支持[cmake-toolchains](https://cmake.org/cmake/help/v3.0/manual/cmake-toolchains.7.html#cross-compiling)。为了简化cmake配置,PaddlePaddle为交叉编译提供了工具链配置文档[cmake/cross_compiling/android.cmake](https://github.com/PaddlePaddle/Paddle/blob/develop/cmake/cross_compiling/android.cmake),以提供一些默认的编译器和编译参数相关配置。注意,从CMake 3.7版本开始,CMake官方对Android平台的交叉编译提供了通用的支持。PaddlePaddle若检测到用户使用的CMake版本不低于3.7时,将会将用户传进来的配置参数传递CMake系统,交由CMake系统本身来处理。有关参数配置的详细说明见[cmake-toolchains](https://cmake.org/cmake/help/v3.7/manual/cmake-toolchains.7.html#cross-compiling)。 @@ -36,32 +82,57 @@ CMake系统对交叉编译提供了支持[cmake-toolchains](https://cmake.org/cm Android平台可选配置参数: - `ANDROID_STANDALONE_TOOLCHAIN`,独立工具链所在的绝对路径,或者相对于构建目录的相对路径。PaddlePaddle的CMake系统将根据该值自动推导和设置需要使用的交叉编译器、sysroot、以及Android API级别;否则,用户需要在cmake时手动设置这些值。无默认值。 -- `ANDROID_ABI`,目标架构ABI。目前只支持`armeabi-v7a`,默认值为`armeabi-v7a`。 +- `ANDROID_TOOLCHAIN`,目标工具链。可设置`gcc/clang`,默认值为`clang`。 + - CMake 3.7以上,将会始终使用`clang`工具链;CMake 3.7以下,可设置`ANDROID_TOOLCHAIN=gcc`以使用`gcc`工具链。 + - Android官方提供的`clang`编译器要求系统支持`GLIBC 2.15`以上。 +- `ANDROID_ABI`,目标架构ABI。目前支持`armeabi-v7a`和`arm64-v8a`,默认值为`armeabi-v7a`。 - `ANDROID_NATIVE_API_LEVEL`,工具链的Android API级别。若没有显式设置,PaddlePaddle将根据`ANDROID_STANDALONE_TOOLCHAIN`的值自动推导得到。 -- `ANROID_ARM_MODE`,是否使用ARM模式。可设置`ON/OFF`,默认值为`ON`。 -- `ANDROID_ARM_NEON`,是否使用NEON指令。目前必须设置成`ON`,默认值为`ON`。 +- `ANROID_ARM_MODE`,是否使用ARM模式。 + - `ANDROID_ABI=armeabi-v7a`时,可设置`ON/OFF`,默认值为`ON`; + - `ANDROID_ABI=arm64-v8a`时,不需要设置。 +- `ANDROID_ARM_NEON`,是否使用NEON指令。 + - `ANDROID_ABI=armeabi-v7a`时,可设置`ON/OFF`,默认值为`ON`; + - `ANDROID_ABI=arm64-v8a`时,不需要设置。 其他配置参数: +- `USE_EIGEN_FOR_BLAS`,是否使用Eigen库进行矩阵计算。可设置`ON/OFF`,默认值为`OFF`。 - `HOST_C/CXX_COMPILER`,宿主机的C/C++编译器。在编译宿主机版protoc可执行文件和目标机版OpenBLAS库时需要用到。默认设置成环境变量`CC`的值;若环境变量`CC`没有设置,则设置成`cc`编译器。 -一种常用的cmake配置如下: +常用的cmake配置如下: ```bash cmake -DCMAKE_SYSTEM_NAME=Android \ - -DANDROID_STANDALONE_TOOLCHAIN=your/path/to/my_standalone_toolchain \ + -DANDROID_STANDALONE_TOOLCHAIN=your/path/to/arm_standalone_toolchain \ -DANDROID_ABI=armeabi-v7a \ -DANDROID_ARM_NEON=ON \ -DANDROID_ARM_MODE=ON \ + -DUSE_EIGEN_FOR_BLAS=ON \ -DCMAKE_INSTALL_PREFIX=your/path/to/install \ -DWITH_C_API=ON \ -DWITH_SWIG_PY=OFF \ .. ``` +``` +cmake -DCMAKE_SYSTEM_NAME=Android \ + -DANDROID_STANDALONE_TOOLCHAIN=your/path/to/arm64_standalone_toolchain \ + -DANDROID_ABI=arm64-v8a \ + -DUSE_EIGEN_FOR_BLAS=OFF \ + -DCMAKE_INSTALL_PREFIX=your/path/to/install \ + -DWITH_C_API=ON \ + -DWITH_SWIG_PY=OFF \ + .. 
+``` + 用户还可根据自己的需求设置其他编译参数。比如希望最小化生成的库的大小,可以设置`CMAKE_BUILD_TYPE`为`MinSizeRel`;若希望最快的执行速度,则可设置`CMAKE_BUILD_TYPE`为`Release`。亦可以通过手动设置`CMAKE_C/CXX_FLAGS_MINSIZEREL/RELEASE`来影响PaddlePaddle的编译过程。 -## 编译和安装 +**性能TIPS**,为了达到最快的计算速度,在CMake参数配置上,有以下建议: +- 设置`CMAKE_BUILD_TYPE`为`Release` +- 使用`clang`编译工具链 +- `armeabi-v7a`时,设置`USE_EIGEN_BLAS=ON`,使用Eigen进行矩阵计算;`arm64-v8a`时,设置`USE_EIGEN_FOR_BLAS=OFF`,使用OpenBLAS进行矩阵计算 + +### 编译和安装 CMake配置完成后,执行以下命令,PaddlePaddle将自动下载和编译所有第三方依赖库、编译和安装PaddlePaddle预测库。 @@ -72,4 +143,4 @@ make install 注意:如果你曾经在源码目录下编译过其他平台的PaddlePaddle库,请先使用`rm -rf`命令删除`third_party`目录和`build`目录,以确保所有的第三方依赖库和PaddlePaddle代码都是针对新的CMake配置重新编译的。 -执行完安装命令后,`your/path/to/install`目录中会包含`include`和`lib`目录,其中`include`中包含C-API的头文件,`lib`中包含一个Android版本的库。自此,PaddlePaddle的已经安装完成,用户可将`your/path/to/install`目录下的生成文件用于深度学习相关Android App中,调用方法见C-API文档。 +执行完安装命令后,`your/path/to/install`目录中会包含`include`、`lib`和`third_party`目录,其中`include`中包含C-API的头文件,`lib`中包含若干个不同Android ABI的PaddlePaddle库,`third_party`中包含所依赖的所有第三方库。自此,PaddlePaddle的已经安装完成,用户可将`your/path/to/install`目录下的生成文件用于深度学习相关Android App中,调用方法见C-API文档。 diff --git a/doc/howto/usage/cluster/cluster_train_cn.md b/doc/howto/usage/cluster/cluster_train_cn.md index 274452fbf0c595ad7b4dbeffe85ad9038f12b458..93c5544bcfa911f8bdcdaea39a75b3ab7ef218f8 100644 --- a/doc/howto/usage/cluster/cluster_train_cn.md +++ b/doc/howto/usage/cluster/cluster_train_cn.md @@ -1,135 +1,215 @@ -```eval_rst -.. _cluster_train: +# PaddlePaddle分布式训练 + +* [概述](#概述) +* [环境准备](#环境准备) +* [启动参数说明](#启动参数说明) + * [启动参数服务器](#启动参数服务器) + * [启动计算节点](#启动计算节点) + * [准备数据集](#准备数据集) + * [准备训练程序](#准备训练程序) +* [使用分布式计算平台或工具](#使用分布式计算平台或工具) + * [使用Fabric启动集群作业](#使用fabric启动集群作业) + * [准备一个Linux集群](#准备一个linux集群) + * [启动集群作业](#启动集群作业) + * [终止集群作业](#终止集群作业) + * [检查集群训练结果](#检查集群训练结果) + * [检查模型输出](#检查模型输出) + * [在OpenMPI集群中提交训练作业](#在openmpi集群中提交训练作业) + * [准备OpenMPI集群](#准备OpenMPI集群) + * [启动集群作业](#启动集群作业-1) + * [在Kubernetes集群中提交训练作业](#在kubernetes集群中提交训练作业) + +# 概述 +本文将介绍如何使用PaddlePaddle在不同的集群框架下完成分布式训练。分布式训练架构如下图所示: + + + +- 数据分片(Data shard): 用于训练神经网络的数据,被切分成多个部分,每个部分分别给每个trainer使用。 +- 计算节点(Trainer): 每个trainer启动后读取切分好的一部分数据,开始神经网络的“前馈”和“后馈”计算,并和参数服务器通信。在完成一定量数据的训练后,上传计算得出的梯度(gradients),然后下载优化更新后的神经网络参数(parameters)。 +- 参数服务器(Parameter server):每个参数服务器只保存整个神经网络所有参数的一部分。参数服务器接收从计算节点上传的梯度,并完成参数优化更新,再将更新后的参数下发到每个计算节点。 + +这样,通过计算节点和参数服务器的分布式协作,可以完成神经网络的SGD方法的训练。PaddlePaddle可以同时支持同步随机梯度下降(SGD)和异步随机梯度下降。 + +在使用同步SGD训练神经网络时,PaddlePaddle使用同步屏障(barrier),使梯度的提交和参数的更新按照顺序方式执行。在异步SGD中,则并不会等待所有trainer提交梯度才更新参数,这样极大地提高了计算的并行性:参数服务器之间不相互依赖,并行地接收梯度和更新参数,参数服务器也不会等待计算节点全部都提交梯度之后才开始下一步,计算节点之间也不会相互依赖,并行地执行模型的训练。可以看出,虽然异步SGD方式会提高参数更新并行度, 但是并不能保证参数同步更新,在任意时间某一台参数服务器上保存的参数可能比另一台要更新,与同步SGD相比,梯度会有噪声。 + +# 环境准备 + +1. 准备您的计算集群。计算集群通常由一组(几台到几千台规模)的Linux服务器组成。服务器之间可以通过局域网(LAN)联通,每台服务器具有集群中唯一的IP地址(或者可被DNS解析的主机名)。集群中的每台计算机通常被成为一个“节点”。 +1. 
我们需要在集群的所有节点上安装 PaddlePaddle。 如果要启用GPU,还需要在节点上安装对应的GPU驱动以及CUDA。PaddlePaddle的安装可以参考[build_and_install](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/getstarted/build_and_install)的多种安装方式。我们推荐使用[Docker](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/getstarted/build_and_install/docker_install_cn.rst)安装方式来快速安装PaddlePaddle。 + +安装完成之后,执行下面的命令可以查看已经安装的版本(docker安装方式可以进入docker容器执行:`docker run -it paddlepaddle/paddle:[tag] /bin/bash`): +```bash +$ paddle version +PaddlePaddle 0.10.0, compiled with + with_avx: ON + with_gpu: OFF + with_double: OFF + with_python: ON + with_rdma: OFF + with_timer: OFF ``` -# 运行分布式训练 +下面以`doc/howto/usage/cluster/src/word2vec`中的代码作为实例,介绍使用PaddlePaddle v2 API完成分布式训练。 -在本文中,我们将阐释如何在集群上运行分布式 Paddle 训练作业。我们将以[推荐系统](https://github.com/baidu/Paddle/tree/develop/demo/recommendation)为例创建分布式的单进程训练。 +# 启动参数说明 +## 启动参数服务器 +执行以下的命令启动一个参数服务器并等待和计算节点的数据交互 +```bash +$ paddle pserver --port=7164 --ports_num=1 --ports_num_for_sparse=1 --num_gradient_servers=1 +``` -在本文中使用的[脚本](https://github.com/baidu/Paddle/tree/develop/paddle/scripts/cluster_train)通过 SSH 运行分布式作业。 它们还可以供那些运行更复杂的集群管理系统(如 MPI 和 [Kubernetes](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/howto/usage/k8s) )的用户参考。 +如果希望可以在后台运行pserver程序,并保存输出到一个日志文件,可以运行: +```bash +$ stdbuf -oL /usr/bin/nohup paddle pserver --port=7164 --ports_num=1 --ports_num_for_sparse=1 --num_gradient_servers=1 &> pserver.log +``` -## 前提条件 +| 参数 | 是否必选 | 默认值 | 说明 | +| ------------- | ------------- | ------------- | ------------- | +| port | 必选 | 7164 | pserver监听的起始端口,根据ports_num决定
总端口个数,从起始端口监听多个端口用于通信 | +| ports_num | 必选 | 1 | 监听的端口个数 | +| ports_num_for_sparse | 必选 | 1 | 用于稀疏类型参数通信的端口个数 | +| num_gradient_servers | 必选 | 1 | 当前训练任务pserver总数 | + +## 启动计算节点 +执行以下命令启动使用python编写的trainer程序(文件名为任意文件名,如train.py) +```bash +$ python train.py +``` -1. 上述脚本使用 Python 库 [fabric](http://www.fabfile.org/) 来运行 SSH 命令。 我们使用 `pip` 来安装 fabric: +trainer需要和pserver保持网络联通以完成训练。trainer启动需要传入端口、pserver地址等参数使trainer可以正确连接到pserver。这些参数可以通过环境变量(https://zh.wikipedia.org/wiki/环境变量 )或编写程序时`paddle.init()`中传入参数。如果同时使用`paddle.init()`参数和环境变量,将会优先使用`paddle.init()`中传入的参数。 - ```bash - pip install fabric - ``` +使用环境变量: -2. 我们需要在集群的所有节点上安装 PaddlePaddle。 如果要启用GPU,需要在 `/usr/local/cuda` 中安装 CUDA; 否则 Paddle 将在运行时报错。 +```bash +export PADDLE_INIT_USE_GPU=False +export PADDLE_INIT_TRAINER_COUNT=1 +export PADDLE_INIT_PORT=7164 +export PADDLE_INIT_PORTS_NUM=1 +export PADDLE_INIT_PORTS_NUM_FOR_SPARSE=1 +export PADDLE_INIT_NUM_GRADIENT_SERVERS=1 +export PADDLE_INIT_TRAINER_ID=0 +export PADDLE_INIT_PSERVERS=127.0.0.1 +``` -3. 在 [`cluster_train/conf.py`] 中设置 `ROOT_DIR`, 该 ROOT_DIR 要在所有节点上存在。为了方便起见,我们通常在所有节点上创建一个 Unix 用户 `paddle`,并设置 `ROOT_DIR=/home/paddle`。这样,我们可以将 SSH 公钥写入 `/home/paddle/.ssh/authorized_keys`,以便用户 `paddle` 可以 SSH 到所有节点而不用密码。 +使用参数: -## 准备工作空间 +```python +paddle.init( + use_gpu=False, + trainer_count=1, + port=7164, + ports_num=1, + ports_num_for_sparse=1, + num_gradient_servers=1, + trainer_id=0, + pservers="127.0.0.1") +``` -我们将放置依赖库、配置等文件的目录视为 *工作空间(workspace)*。 +| 参数 | 是否必选 | 默认 | 说明 | +| ------------- | ------------- | ------------- | ------------- | +| use_gpu | 可选 | False | 是否启用GPU训练 | +| trainer_count | 必选 | 1 | 当前训练任务trainer总个数 | +| port | 必选 | 7164 | 连接到pserver的端口 | +| ports_num | 必选 | 1 | 连接到pserver的端口个数 | +| ports_num_for_sparse | 必选 | 1 | 和pserver之间用于稀疏类型参数通信的端口个数 | +| num_gradient_servers | 必选 | 1 | 当前训练任务pserver总数 | +| trainer_id | 必选 | 0 | 每个trainer的唯一ID,从0开始的整数 | +| pservers | 必选 | 127.0.0.1 | 当前训练任务启动的pserver的IP列表,多个IP使用“,”隔开 | -这些 `train/test` 数据应该在启动集群作业之前准备好。 为了满足训练/测试数据放置在工作空间中不同目录的要求,PADDLE 根据在模型配置文件中使用的名为 `train.list/test.list` 的索引文件引用训练/测试数据,所以训练/测试数据也包含 train.list/test.list 两个列表文件。所有本地训练 demo 已经提供了脚本来帮助您创建这两个文件,并且集群作业中的所有节点将在正常情况下处理具有相同逻辑代码的文件。 -通常,你可以使用本地训练中的相同模型文件进行集群训练。请记住,在模型文件的 `setting`函数中设置的 `batch_size` 表示在集群作业**每个**节点中的 batch 大小,而不是使用同步 SGD 的总 batch 大小。 +## 准备数据集 -以下步骤基于 demo 目录中的 [demo/recommendation](https://github.com/PaddlePaddle/Paddle/tree/develop/demo/recommendation)。 +参考样例数据准备脚本[prepare.py](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/howto/usage/cluster/src/word2vec/prepare.py),准备训练数据和验证数据集,我们使用paddle.dataset.imikolov数据集,并根据分布式训练并发数(trainer节点个数),在`prepare.py`开头部分指定`SPLIT_COUNT`将数据切分成多份。 -你只需完成 demo/recommendation 教程文档到 `Train` 的部分,之后你会得到训练/测试数据和模型配置文件。最后,只需使用 demo/recommendation 作为集群训练的工作空间。 +在线上系统中,通常会使用MapReduce任务的输出结果作为训练结果,这样训练文件的个数会比较多,而且个数并不确定。在trainer中可以使用下面取模的方法为每个trainer分配训练数据文件: -最后,你的工作空间应如下所示: -``` -. 
-|-- common_utils.py -|-- data -| |-- config.json -| |-- config_generator.py -| |-- meta.bin -| |-- meta_config.json -| |-- meta_generator.py -| |-- ml-1m -| |-- ml_data.sh -| |-- ratings.dat.test -| |-- ratings.dat.train -| |-- split.py -| |-- test.list -| `-- train.list -|-- dataprovider.py -|-- evaluate.sh -|-- prediction.py -|-- preprocess.sh -|-- requirements.txt -|-- run.sh -`-- trainer_config.py +```python +import os +train_list = [] +flist = os.listdir("/train_data/") +for f in flist: + suffix = int(f.split("-")[1]) + if suffix % TRAINER_COUNT == TRAINER_ID: + train_list.append(f) ``` -虽然这些文件并非都需要集群训练,但是也没有必要删除无用的文件。 - -`trainer_config.py` -表示模型配置文件。 -`train.list` 和 `test.list` -文件索引。它存储当前节点所有训练/测试数据的所有相对或绝对文件路径。 +示例程序`prepare.py`会把训练集和测试集分别分割成多个文件(例子中为3个,后缀为`-00000`、`-00001`和`-00002`): +``` +train.txt +train.txt-00000 +train.txt-00001 +train.txt-00002 +test.txt +test.txt-00000 +test.txt-00001 +test.txt-00002 +``` -`dataprovider.py` -用于读取训练/测试样本。这与本地训练相同。 +在进行分布式训练时,每个trainer进程需要能够读取属于自己的一份数据。在一些分布式系统中,系统会提供一个分布式存储服务,这样保存在分布式存储中的数据可以被集群中的每个节点读取到。如果不使用分布式存储,则需要手动拷贝属于每个trainer节点的训练数据到对应的节点上。 -`data` -数据目录中的所有文件被 train.list/test.list 引用。 +对于不同的训练任务,训练数据格式和训练程序的`reader()`会大不相同,所以开发者需要根据自己训练任务的实际场景完成训练数据的分割和`reader()`的编写。 +## 准备训练程序 -## 准备集群作业配置 +我们会对每个训练任务都会在每个节点上创建一个工作空间(workspace),其中包含了用户的训练程序、程序依赖、挂载或下载的训练数据分片。 -以下选项必须在 cluster_train/conf.py 中认真设置 +最后,工作空间应如下所示: +``` +. +|-- my_lib.py +|-- word_dict.pickle +|-- train.py +|-- train_data_dir/ +| |-- train.txt-00000 +| |-- train.txt-00001 +| |-- train.txt-00002 +`-- test_data_dir/ + |-- test.txt-00000 + |-- test.txt-00001 + `-- test.txt-00002 +``` -`HOSTS` 所有节点运行集群作业的主机名或 IP 。你还可以将用户和 ssh 端口附加到主机名上,例如 root@192.168.100.17:9090。 +- `my_lib.py`:会被`train.py`调用的一些用户定义的库函数,比如PIL库等。 +- `word_dict.pickle`:在`train.py`中会使用到的字典数据文件。 +- `train.py`:训练程序,代码参考[api_train_v2_cluster.py](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/howto/usage/cluster/src/word2vec/prepare.py)。***注意:*** 对于本样例代码,在使用不同的分布式计算平台时,您可能需要修改`train.py`开头的部分(如下),以便获得训练数据的位置和获取环境变量配置: -`ROOT_DIR` 用于放置 JOB 工作空间目录的工作空间 ROOT 目录 + ```python + cluster_train_file = "./train_data_dir/train/train.txt" + cluster_test_file = "./test_data_dir/test/test.txt" + node_id = os.getenv("OMPI_COMM_WORLD_RANK") + if not node_id: + raise EnvironmentError("must provied OMPI_COMM_WORLD_RANK") + ``` -`PADDLE_NIC` 集群通信通道的 NIC(Network Interface Card, 网络接口卡) 接口名称,例如以太网的 eth0,infiniband 的 ib0。 +- `train_data_dir`:包含训练数据的目录,可以是从分布式存储挂载过来的,也可以是在任务启动前下载到本地的。 +- `test_data_dir`:包含测试数据集的目录。 -`PADDLE_PORT` 集群通信通道的端口号 +# 使用分布式计算平台或工具 -`PADDLE_PORTS_NUM` 用于集群通信通道的端口数。 如果集群节点数量少(少于5〜6个节点),建议将其设置为较大,如2〜8,以获得更好的网络性能。 +PaddlePaddle可以使用多种分布式计算平台构建分布式计算任务,包括: +- [Kubernetes](http://kubernetes.io) Google开源的容器集群的调度框架,支持大规模集群生产环境的完整集群方案。 +- [OpenMPI](https://www.open-mpi.org) 成熟的高性能并行计算框架。 +- [Fabric](http://www.fabfile.org) 集群管理工具。可以使用`Fabric`编写集群任务提交和管理脚本。 -`PADDLE_PORTS_NUM_FOR_SPARSE` 用于 sparse remote updater 集群通信信道的端口数。如果使用 sparse remote update,则可以像 `PADDLE_PORTS_NUM` 一样设置。 +对于不同的集群平台,会分别介绍集群作业的启动和停止方法。这些例子都可以在[cluster_train_v2](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/scripts/cluster_train_v2)找到。 -`LD_LIBRARY_PATH` 为集群作业设置额外的 LD_LIBRARY_PATH。你可以使用它来设置 CUDA 库的路径。 +在使用分布式计算平台进行训练时,任务被调度在集群中时,分布式计算平台通常会通过API或者环境变量提供任务运行需要的参数,比如节点的ID、IP和任务节点个数等。 -默认配置如下: +## 使用Fabric启动集群作业 -```python -HOSTS = [ - "root@192.168.100.17", - "root@192.168.100.18", - ] - -''' -工作空间配置 -''' - -#工作空间根目录 -ROOT_DIR = "/home/paddle" - -''' -网络配置 -''' -#pserver NIC -PADDLE_NIC = "eth0" -#pserver 端口 -PADDLE_PORT = 7164 
-#pserver 端口数 -PADDLE_PORTS_NUM = 2 -#pserver sparse ports num -PADDLE_PORTS_NUM_FOR_SPARSE = 2 - -#集群作业中所有进程的环境设置 -LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/lib64" -``` +### 准备一个Linux集群 +可以在`paddle/scripts/cluster_train_v2/fabric/docker_cluster`目录下,执行`kubectl -f ssh_servers.yaml`启动一个测试集群,并使用`kubectl get po -o wide`获得这些节点的IP地址。 ### 启动集群作业 -`paddle.py` 提供了自动化脚本来启动不同节点中的所有 PaddlePaddle 集群进程。默认情况下,所有命令行选项可以设置为```paddle.py``` 命令选项并且 `paddle.py` 将透明、自动地将这些选项应用到 PaddlePaddle 底层进程。 + +`paddle.py` 提供了自动化脚本来启动不同节点中的所有 PaddlePaddle 集群进程。默认情况下,所有命令行选项可以设置为 `paddle.py` 命令选项并且 `paddle.py` 将透明、自动地将这些选项应用到 PaddlePaddle 底层进程。 `paddle.py` 为方便作业启动提供了两个独特的命令选项。 -`job_dispatch_package` 设为本地 `workspace` 目录,它将被分发到 conf.py 中设置的所有节点。 它有助于帮助频繁修改和访问工作区文件的用户减少负担,否则频繁的多节点工作空间部署可能会很麻烦。 -`job_workspace` 设为已部署的工作空间目录,`paddle.py` 将跳过分发阶段直接启动所有节点的集群作业。它可以帮助减少分发延迟。 +- `job_dispatch_package` 设为本地 `workspace` 目录,它将被分发到 `conf.py` 中设置的所有节点。它有助于帮助频繁修改和访问工作区文件的用户减少负担,否则频繁的多节点工作空间部署可能会很麻烦。 +- `job_workspace` 设为已部署的工作空间目录,`paddle.py` 将跳过分发阶段直接启动所有节点的集群作业。它可以帮助减少分发延迟。 -`cluster_train/run.sh` 提供了命令样例来运行 `demo/recommendation` 集群工作,只需用你定义的目录修改 `job_dispatch_package` 和 `job_workspace`,然后: +`cluster_train/run.sh` 提供了命令样例来运行 `doc/howto/usage/cluster/src/word2vec` 集群任务,只需用您定义的目录修改 `job_dispatch_package` 和 `job_workspace`,然后: ``` sh run.sh ``` @@ -149,7 +229,7 @@ sh run.sh 提供 pserver 运行日志,有助于诊断分布式错误。 `server.log` -提供 pserver 进程的 stderr 和 stdout。训练失败时可以检查错误日志。 +提供 parameter server 进程的 stderr 和 stdout。训练失败时可以检查错误日志。 `train.log` 提供训练过程的 stderr 和 stdout。训练失败时可以检查错误日志。 @@ -157,3 +237,49 @@ sh run.sh ### 检查模型输出 运行完成后,模型文件将被写入节点 0 的 `output` 目录中。 工作空间中的 `nodefile` 表示当前集群作业的节点 ID。 + +## 在OpenMPI集群中提交训练作业 + +### 准备OpenMPI集群 + +执行下面的命令以启动3个节点的OpenMPI集群和一个"head"节点: + +```bash +paddle/scripts/cluster_train_v2/openmpi/docker_cluster +kubectl create -f head.yaml +kubectl create -f mpi-nodes.yaml +``` + +然后可以从head节点ssh无密码登录到OpenMPI的每个节点上。 + +### 启动集群作业 + +您可以按照下面的步骤在OpenMPI集群中提交paddle训练任务: + +```bash +# 获得head和node节点的IP地址 +kubectl get po -o wide +# 将node节点的IP地址保存到machines文件中 +kubectl get po -o wide | grep nodes | awk '{print $6}' > machines +# 拷贝必要的文件到head节点 +scp -i ssh/id_rsa.mpi.pub machines prepare.py train.py start_mpi_train.sh tutorial@[headIP]:~ +# ssh 登录到head节点 +ssh -i ssh/id_rsa.mpi.pub tutorial@[headIP] +# --------------- 以下操作均在head节点中执行 --------------- +# 准备训练数据 +python prepare.py +# 拷贝训练程序和字典文件到每台MPI节点 +cat machines | xargs -i scp word_dict.pickle train.py start_mpi_train.sh machines {}:/home/tutorial +# 创建日志目录 +mpirun -hostfile machines -n 3 mkdir /home/tutorial/logs +# 拷贝训练数据到各自的节点 +scp train.txt-00000 test.txt-00000 [node1IP]:/home/tutorial +scp train.txt-00001 test.txt-00001 [node2IP]:/home/tutorial +scp train.txt-00002 test.txt-00002 [node3IP]:/home/tutorial +# 启动训练任务 +mpirun -hostfile machines -n 3 /home/tutorial/start_mpi_train.sh +``` + +## 在Kubernetes集群中提交训练作业 + +此部分的使用方法可以参考[here](../k8s/k8s_distributed_cn.md)。 diff --git a/doc/howto/usage/cluster/cluster_train_en.md b/doc/howto/usage/cluster/cluster_train_en.md index c60876721cbf5565d6e48c8061811aacada748cd..1e8b4d54b9ffa99b3beef35ecaf95bbd0866535f 100644 --- a/doc/howto/usage/cluster/cluster_train_en.md +++ b/doc/howto/usage/cluster/cluster_train_en.md @@ -1,129 +1,220 @@ -# Run Distributed Training +# PaddlePaddle Distributed Training + +* [Introduction](#introduction) +* [Preparations](#preparations) +* [Command-line arguments](#command-line-arguments) + * [Starting parameter server](#starting-parameter-server) + * [Starting trainer](#starting-trainer) + * [Prepare Training 
Dataset](#prepare-training-dataset) + * [Prepare Training program](#prepare-training-program) +* [Use cluster platforms or cluster management tools](#use-cluster-platforms-or-cluster-management-tools) + * [Cluster Training Using Fabric](#cluster-training-using-fabric) + * [Prepare a Linux cluster](#prepare-a-linux-cluster) + * [Launching Cluster Job](#launching-cluster-job) + * [Kill Cluster Job](#kill-cluster-job) + * [Check Cluster Training Result](#check-cluster-training-result) + * [Check Model Output](#check-model-output) + * [Cluster Training Using OpenMPI](#cluster-training-using-openmpi) + * [Prepare an OpenMPI cluster](#prepare-an-openmpi-cluster) + * [Launching Cluster Job](#launching-cluster-job-1) + * [Cluster Training Using Kubernetes](#cluster-training-using-kubernetes) + +# Introduction + +In this article, we'll explain how to run distributed training jobs with PaddlePaddle on different types of clusters. The diagram below shows the main architecture of a distributed trainning job: + + + +- Data shard: training data will be split into multiple partitions, trainers use the partitions of the whole dataset to do the training job. +- Trainer: each trainer reads the data shard, and train the neural network. Then the trainer will upload calculated "gradients" to parameter servers, and wait for parameters to be optimized on the parameter server side. When that finishes, the trainer download optimized parameters and continues its training. +- Parameter server: every parameter server stores part of the whole neural network model data. They will do optimization calculations when gradients are uploaded from trainers, and then send updated parameters to trainers. + +PaddlePaddle can support both synchronize stochastic gradient descent (SGD) and asynchronous SGD. + +When training with synchronize SGD, PaddlePaddle uses an internal "synchronize barrier" which makes gradients update and parameter download in strict order. On the other hand, asynchronous SGD won't wait for all trainers to finish upload at a single step, this will increase the parallelism of distributed training: parameter servers do not depend on each other, they'll do parameter optimization concurrently. Parameter servers will not wait for trainers, so trainers will also do their work concurrently. But asynchronous SGD will introduce more randomness and noises in the gradient. + +# Preparations +1. Prepare your computer cluster. It's normally a bunch of Linux servers connected by LAN. Each server will be assigned a unique IP address. The computers in the cluster can be called "nodes". +2. Install PaddlePaddle on every node. If you are going to take advantage of GPU cards, you'll also need to install proper driver and CUDA libraries. To install PaddlePaddle please read [this build and install](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/getstarted/build_and_install) document. We strongly recommend using [Docker installation](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/getstarted/build_and_install/docker_install_en.rst). + +After installation, you can check the version by typing the below command (run a docker container if using docker: `docker run -it paddlepaddle/paddle:[tag] /bin/bash`): + +```bash +$ paddle version +PaddlePaddle 0.10.0rc, compiled with + with_avx: ON + with_gpu: OFF + with_double: OFF + with_python: ON + with_rdma: OFF + with_timer: OFF +``` -In this article, we explain how to run distributed Paddle training jobs on clusters. 
We will create the distributed version of the single-process training example, [recommendation](https://github.com/baidu/Paddle/tree/develop/demo/recommendation). +We'll take `doc/howto/usage/cluster/src/word2vec` as an example to introduce distributed training using PaddlePaddle v2 API. -[Scripts](https://github.com/baidu/Paddle/tree/develop/paddle/scripts/cluster_train) used in this article launch distributed jobs via SSH. They also work as a reference for users running more sophisticated cluster management systems like MPI and [Kubernetes](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/howto/usage/k8s). +# Command-line arguments -## Prerequisite +## Starting parameter server -1. Aforementioned scripts use a Python library [fabric](http://www.fabfile.org/) to run SSH commands. We can use `pip` to install fabric: +Type the command below to start a parameter server, which will wait for trainers to connect: - ```bash - pip install fabric - ``` +```bash +$ paddle pserver --port=7164 --ports_num=1 --ports_num_for_sparse=1 --num_gradient_servers=1 +``` -1. We need to install PaddlePaddle on all nodes in the cluster. To enable GPUs, we need to install CUDA in `/usr/local/cuda`; otherwise Paddle would report errors at runtime. +If you wish to run parameter servers in the background and save a log file, you can type: +```bash +$ stdbuf -oL /usr/bin/nohup paddle pserver --port=7164 --ports_num=1 --ports_num_for_sparse=1 --num_gradient_servers=1 &> pserver.log +``` -1. Set the `ROOT_DIR` variable in [`cluster_train/conf.py`] on all nodes. For convenience, we often create a Unix user `paddle` on all nodes and set `ROOT_DIR=/home/paddle`. In this way, we can write public SSH keys into `/home/paddle/.ssh/authorized_keys` so that user `paddle` can SSH to all nodes without password. +| param | required | default | description | +| ------------- | ------------- | ------------- | ------------- | +| port | required | 7164 | port which the parameter server will listen on. If ports_num is greater than 1, the parameter server will listen on multiple ports for more network throughput | +| ports_num | required | 1 | total number of ports to listen on | +| ports_num_for_sparse | required | 1 | number of ports which serve sparse parameter updates | +| num_gradient_servers | required | 1 | total number of gradient servers | -## Prepare Job Workspace +## Starting trainer +Type the command below to start the trainer (name the file whatever you want, like "train.py"): -We refer to the directory where we put dependent libraries, config files, etc., as *workspace*. +```bash +$ python train.py +``` -These `train/test` data should be prepared before launching cluster job. To satisfy the requirement that train/test data are placed in different directory from workspace, PADDLE refers train/test data according to index file named as `train.list/test.list` which are used in model config file. So the train/test data also contains train.list/test.list two list file. All local training demo already provides scripts to help you create these two files, and all nodes in cluster job will handle files with same logical code in normal condition. +The trainers' network needs to be connected with the parameter servers' network to finish the job. Trainers need to know the ports and IPs to locate the parameter servers. You can pass these arguments to trainers through [environment variables](https://en.wikipedia.org/wiki/Environment_variable) or pass them to the `paddle.init()` function. Arguments passed to the `paddle.init()` function will overwrite environment variables.
-Generally, you can use same model file from local training for cluster training. What you should have in mind that, the `batch_size` set in `setting` function in model file means batch size in `each` node of cluster job instead of total batch size if synchronization SGD was used. +Use environment variables: -Following steps are based on [demo/recommendation](https://github.com/PaddlePaddle/Paddle/tree/develop/demo/recommendation) demo in demo directory. +```bash +export PADDLE_INIT_USE_GPU=False +export PADDLE_INIT_TRAINER_COUNT=1 +export PADDLE_INIT_PORT=7164 +export PADDLE_INIT_PORTS_NUM=1 +export PADDLE_INIT_PORTS_NUM_FOR_SPARSE=1 +export PADDLE_INIT_NUM_GRADIENT_SERVERS=1 +export PADDLE_INIT_TRAINER_ID=0 +export PADDLE_INIT_PSERVERS=127.0.0.1 +python train.py +``` -You just go through demo/recommendation tutorial doc until `Train` section, and at last you will get train/test data and model configuration file. Finaly, just use demo/recommendation as workspace for cluster training. +Pass arguments: -At last your workspace should look like as follow: +```python +paddle.init( + use_gpu=False, + trainer_count=1, + port=7164, + ports_num=1, + ports_num_for_sparse=1, + num_gradient_servers=1, + trainer_id=0, + pservers="127.0.0.1") ``` -. -|-- common_utils.py -|-- data -| |-- config.json -| |-- config_generator.py -| |-- meta.bin -| |-- meta_config.json -| |-- meta_generator.py -| |-- ml-1m -| |-- ml_data.sh -| |-- ratings.dat.test -| |-- ratings.dat.train -| |-- split.py -| |-- test.list -| `-- train.list -|-- dataprovider.py -|-- evaluate.sh -|-- prediction.py -|-- preprocess.sh -|-- requirements.txt -|-- run.sh -`-- trainer_config.py + +| param | required | default | description | +| ------------- | ------------- | ------------- | ------------- | +| use_gpu | optional | False | set to "True" to enable GPU training | +| trainer_count | required | 1 | total count of trainers in the training job | +| port | required | 7164 | port to connect to parameter server | +| ports_num | required | 1 | number of ports for communication | +| ports_num_for_sparse | required | 1 | number of ports for sparse type calculation | +| num_gradient_servers | required | 1 | total number of gradient servers | +| trainer_id | required | 0 | ID for every trainer, starting from 0 | +| pservers | required | 127.0.0.1 | list of IPs of parameter servers, separated by "," | + +## Prepare Training Dataset + +Here's some example code, [prepare.py](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/howto/usage/cluster/src/word2vec/prepare.py); it will download the public `imikolov` dataset and split it into multiple files according to the job parallelism (trainer count). Modify `SPLIT_COUNT` at the beginning of `prepare.py` to change the number of output files. + +In the real world, we often use a `MapReduce` job's output as training data, so there will be lots of files. You can use `mod` to assign training files to trainers: + +```python +import os +train_list = [] +flist = os.listdir("/train_data/") +for f in flist: + suffix = int(f.split("-")[1]) + if suffix % TRAINER_COUNT == TRAINER_ID: + train_list.append(f) +``` + +The example code `prepare.py` will split training data and testing data into 3 files with numeric suffixes like `-00000`, `-00001` and `-00002`: + +``` +train.txt +train.txt-00000 +train.txt-00001 +train.txt-00002 +test.txt +test.txt-00000 +test.txt-00001 +test.txt-00002 +``` -Not all of these files are needed for cluster training, but it's not necessary to remove useless files. 
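Putting the two ideas together, each trainer only has to open the shard whose numeric suffix matches its own ID. The helper below is a rough sketch of such a `reader()`; the function name is ours, and the sample script `api_train_v2_cluster.py` shown later in this patch implements the same idea as `cluster_reader_cluster`. It assumes the comma-separated integer lines produced by `prepare.py`:

```python
# Sketch of a shard-aware reader(): opens the "-00000"-style shard that belongs
# to this trainer and yields one training sample (a tuple of ints) per line.
# How node_id is obtained depends on the cluster platform (see the next section).
def shard_reader(filename, node_id):
    def reader():
        with open("%s-%05d" % (filename, int(node_id)), "r") as f:
            for line in f:
                yield tuple(int(cell) for cell in line.split(","))
    return reader

# Illustrative usage inside the trainer:
#   paddle.batch(shard_reader("./train_data_dir/train/train.txt", node_id), 32)
```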
-`trainer_config.py` -Indicates the model config file. +When the job starts, every trainer needs to get its own part of the data. In some distributed systems a storage service will be provided, so the data under that path can be accessed by all the trainer nodes. Without the storage service, you must copy the training data to each trainer node. -`train.list` and `test.list` -File index. It stores all relative or absolute file paths of all train/test data at current node. +Different training jobs may have different data formats and `reader()` functions, so developers may need to write different data preparation scripts and `reader()` functions for their jobs. -`dataprovider.py` -used to read train/test samples. It's same as local training. +## Prepare Training program -`data` -all files in data directory are refered by train.list/test.list which are refered by data provider. +We'll create a *workspace* directory on each node, storing your training program, dependencies, and mounted or downloaded dataset directories. -## Prepare Cluster Job Configuration +Your workspace may look like: +``` +. +|-- my_lib.py +|-- word_dict.pickle +|-- train.py +|-- train_data_dir/ +| |-- train.txt-00000 +| |-- train.txt-00001 +| |-- train.txt-00002 +`-- test_data_dir/ + |-- test.txt-00000 + |-- test.txt-00001 + `-- test.txt-00002 +``` -The options below must be carefully set in cluster_train/conf.py +- `my_lib.py`: user-defined libraries, like PIL libs. This is optional. +- `word_dict.pickle`: dict file for training word embedding. +- `train.py`: training program. Sample code: [api_train_v2_cluster.py](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/howto/usage/cluster/src/word2vec/prepare.py). ***NOTE:*** You may need to modify the head part of `train.py` when using a different cluster platform to retrieve configuration environment variables (a platform-agnostic sketch follows the platform list below): -`HOSTS` all nodes hostname or ip that will run cluster job. You can also append user and ssh port with hostname, such as root@192.168.100.17:9090. + ```python + cluster_train_file = "./train_data_dir/train/train.txt" + cluster_test_file = "./test_data_dir/test/test.txt" + node_id = os.getenv("OMPI_COMM_WORLD_RANK") + if not node_id: + raise EnvironmentError("must provied OMPI_COMM_WORLD_RANK") + ``` -`ROOT_DIR` workspace ROOT directory for placing JOB workspace directory +- `train_data_dir`: contains the training data. Mount it from a storage service or copy the training data to it. +- `test_data_dir`: contains the testing data. -`PADDLE_NIC` the NIC(Network Interface Card) interface name for cluster communication channel, such as eth0 for ethternet, ib0 for infiniband. +# Use cluster platforms or cluster management tools -`PADDLE_PORT` port number for cluster commnunication channel +PaddlePaddle supports running jobs on several platforms including: +- [Kubernetes](http://kubernetes.io): an open-source system from Google for automating deployment, scaling, and management of containerized applications. +- [OpenMPI](https://www.open-mpi.org): a mature high performance parallel computing framework. +- [Fabric](http://www.fabfile.org): a cluster management tool. Write scripts to submit jobs or manage the cluster. -`PADDLE_PORTS_NUM` the number of port used for cluster communication channle. if the number of cluster nodes is small(less than 5~6nodes), recommend you set it to larger, such as 2 ~ 8, for better network performance. +We'll introduce cluster job management on these platforms.
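Each of these platforms exposes the trainer's identity in its own way, which is why the head of `train.py` shown above reads `OMPI_COMM_WORLD_RANK`. The snippet below is a minimal sketch of our own for making that lookup platform-agnostic; `PADDLE_INIT_TRAINER_ID` is the variable used elsewhere in this document and would have to be exported by your own launch scripts on platforms other than OpenMPI:

```python
import os

# Resolve this trainer's ID from whatever the platform provides.
# OMPI_COMM_WORLD_RANK is set by OpenMPI's mpirun; PADDLE_INIT_TRAINER_ID is
# assumed to be exported by your own launch scripts on other platforms.
def get_trainer_id():
    for var in ("OMPI_COMM_WORLD_RANK", "PADDLE_INIT_TRAINER_ID"):
        value = os.getenv(var)
        if value is not None:
            return int(value)
    raise EnvironmentError("could not determine the trainer id from the environment")
```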
The examples can be found under [cluster_train_v2](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/scripts/cluster_train_v2). -`PADDLE_PORTS_NUM_FOR_SPARSE` the number of port used for sparse updater cluster commnunication channel. if sparse remote update is used, set it like `PADDLE_PORTS_NUM` +These cluster platforms provide APIs or environment variables for training processes when the job is dispatched to different nodes, such as the node ID, IP address, or total number of nodes. -`LD_LIBRARY_PATH` set addtional LD_LIBRARY_PATH for cluster job. You can use it to set CUDA libraries path. +## Cluster Training Using Fabric -Default Configuration as follow: +### Prepare a Linux cluster -```python -HOSTS = [ - "root@192.168.100.17", - "root@192.168.100.18", - ] - -''' -workspace configuration -''' - -#root dir for workspace -ROOT_DIR = "/home/paddle" - -''' -network configuration -''' -#pserver nics -PADDLE_NIC = "eth0" -#pserver port -PADDLE_PORT = 7164 -#pserver ports num -PADDLE_PORTS_NUM = 2 -#pserver sparse ports num -PADDLE_PORTS_NUM_FOR_SPARSE = 2 - -#environments setting for all processes in cluster job -LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/lib64" -``` +Running `kubectl create -f ssh_servers.yaml` under the directory `paddle/scripts/cluster_train_v2/fabric/docker_cluster` will launch a demo cluster. Run `kubectl get po -o wide` to get the IP addresses of these nodes. ### Launching Cluster Job -`paddle.py` provides automatical scripts to start all PaddlePaddle cluster processes in different nodes. By default, all command line options can set as `paddle.py` command options and `paddle.py` will transparently and automatically set these options to PaddlePaddle lower level processes. +`paddle.py` provides automated scripts to start all PaddlePaddle cluster processes in different nodes. By default, all command line options can be set as `paddle.py` command options and `paddle.py` will transparently and automatically pass these options to the PaddlePaddle lower level processes. `paddle.py`provides two distinguished command option for easy job launching. -`job_dispatch_package` set it with local `workspace`directory, it will be dispatched to all nodes set in conf.py. It could be helpful for frequent hacking workspace files, otherwise frequent mulit-nodes workspace deployment could make your crazy. -`job_workspace` set it with already deployed workspace directory, `paddle.py` will skip dispatch stage to directly launch cluster job with all nodes. It could help to reduce heavy +- `job_dispatch_package` set it to the local `workspace` directory; it will be dispatched to all nodes set in `conf.py`. It could be helpful for frequently manipulating workspace files; otherwise, frequent multi-node workspace deployment is very annoying. +- `job_workspace` set it to an already deployed workspace directory; `paddle.py` will skip the dispatch stage and directly launch the cluster job on all nodes. It could help to reduce heavy dispatch latency. `cluster_train/run.sh` provides command line sample to run `demo/recommendation` cluster job, just modify `job_dispatch_package` and `job_workspace` with your defined directory, then: @@ -134,23 +225,69 @@ sh run.sh The cluster Job will start in several seconds. ### Kill Cluster Job -`paddle.py` can capture `Ctrl + C` SIGINT signal to automatically kill all processes launched by it. So just stop `paddle.py` to kill cluster job. You should mannally kill job if program crashed. 
+`paddle.py` can capture the `Ctrl + C` SIGINT signal to automatically kill all processes launched by it, so just stop `paddle.py` to kill the cluster job. You should manually kill the job if the program crashed. ### Check Cluster Training Result Check log in $workspace/log for details, each node owns same log structure. `paddle_trainer.INFO` -It provides almost all interal output log for training, same as local training. Check runtime model convergence here. +It provides almost all internal output logs for training, the same as local training. Check runtime model convergence here. `paddle_pserver2.INFO` -It provides pserver running log, which could help to diagnose distributed error. +It provides the parameter server running log, which could help to diagnose distributed errors. `server.log` -It provides stderr and stdout of pserver process. Check error log if training crashs. +It provides stderr and stdout of the parameter server process. Check the error log if training crashes. `train.log` -It provides stderr and stdout of trainer process. Check error log if training crashs. +It provides stderr and stdout of the trainer process. Check the error log if training crashes. ### Check Model Output -After one pass finished, model files will be writed in `output` directory in node 0. +After one pass has finished, model files will be written to the `output` directory on node 0. `nodefile` in workspace indicates the node id of current cluster job. + +## Cluster Training Using OpenMPI + +### Prepare an OpenMPI cluster + +Run the following commands to start a 3-node MPI cluster and one "head" node. + +```bash +cd paddle/scripts/cluster_train_v2/openmpi/docker_cluster +kubectl create -f head.yaml +kubectl create -f mpi-nodes.yaml +``` + +Then you can log in to every OpenMPI node using ssh without entering any passwords. 
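Before launching the job it is worth confirming that the password-less SSH setup really works from the head node. The loop below is a small check of our own, not part of the tutorial scripts; run it on the head node once the `machines` file described in the next section has been copied there:

```bash
# Sanity check: every node listed in the "machines" file should print its
# hostname without prompting for a password. BatchMode makes ssh fail fast
# instead of asking interactively.
while read ip; do
    ssh -o BatchMode=yes "$ip" hostname || echo "password-less ssh to $ip failed"
done < machines
```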
+ +### Launching Cluster Job + +Follow the steps to launch a PaddlePaddle training job in OpenMPI cluster:\ + +```bash +# find out node IP addresses +kubectl get po -o wide +# generate a "machines" file containing node IP addresses +kubectl get po -o wide | grep nodes | awk '{print $6}' > machines +# copy necessary files onto "head" node +scp -i ssh/id_rsa.mpi.pub machines prepare.py train.py start_mpi_train.sh tutorial@[headIP]:~ +# login to head node using ssh +ssh -i ssh/id_rsa.mpi.pub tutorial@[headIP] +# --------------- in head node --------------- +# prepare training data +python prepare.py +# copy training data and dict file to MPI nodes +cat machines | xargs -i scp word_dict.pickle train.py start_mpi_train.sh machines {}:/home/tutorial +# creat a directory for storing log files +mpirun -hostfile machines -n 3 mkdir /home/tutorial/logs +# copy training data to every node +scp train.txt-00000 test.txt-00000 [node1IP]:/home/tutorial +scp train.txt-00001 test.txt-00001 [node2IP]:/home/tutorial +scp train.txt-00002 test.txt-00002 [node3IP]:/home/tutorial +# start the job +mpirun -hostfile machines -n 3 /home/tutorial/start_mpi_train.sh +``` + +## Cluster Training Using Kubernetes + +The details can be found [here](../k8s/k8s_cn.md) diff --git a/doc/howto/usage/cluster/src/trainer.png b/doc/howto/usage/cluster/src/trainer.png new file mode 100644 index 0000000000000000000000000000000000000000..6537d3d56589ca9f19a77a50a970e4b5275e6ce0 Binary files /dev/null and b/doc/howto/usage/cluster/src/trainer.png differ diff --git a/doc/howto/usage/cluster/src/trainer_cn.png b/doc/howto/usage/cluster/src/trainer_cn.png new file mode 100644 index 0000000000000000000000000000000000000000..f9525739cc8bc6506adde642aafa0a85ae3ebebc Binary files /dev/null and b/doc/howto/usage/cluster/src/trainer_cn.png differ diff --git a/doc/howto/usage/cluster/src/word2vec/api_train_v2.py b/doc/howto/usage/cluster/src/word2vec/api_train_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..c0940f0e56eafa22f8aeb7052c0ddc79d8862917 --- /dev/null +++ b/doc/howto/usage/cluster/src/word2vec/api_train_v2.py @@ -0,0 +1,100 @@ +import gzip +import math + +import paddle.v2 as paddle + +embsize = 32 +hiddensize = 256 +N = 5 + + +def wordemb(inlayer): + wordemb = paddle.layer.embedding( + input=inlayer, + size=embsize, + param_attr=paddle.attr.Param( + name="_proj", + initial_std=0.001, + learning_rate=1, + l2_rate=0, + sparse_update=True)) + return wordemb + + +def main(): + # for local training + cluster_train = False + + if not cluster_train: + paddle.init(use_gpu=False, trainer_count=1) + else: + paddle.init( + use_gpu=False, + trainer_count=2, + port=7164, + ports_num=1, + ports_num_for_sparse=1, + num_gradient_servers=1) + word_dict = paddle.dataset.imikolov.build_dict() + dict_size = len(word_dict) + firstword = paddle.layer.data( + name="firstw", type=paddle.data_type.integer_value(dict_size)) + secondword = paddle.layer.data( + name="secondw", type=paddle.data_type.integer_value(dict_size)) + thirdword = paddle.layer.data( + name="thirdw", type=paddle.data_type.integer_value(dict_size)) + fourthword = paddle.layer.data( + name="fourthw", type=paddle.data_type.integer_value(dict_size)) + nextword = paddle.layer.data( + name="fifthw", type=paddle.data_type.integer_value(dict_size)) + + Efirst = wordemb(firstword) + Esecond = wordemb(secondword) + Ethird = wordemb(thirdword) + Efourth = wordemb(fourthword) + + contextemb = paddle.layer.concat(input=[Efirst, Esecond, Ethird, Efourth]) + hidden1 = 
paddle.layer.fc(input=contextemb, + size=hiddensize, + act=paddle.activation.Sigmoid(), + layer_attr=paddle.attr.Extra(drop_rate=0.5), + bias_attr=paddle.attr.Param(learning_rate=2), + param_attr=paddle.attr.Param( + initial_std=1. / math.sqrt(embsize * 8), + learning_rate=1)) + predictword = paddle.layer.fc(input=hidden1, + size=dict_size, + bias_attr=paddle.attr.Param(learning_rate=2), + act=paddle.activation.Softmax()) + + def event_handler(event): + if isinstance(event, paddle.event.EndIteration): + if event.batch_id % 100 == 0: + with gzip.open("batch-" + str(event.batch_id) + ".tar.gz", + 'w') as f: + trainer.save_parameter_to_tar(f) + result = trainer.test( + paddle.batch( + paddle.dataset.imikolov.test(word_dict, N), 32)) + print "Pass %d, Batch %d, Cost %f, %s, Testing metrics %s" % ( + event.pass_id, event.batch_id, event.cost, event.metrics, + result.metrics) + + cost = paddle.layer.classification_cost(input=predictword, label=nextword) + + parameters = paddle.parameters.create(cost) + adagrad = paddle.optimizer.AdaGrad( + learning_rate=3e-3, + regularization=paddle.optimizer.L2Regularization(8e-4)) + trainer = paddle.trainer.SGD(cost, + parameters, + adagrad, + is_local=not cluster_train) + trainer.train( + paddle.batch(paddle.dataset.imikolov.train(word_dict, N), 32), + num_passes=30, + event_handler=event_handler) + + +if __name__ == '__main__': + main() diff --git a/doc/howto/usage/cluster/src/word2vec/api_train_v2_cluster.py b/doc/howto/usage/cluster/src/word2vec/api_train_v2_cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..2e6d8887124a5524505b097803a60a35478ca644 --- /dev/null +++ b/doc/howto/usage/cluster/src/word2vec/api_train_v2_cluster.py @@ -0,0 +1,123 @@ +import math +import os +import paddle.v2 as paddle +import pickle + +embsize = 32 +hiddensize = 256 +N = 5 +cluster_train_file = "./train_data_dir/train/train.txt" +cluster_test_file = "./test_data_dir/test/test.txt" +node_id = os.getenv("OMPI_COMM_WORLD_RANK") +if not node_id: + raise EnvironmentError("must provied OMPI_COMM_WORLD_RANK") + + +def wordemb(inlayer): + wordemb = paddle.layer.embedding( + input=inlayer, + size=embsize, + param_attr=paddle.attr.Param( + name="_proj", + initial_std=0.001, + learning_rate=1, + l2_rate=0, + sparse_update=True)) + return wordemb + + +def cluster_reader_cluster(filename, node_id): + def cluster_reader(): + with open("-".join([filename, "%05d" % int(node_id)]), "r") as f: + for l in f: + csv_data = [int(cell) for cell in l.split(",")] + yield tuple(csv_data) + + return cluster_reader + + +def main(): + # get arguments from env + + # for local training + TRUTH = ["true", "True", "TRUE", "1", "yes", "Yes", "YES"] + cluster_train = os.getenv('PADDLE_CLUSTER_TRAIN', "False") in TRUTH + use_gpu = os.getenv('PADDLE_INIT_USE_GPU', "False") + + if not cluster_train: + paddle.init( + use_gpu=use_gpu, + trainer_count=int(os.getenv("PADDLE_INIT_TRAINER_COUNT", "1"))) + else: + paddle.init( + use_gpu=use_gpu, + trainer_count=int(os.getenv("PADDLE_INIT_TRAINER_COUNT", "1")), + port=int(os.getenv("PADDLE_INIT_PORT", "7164")), + ports_num=int(os.getenv("PADDLE_INIT_PORTS_NUM", "1")), + ports_num_for_sparse=int( + os.getenv("PADDLE_INIT_PORTS_NUM_FOR_SPARSE", "1")), + num_gradient_servers=int( + os.getenv("PADDLE_INIT_NUM_GRADIENT_SERVERS", "1")), + trainer_id=int(os.getenv("PADDLE_INIT_TRAINER_ID", "0")), + pservers=os.getenv("PADDLE_INIT_PSERVERS", "127.0.0.1")) + fn = open("thirdparty/wuyi_train_thdpty/word_dict.pickle", "r") + word_dict = pickle.load(fn) + 
fn.close() + dict_size = len(word_dict) + firstword = paddle.layer.data( + name="firstw", type=paddle.data_type.integer_value(dict_size)) + secondword = paddle.layer.data( + name="secondw", type=paddle.data_type.integer_value(dict_size)) + thirdword = paddle.layer.data( + name="thirdw", type=paddle.data_type.integer_value(dict_size)) + fourthword = paddle.layer.data( + name="fourthw", type=paddle.data_type.integer_value(dict_size)) + nextword = paddle.layer.data( + name="fifthw", type=paddle.data_type.integer_value(dict_size)) + + Efirst = wordemb(firstword) + Esecond = wordemb(secondword) + Ethird = wordemb(thirdword) + Efourth = wordemb(fourthword) + + contextemb = paddle.layer.concat(input=[Efirst, Esecond, Ethird, Efourth]) + hidden1 = paddle.layer.fc(input=contextemb, + size=hiddensize, + act=paddle.activation.Sigmoid(), + layer_attr=paddle.attr.Extra(drop_rate=0.5), + bias_attr=paddle.attr.Param(learning_rate=2), + param_attr=paddle.attr.Param( + initial_std=1. / math.sqrt(embsize * 8), + learning_rate=1)) + predictword = paddle.layer.fc(input=hidden1, + size=dict_size, + bias_attr=paddle.attr.Param(learning_rate=2), + act=paddle.activation.Softmax()) + + def event_handler(event): + if isinstance(event, paddle.event.EndIteration): + if event.batch_id % 100 == 0: + result = trainer.test( + paddle.batch( + cluster_reader_cluster(cluster_test_file, node_id), 32)) + print "Pass %d, Batch %d, Cost %f, %s, Testing metrics %s" % ( + event.pass_id, event.batch_id, event.cost, event.metrics, + result.metrics) + + cost = paddle.layer.classification_cost(input=predictword, label=nextword) + parameters = paddle.parameters.create(cost) + adagrad = paddle.optimizer.AdaGrad( + learning_rate=3e-3, + regularization=paddle.optimizer.L2Regularization(8e-4)) + trainer = paddle.trainer.SGD(cost, + parameters, + adagrad, + is_local=not cluster_train) + trainer.train( + paddle.batch(cluster_reader_cluster(cluster_train_file, node_id), 32), + num_passes=30, + event_handler=event_handler) + + +if __name__ == '__main__': + main() diff --git a/doc/howto/usage/cluster/src/word2vec/prepare.py b/doc/howto/usage/cluster/src/word2vec/prepare.py new file mode 100644 index 0000000000000000000000000000000000000000..24f5c5b26d37ea03de3ab4dc2d967a4bd009eef0 --- /dev/null +++ b/doc/howto/usage/cluster/src/word2vec/prepare.py @@ -0,0 +1,41 @@ +import paddle.v2 as paddle +import tarfile +import os +import pickle + +SPLIT_COUNT = 3 +N = 5 + + +def file_len(fd): + for i, l in enumerate(fd): + pass + return i + 1 + + +def split_from_reader_by_line(filename, reader, split_count): + fn = open(filename, "w") + for batch_id, batch_data in enumerate(reader()): + batch_data_str = [str(d) for d in batch_data] + fn.write(",".join(batch_data_str)) + fn.write("\n") + fn.close() + + fn = open(filename, "r") + total_line_count = file_len(fn) + fn.close() + per_file_lines = total_line_count / split_count + 1 + cmd = "split -d -a 5 -l %d %s %s-" % (per_file_lines, filename, filename) + os.system(cmd) + + +word_dict = paddle.dataset.imikolov.build_dict() +with open("word_dict.pickle", "w") as dict_f: + pickle.dump(word_dict, dict_f) + +split_from_reader_by_line("train.txt", + paddle.dataset.imikolov.train(word_dict, N), + SPLIT_COUNT) +split_from_reader_by_line("test.txt", + paddle.dataset.imikolov.test(word_dict, N), + SPLIT_COUNT) diff --git a/go/glide.lock b/go/glide.lock index 1ecdd217520e0a62b546b4c7048a25f4316d3f37..aabc03657fff299581c61ed2a220e1c615cd6dfe 100644 --- a/go/glide.lock +++ b/go/glide.lock @@ -1,6 +1,8 @@ -hash: 
1b9b07408ca7fac27a374dc2ccd2433e4bff090484008a037df967284949a582 -updated: 2017-08-07T23:37:48.867469328Z +hash: 328e7b9b7306b45e7b9879139a9f86698115981f6283032e1312093a6a6ddb04 +updated: 2017-10-16T08:00:23.484693528Z imports: +- name: github.com/alecthomas/gometalinter + version: bae2f1293d092fd8167939d5108d1b025eaef9de - name: github.com/beorn7/perks version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 subpackages: @@ -10,7 +12,7 @@ imports: - name: github.com/cockroachdb/cmux version: 112f0506e7743d64a6eb8fedbcff13d9979bbf92 - name: github.com/coreos/etcd - version: d0d1a87aa96ae14914751d42264262cb69eda170 + version: f1d7dd87da3e8feab4aaf675b8e29c6a5ed5f58b subpackages: - alarm - auth @@ -149,7 +151,7 @@ imports: - name: github.com/satori/go.uuid version: 879c5887cd475cd7864858769793b2ceb0d44feb - name: github.com/sirupsen/logrus - version: a3f95b5c423586578a4e099b11a46c2479628cac + version: f006c2ac4710855cf0f916dd6b77acf6b048dc6e - name: github.com/topicai/candy version: 1b9030d056fa9f8c4b1f9c91b52fe4b8ab4cd8cc - name: github.com/ugorji/go @@ -159,12 +161,13 @@ imports: - name: github.com/xiang90/probing version: 07dd2e8dfe18522e9c447ba95f2fe95262f63bb2 - name: golang.org/x/crypto - version: 1351f936d976c60a0a48d728281922cf63eafb8d + version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3 repo: https://github.com/golang/crypto.git vcs: git subpackages: - bcrypt - blowfish + - ssh/terminal - name: golang.org/x/net version: c8c74377599bd978aee1cf3b9b63a8634051cec2 subpackages: @@ -219,3 +222,4 @@ testImports: version: 05e8a0eda380579888eb53c394909df027f06991 subpackages: - assert + diff --git a/go/glide.yaml b/go/glide.yaml index a90e71b615de92d64c79823e2a04c46001963932..4b22ab2caaae2272e3aab0eeba0758925c67d448 100644 --- a/go/glide.yaml +++ b/go/glide.yaml @@ -24,3 +24,5 @@ import: vcs: git - package: github.com/satori/go.uuid version: v1.1.0 +- package: github.com/alecthomas/gometalinter + version: v1.2.1 diff --git a/go/pserver/client/client.go b/go/pserver/client/client.go index 20d91e77034e1a0c6825bc401175e6dc1afec52f..e5187ce3df77cb983e070508230c51c078f1e07b 100644 --- a/go/pserver/client/client.go +++ b/go/pserver/client/client.go @@ -137,7 +137,7 @@ func (c *Client) FinishInitParams() error { return err } } - return nil + return c.sel.Done() } // SendGrads sends gradients to parameter servers for updating diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index 2c458a78c598bf206b30c0c07599ce605af77701..e767856d5012fd205f6b57f9721d0cbca8dc46ed 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -28,23 +28,37 @@ add_style_check_target(paddle_capi ${CAPI_SOURCES} ${CAPI_HEADER} add_dependencies(paddle_capi paddle_proto) -# combine all paddle static libraries together, into libpaddle_capi_whole.a -# user should use PaddleCAPI as -lpaddle_capi_whole -set(PADDLE_CAPI_INFER_LIBS - paddle_utils - paddle_parameter - paddle_math - paddle_cuda - paddle_function - paddle_gserver - paddle_proto) - +# TODO: paddle_capi_whole will be removed. 
+if(MOBILE_INFERENCE) + set(PADDLE_CAPI_INFER_LIBS + paddle_utils + paddle_parameter + paddle_math + paddle_cuda + paddle_function + paddle_gserver + paddle_proto) +else() + set(PADDLE_CAPI_INFER_LIBS + paddle_utils + paddle_parameter + paddle_math + paddle_cuda + paddle_function + paddle_gserver + paddle_proto + paddle_pserver + paddle_network) +endif() cc_library(paddle_capi_whole DEPS paddle_capi ${PADDLE_CAPI_INFER_LIBS}) -# No shared library for iOS +# Link the static library for inference +cc_library(paddle_capi_engine DEPS paddle_capi paddle_utils paddle_parameter paddle_math paddle_cuda paddle_proto) +cc_library(paddle_capi_layers DEPS paddle_function paddle_gserver) + +# Link the shared library for inference if(NOT IOS) - set(LINK_FLAGS " -Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/export.sym -Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/export.map") - # TODO: merge mkl into paddle_capi_shared + set(LINK_FLAGS "-Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/paddle_capi.map") add_library(paddle_capi_shared SHARED ${CAPI_SOURCES}) set_target_properties(paddle_capi_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}") target_include_directories(paddle_capi_shared PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) @@ -53,9 +67,10 @@ endif() # install library & headers. install(FILES ${CAPI_HEADERS} DESTINATION include/paddle) +install(FILES paddle_capi.map DESTINATION include/paddle) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/config.h DESTINATION include/paddle) if(ANDROID) - install(TARGETS paddle_capi_whole paddle_capi_shared + install(TARGETS paddle_capi_whole paddle_capi_engine paddle_capi_layers paddle_capi_shared ARCHIVE DESTINATION lib/${ANDROID_ABI} LIBRARY DESTINATION lib/${ANDROID_ABI}) execute_process( @@ -80,7 +95,7 @@ if(ANDROID) )" ) else(ANDROID) - install(TARGETS paddle_capi_whole ARCHIVE DESTINATION lib) + install(TARGETS paddle_capi_whole paddle_capi_engine paddle_capi_layers ARCHIVE DESTINATION lib) if(NOT IOS) install(TARGETS paddle_capi_shared DESTINATION lib) endif() diff --git a/paddle/capi/export.sym b/paddle/capi/export.sym deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/paddle/capi/export.map b/paddle/capi/paddle_capi.map similarity index 100% rename from paddle/capi/export.map rename to paddle/capi/paddle_capi.map diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index c8d9dac21d995d92b9d50436d42e47b63ea55f58..dbe76a8eaf134f7db08fb545297c8e4db68a7aab 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -19,14 +19,15 @@ cc_test(scope_test SRCS scope_test.cc DEPS scope) proto_library(framework_proto SRCS framework.proto) cc_library(attribute SRCS attribute.cc DEPS framework_proto) -cc_library(proto_desc SRCS var_desc.cc op_desc.cc block_desc.cc program_desc.cc DEPS attribute ddim op_info) +cc_test(program_desc_test SRCS program_desc_test.cc DEPS proto_desc) cc_library(op_proto_maker SRCS op_proto_maker.cc DEPS framework_proto attribute) cc_test(op_proto_maker_test SRCS op_proto_maker_test.cc DEPS op_proto_maker) cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto) -cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope proto_desc) +cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope glog) cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry) +cc_library(proto_desc SRCS var_desc.cc op_desc.cc block_desc.cc program_desc.cc DEPS attribute ddim op_info 
operator) -cc_library(op_registry SRCS op_registry.cc DEPS op_proto_maker op_info operator) +cc_library(op_registry SRCS op_registry.cc DEPS op_proto_maker op_info operator glog proto_desc) cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry) py_proto_compile(framework_py_proto SRCS framework.proto) @@ -42,17 +43,15 @@ add_custom_command(TARGET framework_py_proto POST_BUILD cc_library(backward SRCS backward.cc DEPS net_op) cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context) -cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto backward) -set(EXECUTOR_TEST_OP elementwise_add_op gaussian_random_op feed_op fetch_op - mul_op sum_op squared_l2_distance_op fill_constant_op sgd_op mean_op) -if(WITH_GPU) - nv_test(executor_test SRCS executor_test.cc DEPS executor ${EXECUTOR_TEST_OP}) -else() - cc_test(executor_test SRCS executor_test.cc DEPS executor ${EXECUTOR_TEST_OP}) -endif() +cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto backward glog) + +cc_library(prune SRCS prune.cc DEPS framework_proto) +cc_test(prune_test SRCS prune_test.cc DEPS op_info prune recurrent_op device_context) cc_library(tensor_array SRCS tensor_array.cc DEPS lod_tensor) cc_test(tensor_array_test SRCS tensor_array_test.cc DEPS tensor_array place) +cc_test(var_type_inference_test SRCS var_type_inference_test.cc DEPS op_registry + proto_desc) cc_library(selected_rows SRCS selected_rows.cc DEPS tensor) cc_test(selected_rows_test SRCS selected_rows_test.cc DEPS selected_rows) diff --git a/paddle/framework/attribute.cc b/paddle/framework/attribute.cc index d6a2975aaa419406aef7b228e78381dbce78890d..29fe352ca450740e55ee87b63392e3aabac8aa40 100644 --- a/paddle/framework/attribute.cc +++ b/paddle/framework/attribute.cc @@ -19,19 +19,7 @@ limitations under the License. */ namespace paddle { namespace framework { -static ProgramDesc* g_program_desc = nullptr; - -ProgramDesc& GetProgramDesc() { - if (g_program_desc == nullptr) { - g_program_desc = new ProgramDesc(); - auto root_block = g_program_desc->mutable_blocks()->Add(); - root_block->set_idx(0); - root_block->set_parent_idx(-1); - } - return *g_program_desc; -} - -Attribute GetAttrValue(const OpDesc::Attr& attr_desc) { +Attribute GetAttrValue(const OpDesc::Attr& attr_desc, ProgramDesc* program) { switch (attr_desc.type()) { case framework::AttrType::BOOLEAN: { return attr_desc.b(); @@ -74,7 +62,9 @@ Attribute GetAttrValue(const OpDesc::Attr& attr_desc) { return val; } case framework::AttrType::BLOCK: { - return GetProgramDesc().mutable_blocks(attr_desc.block_idx()); + PADDLE_ENFORCE(program != nullptr, + "Need to specify ProgramDesc when get a block attr"); + return program->mutable_blocks(attr_desc.block_idx()); } } PADDLE_ENFORCE(false, "Unknown OpDesc::AttrDesc::type !"); diff --git a/paddle/framework/attribute.h b/paddle/framework/attribute.h index d13530e3408a54c7ecab87c3bd9e6288e342f9af..9744662b8f7229b0b17e910ae5cd997fa7d31e06 100644 --- a/paddle/framework/attribute.h +++ b/paddle/framework/attribute.h @@ -26,16 +26,13 @@ limitations under the License. 
*/ namespace paddle { namespace framework { - -ProgramDesc& GetProgramDesc(); - template inline AttrType AttrTypeID() { Attribute tmp = T(); return static_cast(tmp.which() - 1); } -Attribute GetAttrValue(const OpDesc::Attr& attr_desc); +Attribute GetAttrValue(const OpDesc::Attr& attr_desc, ProgramDesc* desc); class AttrReader { public: @@ -120,6 +117,57 @@ class EnumInContainer { std::unordered_set container_; }; +template +struct ExtractAttribute { + explicit ExtractAttribute(const std::string& attr_name) + : attr_name_(attr_name) {} + + T* operator()(Attribute& attr) const { + T* attr_value = nullptr; + try { + attr_value = &boost::get(attr); + } catch (boost::bad_get& bad_get) { + PADDLE_THROW("Cannot get attribute %s by type %s, its type is %s", + attr_name_, typeid(T).name(), attr.type().name()); + } + return attr_value; + } + + const std::string& attr_name_; +}; + +// special handle bool +// FIXME(yuyang18): Currently we cast bool into int in python binding. It is +// hard to change the logic there. In another way, we should correct handle +// if the user set `some_flag=1`. +// +// FIX ME anytime if there is a better solution. +template <> +struct ExtractAttribute { + explicit ExtractAttribute(const std::string& attr_name) + : attr_name_(attr_name) {} + + bool* operator()(Attribute& attr) const { + if (attr.type() == typeid(int)) { // NOLINT + int val = boost::get(attr); + attr = static_cast(val); + } else if (attr.type() == typeid(float)) { // NOLINT + float val = boost::get(attr); + attr = static_cast(val); + } + bool* attr_value = nullptr; + try { + attr_value = &boost::get(attr); + } catch (boost::bad_get& bad_get) { + PADDLE_THROW("Cannot get attribute %s by type bool, its type is %s", + attr_name_, attr.type().name()); + } + return attr_value; + } + + const std::string& attr_name_; +}; + // check whether a certain attribute fit its limits // an attribute can have more than one limits template @@ -171,9 +219,10 @@ class TypedAttrChecker { attr_map[attr_name_] = val; } Attribute& attr = attr_map.at(attr_name_); - T& attr_value = boost::get(attr); + ExtractAttribute extract_attr(attr_name_); + T* attr_value = extract_attr(attr); for (const auto& checker : value_checkers_) { - checker(attr_value); + checker(*attr_value); } } diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index e3d7dacd7f0ad61a606e8b3e8f6a84b98deac729..1ae7fb60f01e4925ceb310f661171eb231eb6c96 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -21,6 +21,7 @@ #include "paddle/framework/block_desc.h" #include "paddle/framework/op_registry.h" +#include "paddle/operators/dynamic_recurrent_op.h" #include "paddle/operators/net_op.h" #include "paddle/operators/recurrent_op.h" @@ -220,8 +221,7 @@ static std::unique_ptr BackwardRecursive( // process recurrent gradient op as a special operator. if (forwardOp.Type() == "recurrent") { // NOTE clean up cycle call somewhere (RNN's stepnet constains itself), - // or - // this will result in infinite loop. + // or this will result in infinite loop. const auto& rnnop = *static_cast(&forwardOp); auto rnn_grad_op = @@ -231,6 +231,18 @@ static std::unique_ptr BackwardRecursive( // create stepnet's gradient op rnn_grad_op->set_stepnet( BackwardRecursive(stepnet_op, no_grad_names, grad_to_var, uniq_id)); + } else if (forwardOp.Type() == "dynamic_recurrent") { + // NOTE clean up cycle call somewhere (RNN's stepnet constains itself), + // or this will result in infinite loop. 
+ const auto& rnnop = + *static_cast(&forwardOp); + auto rnn_grad_op = + static_cast(grad_op.get()); + const auto& stepnet_op = + *static_cast(&rnnop.rnn.GetStepUnit()); + // create stepnet's gradient op + rnn_grad_op->rnn.SetStepUnit( + BackwardRecursive(stepnet_op, no_grad_names, grad_to_var, uniq_id)); } if (net->ops_.empty()) { // Current no aux op is added to network @@ -281,12 +293,16 @@ static void CreateGradVarInBlock( auto ops = block_desc->AllOps(); for (size_t op_index = grad_op_start_index; op_index < ops.size(); ++op_index) { + bool need_infer_shape = false; ForEachVarName(ops[op_index]->Outputs(), [&](const std::string& grad_var_name) { if (block_desc->HasVar(grad_var_name)) { return false; } - block_desc->Var(grad_var_name); + need_infer_shape = true; + auto var = block_desc->Var(grad_var_name); + // FIXME(qiao) infer the datatype + var->SetDataType(framework::DataType::FP32); auto it = param_name_map.find(grad_var_name); if (it == param_name_map.end()) { return false; @@ -298,12 +314,14 @@ static void CreateGradVarInBlock( grad_record.op_idx_ = static_cast(op_index); return false; /* not break */ }); + if (need_infer_shape) { + ops[op_index]->InferShape(*block_desc); + } } } std::vector> MakeOpGrad( - const std::unique_ptr& op_desc, - std::unordered_set* no_grad_vars, + const OpDescBind* op_desc, std::unordered_set* no_grad_vars, std::unordered_map* grad_to_var) { std::vector> grad_op_descs; // All input gradients of forwarding operator do not need to calculate. @@ -350,7 +368,7 @@ std::vector> MakeBlockBackward( std::unordered_set* no_grad_vars, std::unordered_map* grad_to_var) { BlockDescBind* cur_block = program_desc.Block(block_idx); - std::deque>& op_descs = cur_block->ops_; + std::vector op_descs = cur_block->AllOps(); std::unordered_map> dup_out_ops; size_t grad_desc_idx = 0; std::vector> backward_descs; @@ -368,7 +386,7 @@ std::vector> MakeBlockBackward( program_desc, step_block_idx, no_grad_vars, grad_to_var); BlockDescBind* backward_block = program_desc.AppendBlock(*cur_block); for (auto& ptr : backward_block_op_descs) { - backward_block->ops_.push_back(std::move(ptr)); + backward_block->AppendAllocatedOp(std::move(ptr)); } op_grads[0]->SetBlockAttr("step_block", *backward_block); } @@ -425,17 +443,22 @@ ParamGradInfoMap AppendBackward( const int root_block_idx = 0; auto root_block = program_desc.Block(root_block_idx); - auto& all_ops = root_block->ops_; // insert fill one op for target + // TODO(qiao) add some check to the target. 
std::string fill_one_op_out = GradVarName(target.Name()); + std::vector target_shape_desc = target.Shape(); + std::vector target_shape; + std::transform(target_shape_desc.begin(), target_shape_desc.end(), + std::back_inserter(target_shape), + [](int64_t dim) { return static_cast(dim); }); std::unique_ptr fill_one_op( new OpDescBind("fill_constant", {}, {{"Out", {fill_one_op_out}}}, - {{"shape", std::vector{1}}, + {{"shape", target_shape}, {"value", static_cast(1.0)}, - {"dataType", framework::DataType::FP32}})); - all_ops.push_back(std::move(fill_one_op)); - size_t forward_op_num = all_ops.size(); + {"data_type", framework::DataType::FP32}})); + root_block->AppendAllocatedOp(std::move(fill_one_op)); + size_t forward_op_num = root_block->OpSize(); size_t forward_block_num = program_desc.Size(); // Insert backward operators @@ -443,13 +466,22 @@ ParamGradInfoMap AppendBackward( auto backward_op_descs = MakeBlockBackward(program_desc, root_block_idx, &no_grad_var_names, &grad_to_var); - std::unordered_map retv; - - // Create Variable for (auto& ptr : backward_op_descs) { - all_ops.push_back(std::move(ptr)); + root_block->AppendAllocatedOp(std::move(ptr)); } - root_block->Var(fill_one_op_out); + // Create Variable + + // Create target gradient variable + std::unordered_map retv; + + auto var = root_block->Var(fill_one_op_out); + // FIXME(qiao) infer the data type + var->SetDataType(framework::DataType::FP32); + var->SetShape(target.Shape()); + auto& target_grad = retv[target.Name()]; + target_grad.name_ = fill_one_op_out; + target_grad.block_idx_ = root_block_idx; + target_grad.op_idx_ = static_cast(forward_op_num); // create grad_var for all blocks in this program CreateGradVarInBlock(forward_op_num, grad_to_var, root_block, &retv); diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 5302afcafb5c0e1c057302dac174be935649ef11..10301f7e39423c8ff0eba33277edecab14c119bf 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -26,6 +26,20 @@ namespace framework { using DeviceContext = platform::DeviceContext; +class NoneOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext *ctx) const override {} +}; + +template +class NoneKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override {} +}; + class RowWiseAddOpMaker : public OpProtoAndCheckerMaker { public: RowWiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker) @@ -215,19 +229,51 @@ class MinusOpMaker : public OpProtoAndCheckerMaker { namespace f = paddle::framework; namespace ops = paddle::operators; using EnforceNotMet = paddle::platform::EnforceNotMet; -REGISTER_OPERATOR(rowwise_add, f::NOP, f::RowWiseAddOpMaker, +// rowwise_add +REGISTER_OPERATOR(rowwise_add, f::NoneOp, f::RowWiseAddOpMaker, f::RowWiseAddGradMaker); -REGISTER_OPERATOR(rowwise_add_grad, f::NOP); -REGISTER_OP(mul, f::NOP, f::MulOpMaker, mul_grad, f::NOP); -REGISTER_OP(sigmoid, f::NOP, f::SigmoidOpMaker, sigmoid_grad, f::NOP); -REGISTER_OP_WITHOUT_GRADIENT(nograd, f::NOP, f::NoGradOpMaker); -REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, f::NOP, f::FillZeroOpMaker); -REGISTER_OP(sum, f::NOP, f::SumOpMaker, sum_grad, f::NOP); +REGISTER_OP_CPU_KERNEL(rowwise_add, + f::NoneKernel); +REGISTER_OPERATOR(rowwise_add_grad, f::NoneOp); +REGISTER_OP_CPU_KERNEL(rowwise_add_grad, + f::NoneKernel); +// mul +REGISTER_OP(mul, f::NoneOp, 
f::MulOpMaker, mul_grad, f::NoneOp); +REGISTER_OP_CPU_KERNEL(mul, f::NoneKernel); +REGISTER_OP_CPU_KERNEL(mul_grad, + f::NoneKernel); +// sigmoid +REGISTER_OP(sigmoid, f::NoneOp, f::SigmoidOpMaker, sigmoid_grad, f::NoneOp); +REGISTER_OP_CPU_KERNEL(sigmoid, + f::NoneKernel); +REGISTER_OP_WITHOUT_GRADIENT(nograd, f::NoneOp, f::NoGradOpMaker); +// fill_zeros_like +REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, f::NoneOp, f::FillZeroOpMaker); +REGISTER_OP_CPU_KERNEL(fill_zeros_like, + f::NoneKernel); +// sum +REGISTER_OP(sum, f::NoneOp, f::SumOpMaker, sum_grad, f::NoneOp); +REGISTER_OP_CPU_KERNEL(sum, f::NoneKernel); +REGISTER_OP_CPU_KERNEL(sum_grad, + f::NoneKernel); +// fc REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker); -REGISTER_OP(many_output_op, f::NOP, f::ManyOutputOpMaker, many_output_op_grad, - f::NOP); -REGISTER_OP(mult_in_out, f::NOP, f::MultInOutOpMaker, mult_in_out_grad, f::NOP); -REGISTER_OPERATOR(minus, f::NOP, f::MinusOpMaker, f::MinusGradOpDescMaker); +// many_output_op +REGISTER_OP(many_output_op, f::NoneOp, f::ManyOutputOpMaker, + many_output_op_grad, f::NoneOp); +// mult_in_out +REGISTER_OP(mult_in_out, f::NoneOp, f::MultInOutOpMaker, mult_in_out_grad, + f::NoneOp); +REGISTER_OP_CPU_KERNEL(mult_in_out, + f::NoneKernel); +REGISTER_OP_CPU_KERNEL(mult_in_out_grad, + f::NoneKernel); +// minus +REGISTER_OPERATOR(minus, f::NoneOp, f::MinusOpMaker, f::MinusGradOpDescMaker); +REGISTER_OP_CPU_KERNEL(minus, f::NoneKernel); +// scale +REGISTER_OPERATOR(scale, f::NoneOp); +REGISTER_OP_CPU_KERNEL(scale, f::NoneKernel); TEST(Backward, simple_op_not_need_grad) { auto fwd = f::OpRegistry::CreateOp( @@ -449,20 +495,10 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { EXPECT_EQ(bwd_net->ops_[2]->Outputs(all).size(), 0UL); } -// =================================== // - -f::ProgramDesc *GetNewProgramDesc() { - auto *program_desc = new f::ProgramDesc(); - auto *root_block = program_desc->add_blocks(); - root_block->set_idx(0); - root_block->set_parent_idx(-1); - return program_desc; -} - TEST(Backward, simple_single_op) { - f::ProgramDesc *program_desc = GetNewProgramDesc(); - f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::ProgramDescBind program; f::BlockDescBind *block = program.Block(0); + f::OpDescBind *op = block->AppendOp(); op->SetType("rowwise_add"); op->SetInput("X", {"x"}); @@ -487,7 +523,7 @@ TEST(Backward, simple_single_op) { EXPECT_EQ(grad_op->Output(f::GradVarName("b")), std::vector({f::GradVarName("b")})); - EXPECT_EQ(var_to_grad.size(), 2UL); + EXPECT_EQ(var_to_grad.size(), 3UL); EXPECT_EQ(var_to_grad.at("b"), f::GradVarInfo(f::GradVarName("b"), 0, 2)); EXPECT_EQ(var_to_grad.at("x"), f::GradVarInfo(f::GradVarName("x"), 0, 2)); @@ -496,8 +532,7 @@ TEST(Backward, simple_single_op) { } TEST(Backward, default_attribute) { - f::ProgramDesc *program_desc = GetNewProgramDesc(); - f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::ProgramDescBind program; f::BlockDescBind *block = program.Block(0); f::OpDescBind *op = block->AppendOp(); op->SetType("mul"); @@ -523,8 +558,7 @@ TEST(Backward, default_attribute) { } TEST(Backward, simple_mult_op) { - f::ProgramDesc *program_desc = GetNewProgramDesc(); - f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::ProgramDescBind program; f::BlockDescBind *block = program.Block(0); f::OpDescBind *op1 = block->AppendOp(); op1->SetType("rowwise_add"); @@ -588,7 +622,7 @@ TEST(Backward, simple_mult_op) { 
EXPECT_EQ(grad_op3->Output(f::GradVarName("b")), std::vector({f::GradVarName("b3")})); - EXPECT_EQ(var_to_grad.size(), 6UL); + EXPECT_EQ(var_to_grad.size(), 7UL); EXPECT_EQ(var_to_grad.at("x1"), f::GradVarInfo(f::GradVarName("x1"), 0, 6)); EXPECT_EQ(var_to_grad.at("b1"), f::GradVarInfo(f::GradVarName("b1"), 0, 6)); EXPECT_EQ(var_to_grad.at("out1"), @@ -607,8 +641,7 @@ TEST(Backward, simple_mult_op) { } TEST(Backward, intermedia_var_no_grad) { - f::ProgramDesc *program_desc = GetNewProgramDesc(); - f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::ProgramDescBind program; f::BlockDescBind *block = program.Block(0); f::OpDescBind *op1 = block->AppendOp(); op1->SetType("rowwise_add"); @@ -666,7 +699,7 @@ TEST(Backward, intermedia_var_no_grad) { std::vector({f::GradVarName("out1")})); EXPECT_EQ(grad_op4->Output(f::GradVarName("Y")), std::vector()); - EXPECT_EQ(var_to_grad.size(), 3UL); + EXPECT_EQ(var_to_grad.size(), 4UL); EXPECT_EQ(var_to_grad.at("x1"), f::GradVarInfo(f::GradVarName("x1"), 0, 6)); EXPECT_EQ(var_to_grad.at("b1"), f::GradVarInfo(f::GradVarName("b1"), 0, 6)); EXPECT_EQ(var_to_grad.at("out1"), @@ -678,8 +711,7 @@ TEST(Backward, intermedia_var_no_grad) { } TEST(Backward, var_no_grad) { - f::ProgramDesc *program_desc = GetNewProgramDesc(); - f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::ProgramDescBind program; f::BlockDescBind *block = program.Block(0); f::OpDescBind *op1 = block->AppendOp(); op1->SetType("mult_in_out"); @@ -744,7 +776,7 @@ TEST(Backward, var_no_grad) { EXPECT_EQ(grad_op1->Output(f::GradVarName("H")), std::vector({f::GradVarName("h1")})); - EXPECT_EQ(var_to_grad.size(), 3UL); + EXPECT_EQ(var_to_grad.size(), 4UL); EXPECT_EQ(var_to_grad.at("y1"), f::GradVarInfo(f::GradVarName("y1"), 0, 3)); EXPECT_EQ(var_to_grad.at("x1"), f::GradVarInfo(f::GradVarName("x1"), 0, 5)); EXPECT_EQ(var_to_grad.at("h1"), f::GradVarInfo(f::GradVarName("h1"), 0, 5)); @@ -755,8 +787,7 @@ TEST(Backward, var_no_grad) { } TEST(Backward, shared_var) { - f::ProgramDesc *program_desc = GetNewProgramDesc(); - f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::ProgramDescBind program; f::BlockDescBind *block = program.Block(0); f::OpDescBind *op1 = block->AppendOp(); op1->SetType("rowwise_add"); @@ -830,7 +861,7 @@ TEST(Backward, shared_var) { EXPECT_EQ(grad_op1->Output(f::GradVarName("b")), std::vector({f::GradVarName("b1")})); - EXPECT_EQ(var_to_grad.size(), 5UL); + EXPECT_EQ(var_to_grad.size(), 6UL); EXPECT_EQ(var_to_grad.at("b3"), f::GradVarInfo(f::GradVarName("b3"), 0, 4)); EXPECT_EQ(var_to_grad.at("y2"), f::GradVarInfo(f::GradVarName("y2"), 0, 5)); EXPECT_EQ(var_to_grad.at("out1"), @@ -846,8 +877,7 @@ TEST(Backward, shared_var) { } TEST(Backward, half_backward) { - f::ProgramDesc *program_desc = GetNewProgramDesc(); - f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::ProgramDescBind program; f::BlockDescBind *block = program.Block(0); auto *op1 = block->AppendOp(); op1->SetType("minus"); @@ -863,7 +893,7 @@ TEST(Backward, half_backward) { auto ops = block->AllOps(); ASSERT_EQ(3UL, ops.size()); - EXPECT_EQ(var_to_grad.size(), 1UL); + EXPECT_EQ(var_to_grad.size(), 2UL); EXPECT_EQ(var_to_grad.at("a"), f::GradVarInfo(f::GradVarName("a"), 0, forward_len + 1)); } diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc index 47b75228cdbd2a8b4f0c5ad33aa82f5e43044606..251e340e6ddcc17ba16bdcab63f2a8c907122eab 100644 --- a/paddle/framework/block_desc.cc +++ 
b/paddle/framework/block_desc.cc @@ -19,11 +19,11 @@ namespace paddle { namespace framework { VarDescBind *BlockDescBind::Var(const std::string &name) { - need_update_ = true; auto it = vars_.find(name); if (it != vars_.end()) { return it->second.get(); } + need_update_ = true; auto *var = new VarDescBind(name); vars_[name].reset(var); return var; @@ -41,6 +41,19 @@ bool BlockDescBind::HasVar(const std::string &name) const { return vars_.find(name) != vars_.end(); } +VarDescBind *BlockDescBind::FindVarRecursive(const std::string &name) const { + auto it = vars_.find(name); + if (it == vars_.end()) { + return Parent() == kNoneBlockIndex ? nullptr + : ParentBlock()->FindVarRecursive(name); + } + return it->second.get(); +} + +bool BlockDescBind::HasVarRecursive(const std::string &name) const { + return FindVarRecursive(name) != nullptr; +} + std::vector BlockDescBind::AllVars() const { std::vector res; for (const auto &p : vars_) { @@ -55,6 +68,11 @@ OpDescBind *BlockDescBind::AppendOp() { return ops_.back().get(); } +void BlockDescBind::AppendAllocatedOp(std::unique_ptr &&op_desc) { + need_update_ = true; + ops_.emplace_back(std::move(op_desc)); +} + OpDescBind *BlockDescBind::PrependOp() { need_update_ = true; ops_.emplace_front(new OpDescBind()); @@ -70,15 +88,19 @@ std::vector BlockDescBind::AllOps() const { } void BlockDescBind::Flush() { + for (auto &op_desc : ops_) { + op_desc->Flush(); + } + if (need_update_) { auto &op_field = *this->desc_->mutable_ops(); - op_field.Clear(); + this->ClearPBOps(); op_field.Reserve(static_cast(ops_.size())); for (auto &op_desc : ops_) { op_field.AddAllocated(op_desc->Proto()); } auto &var_field = *this->desc_->mutable_vars(); - var_field.Clear(); + this->ClearPBVars(); var_field.Reserve(static_cast(vars_.size())); for (auto &var_desc : vars_) { var_field.AddAllocated(var_desc.second->Proto()); @@ -88,7 +110,7 @@ void BlockDescBind::Flush() { } BlockDescBind *BlockDescBind::ParentBlock() const { - if (this->desc_->parent_idx() == -1) { + if (this->desc_->parent_idx() == kNoneBlockIndex) { return nullptr; } return prog_->Block(static_cast(this->desc_->parent_idx())); @@ -98,6 +120,35 @@ BlockDesc *BlockDescBind::Proto() { Flush(); return desc_; } +BlockDescBind::BlockDescBind(const BlockDescBind &other, BlockDesc *desc, + ProgramDescBind *prog) + : prog_(prog), desc_(desc) { + need_update_ = true; + for (auto &op : other.ops_) { + ops_.emplace_back(new OpDescBind(*op)); + } + + for (auto &it : other.vars_) { + auto *var = new VarDescBind(*it.second); + vars_[it.first].reset(var); + } +} + +void BlockDescBind::ClearPBOps() { + auto ops = this->desc_->mutable_ops(); + while (!ops->empty()) { + // we do not own the OpDesc, so release the ownership. + ops->ReleaseLast(); + } +} + +void BlockDescBind::ClearPBVars() { + auto vars = this->desc_->mutable_vars(); + while (!vars->empty()) { + // we do not own the VarDesc, so release the ownership. + vars->ReleaseLast(); + } +} } // namespace framework } // namespace paddle diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h index 9fb88f963283c72e1ec389b72dd2d98049c74f6d..c685050850dc25f346df49b5ce1d897974870460 100644 --- a/paddle/framework/block_desc.h +++ b/paddle/framework/block_desc.h @@ -16,9 +16,12 @@ limitations under the License. 
*/ #include #include +#include #include #include + #include "paddle/framework/op_desc.h" +#include "paddle/framework/proto_desc.h" #include "paddle/framework/var_desc.h" #include "paddle/platform/macros.h" @@ -36,6 +39,14 @@ class BlockDescBind { BlockDescBind(ProgramDescBind *prog, BlockDesc *desc) : prog_(prog), desc_(desc), need_update_(false) {} + BlockDescBind(const BlockDescBind &other, BlockDesc *desc, + ProgramDescBind *prog); + + ~BlockDescBind() { + this->ClearPBVars(); + this->ClearPBOps(); + } + int32_t ID() const { return desc_->idx(); } int32_t Parent() const { return desc_->parent_idx(); } @@ -46,23 +57,43 @@ class BlockDescBind { bool HasVar(const std::string &var_name) const; + VarDescBind *FindVarRecursive(const std::string &name_bytes) const; + + bool HasVarRecursive(const std::string &var_name) const; + + std::set LocalVarNames() const { + std::set var_names; + for (auto &var : vars_) { + var_names.insert(var.first); + } + return var_names; + } + std::vector AllVars() const; BlockDescBind *ParentBlock() const; OpDescBind *AppendOp(); + void AppendAllocatedOp(std::unique_ptr &&op_desc); + OpDescBind *PrependOp(); std::vector AllOps() const; + size_t OpSize() const { return ops_.size(); } + + OpDescBind *Op(int idx) { return ops_.at(idx).get(); } + void Flush(); BlockDesc *Proto(); - // FIXME(yuyang18): backward will access private data of BlockDesc. - // Mark it public temporary. We can fix it later. - public: + private: + void ClearPBOps(); + void ClearPBVars(); + + private: ProgramDescBind *prog_; // not_own BlockDesc *desc_; // not_own bool need_update_; diff --git a/paddle/framework/data_type.h b/paddle/framework/data_type.h index 649899d42572c9a22adca5337dcd56b0bcf42e7c..c25a62c2b11ead614d93a4be8d63d40d0cc0165a 100644 --- a/paddle/framework/data_type.h +++ b/paddle/framework/data_type.h @@ -26,6 +26,8 @@ inline DataType ToDataType(std::type_index type) { return DataType::FP64; } else if (typeid(int).hash_code() == type.hash_code()) { return DataType::INT32; + } else if (typeid(int64_t).hash_code() == type.hash_code()) { + return DataType::INT64; } else { PADDLE_THROW("Not supported"); } diff --git a/paddle/framework/details/op_registry.h b/paddle/framework/details/op_registry.h index ed7c5f17b0854809bde923276f36440cce193a88..357ad21f39f3b1f6dbdb98063f8fb24ec6800ec6 100644 --- a/paddle/framework/details/op_registry.h +++ b/paddle/framework/details/op_registry.h @@ -18,6 +18,7 @@ #include "paddle/framework/op_info.h" #include "paddle/framework/op_proto_maker.h" #include "paddle/framework/operator.h" +#include "paddle/framework/var_type_inference.h" namespace paddle { namespace framework { @@ -26,7 +27,8 @@ namespace details { enum OpInfoFillType { kOperator = 0, kOpProtoAndCheckerMaker = 1, - kGradOpDescMaker = 2 + kGradOpDescMaker = 2, + kVarTypeInference = 3 }; template @@ -38,7 +40,9 @@ struct OpInfoFillTypeID { ? kOpProtoAndCheckerMaker : (std::is_base_of::value ? kGradOpDescMaker - : static_cast(-1))); + : (std::is_base_of::value + ? 
kVarTypeInference + : static_cast(-1)))); } }; @@ -106,6 +110,17 @@ struct OpInfoFiller { }; } }; + +template +struct OpInfoFiller { + void operator()(const char* op_type, OpInfo* info) const { + info->infer_var_type_ = [](const OpDescBind& fwd_op, BlockDescBind* block) { + T inference; + inference(fwd_op, block); + }; + } +}; + } // namespace details } // namespace framework diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 8e82e28bac478ad93ece3fcec9730c6cbabc392a..1f1e4edda823d62b169422672c855d96a2bd2ede 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -64,99 +64,27 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) { auto& block = pdesc.blocks(block_id); auto& device = device_contexts_[0]; - // Instantiate all the vars in the global scope - for (auto& var : block.vars()) { - scope->Var(var.name()); - } - Scope& local_scope = scope->NewScope(); - std::vector should_run = Prune(pdesc, block_id); - PADDLE_ENFORCE_EQ(should_run.size(), static_cast(block.ops_size())); - for (size_t i = 0; i < should_run.size(); ++i) { - if (should_run[i]) { - for (auto& var : block.ops(i).outputs()) { - for (auto& argu : var.arguments()) { - if (local_scope.FindVar(argu) == nullptr) { - local_scope.Var(argu); - } - } - } - auto op = paddle::framework::OpRegistry::CreateOp(block.ops(i)); - op->Run(local_scope, *device); - } - } - - // TODO(tonyyang-svail): - // - Destroy local_scope -} - -std::vector Prune(const ProgramDesc& pdesc, int block_id) { - // TODO(tonyyang-svail): - // - will change to use multiple blocks for RNN op and Cond Op - - auto& block = pdesc.blocks(block_id); - auto& ops = block.ops(); - - bool expect_feed = true; - for (auto& op_desc : ops) { - PADDLE_ENFORCE(op_desc.type() != kFeedOpType || expect_feed, - "All FeedOps are at the beginning of the ProgramDesc"); - expect_feed = (op_desc.type() == kFeedOpType); - } - - bool expect_fetch = true; - for (auto op_iter = ops.rbegin(); op_iter != ops.rend(); ++op_iter) { - auto& op_desc = *op_iter; - PADDLE_ENFORCE(op_desc.type() != kFetchOpType || expect_fetch, - "All FetchOps must at the end of the ProgramDesc"); - expect_fetch = (op_desc.type() == kFetchOpType); - } - - std::set dependent_vars; - std::vector should_run; - for (auto op_iter = ops.rbegin(); op_iter != ops.rend(); ++op_iter) { - auto& op_desc = *op_iter; - - bool found_dependent_vars = false; - for (auto& var : op_desc.outputs()) { - for (auto& argu : var.arguments()) { - if (dependent_vars.count(argu) != 0) { - found_dependent_vars = true; - } - } - } - - if (op_desc.type() == kFetchOpType || found_dependent_vars) { - // erase its output to the dependency graph - for (auto& var : op_desc.outputs()) { - for (auto& argu : var.arguments()) { - dependent_vars.erase(argu); - } - } - - // insert its input to the dependency graph - for (auto& var : op_desc.inputs()) { - for (auto& argu : var.arguments()) { - dependent_vars.insert(argu); - } - } - - should_run.push_back(true); + for (auto& var : block.vars()) { + if (var.persistable()) { + auto* ptr = scope->Var(var.name()); + VLOG(3) << "Create Variable " << var.name() + << " global, which pointer is " << ptr; } else { - should_run.push_back(false); + auto* ptr = local_scope.Var(var.name()); + VLOG(3) << "Create Variable " << var.name() + << " locally, which pointer is " << ptr; } } - // TODO(tonyyang-svail): - // - check this after integration of Init - // PADDLE_ENFORCE(dependent_vars.empty()); - - // since we are traversing the ProgramDesc in 
reverse order - // we reverse the should_run vector - std::reverse(should_run.begin(), should_run.end()); + for (auto& op_desc : block.ops()) { + auto op = paddle::framework::OpRegistry::CreateOp( + op_desc, const_cast(&pdesc)); + op->Run(local_scope, *device); + } - return should_run; + scope->DeleteScope(&local_scope); } } // namespace framework diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h index 4e3bc2c0a59dfee5b9993037671f14a109dc8a74..793ee954e25f7da6c9d04ea6acc2ad78812e8329 100644 --- a/paddle/framework/executor.h +++ b/paddle/framework/executor.h @@ -40,16 +40,5 @@ class Executor { std::vector device_contexts_; }; -/* @Brief - * Pruning the graph - * - * @param - * ProgramDesc - * - * @return - * vector Same size as ops. Indicates whether an op should be run. - */ -std::vector Prune(const ProgramDesc& pdesc, int block_id); - } // namespace framework } // namespace paddle diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc deleted file mode 100644 index fcd2e47cff57fcc6c177be77d7e14b167a28f4ae..0000000000000000000000000000000000000000 --- a/paddle/framework/executor_test.cc +++ /dev/null @@ -1,339 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
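After this change `Executor::Run` no longer prunes: it creates persistable variables in the outer scope and everything else in a child scope, runs the block's operators in program order, and finally deletes the child scope. A rough standalone sketch of that control flow, with mock `Scope`/`Op` types that are not Paddle's:

```cpp
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Mock variable description: a name plus the persistable flag from VarDesc.
struct VarDesc {
  std::string name;
  bool persistable;
};

// Mock scope that only records which variables it owns.
struct Scope {
  std::vector<std::string> vars;
  void Var(const std::string& name) { vars.push_back(name); }
};

// Mock operator: running it just prints its type.
struct Op {
  std::string type;
  void Run(Scope&) const { std::cout << "run " << type << "\n"; }
};

// Sketch of the simplified Run: no pruning, straight-line execution.
void Run(const std::vector<VarDesc>& block_vars,
         const std::vector<Op>& block_ops, Scope* scope) {
  auto local_scope = std::make_unique<Scope>();  // stands in for scope->NewScope()
  for (const auto& var : block_vars) {
    if (var.persistable) {
      scope->Var(var.name);        // parameters etc. live in the outer scope
    } else {
      local_scope->Var(var.name);  // temporaries live only for this run
    }
  }
  for (const auto& op : block_ops) {
    op.Run(*local_scope);          // in-order execution, one op after another
  }
  // local_scope is destroyed here, mirroring scope->DeleteScope(&local_scope).
}

int main() {
  Scope global;
  Run({{"w1", true}, {"tmp", false}}, {{"mul"}, {"mean"}}, &global);
  std::cout << "globals: " << global.vars.size() << "\n";  // only persistable vars
  return 0;
}
```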
*/ - -#include "paddle/framework/executor.h" - -#include -#include - -#include "gflags/gflags.h" -#include "gtest/gtest.h" -#include "paddle/framework/attribute.h" -#include "paddle/framework/backward.h" -#include "paddle/framework/block_desc.h" -#include "paddle/framework/op_desc.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" - -USE_OP(elementwise_add); -USE_OP(gaussian_random); -USE_OP(feed); -USE_OP(fetch); -USE_OP(mul); -USE_OP(sum); -USE_OP(squared_l2_distance); -USE_OP(fill_constant); -USE_OP(mean); -USE_OP(sgd); - -using namespace paddle::platform; -using namespace paddle::framework; - -void AddOp(const std::string& type, const VariableNameMap& inputs, - const VariableNameMap& outputs, AttributeMap attrs, - paddle::framework::BlockDescBind* block) { - // insert output - for (auto kv : outputs) { - for (auto v : kv.second) { - // <<<<<<< HEAD - // auto var = block->Var(v); - // var->SetType(VarDesc::LOD_TENSOR); - // var->SetDataType(paddle::framework::DataType::FP32); - // ======= - if (!block->HasVar(v)) { - auto var = block->Var(v); - var->SetDataType(paddle::framework::DataType::FP32); - } - // >>>>>>> origin/develop - } - } - - // insert op - auto op = block->AppendOp(); - op->SetType(type); - for (auto& kv : inputs) { - op->SetInput(kv.first, kv.second); - } - for (auto& kv : outputs) { - op->SetOutput(kv.first, kv.second); - } - op->SetAttrMap(attrs); - op->CheckAttrs(); -} - -// Tensors in feed value variable will only be in CPUPlace -// So we can memcpy the data from vector to feed_value -template -void SetFeedVariable(const std::vector>& inputs, - const std::vector>& dims) { - Variable* g_feed_value = GetGlobalScope().FindVar("feed_value"); - auto& feed_inputs = - *(g_feed_value->GetMutable>()); - size_t size = inputs.size(); - feed_inputs.resize(size); - for (size_t i = 0; i < size; i++) { - T* dst = feed_inputs[i].mutable_data(make_ddim(dims[i]), CPUPlace()); - memcpy(dst, inputs[i].data(), inputs[i].size() * sizeof(T)); - } -} - -// Tensors in fetch value variable will only be in CPUPlace -// So we can memcpy the data from fetch_value to vector -template -std::vector> GetFetchVariable() { - Variable* g_fetch_value = GetGlobalScope().FindVar("fetch_value"); - auto& fetch_outputs = - *(g_fetch_value->GetMutable>()); - - size_t size = fetch_outputs.size(); - std::vector> result; - result.reserve(size); - for (size_t i = 0; i < size; i++) { - std::vector tmp; - tmp.resize(fetch_outputs[i].numel()); - memcpy(tmp.data(), fetch_outputs[i].data(), - fetch_outputs[i].numel() * sizeof(T)); - result.push_back(tmp); - } - - return result; -} - -class ExecutorTesterRandom : public ::testing::Test { - public: - virtual void SetUp() override { - int input_dim = 3, batch_size = 2, embed_dim = 5; - - auto temp_init_root_block = init_pdesc_.add_blocks(); - temp_init_root_block->set_idx(0); - temp_init_root_block->set_parent_idx(-1); - paddle::framework::ProgramDescBind& init_program = - paddle::framework::ProgramDescBind::Instance(&init_pdesc_); - paddle::framework::BlockDescBind* init_root_block = init_program.Block(0); - - AddOp("gaussian_random", {}, {{"Out", {"w1"}}}, - {{"dims", std::vector{input_dim, embed_dim}}}, init_root_block); - AddOp("gaussian_random", {}, {{"Out", {"w2"}}}, - {{"dims", std::vector{embed_dim, input_dim}}}, init_root_block); - AddOp("fetch", {{"Input", {"w1"}}}, {}, {{"col", 0}}, init_root_block); - AddOp("fetch", {{"Input", {"w2"}}}, {}, {{"col", 1}}, init_root_block); - - // flush - init_program.Proto(); - - // run block 
- auto temp_root_block = pdesc_.add_blocks(); - temp_root_block->set_idx(0); - temp_root_block->set_parent_idx(-1); - paddle::framework::ProgramDescBind& program = - paddle::framework::ProgramDescBind::Instance(&pdesc_); - paddle::framework::BlockDescBind* root_block = program.Block(0); - - // feed data - inputs_.push_back({1.0, 1.0, 1.0, 1.0, 1.0, 1.0}); - dims_.push_back({batch_size, input_dim}); - AddOp("feed", {}, {{"Out", {"a"}}}, - {{"dims", std::vector{batch_size, input_dim}}, {"col", 0}}, - root_block); - - // forward - AddOp("mul", {{"X", {"a"}}, {"Y", {"w1"}}}, {{"Out", {"b"}}}, {}, - root_block); - AddOp("mul", {{"X", {"b"}}, {"Y", {"w2"}}}, {{"Out", {"a_out"}}}, {}, - root_block); - AddOp("squared_l2_distance", {{"X", {"a"}}, {"Y", {"a_out"}}}, - {{"Out", {"l2_distance"}}, {"sub_result", {"l2_distance_sub"}}}, {}, - root_block); - AddOp("mean", {{"X", {"l2_distance"}}}, {{"Out", {"mean_out"}}}, {}, - root_block); - - // backward - auto target = VarDescBind("mean_out"); - AppendBackward(program, target, {}); - - // update - AddOp("fill_constant", {}, {{"Out", {"learning_rate"}}}, - {{"shape", std::vector{1}}, {"value", float(0.001)}}, - root_block); - AddOp("sgd", {{"Param", {"w1"}}, - {"LearningRate", {"learning_rate"}}, - {"Grad", {"w1@GRAD"}}}, - {{"ParamOut", {"w1"}}}, {}, root_block); - AddOp("sgd", {{"Param", {"w2"}}, - {"LearningRate", {"learning_rate"}}, - {"Grad", {"w2@GRAD"}}}, - {{"ParamOut", {"w2"}}}, {}, root_block); - - AddOp("fetch", {{"Input", {"w1"}}}, {}, {{"col", 0}}, root_block); - AddOp("fetch", {{"Input", {"w2"}}}, {}, {{"col", 1}}, root_block); - AddOp("fetch", {{"Input", {"l2_distance"}}}, {}, {{"col", 0}}, root_block); - - // flush - program.Proto(); - } - - protected: - ProgramDesc init_pdesc_; - ProgramDesc pdesc_; - std::vector> inputs_; - std::vector> dims_; -}; - -class ExecutorTesterFeedAndFetch : public ::testing::Test { - public: - virtual void SetUp() override { - auto temp_root_block = pdesc_.add_blocks(); - temp_root_block->set_idx(0); - temp_root_block->set_parent_idx(-1); - - // wrap to BlockDescBind - paddle::framework::ProgramDescBind& program = - paddle::framework::ProgramDescBind::Instance(&pdesc_); - paddle::framework::BlockDescBind* root_block = program.Block(0); - - std::vector dim{6}; - - AddOp("feed", {}, {{"Out", {"a"}}}, {{"dims", dim}, {"col", 0}}, - root_block); - AddOp("feed", {}, {{"Out", {"b"}}}, {{"dims", dim}, {"col", 1}}, - root_block); - AddOp("fetch", {{"Input", {"a"}}}, {}, {{"col", 0}}, root_block); - AddOp("fetch", {{"Input", {"b"}}}, {}, {{"col", 1}}, root_block); - - // flush - program.Proto(); - - std::vector vec1 = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}; - std::vector vec2 = {4.0, 5.0, 6.0, 7.0, 8.0, 9.0}; - inputs_.push_back(vec1); - inputs_.push_back(vec2); - dims_.push_back({static_cast(vec1.size())}); - dims_.push_back({static_cast(vec2.size())}); - } - - protected: - ProgramDesc pdesc_; - std::vector> inputs_; - std::vector> dims_; -}; - -#ifndef PADDLE_WITH_CUDA -TEST_F(ExecutorTesterRandom, CPU) { - std::vector places; - CPUPlace cpu_place; - places.push_back(cpu_place); - - // We have a global Scope and BuddyAllocator, and we must ensure - // global BuddyAllocator is initialized before global Scope. Thus, - // global Scope will deconstruct before BuddyAllocator. Otherwise, - // "pointer being freed was not allocated" error will appear. 
- paddle::memory::Used(cpu_place); - - std::unique_ptr executor(new Executor(places)); - - executor->Run(init_pdesc_, &GetGlobalScope(), 0); - SetFeedVariable(inputs_, dims_); - executor->Run(pdesc_, &GetGlobalScope(), 0); - std::vector> result = GetFetchVariable(); -} - -TEST_F(ExecutorTesterFeedAndFetch, CPU) { - std::vector places; - CPUPlace cpu_place; - places.push_back(cpu_place); - - // We have a global Scope and BuddyAllocator, and we must ensure - // global BuddyAllocator is initialized before global Scope. Thus, - // global Scope will deconstruct before BuddyAllocator. Otherwise, - // "pointer being freed was not allocated" error will appear. - paddle::memory::Used(cpu_place); - - std::unique_ptr executor(new Executor(places)); - - for (int batch_id = 0; batch_id < 3; batch_id++) { - SetFeedVariable(inputs_, dims_); - executor->Run(pdesc_, &GetGlobalScope(), 0); - std::vector> result = GetFetchVariable(); - PADDLE_ENFORCE_EQ(result.size(), inputs_.size()); - for (size_t i = 0; i < result.size(); ++i) { - PADDLE_ENFORCE_EQ(result[i].size(), inputs_[i].size()); - for (size_t j = 0; j < result[i].size(); ++j) { - PADDLE_ENFORCE_EQ(result[i][j], inputs_[i][j]); - } - } - } -} -#else -TEST_F(ExecutorTesterRandom, GPU) { - std::vector places; - GPUPlace gpu_place(0); - places.push_back(gpu_place); - - // We have a global Scope and BuddyAllocator, and we must ensure - // global BuddyAllocator is initialized before global Scope. Thus, - // global Scope will deconstruct before BuddyAllocator. Otherwise, - // "pointer being freed was not allocated" error will appear. - // If paddle is compiled with GPU, both CPU and GPU BuddyAllocator - // need to be used at first. - paddle::memory::Used(CPUPlace()); - paddle::memory::Used(gpu_place); - - std::unique_ptr executor(new Executor(places)); - - executor->Run(init_pdesc_, &GetGlobalScope(), 0); - for (int batch_id = 0; batch_id < 3; batch_id++) { - SetFeedVariable(inputs_, dims_); - executor->Run(pdesc_, &GetGlobalScope(), 0); - } -} - -TEST_F(ExecutorTesterFeedAndFetch, GPU) { - std::vector places; - GPUPlace gpu_place(0); - places.push_back(gpu_place); - // We have a global Scope and BuddyAllocator, and we must ensure - // global BuddyAllocator is initialized before global Scope. Thus, - // global Scope will deconstruct before BuddyAllocator. Otherwise, - // "pointer being freed was not allocated" error will appear. - // If paddle is compiled with GPU, both CPU and GPU BuddyAllocator - // need to be used at first. - paddle::memory::Used(CPUPlace()); - paddle::memory::Used(gpu_place); - - std::unique_ptr executor(new Executor(places)); - - for (int batch_id = 0; batch_id < 3; batch_id++) { - SetFeedVariable(inputs_, dims_); - executor->Run(pdesc_, &GetGlobalScope(), 0); - std::vector> result = GetFetchVariable(); - PADDLE_ENFORCE_EQ(result.size(), inputs_.size()); - for (size_t i = 0; i < result.size(); ++i) { - PADDLE_ENFORCE_EQ(result[i].size(), inputs_[i].size()); - for (size_t j = 0; j < result[i].size(); ++j) { - PADDLE_ENFORCE_EQ(result[i][j], inputs_[i][j]); - } - } - } -} - -DECLARE_double(fraction_of_gpu_memory_to_use); - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - // Use less GPU memory for unittest. 
- FLAGS_fraction_of_gpu_memory_to_use = 0.25; - return RUN_ALL_TESTS(); -} - -#endif diff --git a/paddle/framework/feed_fetch_method.h b/paddle/framework/feed_fetch_method.h new file mode 100644 index 0000000000000000000000000000000000000000..7feacb1e24708411e7fbb610f9909447cba9e291 --- /dev/null +++ b/paddle/framework/feed_fetch_method.h @@ -0,0 +1,58 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "glog/logging.h" +#include "paddle/framework/feed_fetch_type.h" +#include "paddle/framework/scope.h" +#include "paddle/framework/variable.h" + +namespace paddle { +namespace framework { + +void SetFeedVariable(Scope* scope, const LoDTensor& input, + const std::string& var_name, size_t index) { + // If var_name Variable is not found in GlobalScope, a new variable will + // be created. + VLOG(3) << "SetFeedVariable name=" << var_name << " index=" << index; + Variable* g_feed_value = scope->Var(var_name); + auto& feed_inputs = + *(g_feed_value->GetMutable>()); + if (index >= feed_inputs.size()) { + feed_inputs.resize(index + 1); + } + // shared data with input tensor + feed_inputs[index].ShareDataWith(input); + // set lod + feed_inputs[index].set_lod(input.lod()); +} + +LoDTensor& GetFetchVariable(const Scope& scope, const std::string& var_name, + size_t index) { + // Since we want to fetch LodTensor from a variable, the variable must + // be created alreadly. + Variable* g_fetch_value = scope.FindVar(var_name); + PADDLE_ENFORCE(g_fetch_value->IsType(), + "Only %s can be invoked by GetFetchVariable", + typeid(FeedFetchList).name()); + auto& fetch_outputs = *g_fetch_value->GetMutable(); + auto& tensor = fetch_outputs[index]; + VLOG(3) << "Fetch " << var_name << " with index " << index + << " shape= " << tensor.dims(); + PADDLE_ENFORCE_LT(index, fetch_outputs.size()); + return tensor; +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/feed_fetch_type.h b/paddle/framework/feed_fetch_type.h new file mode 100644 index 0000000000000000000000000000000000000000..bc4ae440fc708f696c18bb9d5ab3ba7dd59e21ab --- /dev/null +++ b/paddle/framework/feed_fetch_type.h @@ -0,0 +1,24 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
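The new `feed_fetch_method.h` helpers route data into and out of a run through named, indexed variables held in the scope. A simplified sketch of the same pattern, assuming plain `std::vector<float>` slots instead of `LoDTensor` and a global map standing in for the scope:

```cpp
#include <cassert>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Stand-in for a feed/fetch list variable: one slot per column index.
using FeedFetchList = std::vector<std::vector<float>>;

// Stand-in for a scope: variable name -> feed/fetch list.
std::map<std::string, FeedFetchList> scope;

void SetFeedVariable(const std::vector<float>& input,
                     const std::string& var_name, size_t index) {
  auto& feed_inputs = scope[var_name];  // created on first use
  if (index >= feed_inputs.size()) feed_inputs.resize(index + 1);
  feed_inputs[index] = input;           // the real code shares data instead of copying
}

const std::vector<float>& GetFetchVariable(const std::string& var_name,
                                           size_t index) {
  auto it = scope.find(var_name);
  assert(it != scope.end() && index < it->second.size());
  return it->second[index];
}

int main() {
  SetFeedVariable({1.f, 2.f, 3.f}, "feed", 0);
  // ... an executor run would read "feed" and write "fetch" here ...
  SetFeedVariable({4.f, 5.f}, "fetch", 0);  // pretend the run produced this
  std::cout << GetFetchVariable("fetch", 0).size() << "\n";  // 2
  return 0;
}
```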
*/ + +#pragma once +#include +#include "paddle/framework/lod_tensor.h" + +namespace paddle { +namespace framework { +using FeedFetchType = LoDTensor; +using FeedFetchList = std::vector; +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/framework.proto b/paddle/framework/framework.proto index 65760b07ada7a63a568cb8296eef35a8aa18d9ff..3d023535ef6c49326481ec7edc2bfc9d7c0d4ffa 100644 --- a/paddle/framework/framework.proto +++ b/paddle/framework/framework.proto @@ -55,6 +55,7 @@ message OpDesc { repeated Var inputs = 1; repeated Var outputs = 2; repeated Attr attrs = 4; + optional bool is_target = 5 [ default = false ]; }; // OpProto describes a C++ framework::OperatorBase derived class. @@ -67,6 +68,7 @@ message OpProto { optional bool duplicable = 3 [ default = false ]; optional bool intermediate = 4 [ default = false ]; + optional bool dispensable = 5 [ default = false ]; } // AttrProto describes the C++ type Attribute. @@ -111,6 +113,8 @@ message VarDesc { enum VarType { LOD_TENSOR = 1; SELECTED_ROWS = 2; + FEED_MINIBATCH = 3; + FETCH_LIST = 4; } required string name = 1; required VarType type = 2; diff --git a/paddle/framework/grad_op_desc_maker.h b/paddle/framework/grad_op_desc_maker.h index 1219e0487531b19b00adde5a9aa2bde51bfc0aa8..94944c79b64d38e799df436de874cabc3661e30a 100644 --- a/paddle/framework/grad_op_desc_maker.h +++ b/paddle/framework/grad_op_desc_maker.h @@ -149,5 +149,13 @@ class DefaultGradOpDescMaker : public SingleGradOpDescMaker { } }; +class EmptyGradOpMaker : public GradOpDescMakerBase { + public: + using GradOpDescMakerBase::GradOpDescMakerBase; + std::vector> operator()() const override { + return {}; + } +}; + } // namespace framework } // namespace paddle diff --git a/paddle/framework/lod_tensor.cc b/paddle/framework/lod_tensor.cc index 5b7badf89c1714331bae9fc8cf94c8da2c66dbad..7c0ea0df7829883ccb36772634263cd33ff32e1d 100644 --- a/paddle/framework/lod_tensor.cc +++ b/paddle/framework/lod_tensor.cc @@ -25,31 +25,50 @@ LoD SliceLevels(const LoD& in, size_t level_begin, size_t level_end) { for (size_t i = level_begin; i < level_end; i++) { new_lod.emplace_back(in.at(i)); } + // transform the lowest level to absolute offset. + LoD abs_offset_lod = ToAbsOffset(in); + new_lod.back() = abs_offset_lod[level_end - 1]; return new_lod; } LoD SliceInLevel(const LoD& in, size_t level, size_t elem_begin, size_t elem_end) { - // slice the lod. - LoD new_lod; - new_lod.reserve(in.size() - level); - auto start = in.at(level)[elem_begin]; - auto end = in.at(level)[elem_end]; - - for (auto it = in.begin() + level; it != in.end(); it++) { - auto it_begin = std::find(it->begin(), it->end(), start); - auto it_end = std::find(it_begin, it->end(), end); - PADDLE_ENFORCE(it_begin != it->end(), "error in parsing lod info"); - PADDLE_ENFORCE(it_end != it->end(), "error in parsing lod info"); - new_lod.emplace_back(it_begin, it_end + 1); - // reset offset if tensor is copyed and sliced. 
- std::transform(new_lod.back().begin(), new_lod.back().end(), - new_lod.back().begin(), - [start](int v) { return v - start; }); - PADDLE_ENFORCE_EQ(new_lod.back().front(), 0, "error in slice LoD"); + PADDLE_ENFORCE_LT(level, in.size()); + PADDLE_ENFORCE_LT(elem_end, in[level].size()); + + LoD res; + res.resize(in.size() - level); + // copy the first level + res[0].assign(in[level].begin() + elem_begin, + in[level].begin() + elem_end + 1); + for (size_t lvl = 1; lvl < res.size(); lvl++) { + const auto& in_level = in[level + lvl]; + const auto& above_level = res[lvl - 1]; + auto& out_level = res[lvl]; + out_level.assign(in_level.begin() + above_level.front(), + in_level.begin() + above_level.back() + 1); } - PADDLE_ENFORCE_LE(new_lod.size(), in.size()); - return new_lod; + for (size_t lvl = 0; lvl < res.size(); lvl++) { + // to make the first offset equals 0, all the elements minus the first + // element + size_t front = res[lvl].front(); + for (auto& ele : res[lvl]) { + ele -= front; + } + } + return res; +} + +LoD ToAbsOffset(const LoD& in) { + // the lowest level stores relative offsets + if (in.empty() || in.size() == 1) return in; + LoD result = in; + for (int level = result.size() - 2; level >= 0; level--) { + for (auto& ele : result[level]) { + ele = result[level + 1][ele]; + } + } + return result; } bool operator==(const LoD& a, const LoD& b) { @@ -75,17 +94,7 @@ bool operator==(const LoD& a, const LoD& b) { size_t LoDTensor::NumElements(size_t level, size_t idx) const { PADDLE_ENFORCE_LT(level, NumLevels()); PADDLE_ENFORCE_LT(idx, NumElements(level)); - // the last level of LoD, just return number of records in Tensor - if (level == NumLevels() - 1) { - return lod_[level][idx + 1] - lod_[level][idx]; - } - // high level of LoD, and there is another lower level, return number of - // lower-level elements - auto tmp = SliceInLevel(lod_, level, idx, idx + 1); - PADDLE_ENFORCE_GE(tmp.size(), 2); - // there is a 0 as a placeholder stored in LoD, so the number of elements - // equals lod.size() - 1 - return tmp[1].size() - 1; + return lod_[level][idx + 1] - lod_[level][idx]; } void LoDTensor::ShrinkLevels(size_t level_begin, size_t level_end) { diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index 4db36ee76609ac6360fe2fc7b4a366e0284d1016..dec59a5750ab24244a013282b4547fb18d4991ac 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -39,23 +39,36 @@ using Vector = thrust::host_vector< #endif /* - * 3-level LoD stores + * LoD is short for Level of Details. * - * 0 10 20 - * 0 5 10 15 20 - * 0 2 5 7 10 12 15 20 - * - * - in a level, each element indicates offset in the underlying Tensor + * - in a level, each element indicates relative offset of the lower level * - the first element should be 0 and that indicates that this sequence start * from 0 * - each sequence's begin and end(no-inclusive) is level[id, id+1] + * + * For example: + * 3-level LoD stores + * + * 0 2 3 + * 0 2 4 7 + * 0 2 5 7 10 12 15 20 */ using LoD = std::vector>; +/* + * Slice levels from a LoD. + * NOTE the lowest level should always be the absolute offsets of the underlying + * tensor instances. So if higher layers are sliced without the lowest level, + * the lower level of the sliced LoD will be transformed to the absolute offset. + */ LoD SliceLevels(const LoD& in, size_t level_begin, size_t level_end); LoD SliceInLevel(const LoD& in, size_t level, size_t elem_begin, size_t elem_end); +/* + * Transform an LoD from relative offsets to absolute offsets. 
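With the new convention each LoD level indexes into the level below it, and only the lowest level holds absolute offsets into the tensor; `ToAbsOffset` rewrites the upper levels bottom-up. A standalone sketch of that transform, applied to the example LoD from the header comment:

```cpp
#include <iostream>
#include <vector>

using LoD = std::vector<std::vector<size_t>>;

// Bottom-up: once level+1 holds absolute offsets, looking an entry up in it
// turns the current level's relative indices into absolute offsets too.
LoD ToAbsOffset(const LoD& in) {
  if (in.size() <= 1) return in;
  LoD result = in;
  for (int level = static_cast<int>(result.size()) - 2; level >= 0; --level) {
    for (auto& ele : result[level]) {
      ele = result[level + 1][ele];
    }
  }
  return result;
}

int main() {
  // The 3-level example from lod_tensor.h: upper levels are relative indices.
  LoD relative = {{0, 2, 3},
                  {0, 2, 4, 7},
                  {0, 2, 5, 7, 10, 12, 15, 20}};
  LoD abs = ToAbsOffset(relative);
  for (const auto& level : abs) {
    for (size_t v : level) std::cout << v << " ";
    std::cout << "\n";
  }
  // Prints:
  // 0 10 20
  // 0 5 10 20
  // 0 2 5 7 10 12 15 20
  return 0;
}
```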
+ */ +LoD ToAbsOffset(const LoD& in); bool operator==(const LoD& a, const LoD& b); @@ -74,12 +87,12 @@ class LoDTensor : public Tensor { LoD lod() const { return lod_; } /* - * Get a element from LoD. + * Get the start offset and end offset of an element from LoD. */ - size_t lod_element(size_t level, size_t elem) const { + std::pair lod_element(size_t level, size_t elem) const { PADDLE_ENFORCE_LT(level, NumLevels()); PADDLE_ENFORCE_LT(elem, NumElements(level)); - return (lod_)[level][elem]; + return std::make_pair((lod_)[level][elem], (lod_)[level][elem + 1]); } /* diff --git a/paddle/framework/lod_tensor_test.cc b/paddle/framework/lod_tensor_test.cc index 44f09f584fb752d7003baa804979f3bb5cd9d651..e1e15abecf5534fb4fd94f7e2b65230c74d175de 100644 --- a/paddle/framework/lod_tensor_test.cc +++ b/paddle/framework/lod_tensor_test.cc @@ -30,8 +30,8 @@ class LoDTensorTester : public ::testing::Test { // 0 5 10 15 20 // 0 2 5 7 10 12 15 20 LoD lod; - lod.push_back(std::vector{0, 10, 20}); - lod.push_back(std::vector{0, 5, 10, 15, 20}); + lod.push_back(std::vector{0, 2, 3}); + lod.push_back(std::vector{0, 2, 5, 8}); lod.push_back(std::vector{0, 2, 5, 7, 10, 12, 15, 17, 20}); ASSERT_EQ(lod.size(), 3UL); @@ -52,14 +52,14 @@ TEST_F(LoDTensorTester, NumLevels) { ASSERT_EQ(lod_tensor_.NumLevels(), 3UL); } TEST_F(LoDTensorTester, NumElements) { ASSERT_EQ(lod_tensor_.NumElements(0), 2UL); - ASSERT_EQ(lod_tensor_.NumElements(1), 4UL); + ASSERT_EQ(lod_tensor_.NumElements(1), 3UL); ASSERT_EQ(lod_tensor_.NumElements(2), 8UL); } TEST_F(LoDTensorTester, NumElements2) { ASSERT_EQ(lod_tensor_.NumElements(0, 0), 2UL); - ASSERT_EQ(lod_tensor_.NumElements(0, 1), 2UL); - ASSERT_EQ(lod_tensor_.NumElements(1, 1), 2UL); + ASSERT_EQ(lod_tensor_.NumElements(0, 1), 1UL); + ASSERT_EQ(lod_tensor_.NumElements(1, 1), 3UL); } TEST_F(LoDTensorTester, ShrinkLevels) { @@ -68,17 +68,16 @@ TEST_F(LoDTensorTester, ShrinkLevels) { LoDTensor new_lod_tensor = lod_tensor_; new_lod_tensor.ShrinkLevels(level, level + 1); ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL); - ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level)); ASSERT_EQ(new_lod_tensor.data(), lod_tensor_.data()); } // shrink 2 level for (size_t level = 0; level < 2UL; ++level) { LoDTensor new_lod_tensor = lod_tensor_; new_lod_tensor.ShrinkLevels(level, level + 2); + // the lowest level's last element should be the tensor's batch_size. 
+ ASSERT_EQ(new_lod_tensor.lod().back().back(), + lod_tensor_.lod().back().back()); ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); - ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level)); - ASSERT_EQ(new_lod_tensor.NumElements(1), - lod_tensor_.NumElements(level + 1)); ASSERT_EQ(new_lod_tensor.data(), lod_tensor_.data()); } } @@ -86,19 +85,19 @@ TEST_F(LoDTensorTester, ShrinkLevels) { TEST_F(LoDTensorTester, ShrinkInLevel) { size_t level = 0; LoDTensor new_lod_tensor = lod_tensor_; - new_lod_tensor.ShrinkInLevel(level, 0, 2); + new_lod_tensor.ShrinkInLevel(level, 0, 1); EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL); - EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL); - EXPECT_EQ(new_lod_tensor.NumElements(1), 4UL); - EXPECT_EQ(new_lod_tensor.NumElements(2), 8UL); + EXPECT_EQ(new_lod_tensor.NumElements(0), 1UL); + EXPECT_EQ(new_lod_tensor.NumElements(1), 2UL); + EXPECT_EQ(new_lod_tensor.NumElements(2), 5UL); ASSERT_EQ(new_lod_tensor.data(), lod_tensor_.data()); level = 1; new_lod_tensor = lod_tensor_; - new_lod_tensor.ShrinkInLevel(level, 0, 2); + new_lod_tensor.ShrinkInLevel(level, 1, 2); ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); - ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL); - ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL); + ASSERT_EQ(new_lod_tensor.NumElements(0), 1UL); + ASSERT_EQ(new_lod_tensor.NumElements(1), 3UL); ASSERT_EQ(new_lod_tensor.data(), lod_tensor_.data()); } diff --git a/paddle/framework/lod_tensor_test.cu b/paddle/framework/lod_tensor_test.cu index 647d07536dd070bc37137fc01f683ec07ba7d6f4..25041024cb51d4d2f360edb06571a0a99dcf29b1 100644 --- a/paddle/framework/lod_tensor_test.cu +++ b/paddle/framework/lod_tensor_test.cu @@ -36,8 +36,8 @@ TEST(LoDTensor, LoDInGPU) { lod_tensor.mutable_data(place); lod_tensor.set_lod(src_lod); - CHECK_EQ(lod_tensor.lod_element(0, 2), 4UL); - CHECK_EQ(lod_tensor.lod_element(0, 4), 8UL); + CHECK_EQ(lod_tensor.lod_element(0, 2).first, 4UL); + CHECK_EQ(lod_tensor.lod_element(0, 4).first, 8UL); auto lod = lod_tensor.lod(); diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index ef207dc54ebe6cc72d9f1e428dd2aaed5ad3dbf0..18fabe481dac9c1b70e7c30cb83ec5ee8ac47026 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -220,9 +220,12 @@ static InferShapeFuncMap &InferShapeFuncs() { void OpDescBind::CheckAttrs() { PADDLE_ENFORCE(!Type().empty(), "CheckAttr() can not be called before type is setted."); - const auto *checker = OpInfoMap::Instance().Get(Type()).Checker(); - PADDLE_ENFORCE_NOT_NULL(checker, "Operator \"%s\" has no registered checker.", - Type()); + auto *checker = OpInfoMap::Instance().Get(Type()).Checker(); + if (checker == nullptr) { + // checker is not configured. That operator could be generated by Paddle, + // not by users. 
+ return; + } checker->Check(attrs_); } @@ -236,5 +239,19 @@ void OpDescBind::InferShape(const BlockDescBind &block) const { it->second(&ctx); } +void OpDescBind::InferVarType(BlockDescBind *block) const { + auto &info = OpInfoMap::Instance().Get(this->Type()); + if (info.infer_var_type_) { + info.infer_var_type_(*this, block); + } else { + // all output type is LoDTensor by default + for (auto &out_pair : this->outputs_) { + for (auto &out_var_name : out_pair.second) { + block->Var(out_var_name)->SetType(VarDesc::LOD_TENSOR); + } + } + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index 73b5cf846f702fe21277ae139156ec9784aa79b3..313bf538ac7c947c5e77ca0ead6bb53e6a156478 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -102,6 +102,8 @@ class OpDescBind { void InferShape(const BlockDescBind &block) const; + void InferVarType(BlockDescBind *block) const; + void Flush(); private: diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h index c504f69e30bb899c183bd4281d2eadb50fd3b376..e926180780609c0a8ffc6270627835c50bbce782 100644 --- a/paddle/framework/op_info.h +++ b/paddle/framework/op_info.h @@ -19,7 +19,6 @@ #include #include "paddle/framework/attribute.h" -#include "paddle/framework/op_desc.h" #include "paddle/framework/type_defs.h" #include "paddle/platform/macros.h" @@ -31,6 +30,7 @@ struct OpInfo { GradOpMakerFN grad_op_maker_; OpProto* proto_{nullptr}; OpAttrChecker* checker_{nullptr}; + InferVarTypeFN infer_var_type_; bool HasOpProtoAndChecker() const { return proto_ != nullptr && checker_ != nullptr; diff --git a/paddle/framework/op_proto_maker.h b/paddle/framework/op_proto_maker.h index a134befd90a1eaeff6f6ea62f11412df63cdc394..44e8ab16895cc604f85bb83e240eab55739f8ba0 100644 --- a/paddle/framework/op_proto_maker.h +++ b/paddle/framework/op_proto_maker.h @@ -44,6 +44,11 @@ class OpProtoAndCheckerMaker { var_->set_intermediate(true); return *this; } + + VariableBuilder& AsDispensable() { + var_->set_dispensable(true); + return *this; + } }; VariableBuilder AddInput(const std::string& name, const std::string& comment); diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index 504afbd5dbacf7185f92e0000d19666230e2fb42..c2f2438edf6daadf26cbc6db37f6668739ab1726 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -43,12 +43,13 @@ static VariableNameMap ConvertOpDescVarsToVarNameMap( return ret_val; } -std::unique_ptr OpRegistry::CreateOp(const OpDesc& op_desc) { +std::unique_ptr OpRegistry::CreateOp(const OpDesc& op_desc, + ProgramDesc* program) { VariableNameMap inputs = ConvertOpDescVarsToVarNameMap(op_desc.inputs()); VariableNameMap outputs = ConvertOpDescVarsToVarNameMap(op_desc.outputs()); AttributeMap attrs; for (auto& attr : op_desc.attrs()) { - attrs[attr.name()] = GetAttrValue(attr); + attrs[attr.name()] = GetAttrValue(attr, program); } return CreateOp(op_desc.type(), inputs, outputs, attrs); diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 226e8ddcd4b1a2630e0eea00ee6c9f6af6bd5d20..ed85c386ec2632604bf5faf0ff9b1a087eb9c276 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -20,6 +20,8 @@ limitations under the License. 
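`OpDescBind::InferVarType` above falls back to marking every output as `LOD_TENSOR` when the operator registered no `InferVarTypeFN`. A small sketch of that dispatch, assuming a hypothetical registry keyed by op type rather than the real `OpInfoMap`:

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <vector>

enum class VarType { LOD_TENSOR, SELECTED_ROWS };

// Block stand-in: variable name -> inferred type.
using Block = std::map<std::string, VarType>;

// Op stand-in: a type name plus output variable names.
struct OpDesc {
  std::string type;
  std::vector<std::string> outputs;
};

using InferVarTypeFN = std::function<void(const OpDesc&, Block*)>;

// Hypothetical per-op registry standing in for OpInfoMap.
std::map<std::string, InferVarTypeFN> registry;

void InferVarType(const OpDesc& op, Block* block) {
  auto it = registry.find(op.type);
  if (it != registry.end()) {
    it->second(op, block);                  // op-specific rule
  } else {
    for (const auto& out : op.outputs) {
      (*block)[out] = VarType::LOD_TENSOR;  // default: every output is a LoDTensor
    }
  }
}

int main() {
  // e.g. the lookup-table gradient emits SelectedRows for its output.
  registry["lookup_table_grad"] = [](const OpDesc& op, Block* block) {
    for (const auto& out : op.outputs) (*block)[out] = VarType::SELECTED_ROWS;
  };

  Block block;
  InferVarType({"mul", {"Out"}}, &block);                   // no rule -> LOD_TENSOR
  InferVarType({"lookup_table_grad", {"W@GRAD"}}, &block);  // registered rule
  std::cout << (block["Out"] == VarType::LOD_TENSOR) << " "
            << (block["W@GRAD"] == VarType::SELECTED_ROWS) << "\n";  // 1 1
  return 0;
}
```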
*/ #include #include #include + +#include "glog/logging.h" // For VLOG() #include "paddle/framework/attribute.h" #include "paddle/framework/details/op_registry.h" #include "paddle/framework/framework.pb.h" @@ -45,18 +47,15 @@ class Registrar { template struct OperatorRegistrar : public Registrar { - explicit OperatorRegistrar(const char* op_type) : op_type(op_type) { + explicit OperatorRegistrar(const char* op_type) { PADDLE_ENFORCE(!OpInfoMap::Instance().Has(op_type), "'%s' is registered more than once.", op_type); static_assert(sizeof...(ARGS) != 0, "OperatorRegistrar should be invoked at least by OpClass"); + OpInfo info; details::OperatorRegistrarRecursive<0, false, ARGS...>(op_type, &info); OpInfoMap::Instance().Insert(op_type, info); } - - const char* op_type; - - OpInfo info; }; class OpRegistry { @@ -77,21 +76,12 @@ class OpRegistry { const VariableNameMap& outputs, AttributeMap attrs); - static std::unique_ptr CreateOp(const OpDesc& op_desc); + static std::unique_ptr CreateOp(const OpDesc& op_desc, + ProgramDesc* program); static std::unique_ptr CreateOp(const OpDescBind& op_desc); }; -template -class OpRegistrar : public Registrar { - public: - explicit OpRegistrar(const char* op_type) { OpRegistrar(op_type, ""); } - OpRegistrar(const char* op_type, const char* grad_op_type) { - OpRegistry::RegisterOp(op_type, - grad_op_type); - } -}; - template struct OpKernelRegistrarFunctor; diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc index b860fe6cac773d1e85adecc43f5dfec42b6c7661..6289125d7c782e542e5c55e1d4403836351b7e05 100644 --- a/paddle/framework/op_registry_test.cc +++ b/paddle/framework/op_registry_test.cc @@ -74,7 +74,7 @@ TEST(OpRegistry, CreateOp) { attr->set_type(paddle::framework::AttrType::FLOAT); attr->set_f(scale); - auto op = paddle::framework::OpRegistry::CreateOp(op_desc); + auto op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); paddle::framework::Scope scope; paddle::platform::CPUDeviceContext dev_ctx; op->Run(scope, dev_ctx); @@ -95,7 +95,7 @@ TEST(OpRegistry, IllegalAttr) { bool caught = false; try { - paddle::framework::OpRegistry::CreateOp(op_desc); + paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); } catch (paddle::platform::EnforceNotMet err) { caught = true; std::string msg = "larger_than check fail"; @@ -115,7 +115,7 @@ TEST(OpRegistry, DefaultValue) { ASSERT_TRUE(op_desc.IsInitialized()); - auto op = paddle::framework::OpRegistry::CreateOp(op_desc); + auto op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); paddle::framework::Scope scope; paddle::platform::CPUDeviceContext dev_ctx; op->Run(scope, dev_ctx); @@ -131,7 +131,7 @@ TEST(OpRegistry, CustomChecker) { // attr 'test_attr' is not set bool caught = false; try { - paddle::framework::OpRegistry::CreateOp(op_desc); + paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); } catch (paddle::platform::EnforceNotMet err) { caught = true; std::string msg = "Attribute 'test_attr' is required!"; @@ -149,7 +149,7 @@ TEST(OpRegistry, CustomChecker) { attr->set_i(3); caught = false; try { - paddle::framework::OpRegistry::CreateOp(op_desc); + paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); } catch (paddle::platform::EnforceNotMet err) { caught = true; std::string msg = "'test_attr' must be even!"; @@ -166,7 +166,7 @@ TEST(OpRegistry, CustomChecker) { attr->set_name("test_attr"); attr->set_type(paddle::framework::AttrType::INT); attr->set_i(4); - auto op = paddle::framework::OpRegistry::CreateOp(op_desc); + auto op = 
paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); paddle::platform::CPUDeviceContext dev_ctx; paddle::framework::Scope scope; op->Run(scope, dev_ctx); diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 2fca816f353635d3bff184323755961ee82fbb68..a67625fa88fd2fbe4db43241ee824519ceac7017 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -252,5 +252,20 @@ std::ostream& operator<<(std::ostream& os, return os; } +bool OpSupportGPU(const std::string& op_type) { + auto& all_kernels = OperatorWithKernel::AllOpKernels(); + auto it = all_kernels.find(op_type); + if (it == all_kernels.end()) { + // All control operator must support GPU + return true; + } + for (auto& kern_pair : it->second) { + if (platform::is_gpu_place(kern_pair.first.place_)) { + return true; + } + } + return false; +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index cf15f9933ab3bc881add3d45b7ca17194a70e0f1..0d0304ac9e13089ef533b0a47f0ec989c8fd7078 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -20,12 +20,13 @@ limitations under the License. */ #include #include -#include "op_info.h" +#include "glog/logging.h" // For VLOG #include "paddle/framework/attribute.h" #include "paddle/framework/block_desc.h" #include "paddle/framework/data_type.h" #include "paddle/framework/framework.pb.h" #include "paddle/framework/lod_tensor.h" +#include "paddle/framework/op_info.h" #include "paddle/framework/scope.h" #include "paddle/framework/shape_inference.h" #include "paddle/framework/tensor.h" @@ -326,37 +327,47 @@ class CompileTimeInferShapeContext : public InferShapeContext { bool HasInput(const std::string& name) const override { const std::vector& input_names = op_.Input(name); auto length = input_names.size(); + if (length == 0) { + return false; + } PADDLE_ENFORCE_EQ(length, 1UL, "Input(%s) should have only one value, " "but it have %d now", name, length); - return block_.HasVar(input_names[0]); + return block_.HasVarRecursive(input_names[0]); } bool HasOutput(const std::string& name) const override { const std::vector& output_names = op_.Output(name); auto length = output_names.size(); + if (length == 0) { + return false; + } PADDLE_ENFORCE_EQ(length, 1UL, "Output(%s) should have only one value, " "but it have %d now", name, length); - return block_.HasVar(output_names[0]); + return block_.HasVarRecursive(output_names[0]); } bool HasInputs(const std::string& name) const override { const std::vector& input_names = op_.Input(name); - PADDLE_ENFORCE(!input_names.empty(), "Inputs(%s) length is 0", name); + if (input_names.empty()) { + return false; + } for (auto& input : input_names) { - if (!block_.HasVar(input)) return false; + if (!block_.HasVarRecursive(input)) return false; } return true; } bool HasOutputs(const std::string& name) const override { const std::vector& output_names = op_.Output(name); - PADDLE_ENFORCE(!output_names.empty(), "Inputs(%s) length is 0", name); + if (output_names.empty()) { + return false; + } for (auto& output : output_names) { - if (!block_.HasVar(output)) return false; + if (!block_.HasVarRecursive(output)) return false; } return true; } @@ -403,11 +414,11 @@ class CompileTimeInferShapeContext : public InferShapeContext { private: DDim GetDim(const std::string& name) const override { - return framework::make_ddim(block_.FindVar(name)->Shape()); + return framework::make_ddim(block_.FindVarRecursive(name)->Shape()); } void SetDim(const 
std::string& name, const DDim& dim) override { - block_.FindVar(name)->SetShape(framework::vectorize(dim)); + block_.FindVarRecursive(name)->SetShape(framework::vectorize(dim)); } const OpDescBind& op_; @@ -420,13 +431,27 @@ class RuntimeInferShapeContext : public InferShapeContext { : op_(op), scope_(scope) {} bool HasInput(const std::string& name) const override { - auto ipt = op_.Input(name); + auto& ins = Inputs(name); + size_t length = ins.size(); + if (length == 0) { + return false; + } + PADDLE_ENFORCE_EQ(length, 1UL, "Input %s should have more than one inputs", + name); + auto ipt = ins[0]; auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt); return var != nullptr; } bool HasOutput(const std::string& name) const override { - auto ipt = op_.Output(name); + auto& outs = Outputs(name); + size_t length = outs.size(); + if (length == 0) { + return false; + } + PADDLE_ENFORCE_EQ(length, 1UL, "Output %s should have more than one inputs", + name); + auto ipt = outs[0]; auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt); return var != nullptr; } @@ -573,6 +598,7 @@ class OperatorWithKernel : public OperatorBase { void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const final { + VLOG(3) << "Running operator " << this->Type(); RuntimeInferShapeContext infer_shape_ctx(*this, scope); this->InferShape(&infer_shape_ctx); @@ -647,5 +673,7 @@ class OperatorWithKernel : public OperatorBase { std::ostream& operator<<(std::ostream& os, const OperatorWithKernel::OpKernelKey& kernel_key); +extern bool OpSupportGPU(const std::string& op_type); + } // namespace framework } // namespace paddle diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc index d7890ac8d0af2171271a0cfccd356563c7604e72..c358f1a2b6ee3174b8c336ba1d212be7c5aa15c6 100644 --- a/paddle/framework/operator_test.cc +++ b/paddle/framework/operator_test.cc @@ -83,7 +83,7 @@ TEST(OperatorBase, all) { paddle::platform::CPUDeviceContext device_context; paddle::framework::Scope scope; - auto op = paddle::framework::OpRegistry::CreateOp(op_desc); + auto op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); scope.Var("OUT1"); ASSERT_EQ(paddle::framework::op_run_num, 0); op->Run(scope, device_context); @@ -208,7 +208,7 @@ TEST(OpKernel, all) { paddle::platform::CPUDeviceContext cpu_device_context; paddle::framework::Scope scope; - auto op = paddle::framework::OpRegistry::CreateOp(op_desc); + auto op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 0); op->Run(scope, cpu_device_context); ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 1); @@ -244,7 +244,7 @@ TEST(OpKernel, multi_inputs) { scope.Var("y0")->GetMutable(); scope.Var("y1")->GetMutable(); - auto op = paddle::framework::OpRegistry::CreateOp(op_desc); + auto op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); op->Run(scope, cpu_device_context); } diff --git a/paddle/framework/program_desc.cc b/paddle/framework/program_desc.cc index fcb7292884275d972377983cb3ba1bcd86fb8348..8e99bba81117c9cc50227122527d6ab9a421c251 100644 --- a/paddle/framework/program_desc.cc +++ b/paddle/framework/program_desc.cc @@ -18,27 +18,10 @@ limitations under the License. 
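The new `OpSupportGPU` helper answers "does any registered kernel of this op target a GPU place", treating ops with no kernels at all (control ops) as GPU-capable. A toy sketch of the same lookup over a hypothetical kernel table:

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>

enum class Place { kCPU, kGPU };

// Hypothetical kernel table: op type -> places it has kernels for.
std::map<std::string, std::vector<Place>> all_kernels = {
    {"mul", {Place::kCPU, Place::kGPU}},
    {"crf_decoding", {Place::kCPU}},
};

bool OpSupportGPU(const std::string& op_type) {
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    return true;  // no kernels registered: a control op, assumed to run anywhere
  }
  for (Place p : it->second) {
    if (p == Place::kGPU) return true;
  }
  return false;
}

int main() {
  std::cout << OpSupportGPU("mul") << " "           // 1
            << OpSupportGPU("crf_decoding") << " "  // 0
            << OpSupportGPU("while") << "\n";       // 1 (not in the table)
  return 0;
}
```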
*/ namespace paddle { namespace framework { -using ProgDescMap = - std::unordered_map>; -static ProgDescMap *g_bind_map = nullptr; - -ProgramDescBind &ProgramDescBind::Instance(ProgramDesc *prog) { - if (g_bind_map == nullptr) { - g_bind_map = new ProgDescMap(); - } - auto &map = *g_bind_map; - auto &ptr = map[prog]; - - if (ptr == nullptr) { - ptr.reset(new ProgramDescBind(prog)); - } - return *ptr; -} - BlockDescBind *ProgramDescBind::AppendBlock(const BlockDescBind &parent) { - auto *b = prog_->add_blocks(); + auto *b = prog_.add_blocks(); b->set_parent_idx(parent.ID()); - b->set_idx(prog_->blocks_size() - 1); + b->set_idx(prog_.blocks_size() - 1); blocks_.emplace_back(new BlockDescBind(this, b)); return blocks_.back().get(); } @@ -47,13 +30,22 @@ ProgramDesc *ProgramDescBind::Proto() { for (auto &block : blocks_) { block->Flush(); } - return prog_; + return &prog_; +} + +ProgramDescBind::ProgramDescBind() { + auto *block = prog_.mutable_blocks()->Add(); + block->set_idx(kRootBlockIndex); + block->set_parent_idx(kNoneBlockIndex); + blocks_.emplace_back(new BlockDescBind(this, block)); } -ProgramDescBind::ProgramDescBind(ProgramDesc *prog) { - prog_ = prog; - for (auto &block : *prog->mutable_blocks()) { - blocks_.emplace_back(new BlockDescBind(this, &block)); +ProgramDescBind::ProgramDescBind(const ProgramDescBind &o) { + prog_ = o.prog_; + + for (int i = 0; i < prog_.blocks_size(); ++i) { + auto *block = prog_.mutable_blocks(i); + blocks_.emplace_back(new BlockDescBind(*o.blocks_[i], block, this)); } } } // namespace framework diff --git a/paddle/framework/program_desc.h b/paddle/framework/program_desc.h index f29b1c54e7160ac477229f64e5471939131a2d8f..dc4cd7cc735b5e4e3466d9b82dc5eb8647c80ef9 100644 --- a/paddle/framework/program_desc.h +++ b/paddle/framework/program_desc.h @@ -17,6 +17,7 @@ limitations under the License. */ #include #include #include "paddle/framework/framework.pb.h" +#include "paddle/framework/proto_desc.h" #include "paddle/platform/macros.h" namespace paddle { @@ -26,7 +27,9 @@ class BlockDescBind; class ProgramDescBind { public: - static ProgramDescBind &Instance(ProgramDesc *prog); + ProgramDescBind(); + + ProgramDescBind(const ProgramDescBind &o); BlockDescBind *AppendBlock(const BlockDescBind &parent); @@ -37,14 +40,9 @@ class ProgramDescBind { ProgramDesc *Proto(); private: - explicit ProgramDescBind(ProgramDesc *prog); - - // Not owned - ProgramDesc *prog_; + ProgramDesc prog_; std::vector> blocks_; - - DISABLE_COPY_AND_ASSIGN(ProgramDescBind); }; } // namespace framework } // namespace paddle diff --git a/paddle/framework/program_desc_test.cc b/paddle/framework/program_desc_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..c9709a2d3f1d9e0be2bda1e8e9e7835ca49141b1 --- /dev/null +++ b/paddle/framework/program_desc_test.cc @@ -0,0 +1,83 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
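Dropping the `Instance()` singleton makes `ProgramDescBind` an ordinary value: the new copy constructor deep-copies every block (and, through `BlockDescBind`'s copy path, every op and var) instead of sharing pointers. A minimal sketch of that deep-copy shape, with simplified stand-in types:

```cpp
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct OpDesc {
  std::string type;
};

// Stand-in for BlockDescBind: owns its op descriptions.
struct Block {
  std::vector<std::unique_ptr<OpDesc>> ops;
};

// Stand-in for ProgramDescBind with value semantics and a deep-copy ctor.
struct Program {
  std::vector<std::unique_ptr<Block>> blocks;

  Program() { blocks.emplace_back(new Block()); }  // root block, idx 0

  Program(const Program& o) {
    for (const auto& b : o.blocks) {
      auto copy = std::make_unique<Block>();
      for (const auto& op : b->ops) {
        copy->ops.emplace_back(new OpDesc(*op));  // copy, do not alias
      }
      blocks.push_back(std::move(copy));
    }
  }
};

int main() {
  Program p;
  p.blocks[0]->ops.emplace_back(new OpDesc{"mul"});

  Program q(p);                                    // deep copy
  q.blocks[0]->ops[0]->type = "sum";               // mutate the copy only
  std::cout << p.blocks[0]->ops[0]->type << " "    // mul
            << q.blocks[0]->ops[0]->type << "\n";  // sum
  return 0;
}
```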
*/ + +#include "paddle/framework/program_desc.h" +#include "gtest/gtest.h" +#include "paddle/framework/block_desc.h" + +namespace paddle { +namespace framework { +TEST(ProgramDesc, copy_ctor) { + ProgramDescBind program; + auto* global_block = program.Block(0); + auto* x = global_block->Var("X"); + x->SetType(VarDesc_VarType_LOD_TENSOR); + x->SetLoDLevel(0); + x->SetDataType(FP32); + x->SetShape({1000, 784}); + + auto* y = global_block->Var("Y"); + y->SetType(VarDesc_VarType_LOD_TENSOR); + y->SetLoDLevel(0); + y->SetDataType(FP32); + y->SetShape({784, 100}); + + auto* op = global_block->AppendOp(); + op->SetType("mul"); + op->SetInput("X", {x->Name()}); + op->SetInput("Y", {y->Name()}); + + auto* out = global_block->Var("Out"); + out->SetType(VarDesc_VarType_LOD_TENSOR); + op->SetOutput("Y", {out->Name()}); + + ProgramDescBind program_copy(program); + + auto* global_block_copy = program_copy.Block(0); + ASSERT_NE(global_block, global_block_copy); + + auto assert_same_var = [&](const std::string& name, VarDescBind* var_before) { + ASSERT_TRUE(global_block_copy->HasVar(name)); + auto* copy = global_block_copy->Var(name); + ASSERT_NE(copy, var_before); + ASSERT_EQ(copy->Name(), var_before->Name()); + ASSERT_EQ(copy->GetType(), var_before->GetType()); + ASSERT_EQ(copy->Shape(), var_before->Shape()); + ASSERT_EQ(copy->Proto()->SerializeAsString(), + var_before->Proto()->SerializeAsString()); + }; + + ASSERT_EQ(global_block->LocalVarNames(), global_block_copy->LocalVarNames()); + ASSERT_EQ(3, global_block_copy->LocalVarNames().size()); + assert_same_var("X", x); + assert_same_var("Y", y); + assert_same_var("Out", out); + + for (size_t i = 0; i < global_block->OpSize(); ++i) { + auto op_origin = global_block->Op(i); + auto op_copy = global_block->Op(i); + + ASSERT_EQ(op_origin->Type(), op_copy->Type()); + ASSERT_EQ(op_origin->Inputs(), op_copy->Inputs()); + ASSERT_EQ(op_origin->Outputs(), op_copy->Outputs()); + + ASSERT_EQ(op_copy->Proto()->SerializeAsString(), + op_origin->Proto()->SerializeAsString()); + } + + // Not check block's protostr are same it because the order of vars could be + // different and it is correct. +} +} // namespace framework +} // namespace paddle diff --git a/paddle/operators/feed_op.cu b/paddle/framework/proto_desc.h similarity index 61% rename from paddle/operators/feed_op.cu rename to paddle/framework/proto_desc.h index 7b6a2ac91e7b8d306804ca3d27b1eaf8177302f9..fa01224fefce50eb3688ff407f0a7c948c5b7cfc 100644 --- a/paddle/operators/feed_op.cu +++ b/paddle/framework/proto_desc.h @@ -4,7 +4,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at -http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -12,7 +12,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/feed_op.h" +#pragma once -namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(feed, ops::FeedKernel); +namespace paddle { +namespace framework { + +// The Index of first Block in Program. also called root block. +constexpr int kRootBlockIndex = 0; +// The Parent Index of root Block, this block does not exist. 
+constexpr int kNoneBlockIndex = -1; + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/prune.cc b/paddle/framework/prune.cc new file mode 100644 index 0000000000000000000000000000000000000000..95833692925af4477fe575d6bd908a2ce7653c1b --- /dev/null +++ b/paddle/framework/prune.cc @@ -0,0 +1,109 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/framework/prune.h" + +#include +#include +#include +#include + +#include + +namespace paddle { +namespace framework { + +const std::string kFeedOpType = "feed"; +const std::string kFetchOpType = "fetch"; + +bool HasDependentVar(const OpDesc& op_desc, + const std::set& dependent_vars) { + for (auto& var : op_desc.outputs()) { + for (auto& argu : var.arguments()) { + if (dependent_vars.count(argu) != 0) { + return true; + } + } + } + return false; +} + +bool IsTarget(const OpDesc& op_desc) { + if (op_desc.has_is_target()) { + return op_desc.is_target(); + } + return false; +} + +void prune_impl(const ProgramDesc& input, ProgramDesc& output, int block_id) { + // TODO(tonyyang-svail): + // - will change to use multiple blocks for RNN op and Cond Op + + auto& block = input.blocks(block_id); + auto& ops = block.ops(); + + bool expect_feed = true; + for (auto& op_desc : ops) { + PADDLE_ENFORCE(op_desc.type() != kFeedOpType || expect_feed, + "All FeedOps are at the beginning of the ProgramDesc"); + expect_feed = (op_desc.type() == kFeedOpType); + } + + bool expect_fetch = true; + for (auto op_iter = ops.rbegin(); op_iter != ops.rend(); ++op_iter) { + auto& op_desc = *op_iter; + PADDLE_ENFORCE(op_desc.type() != kFetchOpType || expect_fetch, + "All FetchOps must at the end of the ProgramDesc"); + expect_fetch = (op_desc.type() == kFetchOpType); + } + + std::set dependent_vars; + std::vector should_run; + for (auto op_iter = ops.rbegin(); op_iter != ops.rend(); ++op_iter) { + auto& op_desc = *op_iter; + + if (IsTarget(op_desc) || HasDependentVar(op_desc, dependent_vars)) { + // insert its input to the dependency graph + for (auto& var : op_desc.inputs()) { + for (auto& argu : var.arguments()) { + dependent_vars.insert(argu); + } + } + + should_run.push_back(true); + } else { + should_run.push_back(false); + } + } + + // since we are traversing the ProgramDesc in reverse order + // we reverse the should_run vector + std::reverse(should_run.begin(), should_run.end()); + + output = input; + auto* op_field = output.mutable_blocks(block_id)->mutable_ops(); + op_field->Clear(); + for (size_t i = 0; i < should_run.size(); ++i) { + if (should_run[i]) { + *op_field->Add() = input.blocks(block_id).ops(i); + } + } +} + +void Prune(const ProgramDesc& input, ProgramDesc& output) { + prune_impl(input, output, 0); +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/operators/fetch_op.cu b/paddle/framework/prune.h similarity index 65% rename from paddle/operators/fetch_op.cu rename to paddle/framework/prune.h index 
ca39d24c791ded71149777acc53e3b5cc240329f..9414ac64f9491c07aabb216a4c81dfe6e78e8043 100644 --- a/paddle/operators/fetch_op.cu +++ b/paddle/framework/prune.h @@ -4,7 +4,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at -http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -12,7 +12,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/fetch_op.h" +#pragma once -namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(fetch, ops::FetchKernel); +#include "paddle/framework/framework.pb.h" +#include "paddle/platform/enforce.h" + +namespace paddle { +namespace framework { + +void Prune(const ProgramDesc& input, ProgramDesc& output); + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/prune_test.cc b/paddle/framework/prune_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..3ab4b43d9256af5880083b00df446c451e3f598b --- /dev/null +++ b/paddle/framework/prune_test.cc @@ -0,0 +1,138 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
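The new `Prune` pass walks the ops in reverse, keeps an op if it is a target or writes a variable that some already-kept op reads, and then adds the kept op's inputs to the dependency set. A self-contained sketch of that marking pass on a simplified op description:

```cpp
#include <algorithm>
#include <iostream>
#include <set>
#include <string>
#include <vector>

// Simplified op description: inputs, outputs, and the is_target flag.
struct OpDesc {
  std::vector<std::string> inputs, outputs;
  bool is_target;
};

std::vector<bool> MarkShouldRun(const std::vector<OpDesc>& ops) {
  std::set<std::string> dependent_vars;
  std::vector<bool> should_run;
  for (auto it = ops.rbegin(); it != ops.rend(); ++it) {
    bool writes_dependent = std::any_of(
        it->outputs.begin(), it->outputs.end(),
        [&](const std::string& v) { return dependent_vars.count(v) != 0; });
    if (it->is_target || writes_dependent) {
      dependent_vars.insert(it->inputs.begin(), it->inputs.end());
      should_run.push_back(true);
    } else {
      should_run.push_back(false);
    }
  }
  std::reverse(should_run.begin(), should_run.end());  // we traversed backwards
  return should_run;
}

int main() {
  // a -> b -> c, plus an unrelated op producing d; the target is the op making c.
  std::vector<OpDesc> ops = {
      {{"a"}, {"b"}, false},
      {{"x"}, {"d"}, false},
      {{"b"}, {"c"}, true},
  };
  for (bool keep : MarkShouldRun(ops)) std::cout << keep << " ";  // 1 0 1
  std::cout << "\n";
  return 0;
}
```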
*/ + +#include "paddle/framework/prune.h" + +#include "paddle/framework/attribute.h" +#include "paddle/framework/operator.h" +#include "paddle/operators/net_op.h" + +#include "paddle/framework/block_desc.h" +#include "paddle/framework/op_desc.h" +#include "paddle/framework/program_desc.h" + +#include + +namespace f = paddle::framework; +namespace ops = paddle::operators; + +void AddOp(const std::string &type, const f::VariableNameMap &inputs, + const f::VariableNameMap &outputs, f::AttributeMap attrs, + paddle::framework::BlockDescBind *block) { + // insert output + for (auto kv : outputs) { + for (auto v : kv.second) { + auto var = block->Var(v); + var->SetDataType(paddle::framework::DataType::FP32); + } + } + + // insert op + auto op = block->AppendOp(); + op->SetType(type); + for (auto &kv : inputs) { + op->SetInput(kv.first, kv.second); + } + for (auto &kv : outputs) { + op->SetOutput(kv.first, kv.second); + } + op->SetAttrMap(attrs); +} + +TEST(Prune, one_operator) { + f::ProgramDescBind program; + f::BlockDescBind *block = program.Block(0); + + AddOp("one_one", {{"input", {"a"}}}, {{"output", {"b"}}}, {}, block); + + f::ProgramDesc *pdesc = program.Proto(); + f::ProgramDesc pruned; + + Prune(*pdesc, pruned); + PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), 0); + + pdesc->mutable_blocks(0)->mutable_ops(0)->set_is_target(true); + Prune(*pdesc, pruned); + PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), 1); +} + +TEST(Prune, forward) { + f::ProgramDescBind program; + f::BlockDescBind *block = program.Block(0); + + AddOp("one_one", {{"input", {"a"}}}, {{"output", {"b"}}}, {}, block); + AddOp("one_one", {{"input", {"b"}}}, {{"output", {"c"}}}, {}, block); + AddOp("one_one", {{"input", {"c"}}}, {{"output", {"d"}}}, {}, block); + AddOp("one_one", {{"input", {"d"}}}, {{"output", {"e"}}}, {}, block); + + f::ProgramDesc *pdesc = program.Proto(); + + for (int i = 0; i < pdesc->blocks(0).ops_size(); ++i) { + f::ProgramDesc pruned; + pdesc->mutable_blocks(0)->mutable_ops(i)->set_is_target(true); + Prune(*pdesc, pruned); + PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), i + 1); + } +} + +TEST(Prune, multi_input_op) { + f::ProgramDescBind program; + f::BlockDescBind *block = program.Block(0); + + AddOp("one_one", {{"input", {"a0"}}}, {{"output", {"b0"}}}, {}, block); + AddOp("one_one", {{"input", {"a1"}}}, {{"output", {"b1"}}}, {}, block); + AddOp("one_one", {{"input", {"a2"}}}, {{"output", {"b2"}}}, {}, block); + AddOp("three_one", {{"input", {"b0", "b1", "b2"}}}, {{"output", {"c"}}}, {}, + block); + + f::ProgramDesc *pdesc = program.Proto(); + pdesc->mutable_blocks(0)->mutable_ops(3)->set_is_target(true); + + f::ProgramDesc pruned; + Prune(*pdesc, pruned); + PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), 4); +} + +TEST(Prune, multi_output_op) { + f::ProgramDescBind program; + f::BlockDescBind *block = program.Block(0); + + AddOp("one_two", {{"input", {"a"}}}, {{"output", {"b", "c"}}}, {}, block); + AddOp("one_one", {{"input", {"b"}}}, {{"output", {"b1"}}}, {}, block); + AddOp("one_one", {{"input", {"c"}}}, {{"output", {"c1"}}}, {}, block); + + f::ProgramDesc *pdesc = program.Proto(); + pdesc->mutable_blocks(0)->mutable_ops(2)->set_is_target(true); + + f::ProgramDesc pruned; + Prune(*pdesc, pruned); + PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), 2); +} + +TEST(Prune, multi_target) { + f::ProgramDescBind program; + f::BlockDescBind *block = program.Block(0); + + AddOp("one_two", {{"input", {"a"}}}, {{"output", {"b", "c"}}}, {}, block); + AddOp("one_one", {{"input", {"b"}}}, {{"output", {"b1"}}}, {}, 
block); + AddOp("one_one", {{"input", {"c"}}}, {{"output", {"c1"}}}, {}, block); + + f::ProgramDesc *pdesc = program.Proto(); + pdesc->mutable_blocks(0)->mutable_ops(1)->set_is_target(true); + pdesc->mutable_blocks(0)->mutable_ops(2)->set_is_target(true); + + f::ProgramDesc pruned; + Prune(*pdesc, pruned); + PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), 3); +} diff --git a/paddle/framework/scope.cc b/paddle/framework/scope.cc index 8f8a53eec8f947b088124a3f034fedb17fd86a48..ac3ac649f96c492852a3bd69be69487736a4ddd7 100644 --- a/paddle/framework/scope.cc +++ b/paddle/framework/scope.cc @@ -65,16 +65,11 @@ void Scope::DropKids() { kids_.clear(); } -std::once_flag feed_variable_flag; - -framework::Scope& GetGlobalScope() { - static std::unique_ptr g_scope{nullptr}; - std::call_once(feed_variable_flag, [&]() { - g_scope.reset(new framework::Scope()); - g_scope->Var("feed_value"); - g_scope->Var("fetch_value"); - }); - return *(g_scope.get()); +void Scope::DeleteScope(Scope* scope) { + auto it = std::find(this->kids_.begin(), this->kids_.end(), scope); + PADDLE_ENFORCE(it != this->kids_.end(), "Cannot find %p as kid scope", scope); + this->kids_.erase(it); + delete scope; } } // namespace framework diff --git a/paddle/framework/scope.h b/paddle/framework/scope.h index a7fce3514b163d78bf96b3cc19d188744a383395..7206b53068bac3e16db385abc76359dc45a582df 100644 --- a/paddle/framework/scope.h +++ b/paddle/framework/scope.h @@ -59,6 +59,8 @@ class Scope { /// Find the scope or an ancestor scope that contains the given variable. const Scope* FindScope(const Variable* var) const; + void DeleteScope(Scope* scope); + /// Drop all kids scopes belonged to this scope. void DropKids(); @@ -72,8 +74,5 @@ class Scope { DISABLE_COPY_AND_ASSIGN(Scope); }; - -framework::Scope& GetGlobalScope(); - } // namespace framework } // namespace paddle diff --git a/paddle/framework/selected_rows.h b/paddle/framework/selected_rows.h index f9f563051e264ae7ed7cf3c07c0065522b2bbe2e..cd9078137132669c7265ce3972f2c6df996fa366 100644 --- a/paddle/framework/selected_rows.h +++ b/paddle/framework/selected_rows.h @@ -10,6 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include "paddle/framework/lod_tensor.h" #include "paddle/framework/tensor.h" namespace paddle { @@ -34,9 +35,9 @@ class SelectedRows { void set_height(int64_t height) { height_ = height; } - const std::vector& rows() const { return rows_; } + const Vector& rows() const { return rows_; } - void set_rows(const Vector& rows) { rows_ = rows; } + void set_rows(const Vector& rows) { rows_ = rows; } DDim GetCompleteDims() const { std::vector dims = vectorize(value_->dims()); @@ -45,7 +46,10 @@ class SelectedRows { } private: - std::vector rows_; + // Notice: rows can contain duplicates. We can have {0, 4, 7, 0, 5, 7, 9} here. + // SelectedRows are simply concatenated when added together. The duplicate + // rows are only resolved when a SelectedRows is added to a Tensor.
+ Vector rows_; std::unique_ptr value_{nullptr}; int64_t height_; }; diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index 3304d857ae2600bd94013b6672b88d43d1d188c6..3a2bdaf086372d5d0b07cf260feb2ee6f3cfb508 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -60,6 +60,10 @@ class Tensor { template inline T* mutable_data(platform::Place place); + inline void* mutable_data(platform::Place place, std::type_index type); + + inline void* mutable_data(platform::Place place); + /** * @brief Return a pointer to mutable memory block. * @@ -81,7 +85,6 @@ class Tensor { inline Tensor& Resize(const DDim& dims); /*! The internal of two tensors share the same memory block. */ - template inline Tensor& ShareDataWith(const Tensor& src); /** @@ -96,7 +99,6 @@ class Tensor { // TODO(qijun): https://github.com/PaddlePaddle/Paddle/issues/4647 // Remove `CopyFrom` and `CopyFromVector` from Tensor interface // and make them global functions - template inline void CopyFrom(const Tensor& src, const platform::Place& dst_place, const platform::DeviceContext& ctx); @@ -119,7 +121,6 @@ class Tensor { * @param[in] begin_idx The begin index of the slice. * @param[in] end_idx The end index of the slice. */ - template inline Tensor Slice(const int& begin_idx, const int& end_idx) const; platform::Place place() const { @@ -130,7 +131,6 @@ class Tensor { std::type_index type() const { return holder_->type(); } private: - template inline void check_memory_size() const; private: @@ -139,20 +139,22 @@ class Tensor { * parameter of Variable. */ struct Placeholder { - virtual ~Placeholder() {} + virtual ~Placeholder() = default; virtual void* ptr() const = 0; virtual size_t size() const = 0; virtual std::type_index type() const = 0; virtual platform::Place place() const = 0; + virtual void set_type(std::type_index type) = 0; }; - template + template struct PlaceholderImpl : public Placeholder { - PlaceholderImpl(Place place, size_t size) - : ptr_(static_cast(memory::Alloc(place, size)), - memory::PODDeleter(place)), + PlaceholderImpl(Place place, size_t size, std::type_index type) + : ptr_(static_cast(memory::Alloc(place, size)), + memory::PODDeleter(place)), place_(place), - size_(size) { + size_(size), + type_(type) { PADDLE_ENFORCE_NOT_NULL(ptr_, "Insufficient %s memory to allocation.", (is_cpu_place(place_) ? "CPU" : "GPU")); } @@ -160,16 +162,20 @@ class Tensor { virtual size_t size() const { return size_; } virtual platform::Place place() const { return place_; } virtual void* ptr() const { return static_cast(ptr_.get()); } - virtual std::type_index type() const { return std::type_index(typeid(T)); } + virtual std::type_index type() const { return type_; } + virtual void set_type(std::type_index type) { type_ = type; } /*! the pointer of memory block. */ - std::unique_ptr> ptr_; + std::unique_ptr> ptr_; /*! the place of memory block. */ platform::Place place_; /*! the size of memory block. */ size_t size_; + + /* the current type of memory */ + std::type_index type_; }; /*! holds the memory block if allocated. 
*/ diff --git a/paddle/framework/tensor_array.cc b/paddle/framework/tensor_array.cc index 06459cbfd7b8c19c176452ff73c9f3a81ba1dc03..4c82c3638351c41df26503e2a26b5a4bb5822a67 100644 --- a/paddle/framework/tensor_array.cc +++ b/paddle/framework/tensor_array.cc @@ -106,8 +106,8 @@ void TensorArray::Write(size_t index, const LoDTensor& value) { values_[index].Resize(value.dims()); values_[index].mutable_data(platform::CPUPlace()); - values_[index].CopyFrom(value, platform::CPUPlace(), - platform::CPUDeviceContext()); + values_[index].CopyFrom(value, platform::CPUPlace(), + platform::CPUDeviceContext()); } void TensorArray::WriteShared(size_t index, const LoDTensor& value) { @@ -116,7 +116,7 @@ void TensorArray::WriteShared(size_t index, const LoDTensor& value) { values_.resize(index + 1); } - values_[index].ShareDataWith(value); + values_[index].ShareDataWith(value); } LoDTensor TensorArray::Pack(size_t level, const std::vector& meta, @@ -163,9 +163,9 @@ LoDTensor TensorArray::Stack() const { result.mutable_data(platform::CPUPlace()); for (size_t idx = 0; idx < size(); idx++) { - result.Slice(idx, idx + 1) - .CopyFrom(Read(idx), platform::CPUPlace(), - platform::CPUDeviceContext()); + result.Slice(idx, idx + 1) + .CopyFrom(Read(idx), platform::CPUPlace(), + platform::CPUDeviceContext()); } return result; } @@ -191,13 +191,12 @@ void TensorArray::Unstack(const LoDTensor& source, bool data_shared) const { auto& value = values_[elem]; if (data_shared) { // share memory - value.ShareDataWith(source.Slice(elem, elem + 1)); + value.ShareDataWith(source.Slice(elem, elem + 1)); } else { // copy value.Resize(value_dims); - value.CopyFrom(source.Slice(elem, elem + 1), - platform::CPUPlace(), - platform::CPUDeviceContext()); + value.CopyFrom(source.Slice(elem, elem + 1), platform::CPUPlace(), + platform::CPUDeviceContext()); } } } @@ -242,11 +241,10 @@ LoDTensor DynamicBatchUnpacker::GetBatch(size_t index) { for (size_t i = 0; i < indice.size(); i++) { auto index = indice[i]; - auto target = result.Slice(i, i + 1); - auto slice = source->Slice(index, index + 1); + auto target = result.Slice(i, i + 1); + auto slice = source->Slice(index, index + 1); - target.CopyFrom(slice, platform::CPUPlace(), - platform::CPUDeviceContext()); + target.CopyFrom(slice, platform::CPUPlace(), platform::CPUDeviceContext()); } return result; @@ -277,10 +275,10 @@ LoDTensor PackDynamicBatch(const std::vector& source, // target is result[index] auto index = seq_meta.begin + batch_id; if (index >= seq_meta.end) break; - auto source_ = source[batch_id].Slice(seq_id, seq_id + 1); - auto target = result.Slice(index, index + 1); - target.CopyFrom(source_, platform::CPUPlace(), - platform::CPUDeviceContext()); + auto source_ = source[batch_id].Slice(seq_id, seq_id + 1); + auto target = result.Slice(index, index + 1); + target.CopyFrom(source_, platform::CPUPlace(), + platform::CPUDeviceContext()); } } diff --git a/paddle/framework/tensor_array_test.cc b/paddle/framework/tensor_array_test.cc index d9f52509cdd1b79f6d53b5d4922f9e44279de08b..9470ac5e6ed714d5ba63f3743e683af7f8edd4b0 100644 --- a/paddle/framework/tensor_array_test.cc +++ b/paddle/framework/tensor_array_test.cc @@ -91,7 +91,7 @@ class TensorArrayPackTester : public ::testing::Test { size_t begin = level[i]; size_t end = level[i + 1]; for (size_t j = begin; j < end; j++) { - auto record = source.Slice(j, j + 1); + auto record = source.Slice(j, j + 1); for (int dim = 0; dim < 128; dim++) { record.mutable_data(platform::CPUPlace())[dim] = j - begin; } diff --git 
a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index ce73e0a9edbe340f1165e2dbcba8c976c55df348..f6e801bbb4a056b5590da95a4b140cb90638f322 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -19,12 +19,50 @@ limitations under the License. */ namespace paddle { namespace framework { +template +struct SizeOfTypeFunctor; + template +struct SizeOfTypeFunctor { + size_t operator()(std::type_index type) const { + if (typeid(T).hash_code() == type.hash_code()) { + return sizeof(T); + } else { + return 0UL; + } + } +}; + +template <> +struct SizeOfTypeFunctor<> { + size_t operator()(std::type_index type) const { return 0UL; } +}; + +template +struct SizeOfTypeFunctor { + size_t operator()(std::type_index type) const { + SizeOfTypeFunctor head; + size_t head_size = head(type); + if (head_size != 0) { + return head_size; + } + SizeOfTypeFunctor tail; + return tail(type); + } +}; + +static inline size_t SizeOfType(std::type_index type) { + SizeOfTypeFunctor functor; + size_t size = functor(type); + PADDLE_ENFORCE(size != 0UL, "Cannot get size of type %s", type.name()); + return size; +} + inline void Tensor::check_memory_size() const { PADDLE_ENFORCE_NOT_NULL( holder_, "Tensor holds no memory. Call Tensor::mutable_data first."); PADDLE_ENFORCE_GE( - holder_->size(), numel() * sizeof(T) + offset_, + holder_->size(), numel() * SizeOfType(type()) + offset_, "Tensor's dims_ is out of bound. Call Tensor::mutable_data " "first to re-allocate memory.\n" "or maybe the required data-type mismatches the data already stored."); @@ -32,14 +70,23 @@ inline void Tensor::check_memory_size() const { template inline const T* Tensor::data() const { - check_memory_size(); + check_memory_size(); + PADDLE_ENFORCE(std::is_same::value || + holder_->type().hash_code() == typeid(T).hash_code(), + "Tensor holds the wrong type, it holds %s", + this->holder_->type().name()); + return reinterpret_cast( reinterpret_cast(holder_->ptr()) + offset_); } template inline T* Tensor::data() { - check_memory_size(); + check_memory_size(); + PADDLE_ENFORCE(std::is_same::value || + holder_->type().hash_code() == typeid(T).hash_code(), + "Tensor holds the wrong type, it holds %s", + this->holder_->type().name()); return reinterpret_cast(reinterpret_cast(holder_->ptr()) + offset_); } @@ -54,51 +101,62 @@ inline T* Tensor::mutable_data(DDim dims, platform::Place place) { template inline T* Tensor::mutable_data(platform::Place place) { static_assert(std::is_pod::value, "T must be POD"); + return reinterpret_cast(mutable_data(place, typeid(T))); +} + +inline void* Tensor::mutable_data(platform::Place place, std::type_index type) { + if (holder_ != nullptr) { + holder_->set_type(type); + } PADDLE_ENFORCE_GT(numel(), 0, "Tensor's numel must be larger than zero to call " "Tensor::mutable_data. 
Call Tensor::set_dim first."); + int64_t size = numel() * SizeOfType(type); /* some versions of boost::variant don't have operator!= */ - int64_t size = numel() * sizeof(T); if (holder_ == nullptr || !(holder_->place() == place) || holder_->size() < size + offset_) { if (platform::is_cpu_place(place)) { - holder_.reset(new PlaceholderImpl( - boost::get(place), size)); + holder_.reset(new PlaceholderImpl( + boost::get(place), size, type)); } else if (platform::is_gpu_place(place)) { #ifndef PADDLE_WITH_CUDA PADDLE_THROW("'GPUPlace' is not supported in CPU only device."); } #else - holder_.reset(new PlaceholderImpl( - boost::get(place), size)); + holder_.reset(new PlaceholderImpl( + boost::get(place), size, type)); } #endif offset_ = 0; } - return reinterpret_cast(reinterpret_cast(holder_->ptr()) + - offset_); + return reinterpret_cast(reinterpret_cast(holder_->ptr()) + + offset_); +} + +inline void* Tensor::mutable_data(platform::Place place) { + PADDLE_ENFORCE(this->holder_ != nullptr, + "Cannot invoke mutable data if current hold nothing"); + return mutable_data(place, holder_->type()); } -template inline Tensor& Tensor::ShareDataWith(const Tensor& src) { - src.check_memory_size(); + src.check_memory_size(); *this = src; return *this; } -template inline void Tensor::CopyFrom(const Tensor& src, const platform::Place& dst_place, const platform::DeviceContext& ctx) { - src.check_memory_size(); + src.check_memory_size(); Resize(src.dims()); auto src_place = src.holder_->place(); - auto src_ptr = static_cast(src.data()); + auto src_ptr = src.data(); - auto dst_ptr = static_cast(mutable_data(dst_place)); + auto dst_ptr = mutable_data(dst_place, src.type()); - auto size = src.numel() * sizeof(T); + auto size = src.numel() * SizeOfType(src.type()); if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) { memory::Copy(boost::get(dst_place), dst_ptr, @@ -165,9 +223,8 @@ inline void Tensor::CopyFromVector(const std::vector& src, #endif } -template inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { - check_memory_size(); + check_memory_size(); PADDLE_ENFORCE_GE(begin_idx, 0, "Slice begin index is less than zero."); PADDLE_ENFORCE_LE(end_idx, dims_[0], "Slice end index is out of bound."); PADDLE_ENFORCE_LT(begin_idx, end_idx, @@ -182,7 +239,7 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { DDim dst_dims = dims_; dst_dims[0] = end_idx - begin_idx; dst.Resize(dst_dims); - dst.offset_ = offset_ + begin_idx * base * sizeof(T); + dst.offset_ = offset_ + begin_idx * base * SizeOfType(type()); return dst; } } @@ -196,10 +253,9 @@ inline const DDim& Tensor::dims() const { return dims_; } inline int64_t Tensor::numel() const { return product(dims_); } -template inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) { Tensor res; - res.ShareDataWith(src); + res.ShareDataWith(src); res.Resize(flatten_to_2d(src.dims(), num_col_dims)); return res; } diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc index 0b62fe08ce9e592384e55432861a943403453bb7..1bb0fb71b079940d35a995b78e04a531c074a8b2 100644 --- a/paddle/framework/tensor_test.cc +++ b/paddle/framework/tensor_test.cc @@ -108,7 +108,7 @@ TEST(Tensor, ShareDataWith) { // Try to share data form uninitialized tensor bool caught = false; try { - dst_tensor.ShareDataWith(src_tensor); + dst_tensor.ShareDataWith(src_tensor); } catch (paddle::platform::EnforceNotMet err) { caught = true; std::string msg = @@ -122,7 +122,7 @@ TEST(Tensor, ShareDataWith) 
{ ASSERT_TRUE(caught); src_tensor.mutable_data(make_ddim({2, 3, 4}), CPUPlace()); - dst_tensor.ShareDataWith(src_tensor); + dst_tensor.ShareDataWith(src_tensor); ASSERT_EQ(src_tensor.data(), dst_tensor.data()); } @@ -131,7 +131,7 @@ TEST(Tensor, ShareDataWith) { Tensor src_tensor; Tensor dst_tensor; src_tensor.mutable_data(make_ddim({2, 3, 4}), GPUPlace()); - dst_tensor.ShareDataWith(src_tensor); + dst_tensor.ShareDataWith(src_tensor); ASSERT_EQ(src_tensor.data(), dst_tensor.data()); } #endif @@ -143,7 +143,7 @@ TEST(Tensor, Slice) { { Tensor src_tensor; src_tensor.mutable_data(make_ddim({5, 3, 4}), CPUPlace()); - Tensor slice_tensor = src_tensor.Slice(1, 3); + Tensor slice_tensor = src_tensor.Slice(1, 3); DDim slice_dims = slice_tensor.dims(); ASSERT_EQ(arity(slice_dims), 3); EXPECT_EQ(slice_dims[0], 2); @@ -167,7 +167,7 @@ TEST(Tensor, Slice) { { Tensor src_tensor; src_tensor.mutable_data(make_ddim({6, 9}), GPUPlace()); - Tensor slice_tensor = src_tensor.Slice(2, 6); + Tensor slice_tensor = src_tensor.Slice(2, 6); DDim slice_dims = slice_tensor.dims(); ASSERT_EQ(arity(slice_dims), 2); EXPECT_EQ(slice_dims[0], 4); @@ -202,7 +202,7 @@ TEST(Tensor, CopyFrom) { memcpy(src_ptr, arr, 9 * sizeof(int)); auto cpu_place = new paddle::platform::CPUPlace(); - dst_tensor.CopyFrom(src_tensor, *cpu_place, cpu_ctx); + dst_tensor.CopyFrom(src_tensor, *cpu_place, cpu_ctx); const int* dst_ptr = dst_tensor.data(); ASSERT_NE(src_ptr, dst_ptr); @@ -210,8 +210,8 @@ TEST(Tensor, CopyFrom) { EXPECT_EQ(src_ptr[i], dst_ptr[i]); } - Tensor slice_tensor = src_tensor.Slice(1, 2); - dst_tensor.CopyFrom(slice_tensor, *cpu_place, cpu_ctx); + Tensor slice_tensor = src_tensor.Slice(1, 2); + dst_tensor.CopyFrom(slice_tensor, *cpu_place, cpu_ctx); const int* slice_ptr = slice_tensor.data(); dst_ptr = dst_tensor.data(); ASSERT_NE(dst_ptr, slice_ptr); @@ -233,11 +233,11 @@ TEST(Tensor, CopyFrom) { // CPU Tensor to GPU Tensor auto gpu_place = new paddle::platform::GPUPlace(0); CUDADeviceContext gpu_ctx(*gpu_place); - gpu_tensor.CopyFrom(src_tensor, *gpu_place, gpu_ctx); + gpu_tensor.CopyFrom(src_tensor, *gpu_place, gpu_ctx); // GPU Tensor to CPU Tensor auto cpu_place = new paddle::platform::CPUPlace(); - dst_tensor.CopyFrom(gpu_tensor, *cpu_place, gpu_ctx); + dst_tensor.CopyFrom(gpu_tensor, *cpu_place, gpu_ctx); // Sync before Compare Tensors gpu_ctx.Wait(); @@ -247,13 +247,13 @@ TEST(Tensor, CopyFrom) { EXPECT_EQ(src_ptr[i], dst_ptr[i]); } - Tensor slice_tensor = src_tensor.Slice(1, 2); + Tensor slice_tensor = src_tensor.Slice(1, 2); // CPU Slice Tensor to GPU Tensor - gpu_tensor.CopyFrom(slice_tensor, *gpu_place, gpu_ctx); + gpu_tensor.CopyFrom(slice_tensor, *gpu_place, gpu_ctx); // GPU Tensor to CPU Tensor - dst_tensor.CopyFrom(gpu_tensor, *cpu_place, gpu_ctx); + dst_tensor.CopyFrom(gpu_tensor, *cpu_place, gpu_ctx); // Sync before Compare Slice Tensors gpu_ctx.Wait(); @@ -320,7 +320,7 @@ TEST(Tensor, CopyFromVector) { CUDADeviceContext gpu_ctx(*gpu_place); gpu_tensor.CopyFromVector(src_vec, gpu_ctx); // Copy from GPU to CPU tensor for comparison - dst_tensor.CopyFrom(gpu_tensor, *cpu_place, gpu_ctx); + dst_tensor.CopyFrom(gpu_tensor, *cpu_place, gpu_ctx); // Sync before Compare Tensors gpu_ctx.Wait(); @@ -340,7 +340,7 @@ TEST(Tensor, CopyFromVector) { cpu_tensor.CopyFromVector(src_vec, cpu_ctx); gpu_tensor.Resize(make_ddim({2, 2})); gpu_tensor.CopyFromVector(src_vec, gpu_ctx); - dst_tensor.CopyFrom(gpu_tensor, *cpu_place, gpu_ctx); + dst_tensor.CopyFrom(gpu_tensor, *cpu_place, gpu_ctx); // Sync before Compare Tensors 
gpu_ctx.Wait(); @@ -368,7 +368,7 @@ TEST(Tensor, ReshapeToMatrix) { for (int i = 0; i < 2 * 3 * 4 * 9; ++i) { src_ptr[i] = i; } - Tensor res = ReshapeToMatrix(src, 2); + Tensor res = ReshapeToMatrix(src, 2); ASSERT_EQ(res.dims()[0], 2 * 3); ASSERT_EQ(res.dims()[1], 4 * 9); } diff --git a/paddle/framework/type_defs.h b/paddle/framework/type_defs.h index 0d1564a7510ddf0106ff417fb0b487ddbde1ac2e..00da7289394cf18e013220a4bedde2c182f6a4a4 100644 --- a/paddle/framework/type_defs.h +++ b/paddle/framework/type_defs.h @@ -16,12 +16,18 @@ #include #include #include +#include +#include +#include +#include #include "paddle/platform/variant.h" namespace paddle { namespace framework { class OperatorBase; class OpDescBind; +class BlockDescBind; +class BlockDesc; using VariableNameMap = std::map>; // The order should be as same as framework.proto @@ -40,5 +46,8 @@ using GradOpMakerFN = std::function>( const OpDescBind&, const std::unordered_set& /*no_grad_set*/, std::unordered_map* /*grad_to_var*/)>; +using InferVarTypeFN = std::function; + } // namespace framework } // namespace paddle diff --git a/paddle/framework/var_desc.h b/paddle/framework/var_desc.h index 688a46f83982fc464c7602ec1041ad3f42122211..af4c26ca0a77b444852cc01545a8b585a5c3afcc 100644 --- a/paddle/framework/var_desc.h +++ b/paddle/framework/var_desc.h @@ -79,6 +79,10 @@ class VarDescBind { void SetType(VarDesc::VarType type) { desc_.set_type(type); } + bool Persistable() const { return desc_.persistable(); } + + void SetPersistable(bool persistable) { desc_.set_persistable(persistable); } + private: const TensorDesc &tensor_desc() const; TensorDesc *mutable_tensor_desc(); diff --git a/paddle/framework/var_type_inference.h b/paddle/framework/var_type_inference.h new file mode 100644 index 0000000000000000000000000000000000000000..32abbeb33479444c5e7a9889f4211f59af07f98f --- /dev/null +++ b/paddle/framework/var_type_inference.h @@ -0,0 +1,29 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once +#include "paddle/framework/type_defs.h" + +namespace paddle { +namespace framework { + +class VarTypeInference { + public: + virtual ~VarTypeInference() {} + virtual void operator()(const OpDescBind& op_desc, + BlockDescBind* block) const = 0; +}; + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/var_type_inference_test.cc b/paddle/framework/var_type_inference_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..918de1fd055e32888f71ffea1f33993ba1210e86 --- /dev/null +++ b/paddle/framework/var_type_inference_test.cc @@ -0,0 +1,104 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/framework/var_type_inference.h" +#include "gtest/gtest.h" +#include "paddle/framework/op_registry.h" +#include "paddle/framework/operator.h" +#include "paddle/framework/program_desc.h" + +namespace paddle { +namespace framework { + +class SumOpMaker : public OpProtoAndCheckerMaker { + public: + SumOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "").AsDuplicable(); + AddOutput("Out", ""); + AddComment(""); + } +}; + +class SumOpVarTypeInference : public VarTypeInference { + public: + void operator()(const OpDescBind &op_desc, + BlockDescBind *block) const override { + auto &inputs = op_desc.Input("X"); + auto default_var_type = VarDesc::SELECTED_ROWS; + + bool any_input_is_lod_tensor = std::any_of( + inputs.begin(), inputs.end(), [block](const std::string &name) { + return block->Var(name)->GetType() == VarDesc::LOD_TENSOR; + }); + if (any_input_is_lod_tensor) { + default_var_type = VarDesc::LOD_TENSOR; + } + + auto out_var_name = op_desc.Output("Out").front(); + block->Var(out_var_name)->SetType(default_var_type); + } +}; +} // namespace framework +} // namespace paddle + +REGISTER_OPERATOR(sum, paddle::framework::NOP, paddle::framework::SumOpMaker, + paddle::framework::SumOpVarTypeInference); +REGISTER_OPERATOR(sum_without_infer_var_type, paddle::framework::NOP, + paddle::framework::SumOpMaker); + +namespace paddle { +namespace framework { + +TEST(InferVarType, sum_op) { + ProgramDescBind prog; + auto *op = prog.Block(0)->AppendOp(); + op->SetType("sum"); + op->SetInput("X", {"test_a", "test_b", "test_c"}); + op->SetOutput("Out", {"test_out"}); + + prog.Block(0)->Var("test_a")->SetType(VarDesc::SELECTED_ROWS); + prog.Block(0)->Var("test_b")->SetType(VarDesc::SELECTED_ROWS); + prog.Block(0)->Var("test_c")->SetType(VarDesc::SELECTED_ROWS); + prog.Block(0)->Var("test_out"); + + op->InferVarType(prog.Block(0)); + + ASSERT_EQ(VarDesc::SELECTED_ROWS, prog.Block(0)->Var("test_out")->GetType()); + + prog.Block(0)->Var("test_b")->SetType(VarDesc::LOD_TENSOR); + op->InferVarType(prog.Block(0)); + ASSERT_EQ(VarDesc::LOD_TENSOR, prog.Block(0)->Var("test_out")->GetType()); +} + +TEST(InferVarType, sum_op_without_infer_var_type) { + ProgramDescBind prog; + auto *op = prog.Block(0)->AppendOp(); + op->SetType("sum_without_infer_var_type"); + op->SetInput("X", {"test2_a", "test2_b", "test2_c"}); + op->SetOutput("Out", {"test2_out"}); + + prog.Block(0)->Var("test2_a")->SetType(VarDesc::SELECTED_ROWS); + prog.Block(0)->Var("test2_b")->SetType(VarDesc::SELECTED_ROWS); + prog.Block(0)->Var("test2_c")->SetType(VarDesc::SELECTED_ROWS); + prog.Block(0)->Var("test2_out"); + + op->InferVarType(prog.Block(0)); + + ASSERT_EQ(VarDesc_VarType_LOD_TENSOR, + prog.Block(0)->Var("test2_out")->GetType()); +} + +} // namespace framework +} // namespace paddle \ No newline at end of file diff --git a/paddle/framework/variable.h b/paddle/framework/variable.h index 38fc2720a3023039aa113b32a394bda9c5def4c0..a80f0e66b5a59bf95efc200d159ad5dd9cf4111a 100644 --- a/paddle/framework/variable.h +++ b/paddle/framework/variable.h @@ -25,7 +25,10 @@ 
class Variable { public: template const T& Get() const { - PADDLE_ENFORCE(IsType(), "Variable must be type %s", typeid(T).name()); + PADDLE_ENFORCE(holder_ != nullptr, "Variable must hold something"); + PADDLE_ENFORCE(IsType(), + "Variable must be type %s, the holding type is %s", + typeid(T).name(), holder_->Type().name()); return *static_cast(holder_->Ptr()); } diff --git a/paddle/gserver/activations/MKLDNNActivation.cpp b/paddle/gserver/activations/MKLDNNActivation.cpp index 18c5638100065109fba1f0647a1c5f91256f7b9d..f3ccd68160859795f28a40f8d0d4032adb289ccf 100644 --- a/paddle/gserver/activations/MKLDNNActivation.cpp +++ b/paddle/gserver/activations/MKLDNNActivation.cpp @@ -126,7 +126,7 @@ void MKLDNNEltwiseActivation::resetFwd(Argument& act) { copyInVal_ = nullptr; if (act.grad && algo == algorithm::eltwise_tanh) { // tanh need save src input for backward - inVal_ = MKLDNNMatrix::create(nullptr, val_->getPrimitiveDesc()); + inVal_ = MKLDNNMatrix::create(val_->getPrimitiveDesc()); copyInVal_ = std::make_shared(*val_, *inVal_); CHECK(copyInVal_) << "should not be emptry"; pipelineFwd_.push_back(*copyInVal_); @@ -145,7 +145,7 @@ void MKLDNNEltwiseActivation::resetBwd(Argument& act) { algorithm algo = getAlgo(this->getName()); float alpha = getBwdAlpha(); float beta = getBeta(); - grad_ = MKLDNNMatrix::create(act.grad, val_->getPrimitiveDesc()); + grad_ = MKLDNNMatrix::create(val_->getPrimitiveDesc(), act.grad); auto eng = CPUEngine::Instance().getEngine(); auto bwdDesc = eltwise_bwd::desc( algo, grad_->getMemoryDesc(), val_->getMemoryDesc(), alpha, beta); @@ -230,7 +230,7 @@ void MKLDNNActivation::resetFwd(Argument& act) { int ic = cnt_ / bs / ih / iw; CHECK_EQ(cnt_, (size_t)bs * ic * ih * iw); val_ = MKLDNNMatrix::create( - act.value, {bs, ic, ih, iw}, mkldnn::memory::format::nchw, *engine_); + {bs, ic, ih, iw}, mkldnn::memory::format::nchw, *engine_, act.value); CHECK(val_); val_->downSpatial(); } diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.cpp b/paddle/gserver/gradientmachines/NeuralNetwork.cpp index dcf0acb5a2cc7698625a2e254d4c12a32bc9631d..dbadc352a4ccd7483bf67e1025c212f514e32a24 100644 --- a/paddle/gserver/gradientmachines/NeuralNetwork.cpp +++ b/paddle/gserver/gradientmachines/NeuralNetwork.cpp @@ -21,6 +21,10 @@ limitations under the License. */ #include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" +#ifdef PADDLE_USE_MKLDNN +#include "paddle/gserver/layers/MKLDNNLayer.h" +#endif + #ifndef PADDLE_MOBILE_INFERENCE #include "MultiNetwork.h" #include "RecurrentGradientMachine.h" @@ -300,6 +304,17 @@ void NeuralNetwork::backward(const UpdateCallback& callback) { } } +void NeuralNetwork::finish() { +#ifdef PADDLE_USE_MKLDNN + FOR_EACH_R(layer, layers_) { + MKLDNNLayerPtr dnnLayer = std::dynamic_pointer_cast(*layer); + if (dnnLayer) { + dnnLayer->convertWeightsToPaddle(); + } + } +#endif +} + Argument NeuralNetwork::getLayerOutput(const std::string& layerName) { return getLayer(layerName)->getOutput(); } diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.h b/paddle/gserver/gradientmachines/NeuralNetwork.h index 56a1ec78460731554c9b47cf3f517f7654dc314f..6888380290074318fe7f94d168b2931e776dda47 100644 --- a/paddle/gserver/gradientmachines/NeuralNetwork.h +++ b/paddle/gserver/gradientmachines/NeuralNetwork.h @@ -134,6 +134,9 @@ public: const std::string& getName() const { return subModelName_; } + /// some finishing work, such as converting the weight format of MKLDNNLayers + void finish(); + protected: /** * The constructor of NeuralNetwork. 
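
To make the stricter check added to `paddle/framework/variable.h` above concrete: an empty variable now fails `Get<T>()` with an explicit message, and a type mismatch reports both the requested and the held type. The snippet below is a minimal, self-contained sketch of that behavior, not the actual `paddle::framework::Variable` (which reports the error through `PADDLE_ENFORCE`, so it surfaces as an `EnforceNotMet` exception rather than a plain `std::runtime_error`); class and member names here are simplified analogues for illustration only.

```cpp
#include <cassert>
#include <memory>
#include <stdexcept>
#include <string>
#include <typeindex>
#include <typeinfo>

// Simplified analogue of the type-erased Variable holder.
class Variable {
 public:
  template <typename T>
  const T& Get() const {
    // Mirrors: PADDLE_ENFORCE(holder_ != nullptr, "Variable must hold something");
    if (holder_ == nullptr) {
      throw std::runtime_error("Variable must hold something");
    }
    // Mirrors: PADDLE_ENFORCE(IsType<T>(), "... it holds %s", holder_->Type().name());
    if (std::type_index(typeid(T)) != holder_->Type()) {
      throw std::runtime_error(std::string("Variable must be type ") +
                               typeid(T).name() + ", the holding type is " +
                               holder_->Type().name());
    }
    return *static_cast<const T*>(holder_->Ptr());
  }

  template <typename T>
  T* GetMutable() {
    // A real implementation also resets the holder when the type changes.
    if (holder_ == nullptr || holder_->Type() != std::type_index(typeid(T))) {
      holder_.reset(new PlaceholderImpl<T>());
    }
    return static_cast<T*>(holder_->Ptr());
  }

 private:
  struct Placeholder {
    virtual ~Placeholder() = default;
    virtual void* Ptr() const = 0;
    virtual std::type_index Type() const = 0;
  };

  template <typename T>
  struct PlaceholderImpl : Placeholder {
    void* Ptr() const override { return const_cast<T*>(&value_); }
    std::type_index Type() const override { return typeid(T); }
    T value_{};
  };

  std::unique_ptr<Placeholder> holder_;
};

int main() {
  Variable v;
  *v.GetMutable<int>() = 42;
  assert(v.Get<int>() == 42);  // OK: requested type matches the held type.

  bool caught = false;
  try {
    v.Get<double>();  // Mismatch: message names both double and int.
  } catch (const std::runtime_error&) {
    caught = true;
  }
  assert(caught);
  return 0;
}
```

This matches how the updated check is exercised in practice: callers that request the wrong type now get a diagnostic naming the held type instead of only the requested one.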
diff --git a/paddle/gserver/layers/MKLDNNBase.h b/paddle/gserver/layers/MKLDNNBase.h index 4c0234e7b3a91053596c32cea581fa5d1e26b9d5..af02a37cad668708f77ecf423549a8ec993e54fb 100644 --- a/paddle/gserver/layers/MKLDNNBase.h +++ b/paddle/gserver/layers/MKLDNNBase.h @@ -21,8 +21,8 @@ namespace paddle { typedef enum { MKLDNN_BASE = 1, // basical info of MKLDNN MKLDNN_TESTS = 1, // gtest info of MKLDNN - MKLDNN_SIZES = 2, // size info of MKLDNN - MKLDNN_FMTS = 3, // format info of MKLDNN + MKLDNN_FMTS = 2, // format info of MKLDNN + MKLDNN_SIZES = 3, // size info of MKLDNN MKLDNN_ALL = 4, // show all info of MKLDNN } MKLDNN_LOG_LEVEL; diff --git a/paddle/gserver/layers/MKLDNNConvLayer.cpp b/paddle/gserver/layers/MKLDNNConvLayer.cpp index 8b67a1ef4ffdd42559f8078873ed135751d56674..83f4e4e6151d727b3e6cf367bb7ecae55dd7df73 100644 --- a/paddle/gserver/layers/MKLDNNConvLayer.cpp +++ b/paddle/gserver/layers/MKLDNNConvLayer.cpp @@ -116,8 +116,6 @@ void MKLDNNConvLayer::resetFwd(std::vector& pipeline, resetFwdBuffers(fwdPD_, in, wgt, bias, out); resetFwdPipeline(pipeline, fwdPD_, in, wgt, bias, out); - - printValueFormatFlow(); } void MKLDNNConvLayer::resetBwd(std::vector& pipeline, @@ -135,12 +133,6 @@ void MKLDNNConvLayer::resetBwd(std::vector& pipeline, resetBwdBuffers(bwdWgtPD, bwdDataPD, in, wgt, bias, out); resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgt, bias, out); - - printGradFormatFlow(); -} - -void MKLDNNConvLayer::updateInputData() { - cpuInVal_->setData(getInputValue(0, CPU_DEVICE)->getData()); } void MKLDNNConvLayer::updateWeights(const UpdateCallback& callback) { @@ -211,11 +203,18 @@ void MKLDNNConvLayer::resetFwdBuffers( MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { CHECK(pd); - resetInValue(pd, in); + resetInValue( + in, std::make_shared(pd->src_primitive_desc())); + + resetOutValue(out, pd->dst_primitive_desc()); - resetWgtBiasValue(pd, wgt, bias); + resetWithMatrix(wgt, weight_->getW(), pd->weights_primitive_desc()); - resetOutValue(pd, out); + if (biases_ && biases_->getW()) { + resetWithMatrix(bias, biases_->getW(), pd->bias_primitive_desc()); + } else { + bias = nullptr; + } } void MKLDNNConvLayer::resetFwdPipeline( @@ -225,103 +224,12 @@ void MKLDNNConvLayer::resetFwdPipeline( MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { - if (cvtInVal_) { - pipeline.push_back(*cvtInVal_); - } - if (bias) { fwd_.reset(new conv_fwd(*pd, *in, *wgt, *bias, *out)); } else { fwd_.reset(new conv_fwd(*pd, *in, *wgt, *out)); } pipeline.push_back(*fwd_); - - if (cvtOutVal_) { - pipeline.push_back(*cvtOutVal_); - } -} - -void MKLDNNConvLayer::resetInValue( - std::shared_ptr& pd, MKLDNNMatrixPtr& in) { - const MatrixPtr& inMat = inputLayers_[0]->getOutputValue(); - in = MKLDNNMatrix::create(inMat, pd->src_primitive_desc()); - - // create buffer and reorder if input value do not match - cpuInVal_ = nullptr; - cvtInVal_ = nullptr; - - MKLDNNMatrixPtr dnnIn = std::dynamic_pointer_cast(inMat); - CHECK_EQ(inputIsOnlyMKLDNN(), dnnIn != nullptr); - if (dnnIn != nullptr && dnnIn->getPrimitiveDesc() == in->getPrimitiveDesc()) { - in = dnnIn; - return; - } - if (dnnIn) { - if (dnnIn->getFormat() == format::nc) { - CHECK(ih_ == 1 && iw_ == 1) << "when input is nc format"; - // create a new one with nchw format and same data - memory::dims inDims = memory::dims{bs_, ic_, 1, 1}; - dnnIn = MKLDNNMatrix::create(inMat, inDims, format::nchw, engine_); - } - if (dnnIn->getPrimitiveDesc() == in->getPrimitiveDesc()) { - in = dnnIn; - return; - } - cpuInVal_ = dnnIn; - in = 
MKLDNNMatrix::create(nullptr, pd->src_primitive_desc()); - cvtInVal_ = MKLDNNMatrix::createReorder(cpuInVal_, in); - CHECK(cvtInVal_) << "should not be emptry"; - } else { - memory::dims inDims = memory::dims{bs_, ic_, ih_, iw_}; - cpuInVal_ = MKLDNNMatrix::create(inMat, inDims, format::nchw, engine_); - if (cpuInVal_->getPrimitiveDesc() != in->getPrimitiveDesc()) { - // create new mkldnn matrix - in = MKLDNNMatrix::create(nullptr, pd->src_primitive_desc()); - cvtInVal_ = MKLDNNMatrix::createReorder(cpuInVal_, in); - CHECK(cvtInVal_) << "should not be emptry"; - } else { - in = cpuInVal_; - } - } -} - -void MKLDNNConvLayer::resetWgtBiasValue( - std::shared_ptr& pd, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias) { - wgt = MKLDNNMatrix::create(weight_->getW(), pd->weights_primitive_desc()); - VLOG(MKLDNN_FMTS) << "Weight value format: " << wgt->getFormat(); - - bias = (biases_ && biases_->getW()) - ? MKLDNNMatrix::create(biases_->getW(), pd->bias_primitive_desc()) - : nullptr; -} - -void MKLDNNConvLayer::resetOutValue( - std::shared_ptr& pd, MKLDNNMatrixPtr& out) { - out = MKLDNNMatrix::create(output_.value, pd->dst_primitive_desc()); - - // create reorder if output value has cpu device and pd do not match - cpuOutVal_ = nullptr; - cvtOutVal_ = nullptr; - if (!outputIsOnlyMKLDNN()) { - const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).value; - memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_}; - cpuOutVal_ = MKLDNNMatrix::create(cpuOut, outDims, format::nchw, engine_); - if (cpuOutVal_->getPrimitiveDesc() != pd->dst_primitive_desc()) { - out = MKLDNNMatrix::create(nullptr, pd->dst_primitive_desc()); - cvtOutVal_ = MKLDNNMatrix::createReorder(out, cpuOutVal_); - CHECK(cvtOutVal_) << "should not be empty"; - } else { - cpuOutVal_ = out; - } - // when output is cpu device, change the mkldnn output value and make them - // share the same data. Then if next layer use inputlayer->getOuputValue() - // to achieve the input value, it will get the right data. 
- output_.value = std::dynamic_pointer_cast(cpuOutVal_); - return; - } - output_.value = std::dynamic_pointer_cast(out); } void MKLDNNConvLayer::resetBwdWgtPD( @@ -330,8 +238,8 @@ void MKLDNNConvLayer::resetBwdWgtPD( loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR); // create backward weight using input, output and weight value memory desc - CHECK(inVal_) << "Should have input value"; - CHECK(outVal_) << "Should have output value"; + CHECK(inVal_) << "Should have internal input value"; + CHECK(outVal_) << "Should have internal output value"; CHECK(wgtVal_) << "Should have weight value"; algorithm algo = algorithm::convolution_direct; padding_kind padKind = padding_kind::zero; @@ -371,8 +279,8 @@ void MKLDNNConvLayer::resetBwdDataPD( memory::dims wgtDims, biasDims, strides, dilations, padL, padR; loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR); - CHECK(inVal_) << "Should have input value"; - CHECK(outVal_) << "Should have output value"; + CHECK(inVal_) << "Should have internal input value"; + CHECK(outVal_) << "Should have internal output value"; // create backward data using input and output value memory desc // but using weight memory desc with any format auto bwdDataDesc = conv_bwdData::desc(algorithm::convolution_direct, @@ -398,12 +306,27 @@ void MKLDNNConvLayer::resetBwdBuffers( MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { CHECK(wgtPD); - resetOutGrad(wgtPD, out); + resetOutGrad(out, wgtPD->diff_dst_primitive_desc()); - resetWgtBiasGrad(wgtPD, wgt, bias); + resetWithMatrix( + wgt, weight_->getWGrad(), wgtPD->diff_weights_primitive_desc()); + CHECK(wgtVal_ != nullptr && + wgt->getPrimitiveDesc() == wgtVal_->getPrimitiveDesc()) + << "primitive desc of weight grad and value should be equal"; - resetInGrad(dataPD, in); + bias = nullptr; + if (biases_ && biases_->getWGrad()) { + resetWithMatrix( + bias, biases_->getWGrad(), wgtPD->diff_bias_primitive_desc()); + CHECK(bias && biasVal_ && + bias->getPrimitiveDesc() == biasVal_->getPrimitiveDesc()) + << "primitive desc of bias grad should equal the bias value"; + } + if (dataPD == nullptr) { + return; + } + resetInGrad(in, dataPD->diff_src_primitive_desc()); resetWgtValBwdData(dataPD, wgtValBwdData_); } @@ -415,10 +338,7 @@ void MKLDNNConvLayer::resetBwdPipeline( MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { - if (cvtOutGrad_) { - pipeline.push_back(*cvtOutGrad_); - } - + CHECK(inVal_); // add bwdWgt handle if (bias) { bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVal_, *out, *wgt, *bias)); @@ -430,98 +350,13 @@ void MKLDNNConvLayer::resetBwdPipeline( if (dataPD == nullptr) { return; } - if (cvtWgtVal_) { pipeline.push_back(*cvtWgtVal_); } - // add bwdData handle CHECK(wgtValBwdData_) << "Should have weight memory"; bwdData_.reset(new conv_bwdData(*dataPD, *out, *wgtValBwdData_, *in)); pipeline.push_back(*bwdData_); - - if (cvtInGrad_) { - pipeline.push_back(*cvtInGrad_); - } -} - -void MKLDNNConvLayer::resetOutGrad( - std::shared_ptr& wgtPD, MKLDNNMatrixPtr& out) { - cpuOutGrad_ = nullptr; - cvtOutGrad_ = nullptr; - CHECK(outVal_ != nullptr && - outVal_->getPrimitiveDesc() == wgtPD->diff_dst_primitive_desc()) - << "primitive desc of out grad and value should be equal"; - if (outputIsOnlyMKLDNN()) { - MKLDNNLayer::resetOutGrad(out, outVal_->getPrimitiveDesc()); - } else { - const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).grad; - // same PrimitiveDesc with cpuInVal_ - CHECK(cpuOutVal_); - cpuOutGrad_ = MKLDNNMatrix::create(cpuOut, cpuOutVal_->getPrimitiveDesc()); - // create reorder if 
primitive desc does not match - if (cpuOutGrad_->getPrimitiveDesc() != outVal_->getPrimitiveDesc()) { - out = MKLDNNMatrix::create(output_.grad, outVal_->getPrimitiveDesc()); - cvtOutGrad_ = MKLDNNMatrix::createReorder(cpuOutGrad_, out); - CHECK(cvtOutGrad_); - } else { - // share the same data of CPU output - output_.grad->setData(cpuOut->getData()); - out = cpuOutGrad_; - } - } -} - -void MKLDNNConvLayer::resetWgtBiasGrad( - std::shared_ptr& wgtPD, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias) { - wgt = MKLDNNMatrix::create(weight_->getWGrad(), - wgtPD->diff_weights_primitive_desc()); - CHECK(nullptr != wgtVal_ && - wgt->getPrimitiveDesc() == wgtVal_->getPrimitiveDesc()) - << "primitive desc of weight grad and value should be equal"; - VLOG(MKLDNN_FMTS) << "weight grad format: " << wgt->getFormat(); - - bias = nullptr; - if (biasVal_ == nullptr) { - return; - } - bias = MKLDNNMatrix::create(biases_->getWGrad(), - wgtPD->diff_bias_primitive_desc()); - CHECK(bias->getPrimitiveDesc() == biasVal_->getPrimitiveDesc()) - << "primitive desc of bias grad should equal the bias value"; -} - -void MKLDNNConvLayer::resetInGrad( - std::shared_ptr& dataPD, - MKLDNNMatrixPtr& in) { - in = nullptr; - cpuInGrad_ = nullptr; - cvtInGrad_ = nullptr; - if (dataPD == nullptr) { - return; - } - - if (inputIsOnlyMKLDNN()) { - MKLDNNLayer::resetInGrad(in, dataPD->diff_src_primitive_desc()); - CHECK(nullptr != inVal_ && - in->getPrimitiveDesc() == inVal_->getPrimitiveDesc()) - << "primitive desc of input grad and value should be equal"; - } else { - const MatrixPtr& cpuIn = getInputGrad(0, CPU_DEVICE); - // same PrimitiveDesc with cpuInVal_ - CHECK(cpuInVal_); - cpuInGrad_ = MKLDNNMatrix::create(cpuIn, cpuInVal_->getPrimitiveDesc()); - in = cpuInGrad_; - // create reorder if PrimitiveDesc does not match - if (cpuInGrad_->getPrimitiveDesc() != dataPD->diff_src_primitive_desc()) { - in = MKLDNNMatrix::create(getInputGrad(0, MKLDNN_DEVICE), - dataPD->diff_src_primitive_desc()); - cvtInGrad_ = MKLDNNMatrix::createReorder(in, cpuInGrad_); - CHECK(cvtInGrad_); - } - } } void MKLDNNConvLayer::resetWgtValBwdData( @@ -535,8 +370,7 @@ void MKLDNNConvLayer::resetWgtValBwdData( // since the primitive_desc would be different with wgtVal_ CHECK(wgtVal_) << "should have weight value"; if (dataPD->weights_primitive_desc() != wgtVal_->getPrimitiveDesc()) { - wgtValBwdData_ = - MKLDNNMatrix::create(nullptr, dataPD->weights_primitive_desc()); + wgtValBwdData_ = MKLDNNMatrix::create(dataPD->weights_primitive_desc()); cvtWgtVal_ = MKLDNNMatrix::createReorder(wgtVal_, wgtValBwdData_); CHECK(cvtWgtVal_); } else { diff --git a/paddle/gserver/layers/MKLDNNConvLayer.h b/paddle/gserver/layers/MKLDNNConvLayer.h index f84f2f737c47a1b8adc2b83360a0396ffbc6ae24..1fed0e1c6565b763a3ee73a0853f560ddfbd44c6 100644 --- a/paddle/gserver/layers/MKLDNNConvLayer.h +++ b/paddle/gserver/layers/MKLDNNConvLayer.h @@ -48,17 +48,6 @@ protected: // save forward primitive_desc, which can be used backward std::shared_ptr fwdPD_; - // MKLDNNMatrixPtr which should be created from CPU Device - MKLDNNMatrixPtr cpuInVal_; - MKLDNNMatrixPtr cpuInGrad_; - MKLDNNMatrixPtr cpuOutVal_; - MKLDNNMatrixPtr cpuOutGrad_; - // convert handle between CPU device and MKLDNN device - std::shared_ptr cvtInVal_; - std::shared_ptr cvtInGrad_; - std::shared_ptr cvtOutVal_; - std::shared_ptr cvtOutGrad_; - // whether the weight has been init bool hasInitedWgt_; @@ -94,8 +83,6 @@ public: MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) override; - void updateInputData() override; - void 
updateWeights(const UpdateCallback& callback) override; void convertWeightsFromPaddle() override; @@ -109,26 +96,6 @@ public: << ", sw: " << sw_ << ", dh: " << dh_ << ", dw: " << dw_; } - void printValueFormatFlow() override { - if (cpuInVal_) { - VLOG(MKLDNN_FMTS) << cpuInVal_->getFormat() << " >>>"; - } - MKLDNNLayer::printValueFormatFlow(); - if (cpuOutVal_) { - VLOG(MKLDNN_FMTS) << " >>> " << cpuOutVal_->getFormat(); - } - } - - void printGradFormatFlow() override { - if (cpuInGrad_) { - VLOG(MKLDNN_FMTS) << cpuInGrad_->getFormat() << " <<<"; - } - MKLDNNLayer::printGradFormatFlow(); - if (cpuOutGrad_) { - VLOG(MKLDNN_FMTS) << " <<< " << cpuOutGrad_->getFormat(); - } - } - protected: /** * load the dims settings of this conv @@ -162,23 +129,6 @@ protected: MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out); - /** - * reset MKLDNNMatrix of input value - */ - void resetInValue(std::shared_ptr& pd, - MKLDNNMatrixPtr& in); - /** - * reset MKLDNNMatrix of weight and bias value - */ - void resetWgtBiasValue(std::shared_ptr& pd, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias); - /** - * reset MKLDNNMatrix of output value - */ - void resetOutValue(std::shared_ptr& pd, - MKLDNNMatrixPtr& out); - /** * reset the backward weight primitive descriptor. */ @@ -207,22 +157,6 @@ protected: MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out); - /** - * reset MKLDNNMatrix of output grad - */ - void resetOutGrad(std::shared_ptr& wgtPD, - MKLDNNMatrixPtr& out); - /** - * reset MKLDNNMatrix of weight and bias grad - */ - void resetWgtBiasGrad(std::shared_ptr& wgtPD, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias); - /** - * reset MKLDNNMatrix of input grad - */ - void resetInGrad(std::shared_ptr& dataPD, - MKLDNNMatrixPtr& in); /** * reset MKLDNNMatrix of weight value for backward data * since the primitive_desc would be different with wgtVal_ diff --git a/paddle/gserver/layers/MKLDNNFcLayer.cpp b/paddle/gserver/layers/MKLDNNFcLayer.cpp index cf19a155681f3a1ceb20af67245c8f2b8fa8fa73..d82063a7130ca928ba042e210eb216f90c7207cd 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.cpp +++ b/paddle/gserver/layers/MKLDNNFcLayer.cpp @@ -62,7 +62,7 @@ void MKLDNNFcLayer::convertWeightsFromPaddle() { CHECK(wgtVal_) << "should have been initialized"; bool hasNoSpatial_ = ih_ == 1 && iw_ == 1; auto targetDim = wgtVal_->getDims(); - auto srcFmt = hasNoSpatial_ ? memory::format::io : memory::format::ihwo; + auto srcFmt = hasNoSpatial_ ? format::io : format::ihwo; wgtVal_->reorderDataFrom(wgtVal_, srcFmt, targetDim); hasInitedWgt_ = true; } @@ -71,7 +71,7 @@ void MKLDNNFcLayer::convertWeightsToPaddle() { CHECK(wgtVal_) << "should have been initialized"; bool hasNoSpatial_ = ih_ == 1 && iw_ == 1; auto targetDim = wgtVal_->getDims(); - auto dstFmt = hasNoSpatial_ ? memory::format::io : memory::format::ihwo; + auto dstFmt = hasNoSpatial_ ? 
format::io : format::ihwo; wgtVal_->reorderDataTo(wgtVal_, dstFmt, targetDim); } @@ -100,8 +100,6 @@ void MKLDNNFcLayer::resetFwd(std::vector& pipeline, resetFwdPD(fwdPD_, in, wgt, bias, out); resetFwdPipeline(pipeline, fwdPD_, in, wgt, bias, out); - - printValueFormatFlow(); } void MKLDNNFcLayer::resetBwd(std::vector& pipeline, @@ -119,12 +117,6 @@ void MKLDNNFcLayer::resetBwd(std::vector& pipeline, resetBwdDataPD(bwdDataPD, in, out); resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgt, bias, out); - - printGradFormatFlow(); -} - -void MKLDNNFcLayer::updateInputData() { - inVal_->setData(getInputValue(0, CPU_DEVICE)->getData()); } void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) { @@ -139,51 +131,30 @@ void MKLDNNFcLayer::resetFwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { resetInValue(in); - - resetWgtBiasValue(wgt, bias); - - resetOutValue(out); -} - -void MKLDNNFcLayer::resetInValue(MKLDNNMatrixPtr& in) { - if (inputIsOnlyMKLDNN()) { - const MatrixPtr& dnnIn = getInputValue(0); - in = std::dynamic_pointer_cast(dnnIn); - CHECK(in) << "Input should be MKLDNNMatrix"; - } else { - CHECK_EQ(getPrev(0)->getDeviceId(), CPU_DEVICE) << "Only support CPU yet"; - const MatrixPtr& cpuIn = getInputValue(0, CPU_DEVICE); - in = MKLDNNMatrix::create( - cpuIn, {bs_, ic_, ih_, iw_}, format::nchw, engine_); - } + CHECK(in); in->downSpatial(); -} -void MKLDNNFcLayer::resetWgtBiasValue(MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias) { + auto outPD = + MKLDNNMatrix::createPrimitiveDesc({bs_, oc_}, format::nc, engine_); + resetOutValue(out, outPD); + format wgtFmt = format::oihw; - if (inVal_->getFormat() == format::nChw8c) { + if (in->getFormat() == format::nChw8c) { wgtFmt = format::oIhw8i; - } else if (inVal_->getFormat() == format::nChw16c) { + } else if (in->getFormat() == format::nChw16c) { wgtFmt = format::oIhw16i; } - wgt = MKLDNNMatrix::create( - weight_->getW(), {oc_, ic_, ih_, iw_}, wgtFmt, engine_); + auto wgtPD = + MKLDNNMatrix::createPrimitiveDesc({oc_, ic_, ih_, iw_}, wgtFmt, engine_); + resetWithMatrix(wgt, weight_->getW(), wgtPD); wgt->downSpatial(); - VLOG(MKLDNN_FMTS) << "Weight value format: " << wgt->getFormat(); - - bias = (biases_ && biases_->getW()) - ? 
MKLDNNMatrix::create(biases_->getW(), {oc_}, format::x, engine_) - : nullptr; -} -void MKLDNNFcLayer::resetOutValue(MKLDNNMatrixPtr& out) { - out = MKLDNNMatrix::create(output_.value, {bs_, oc_}, format::nc, engine_); - if (!outputIsOnlyMKLDNN()) { - // fc cpu output value do not need create convert, just share data - getOutput(CPU_DEVICE).value->setData(out->getData()); + if (biases_ && biases_->getW()) { + auto biasPD = MKLDNNMatrix::createPrimitiveDesc({oc_}, format::x, engine_); + resetWithMatrix(bias, biases_->getW(), biasPD); + } else { + bias = nullptr; } - output_.value = std::dynamic_pointer_cast(out); } void MKLDNNFcLayer::resetFwdPD(std::shared_ptr& pd, @@ -219,7 +190,6 @@ void MKLDNNFcLayer::resetFwdPipeline( } else { fwd_.reset(new fc_fwd(*pd, *in, *wgt, *out)); } - pipeline.push_back(*fwd_); } @@ -227,44 +197,18 @@ void MKLDNNFcLayer::resetBwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { - resetOutGrad(out); - - resetWgtBiasGrad(wgt, bias); - - resetInGrad(in); -} - -void MKLDNNFcLayer::resetOutGrad(MKLDNNMatrixPtr& out) { - CHECK(outVal_); - if (outputIsOnlyMKLDNN()) { - MKLDNNLayer::resetOutGrad(out, outVal_->getPrimitiveDesc()); - } else { - const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).grad; - output_.grad->setData(cpuOut->getData()); - out = MKLDNNMatrix::create(cpuOut, outVal_->getPrimitiveDesc()); - } -} + CHECK(inVal_ && outVal_); + resetOutGrad(out, outVal_->getPrimitiveDesc()); + resetInGrad(in, inVal_->getPrimitiveDesc()); -void MKLDNNFcLayer::resetWgtBiasGrad(MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias) { CHECK(wgtVal_); - wgt = MKLDNNMatrix::create(weight_->getWGrad(), wgtVal_->getPrimitiveDesc()); + resetWithMatrix(wgt, weight_->getWGrad(), wgtVal_->getPrimitiveDesc()); - bias = nullptr; - if (biasVal_ == nullptr) { - return; - } - bias = - MKLDNNMatrix::create(biases_->getWGrad(), biasVal_->getPrimitiveDesc()); -} - -void MKLDNNFcLayer::resetInGrad(MKLDNNMatrixPtr& in) { - in = nullptr; - if (inputLayers_[0]->getOutput().grad == nullptr) { - return; + if (biasVal_) { + resetWithMatrix(bias, biases_->getWGrad(), biasVal_->getPrimitiveDesc()); + } else { + bias = nullptr; } - CHECK(inVal_); - MKLDNNLayer::resetInGrad(in, inVal_->getPrimitiveDesc()); } void MKLDNNFcLayer::resetBwdWgtPD( diff --git a/paddle/gserver/layers/MKLDNNFcLayer.h b/paddle/gserver/layers/MKLDNNFcLayer.h index c76878aafab7e986d2bf478eaba02f2f0aced293..ee861763ff3dc10ddb4c119358b80dbe1614aecb 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.h +++ b/paddle/gserver/layers/MKLDNNFcLayer.h @@ -66,8 +66,6 @@ public: MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) override; - void updateInputData() override; - void updateWeights(const UpdateCallback& callback) override; void convertWeightsFromPaddle() override; @@ -84,9 +82,6 @@ protected: MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out); - void resetInValue(MKLDNNMatrixPtr& in); - void resetWgtBiasValue(MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias); - void resetOutValue(MKLDNNMatrixPtr& out); void resetFwdPD(std::shared_ptr& pd, MKLDNNMatrixPtr in, MKLDNNMatrixPtr wgt, @@ -109,9 +104,6 @@ protected: MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out); - void resetOutGrad(MKLDNNMatrixPtr& out); - void resetWgtBiasGrad(MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias); - void resetInGrad(MKLDNNMatrixPtr& in); void resetBwdWgtPD(std::shared_ptr& pd, MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias, diff --git a/paddle/gserver/layers/MKLDNNLayer.cpp 
b/paddle/gserver/layers/MKLDNNLayer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6bb19976b5552fcd2e420f03de45c77a90ffb9d2 --- /dev/null +++ b/paddle/gserver/layers/MKLDNNLayer.cpp @@ -0,0 +1,333 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "MKLDNNLayer.h" + +using namespace mkldnn; // NOLINT +typedef memory::format format; + +namespace paddle { + +bool MKLDNNLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + CHECK(FLAGS_use_mkldnn) << "MkldnnLayers only support use_mkldnn." + << "Please set WITH_MKLDNN=ON " + << "and set use_mkldnn=True"; + CHECK(!useGpu_) << "Do not support GPU yet"; + + // set device id before Layer::init + setDevice(MKLDNN_DEVICE); + // change param device to MKLDNN device + setParamsDevice(MKLDNN_DEVICE, parameterMap); + if (!Layer::init(layerMap, parameterMap)) { + return false; + } + setOutputMap(); + checkCPUOutputsNumber(); + + stream_.reset(new MKLDNNStream()); + engine_ = CPUEngine::Instance().getEngine(); + return true; +} + +void MKLDNNLayer::forward(PassType passType) { + passType_ = passType; + + { + REGISTER_TIMER_INFO("mkldnn_FwdTimer", getName().c_str()); + CHECK(!inputLayers_.empty()); + copySeqInfoToOutputs(); + size_t elemenCnt = inputLayers_[0]->getOutputValue()->getElementCnt(); + if (inputElemenCnt_ != elemenCnt) { + VLOG(MKLDNN_BASE) << getName() << " reset mkldnn forward"; + // reset when input total sizes changed, not only the batchsize + inputElemenCnt_ = elemenCnt; + pipelineFwd_.clear(); + reshape(bs_, ic_, ih_, iw_, oc_, oh_, ow_); + // all cpu device output grad or value share output's + shareCPUDevice(); + resetFwd(pipelineFwd_, inVal_, wgtVal_, biasVal_, outVal_); + // MKLDNNLayer output value should be MKLDNNMatrix + // so external output value is necessary. + // Then external input value is not necessary, + // since input may be mkldnn internal buffer. + CHECK(extOutVal_) << "external output value is necessary"; + output_.value = std::dynamic_pointer_cast(extOutVal_); + CHECK(inVal_ && outVal_) << "internal memories are necessary"; + if (cvtInVal_) { + pipelineFwd_.insert(pipelineFwd_.begin(), *cvtInVal_); + } + if (cvtOutVal_) { + pipelineFwd_.push_back(*cvtOutVal_); + } + convertWeightsFromPaddle(); + printSizeInfo(); + printValueFormat(); + needResetBwd_ = true; + } + + if (inputLayers_[0]->getType() == "data") { + // Update input value data when input layer is "data" type, + // since the input value data address might be changed. 
+ CHECK(extInVal_); + extInVal_->setData(getInputValue(0, CPU_DEVICE)->getData()); + } + + if (!outputOnlyMKLDNN_) { + clearGrads(); + } + stream_->submit(pipelineFwd_); + } + { + REGISTER_TIMER_INFO("FwActTimer", getName().c_str()); + forwardActivation(); + } +} + +void MKLDNNLayer::backward(const UpdateCallback& callback) { + if (needResetBwd_) { + VLOG(MKLDNN_BASE) << getName() << " reset mkldnn backward"; + pipelineBwd_.clear(); + pipelineMergeGrad_.clear(); + mergeGrad_ = nullptr; + resetBwd(pipelineBwd_, inGrad_, wgtGrad_, biasGrad_, outGrad_); + // external output grad is not necessary + // since output may be mkldnn internal buffer or merge them directly. + CHECK(outGrad_) << "internal output grad is necessary"; + if (extOutGrad_) { + CHECK_EQ(extOutGrad_->getData(), output_.grad->getData()) + << "the external buffer should share the same data with output_.grad"; + } + if (cvtOutGrad_) { + pipelineBwd_.insert(pipelineBwd_.begin(), *cvtOutGrad_); + } + if (cvtInGrad_) { + pipelineBwd_.push_back(*cvtInGrad_); + } + printGradFormat(); + needResetBwd_ = false; + } + + // merge grad must before backward activation + if (mergeGrad_) { + REGISTER_TIMER_INFO("MergeBpGrad", getName().c_str()); + stream_->submit(pipelineMergeGrad_); + } + { + REGISTER_TIMER_INFO("BpActTimer", getName().c_str()); + backwardActivation(); + } + { + REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str()); + stream_->submit(pipelineBwd_); + } + { + REGISTER_TIMER_INFO("WeightUpdate", getName().c_str()); + updateWeights(callback); + } +} + +void MKLDNNLayer::reshapeInput(int& batchsize, int& height, int& width) { + const Argument& input = inputLayers_[0]->getOutput(); + batchsize = input.getBatchSize(); + int h = input.getFrameHeight(); + int w = input.getFrameWidth(); + if (h != 0) { + height = h; + } + if (w != 0) { + width = w; + } +} + +void MKLDNNLayer::reshapeOutput(size_t height, size_t width) { + output_.setFrameHeight(height); + output_.setFrameWidth(width); + for (size_t i = 0; i < outputOtherDevice_.size(); i++) { + outputOtherDevice_[i].setFrameHeight(height); + outputOtherDevice_[i].setFrameWidth(width); + } +} + +void MKLDNNLayer::resetWithMatrix(MKLDNNMatrixPtr& dnn, + const MatrixPtr& mat, + memory::primitive_desc pd) { + dnn = nullptr; + if (mat == nullptr) { + return; + } + dnn = MKLDNNMatrix::create(pd, mat); +} + +void MKLDNNLayer::resetInValue( + MKLDNNMatrixPtr& in, const std::shared_ptr& intPD) { + cvtInVal_ = nullptr; + extInVal_ = nullptr; + in = nullptr; + CHECK_GT(bs_ * ic_ * ih_ * iw_, 0); + auto extPD = MKLDNNMatrix::createPrimitiveDesc( + {bs_, ic_, ih_, iw_}, format::nchw, engine_); + const MatrixPtr& inMat = inputLayers_[0]->getOutputValue(); + in = std::dynamic_pointer_cast(inMat); + CHECK_EQ(inputIsOnlyMKLDNN(), in != nullptr); + if (in == nullptr || in->getFormat() == format::nc) { + in = MKLDNNMatrix::create(extPD, inMat); + } + extInVal_ = isPaddleFormat(in->getFormat()) ? in : nullptr; + if (in->getFormat() == format::nc) { + CHECK(ih_ == 1 && iw_ == 1); + } + if (nullptr == intPD || in->getPrimitiveDesc() == *intPD) { + return; + } + // need create reorder + in = MKLDNNMatrix::create(*intPD); + extInVal_ = extInVal_ ? 
extInVal_ : MKLDNNMatrix::create(extPD, inMat); + cvtInVal_ = MKLDNNMatrix::createReorder(extInVal_, in); + CHECK(cvtInVal_) << "should not be emptry"; +} + +void MKLDNNLayer::resetOutValue(MKLDNNMatrixPtr& out, + memory::primitive_desc intPD) { + cvtOutVal_ = nullptr; + out = MKLDNNMatrix::create(intPD, output_.value); + extOutVal_ = out; + if (outputIsOnlyMKLDNN() || isPaddleFormat(extOutVal_->getFormat())) { + return; + } + // need create reorder + CHECK_GT(bs_ * oc_ * oh_ * ow_, 0); + extOutVal_ = MKLDNNMatrix::create( + memory::dims{bs_, oc_, oh_, ow_}, format::nchw, engine_, output_.value); + out = MKLDNNMatrix::create(intPD); + cvtOutVal_ = MKLDNNMatrix::createReorder(out, extOutVal_); + CHECK(cvtOutVal_) << "should not be empty"; +} + +void MKLDNNLayer::resetInGrad(MKLDNNMatrixPtr& in, + memory::primitive_desc intPD) { + cvtInGrad_ = nullptr; + extInGrad_ = nullptr; + in = nullptr; + LayerPtr& input = inputLayers_[0]; + if (input->getOutputGrad() == nullptr) { + // no need input grad + return; + } + CHECK(inputIsOnlyMKLDNN() || input->getOutputMapSize() <= 1) + << "only support input is MKLDNN layer or only have one output layer"; + // when input is a mkldnn branch node, + // this layer will save input grad to a internal buffer, + // and the mkldnn input layer will merge them to actual prev->output_.grad + const MatrixPtr& inMat = + input->getOutputMapSize() <= 1 ? input->getOutputGrad() : nullptr; + in = MKLDNNMatrix::create(intPD, inMat); + Argument& arg = input->getOutput(this->getName()); + arg.grad = std::dynamic_pointer_cast(in); + CHECK(inVal_); + CHECK(inVal_->getPrimitiveDesc() == intPD) << "the primitive desc must equal"; + if (inputIsOnlyMKLDNN()) { + return; + } + + extInGrad_ = in; + if (isPaddleFormat(extInGrad_->getFormat())) { + return; + } + // need create reorder + // TODO(TJ): add macro definition to simplify it + CHECK(extInVal_ != nullptr && isPaddleFormat(extInVal_->getFormat())) + << "should have external input value and the format must be nchw(nc)"; + extInGrad_ = MKLDNNMatrix::create(extInVal_->getPrimitiveDesc(), inMat); + CHECK(inVal_ != nullptr && inVal_->getPrimitiveDesc() == intPD) + << "should have internal input value and primitive desc must equal"; + in = MKLDNNMatrix::create(intPD); + cvtInGrad_ = MKLDNNMatrix::createReorder(in, extInGrad_); + CHECK(cvtInGrad_); +} + +void MKLDNNLayer::resetOutGrad(MKLDNNMatrixPtr& out, + memory::primitive_desc intPD) { + cvtOutGrad_ = nullptr; + extOutGrad_ = nullptr; + out = nullptr; + MatrixPtr& outMat = output_.grad; + out = MKLDNNMatrix::create(intPD, outMat); + resetMergeGrad(out); + if (outputIsOnlyMKLDNN()) { + return; + } + CHECK_LE(outputMap_.size(), 1U) << "do not support mixed with cpu device"; + extOutGrad_ = out; + if (isPaddleFormat(extOutGrad_->getFormat())) { + return; + } + // need create reorder + CHECK(extOutVal_ != nullptr && isPaddleFormat(extOutVal_->getFormat())) + << "should have external output value and the format must be nchw(nc)"; + extOutGrad_ = MKLDNNMatrix::create(extOutVal_->getPrimitiveDesc(), outMat); + CHECK(outVal_ != nullptr && outVal_->getPrimitiveDesc() == intPD) + << "should have internal output value and primitive desc must equal"; + out = MKLDNNMatrix::create(intPD); + cvtOutGrad_ = MKLDNNMatrix::createReorder(extOutGrad_, out); + CHECK(cvtOutGrad_); +} + +void MKLDNNLayer::resetMergeGrad(MKLDNNMatrixPtr& out) { + mergeGrad_ = nullptr; + pipelineMergeGrad_.clear(); + if (outputMap_.size() <= 1 || !outputIsOnlyMKLDNN()) { + // do not merge when output is not all MKLDNN or 
only one output + return; + } + CHECK(out) << "should have reset internal ouput grad"; + std::vector scales(outputMap_.size(), 1.0); + std::vector srcPDs; + std::vector srcs; + for (auto it = outputMap_.begin(); it != outputMap_.end(); ++it) { + MKLDNNMatrixPtr src = + std::dynamic_pointer_cast(it->second->grad); + CHECK(src) << "should be MKLDNNMatrix"; + auto srcDims = src->getDims(); + auto dstDims = out->getDims(); + CHECK_EQ(srcDims.size(), dstDims.size()); + for (size_t i = 0; i < srcDims.size(); ++i) { + CHECK_EQ(srcDims[i], dstDims[i]); + } + VLOG(MKLDNN_BASE) << getName() << " has output grad " << it->first + << ", format " << src->getFormat(); + srcPDs.push_back(src->getPrimitiveDesc()); + srcs.push_back(*src); + } + + // TODO(TJ): remove me when mkldnn sum support different formats + for (size_t i = 1; i < srcPDs.size(); ++i) { + CHECK(srcPDs[0] == srcPDs[i]); + } + tmpOutGrad_ = out; + tmpCvt_ = nullptr; + if (out->getPrimitiveDesc() != srcPDs[0]) { + tmpOutGrad_ = MKLDNNMatrix::create(srcPDs[0]); + tmpCvt_ = MKLDNNMatrix::createReorder(tmpOutGrad_, out); + CHECK(tmpCvt_); + pipelineMergeGrad_.push_back(*tmpCvt_); + } + + auto sumPD = + sum::primitive_desc(tmpOutGrad_->getMemoryDesc(), scales, srcPDs); + mergeGrad_.reset(new sum(sumPD, srcs, *tmpOutGrad_)); + pipelineMergeGrad_.insert(pipelineMergeGrad_.begin(), *mergeGrad_); +} + +} // namespace paddle diff --git a/paddle/gserver/layers/MKLDNNLayer.h b/paddle/gserver/layers/MKLDNNLayer.h index 5f9923da769781287e39a3aaaf92248dfe09f225..2c21a5b2aaecb17a52a5de9a98664068f2255d83 100644 --- a/paddle/gserver/layers/MKLDNNLayer.h +++ b/paddle/gserver/layers/MKLDNNLayer.h @@ -46,6 +46,9 @@ protected: // backward also need reset after reset forward handle bool needResetBwd_; + // is output only mkldnn + bool outputOnlyMKLDNN_; + // mkldnn engine, stream and primivtives mkldnn::engine engine_; std::shared_ptr stream_; @@ -55,11 +58,31 @@ protected: std::vector pipelineFwd_; std::vector pipelineBwd_; - // MKLDNNMatrixPtr with internal format + /* Value and grad are seperated as internal and external buffers. + * Each MKLDNNLayer must init or reset internal buffer at least, + * and the external buffer format is always nchw of nc(when h==w==1), + * which is the same format as paddle. + * The output_.value and output_.grad always save the external data, + * when mixed with cpu device. + * When all layers are mkldnn layers, they could save internal data. + */ + // below MKLDNNMatrix buffers are all internal buffers MKLDNNMatrixPtr inVal_; MKLDNNMatrixPtr inGrad_; MKLDNNMatrixPtr outVal_; MKLDNNMatrixPtr outGrad_; + // below are external value and grad + MKLDNNMatrixPtr extInVal_; + MKLDNNMatrixPtr extInGrad_; + MKLDNNMatrixPtr extOutVal_; + MKLDNNMatrixPtr extOutGrad_; + // convert handle between external and internal buffers + std::shared_ptr cvtInVal_; + std::shared_ptr cvtInGrad_; + std::shared_ptr cvtOutVal_; + std::shared_ptr cvtOutGrad_; + + // weight and bias are always internal buffers MKLDNNMatrixPtr wgtVal_; MKLDNNMatrixPtr wgtGrad_; MKLDNNMatrixPtr biasVal_; @@ -88,6 +111,7 @@ public: oh_(0), ow_(0), needResetBwd_(true), + outputOnlyMKLDNN_(false), engine_(mkldnn::engine::cpu, 0), stream_(nullptr), fwd_(nullptr), @@ -96,89 +120,9 @@ public: ~MKLDNNLayer() {} - virtual bool init(const LayerMap& layerMap, - const ParameterMap& parameterMap) { - CHECK(FLAGS_use_mkldnn) << "MkldnnLayers only support use_mkldnn." 
- << "Please set WITH_MKLDNN=ON " - << "and set use_mkldnn=True"; - CHECK(!useGpu_) << "Do not support GPU yet"; - - // set device id before Layer::init - setDevice(MKLDNN_DEVICE); - // change param device to MKLDNN device - setParamsDevice(MKLDNN_DEVICE, parameterMap); - if (!Layer::init(layerMap, parameterMap)) { - return false; - } - setOutputMap(); - checkCPUOutputsNumber(); - - stream_.reset(new MKLDNNStream()); - engine_ = CPUEngine::Instance().getEngine(); - return true; - } - - void forward(PassType passType) override { - passType_ = passType; - - { - REGISTER_TIMER_INFO("mkldnn_FwdTimer", getName().c_str()); - CHECK(!inputLayers_.empty()); - copySeqInfoToOutputs(); - size_t elemenCnt = inputLayers_[0]->getOutput().value->getElementCnt(); - if (inputElemenCnt_ != elemenCnt) { - VLOG(MKLDNN_BASE) << getName() << " reset mkldnn forward"; - // reset when input total sizes changed, not only the batchsize - inputElemenCnt_ = elemenCnt; - pipelineFwd_.clear(); - reshape(bs_, ic_, ih_, iw_, oc_, oh_, ow_); - resetFwd(pipelineFwd_, inVal_, wgtVal_, biasVal_, outVal_); - convertWeightsFromPaddle(); - needResetBwd_ = true; - } - - if (inputLayers_[0]->getType() == "data") { - updateInputData(); - } - - stream_->submit(pipelineFwd_); - } - - /* activation */ { - REGISTER_TIMER_INFO("FwActTimer", getName().c_str()); - forwardActivation(); - } - } - - void backward(const UpdateCallback& callback) override { - if (needResetBwd_) { - VLOG(MKLDNN_BASE) << getName() << " reset mkldnn backward"; - pipelineBwd_.clear(); - pipelineMergeGrad_.clear(); - mergeGrad_ = nullptr; - resetBwd(pipelineBwd_, inGrad_, wgtGrad_, biasGrad_, outGrad_); - needResetBwd_ = false; - } - - // merge grad must before backward activation - if (mergeGrad_) { - REGISTER_TIMER_INFO("MergeBpGrad", getName().c_str()); - stream_->submit(pipelineMergeGrad_); - } - { - REGISTER_TIMER_INFO("BpActTimer", getName().c_str()); - backwardActivation(); - } - { - REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str()); - stream_->submit(pipelineBwd_); - } - - { - REGISTER_TIMER_INFO("WeightUpdate", getName().c_str()); - updateWeights(callback); - } - } + virtual bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); + virtual void forward(PassType passType); + virtual void backward(const UpdateCallback& callback); /** * reshape the input image sizes @@ -189,7 +133,7 @@ public: int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) = 0; /** - * reset the mkldnn forward primitve and memory + * reset the mkldnn forward primitve and memories * only would be called when input size changes */ virtual void resetFwd(std::vector& pipeline, @@ -199,7 +143,7 @@ public: MKLDNNMatrixPtr& out) = 0; /** - * reset the mkldnn backward primitve and memory for mkldnn fc + * reset the mkldnn backward primitve and memories * only would be called when needed */ virtual void resetBwd(std::vector& pipeline, @@ -208,12 +152,6 @@ public: MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) = 0; - /** - * Update input value data when input layer is "data" type. - * Since the input value data address might be changed. - */ - virtual void updateInputData() {} - /** * Update weights and biases if necessary. 
*/ @@ -240,131 +178,78 @@ protected: /** * reshape the input image sizes and input batchsize */ - virtual void reshapeInput(int& batchsize, int& height, int& width) { - const Argument& input = inputLayers_[0]->getOutput(); - batchsize = input.getBatchSize(); - int h = input.getFrameHeight(); - int w = input.getFrameWidth(); - if (h != 0) { - height = h; - } - if (w != 0) { - width = w; - } - } + void reshapeInput(int& batchsize, int& height, int& width); /** * reshape output image sizes */ - virtual void reshapeOutput(size_t height, size_t width) { - output_.setFrameHeight(height); - output_.setFrameWidth(width); - for (size_t i = 0; i < outputOtherDevice_.size(); i++) { - outputOtherDevice_[i].setFrameHeight(height); - outputOtherDevice_[i].setFrameWidth(width); - } - } + void reshapeOutput(size_t height, size_t width); /** - * reset the output grad matrix from primitive desc. - * and reset the merge grad primitive if needed. - * note: when this layer has serval outputs, - * it could not be mixed with cpu device, - * since it can not get memory desc from cpu device. + * reset MKLDNNMatrix from Matrix and internal primitive desc. + * reset nullptr if matrix or primitive desc is empty */ - virtual void resetOutGrad(MKLDNNMatrixPtr& out, - mkldnn::memory::primitive_desc pd) { - CHECK(outputIsOnlyMKLDNN()) << "do not support mixed with other device yet"; - mergeGrad_ = nullptr; - pipelineMergeGrad_.clear(); - out = MKLDNNMatrix::create(output_.grad, pd); - if (outputMap_.size() <= 1) { - return; - } - std::vector scales(outputMap_.size(), 1.0); - std::vector srcPDs; - std::vector srcs; - for (auto it = outputMap_.begin(); it != outputMap_.end(); ++it) { - MKLDNNMatrixPtr src = - std::dynamic_pointer_cast(it->second->grad); - VLOG(MKLDNN_BASE) << getName() << " has output grad " << it->first; - CHECK(src) << "should be MKLDNNMatrix"; - auto srcDims = src->getDims(); - auto dstDims = out->getDims(); - CHECK_EQ(srcDims.size(), dstDims.size()); - for (size_t i = 0; i < srcDims.size(); ++i) { - CHECK_EQ(srcDims[i], dstDims[i]); - } - srcPDs.push_back(src->getPrimitiveDesc()); - srcs.push_back(*src); - } + void resetWithMatrix(MKLDNNMatrixPtr& dnn, + const MatrixPtr& mat, + mkldnn::memory::primitive_desc pd); - // TODO(TJ): remove me when mkldnn sum support different formats - for (size_t i = 1; i < srcPDs.size(); ++i) { - CHECK(srcPDs[0] == srcPDs[i]); - } - tmpOutGrad_ = nullptr; - tmpCvt_ = nullptr; - if (out->getPrimitiveDesc() != srcPDs[0]) { - tmpOutGrad_ = MKLDNNMatrix::create(nullptr, srcPDs[0]); - tmpCvt_ = MKLDNNMatrix::createReorder(tmpOutGrad_, out); - CHECK(tmpCvt_); - pipelineMergeGrad_.push_back(*tmpCvt_); - } else { - tmpOutGrad_ = out; - } + /** + * reset input value from input MKLDNNMatrix and internal primitive desc. + * reset both internal and external buffer and create reorder if necessary. + */ + void resetInValue( + MKLDNNMatrixPtr& in, + const std::shared_ptr& intPD = nullptr); - auto sumPD = mkldnn::sum::primitive_desc( - tmpOutGrad_->getMemoryDesc(), scales, srcPDs); - mergeGrad_.reset(new mkldnn::sum(sumPD, srcs, *tmpOutGrad_)); - pipelineMergeGrad_.insert(pipelineMergeGrad_.begin(), *mergeGrad_); - } + /** + * reset output value from internal primitive desc. + * reset both internal and external buffer and create reorder if necessary. + */ + void resetOutValue(MKLDNNMatrixPtr& out, + mkldnn::memory::primitive_desc intPD); /** - * reset input grad from primitive desc. 
- * this function is avaiable for input is only mkldnn - * or input do not care cpu device + * reset input grad from internal primitive desc. + * reset both internal and external buffer and create reorder if necessary. */ - virtual void resetInGrad(MKLDNNMatrixPtr& in, - mkldnn::memory::primitive_desc pd) { - LayerPtr& input = inputLayers_[0]; - const MatrixPtr& grad = - input->getOutputMapSize() > 1 ? nullptr : input->getOutput().grad; - in = MKLDNNMatrix::create(grad, pd); - Argument& arg = input->getOutput(this->getName()); - arg.grad = std::dynamic_pointer_cast(in); - } + void resetInGrad(MKLDNNMatrixPtr& in, mkldnn::memory::primitive_desc intPD); /** - * print info about sizes + * reset output grad from internal primitive desc. + * merge grad if necessary. + * reset both internal and external buffer and create reorder if necessary. + * note: about merge grad, when this layer has several outputs, + * it could not be mixed with cpu device, + * since it can not get memory desc from cpu device. */ - virtual void printSizeInfo() { - VLOG(MKLDNN_SIZES) << getName() << ": bs: " << bs_ << ", ic: " << ic_ - << ", ih: " << ih_ << ", iw: " << iw_ << ", oc: " << oc_ - << ", oh: " << oh_ << ", ow: " << ow_; - } + void resetOutGrad(MKLDNNMatrixPtr& out, mkldnn::memory::primitive_desc intPD); /** - * Print the mkldnn memory format flow of value + * reset the merge grad primitive if necessary. + * note: do not support the grads mixed with cpu device, + * since it can not get memory desc from cpu device. */ - virtual void printValueFormatFlow() { - if (inVal_ && outVal_) { - VLOG(MKLDNN_FMTS) << inVal_->getFormat() << " >>> " - << outVal_->getFormat(); - } - } + void resetMergeGrad(MKLDNNMatrixPtr& out); +protected: /** - * Print the mkldnn memory format flow of grad + * Set deviceId of this layer. */ - virtual void printGradFormatFlow() { - if (inGrad_ && outGrad_) { - VLOG(MKLDNN_FMTS) << inGrad_->getFormat() << " <<< " - << outGrad_->getFormat(); + void setDevice(int id) { deviceId_ = id; } + + /** + * check the format is nchw or nc, + * which is supported by Paddle default memory layout + */ + bool isPaddleFormat(mkldnn::memory::format fmt) { + if (fmt == mkldnn::memory::format::nchw || + fmt == mkldnn::memory::format::nc) { + return true; + } else { + return false; } } -protected: /** * If input only has MKLDNN device. * Otherwise, only support the previous layer using CPU device. @@ -374,7 +259,6 @@ protected: if (prevDevice == MKLDNN_DEVICE) { return true; } else { - // do not support GPU yet CHECK_EQ(prevDevice, CPU_DEVICE) << "Only support CPU yet"; return false; } @@ -389,15 +273,82 @@ protected: CHECK_EQ(outputOtherDevice_[i].deviceId, CPU_DEVICE) << "Only support other device is CPU yet"; } - return outputOtherDevice_.size() == 0; + outputOnlyMKLDNN_ = outputOtherDevice_.size() == 0; + return outputOnlyMKLDNN_; } /** - * Set deviceId of this layer. 
+ * print info about sizes */ - void setDevice(int id) { deviceId_ = id; } + virtual void printSizeInfo() { + VLOG(MKLDNN_SIZES) << getName() << ": bs: " << bs_ << ", ic: " << ic_ + << ", ih: " << ih_ << ", iw: " << iw_ << ", oc: " << oc_ + << ", oh: " << oh_ << ", ow: " << ow_; + } + + /** + * print the mkldnn memory format of value + */ + virtual void printValueFormat() { + if (extInVal_) { + VLOG(MKLDNN_FMTS) << extInVal_->getFormat() << " >>> "; + } + if (inVal_) { + VLOG(MKLDNN_FMTS) << inVal_->getFormat() << " >>>"; + } + if (outVal_) { + VLOG(MKLDNN_FMTS) << outVal_->getFormat() << " >>> "; + } + if (extOutVal_) { + VLOG(MKLDNN_FMTS) << extOutVal_->getFormat(); + } + if (wgtVal_) { + VLOG(MKLDNN_FMTS) << "Weight value format: " << wgtVal_->getFormat(); + } + if (biasVal_) { + VLOG(MKLDNN_FMTS) << "Bias value format: " << biasVal_->getFormat(); + } + } + + /** + * print the mkldnn memory format of grad + */ + virtual void printGradFormat() { + if (extOutGrad_) { + VLOG(MKLDNN_FMTS) << extOutGrad_->getFormat(); + } + if (outGrad_) { + VLOG(MKLDNN_FMTS) << outGrad_->getFormat() << " <<< "; + } + if (inGrad_) { + VLOG(MKLDNN_FMTS) << inGrad_->getFormat() << " <<<"; + } + if (extInGrad_) { + VLOG(MKLDNN_FMTS) << extInGrad_->getFormat() << " <<< "; + } + if (wgtGrad_) { + VLOG(MKLDNN_FMTS) << "Weight grad format: " << wgtGrad_->getFormat(); + } + if (biasGrad_) { + VLOG(MKLDNN_FMTS) << "Bias grad format: " << biasGrad_->getFormat(); + } + } private: + /** + * clear all grad + */ + void clearGrads() { + if (output_.grad) { + output_.grad->zeroMem(); + } + for (size_t i = 0; i < outputOtherDevice_.size(); i++) { + if (outputOtherDevice_[i].grad) { + outputOtherDevice_[i].grad->zeroMem(); + } + } + } + /** * Set deviceId of the params used in this layer. */ @@ -432,6 +383,19 @@ private: } } + /** + * if have cpu device, share value and grad data with output_ + */ + void shareCPUDevice() { + if (outputIsOnlyMKLDNN()) { + return; + } + for (size_t i = 0; i < outputOtherDevice_.size(); i++) { + outputOtherDevice_[i].value = output_.value; + outputOtherDevice_[i].grad = output_.grad; + } + } + /** * Check the cpu device number of outputOtherDevice_. * should have only one at most. 
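Note on the MKLDNNLayer refactor above: each layer now keeps an internal buffer in whatever format MKL-DNN prefers plus an external buffer in Paddle's plain nchw/nc layout, and the `cvtInVal_`/`cvtOutVal_`/`cvtInGrad_`/`cvtOutGrad_` handles are reorder primitives converting between the two. The following is a minimal standalone sketch of that reorder pattern, assuming the MKL-DNN 0.x C++ API; the tensor shape and the nChw8c internal format are illustrative choices, not values taken from this patch.

```cpp
// Illustrative sketch only: the external-vs-internal buffer idea behind
// resetInValue()/resetOutValue(), written against the MKL-DNN 0.x C++ API.
// The shape (2x8x6x6) and the nChw8c internal format are made-up examples.
#include <vector>
#include "mkldnn.hpp"

int main() {
  using namespace mkldnn;
  engine eng(engine::cpu, 0);

  const int n = 2, c = 8, h = 6, w = 6;
  std::vector<float> ext_data(n * c * h * w, 1.f);

  // external buffer: Paddle's default nchw layout, backed by user data
  memory::desc ext_md({n, c, h, w}, memory::data_type::f32, memory::format::nchw);
  memory ext_mem(memory::primitive_desc(ext_md, eng), ext_data.data());

  // internal buffer: a blocked layout an MKL-DNN primitive may prefer
  memory::desc int_md({n, c, h, w}, memory::data_type::f32, memory::format::nChw8c);
  memory int_mem(memory::primitive_desc(int_md, eng));

  // the reorder plays the role of cvtInVal_/cvtOutVal_ in MKLDNNLayer
  std::vector<primitive> pipeline;
  pipeline.push_back(reorder(ext_mem, int_mem));
  stream(stream::kind::eager).submit(pipeline).wait();
  return 0;
}
```

In the layer code the conversion is only queued when the internal primitive descriptor differs from the external one; otherwise the external and internal matrices refer to the same buffer and no reorder is needed.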
diff --git a/paddle/gserver/layers/MKLDNNPoolLayer.cpp b/paddle/gserver/layers/MKLDNNPoolLayer.cpp index 5606aae80ce8e9a1e571d3c057c471b26a59d032..6e89260f49979d4edb4da138507a73dc2bf120de 100644 --- a/paddle/gserver/layers/MKLDNNPoolLayer.cpp +++ b/paddle/gserver/layers/MKLDNNPoolLayer.cpp @@ -85,8 +85,6 @@ void MKLDNNPoolLayer::resetFwd(std::vector& pipeline, resetFwdPD(fwdPD_, in, out); resetFwdPipeline(pipeline, fwdPD_, in, out); - - printValueFormatFlow(); } void MKLDNNPoolLayer::resetBwd(std::vector& pipeline, @@ -101,64 +99,22 @@ void MKLDNNPoolLayer::resetBwd(std::vector& pipeline, resetBwdPD(pd, in, out); resetBwdPipeline(pipeline, pd, in, out); - - printGradFormatFlow(); -} - -void MKLDNNPoolLayer::updateInputData() { - inVal_->setData(getInputValue(0, CPU_DEVICE)->getData()); } void MKLDNNPoolLayer::resetFwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out) { resetInValue(in); - resetOutValue(out); -} - -void MKLDNNPoolLayer::resetInValue(MKLDNNMatrixPtr& in) { - if (inputIsOnlyMKLDNN()) { - const MatrixPtr& dnnIn = getInputValue(0); - in = std::dynamic_pointer_cast(dnnIn); - CHECK(in) << "Input should be MKLDNNMatrix"; - } else { - CHECK_EQ(getPrev(0)->getDeviceId(), CPU_DEVICE) << "Only support CPU yet"; - const MatrixPtr& cpuIn = getInputValue(0, CPU_DEVICE); - in = MKLDNNMatrix::create( - cpuIn, {bs_, ic_, ih_, iw_}, format::nchw, engine_); - } -} - -void MKLDNNPoolLayer::resetOutValue(MKLDNNMatrixPtr& out) { - CHECK(inVal_) << "Should reset input value first"; memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_}; - out = MKLDNNMatrix::create( - output_.value, outDims, inVal_->getFormat(), engine_); - - // create reorder if output value has cpu device and pd do not match - cpuOutVal_ = nullptr; - cvtOutVal_ = nullptr; - if (!outputIsOnlyMKLDNN()) { - const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).value; - cpuOutVal_ = MKLDNNMatrix::create(cpuOut, outDims, format::nchw, engine_); - if (cpuOutVal_->getPrimitiveDesc() != out->getPrimitiveDesc()) { - out = MKLDNNMatrix::create(nullptr, out->getPrimitiveDesc()); - cvtOutVal_ = MKLDNNMatrix::createReorder(out, cpuOutVal_); - CHECK(cvtOutVal_) << "should not be emptry"; - } else { - cpuOutVal_ = out; - } - output_.value = std::dynamic_pointer_cast(cpuOutVal_); - return; - } - output_.value = std::dynamic_pointer_cast(outVal_); + CHECK(in); + auto outPD = + MKLDNNMatrix::createPrimitiveDesc(outDims, in->getFormat(), engine_); + resetOutValue(out, outPD); } void MKLDNNPoolLayer::resetFwdPD(std::shared_ptr& pd, MKLDNNMatrixPtr in, MKLDNNMatrixPtr out) { - memory::dims inDims = memory::dims{bs_, ic_, ih_, iw_}; - memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_}; memory::dims kernels = memory::dims{fh_, fw_}; memory::dims strides = memory::dims{sh_, sw_}; memory::dims padL = memory::dims{ph_, pw_}; @@ -193,57 +149,26 @@ void MKLDNNPoolLayer::resetFwdPipeline( ? 
std::make_shared(pool_fwd(*pd, *in, *out, *workspace_)) : std::make_shared(pool_fwd(*pd, *in, *out)); pipeline.push_back(*fwd_); - - if (cvtOutVal_) { - pipeline.push_back(*cvtOutVal_); - } } void MKLDNNPoolLayer::resetBwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out) { - resetOutGrad(out); - - resetInGrad(in); -} -void MKLDNNPoolLayer::resetOutGrad(MKLDNNMatrixPtr& out) { - cpuOutGrad_ = nullptr; - cvtOutGrad_ = nullptr; - CHECK(outVal_); - if (outputIsOnlyMKLDNN()) { - MKLDNNLayer::resetOutGrad(out, outVal_->getPrimitiveDesc()); - } else { - const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).grad; - cpuOutGrad_ = MKLDNNMatrix::create( - cpuOut, memory::dims{bs_, oc_, oh_, ow_}, format::nchw, engine_); - if (cpuOutGrad_->getPrimitiveDesc() != outVal_->getPrimitiveDesc()) { - out = MKLDNNMatrix::create(output_.grad, outVal_->getPrimitiveDesc()); - cvtOutGrad_ = MKLDNNMatrix::createReorder(cpuOutGrad_, out); - CHECK(cvtOutGrad_) << "should not be emptry"; - } else { - // share the same data of CPU output - output_.grad->setData(cpuOut->getData()); - out = cpuOutGrad_; - } - } -} - -void MKLDNNPoolLayer::resetInGrad(MKLDNNMatrixPtr& in) { - in = nullptr; - if (inputLayers_[0]->getOutput().grad == nullptr) { - return; - } - CHECK(inVal_); - MKLDNNLayer::resetInGrad(in, inVal_->getPrimitiveDesc()); + CHECK(inVal_ && outVal_); + resetOutGrad(out, outVal_->getPrimitiveDesc()); + resetInGrad(in, inVal_->getPrimitiveDesc()); } void MKLDNNPoolLayer::resetBwdPD(std::shared_ptr& pd, MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out) { + pd = nullptr; + if (in == nullptr) { + return; + } memory::dims kernels = memory::dims{fh_, fw_}; memory::dims strides = memory::dims{sh_, sw_}; memory::dims padL = memory::dims{ph_, pw_}; memory::dims padR = getPaddingR(); - CHECK(in); CHECK(out); auto bwdDesc = pool_bwd::desc(poolAlgo_, in->getMemoryDesc(), @@ -261,8 +186,8 @@ void MKLDNNPoolLayer::resetBwdPipeline( std::shared_ptr& pd, MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out) { - if (cvtOutGrad_) { - pipeline.push_back(*cvtOutGrad_); + if (pd == nullptr) { + return; } bwdData_ = diff --git a/paddle/gserver/layers/MKLDNNPoolLayer.h b/paddle/gserver/layers/MKLDNNPoolLayer.h index 891e15a7efcdd2e54f61352efc1ba7345b91c76b..c5ec87828bfb28b4502b4ec6b47287089c514204 100644 --- a/paddle/gserver/layers/MKLDNNPoolLayer.h +++ b/paddle/gserver/layers/MKLDNNPoolLayer.h @@ -38,13 +38,6 @@ protected: // pooling_avg or pooling_max mkldnn::algorithm poolAlgo_; - // MKLDNNMatrixPtr which should be created from CPU Device - MKLDNNMatrixPtr cpuOutVal_; - MKLDNNMatrixPtr cpuOutGrad_; - // convert handle between CPU device and MKLDNN device - std::shared_ptr cvtOutVal_; - std::shared_ptr cvtOutGrad_; - // save forward primitive_desc, which can be used backward std::shared_ptr fwdPD_; // according to https://github.com/01org/mkl-dnn/blob/master/tests/gtests/ @@ -74,8 +67,6 @@ public: MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) override; - void updateInputData() override; - void printSizeInfo() override { MKLDNNLayer::printSizeInfo(); VLOG(MKLDNN_SIZES) << getName() << ": fh: " << fh_ << ", fw: " << fw_ @@ -90,8 +81,6 @@ protected: * reset pipeline. */ void resetFwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out); - void resetInValue(MKLDNNMatrixPtr& in); - void resetOutValue(MKLDNNMatrixPtr& out); void resetFwdPD(std::shared_ptr& pd, MKLDNNMatrixPtr in, MKLDNNMatrixPtr out); @@ -106,8 +95,6 @@ protected: * reset pipeline. 
*/ void resetBwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out); - void resetOutGrad(MKLDNNMatrixPtr& out); - void resetInGrad(MKLDNNMatrixPtr& in); void resetBwdPD(std::shared_ptr& pd, MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out); diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt index fcee19415c13e9731bd47eb53bbff9b52cf6450b..329536afaf6d69676e8c39fdf8b6b8cb87ade5fa 100644 --- a/paddle/gserver/tests/CMakeLists.txt +++ b/paddle/gserver/tests/CMakeLists.txt @@ -26,7 +26,10 @@ if(WITH_MKLDNN) test_MKLDNN.cpp MKLDNNTester.cpp LayerGradUtil.cpp) - add_test(NAME test_MKLDNN COMMAND test_MKLDNN) + add_test(NAME test_MKLDNN + COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python + ${CMAKE_CURRENT_BINARY_DIR}/test_MKLDNN + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) endif() ################ test_CRFLayerGrad #################### diff --git a/paddle/gserver/tests/MKLDNNTester.cpp b/paddle/gserver/tests/MKLDNNTester.cpp index eaebdd671cfa1b37e5efe149588ca23fdc402a8e..0a19fe23336ea943cb8a572dc40f8c0fbbd7236a 100644 --- a/paddle/gserver/tests/MKLDNNTester.cpp +++ b/paddle/gserver/tests/MKLDNNTester.cpp @@ -15,6 +15,7 @@ limitations under the License. */ #include "MKLDNNTester.h" #include "paddle/gserver/layers/MKLDNNBase.h" #include "paddle/gserver/layers/MKLDNNLayer.h" +#include "paddle/trainer/Trainer.h" namespace paddle { @@ -96,7 +97,7 @@ void MKLDNNTester::randomWgtDatas() { parameters_[REF][i]->randomize(); dnnValue->copyFrom(*refValue); - VLOG(lvl_) << "Random weight data " << parameters_[DNN][i]->getName(); + VLOG(MKLDNN_TESTS) << "Random weight " << parameters_[DNN][i]->getName(); printVector(dnnValue); } } @@ -108,7 +109,7 @@ void MKLDNNTester::randomBotDatas() { dataLayers_[REF][i]->getOutputValue()->randomizeUniform(); dataLayers_[DNN][i]->getOutputValue()->copyFrom( *(dataLayers_[REF][i]->getOutputValue())); - VLOG(lvl_) << "Input " << i << " data:"; + VLOG(MKLDNN_TESTS) << "Random Foward, InputValue " << i; printMatrix(dataLayers_[REF][i]->getOutputValue()); } } @@ -117,12 +118,12 @@ void MKLDNNTester::randomTopDiffs() { refLayer_->getOutputGrad()->randomizeUniform(); dnnLayer_->getOutput(CPU_DEVICE) .grad->copyFrom(*(refLayer_->getOutputGrad())); - VLOG(lvl_) << "Random Backward Input, TopDiff: "; + VLOG(MKLDNN_TESTS) << "Random Backward, OutputGrad"; printMatrix(refLayer_->getOutputGrad()); } void MKLDNNTester::checkForward() { - VLOG(MKLDNN_ALL) << "Check Forward"; + VLOG(MKLDNN_TESTS) << "Check Forward"; printTopDatas(); double delta = compareMatrix(dnnLayer_->getOutputValue(), refLayer_->getOutputValue()); @@ -130,15 +131,15 @@ void MKLDNNTester::checkForward() { } void MKLDNNTester::checkBackwardData() { - VLOG(MKLDNN_ALL) << "Check Backward Data"; + VLOG(MKLDNN_TESTS) << "Check Backward Data"; // TODO(TJ): uncomment me when batch norm ready // const bool isBN = dnnLayer_->getType() == "mkldnn_batch_norm"; for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) { const MatrixPtr& dnnDiff = dataLayers_[DNN][i]->getOutputGrad(); const MatrixPtr& refDiff = dataLayers_[REF][i]->getOutputGrad(); - VLOG(lvl_) << "Mkldnn Backward Output BotDiff " << i; + VLOG(MKLDNN_ALL) << "MKLDNN Backward Result: InputGrad " << i; printMatrix(dnnDiff); - VLOG(lvl_) << "Reference Backward Output BotDiff " << i; + VLOG(MKLDNN_ALL) << "Reference Backward Result: InputGrad " << i; printMatrix(refDiff); double delta = compareMatrix(dnnDiff, refDiff); @@ -152,7 +153,7 @@ void MKLDNNTester::checkBackwardData() { } void MKLDNNTester::checkBackwardWgts() { - 
VLOG(MKLDNN_ALL) << "Check Backward Weight"; + VLOG(MKLDNN_TESTS) << "Check Backward Weight"; CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size()); vector dnnWgts; // used to temply save mkldnn weights saveWgt(parameters_[DNN], dnnWgts); @@ -164,9 +165,11 @@ void MKLDNNTester::checkBackwardWgts() { for (size_t i = 0; i < parameters_[DNN].size(); ++i) { const VectorPtr& dnn = parameters_[DNN][i]->getBuf(PARAMETER_VALUE); const VectorPtr& ref = parameters_[REF][i]->getBuf(PARAMETER_VALUE); - VLOG(lvl_) << "Mkldnn Output weight " << parameters_[DNN][i]->getName(); + VLOG(MKLDNN_ALL) << "MKLDNN Result: weight value" + << parameters_[DNN][i]->getName(); printVector(dnn); - VLOG(lvl_) << "Reference Output weight " << parameters_[REF][i]->getName(); + VLOG(MKLDNN_ALL) << "Reference Result: weight value " + << parameters_[REF][i]->getName(); printVector(ref); double delta = compareVector(dnn, ref); @@ -239,7 +242,8 @@ void MKLDNNTester::printTopDatas() { } for (int n = 0; n < NUM; ++n) { - VLOG(lvl_) << testLayers_[n]->getType() << " forward output TopData: "; + VLOG(MKLDNN_ALL) << testLayers_[n]->getType() + << " Forward Result: OutputValue"; printMatrix(testLayers_[n]->getOutputValue()); } } @@ -251,7 +255,7 @@ void MKLDNNTester::printMatrix(const MatrixPtr& m) { std::ostringstream ostr; m->print(ostr); - VLOG(lvl_) << std::endl << ostr.str(); + VLOG(MKLDNN_ALL) << std::endl << ostr.str(); } void MKLDNNTester::printVector(const VectorPtr& v) { @@ -261,7 +265,7 @@ void MKLDNNTester::printVector(const VectorPtr& v) { std::ostringstream ostr; v->print(ostr, v->getSize()); - VLOG(lvl_) << std::endl << ostr.str(); + VLOG(MKLDNN_ALL) << std::endl << ostr.str(); } double MKLDNNTester::getDelta(const real* d1, @@ -313,8 +317,9 @@ void MKLDNNTester::runOnce() { UpdateCallback updateCallback = [](Parameter* para) { auto& grad = para->getBuf(PARAMETER_GRADIENT); auto& value = para->getBuf(PARAMETER_VALUE); - real lr = 1e-3; + real lr = 1e-2; value->add(*grad, lr); + grad->zeroMem(); }; randomTopDiffs(); dnnLayer_->backward(updateCallback); @@ -338,10 +343,9 @@ void MKLDNNTester::run(const TestConfig& dnn, size_t batchSize, size_t inputImgH, size_t inputImgW, + bool printDetails, size_t iter, - float epsilon, - bool log, - int level) { + float epsilon) { CHECK(dnn.layerConfig.type().compare(0, 7, "mkldnn_") == 0 || dnn.layerConfig.active_type().compare(0, 7, "mkldnn_") == 0) << "should be MKLDNN layer or MKLDNN activation"; @@ -357,10 +361,9 @@ void MKLDNNTester::run(const TestConfig& dnn, ih_ = inputImgH; iw_ = inputImgW; + log_ = printDetails; iter_ = iter; eps_ = epsilon; - log_ = log; - lvl_ = level; // Firstly test mkldnn init from PARAM_FORMAT_ORIGINAL weight reset(dnn, ref, batchSize); @@ -411,4 +414,146 @@ void MKLDNNTester::run(const TestConfig& dnn, } } +void MKLDNNTester::initArgument(DataIn& data, + const std::string& configPath, + const size_t iter) { + TrainerConfigHelper config(configPath); + size_t batchSize = config.getOptConfig().batch_size(); + data.inArgs.resize(iter); + data.outGrads.resize(iter); + data.paraValues.clear(); + for (const auto& layer_name : config.getModelConfig().input_layer_names()) { + auto layer_config = std::find_if(config.getModelConfig().layers().begin(), + config.getModelConfig().layers().end(), + [=](const LayerConfig& layer_config) { + return layer_config.name() == layer_name; + }); + CHECK(layer_config != config.getModelConfig().layers().end()); + + size_t layerSize = layer_config->size(); + for (size_t i = 0; i < iter; ++i) { + Argument arg; + arg.value 
= Matrix::create(batchSize, layerSize, false, false); + arg.grad = Matrix::create(batchSize, layerSize, false, false); + arg.value->randomizeUniform(); + arg.value->add(-0.5); + arg.value->sigmoid(*arg.value); + arg.grad->zeroMem(); + arg.ids = VectorT::create(batchSize, false); + arg.ids->rand(layerSize); + generateSequenceStartPositions(batchSize, arg.sequenceStartPositions); + data.inArgs[i].push_back(arg); + } + } + + for (const auto& layer_name : config.getModelConfig().output_layer_names()) { + auto layer_config = std::find_if(config.getModelConfig().layers().begin(), + config.getModelConfig().layers().end(), + [=](const LayerConfig& layer_config) { + return layer_config.name() == layer_name; + }); + CHECK(layer_config != config.getModelConfig().layers().end()); + + size_t layerSize = layer_config->size(); + for (size_t i = 0; i < iter; ++i) { + MatrixPtr grad = Matrix::create(batchSize, layerSize, false, false); + grad->randomizeUniform(); + data.outGrads[i].push_back(grad); + } + } + + for (const auto& para_config : config.getModelConfig().parameters()) { + VectorPtr value = Vector::create(para_config.size(), false); + value->randnorm(0, 2); + data.paraValues.push_back(value); + } +} + +void MKLDNNTester::getOutResult(const std::string& configPath, + DataIn& in, + DataOut& out, + bool use_mkldnn, + size_t iter) { + FLAGS_use_gpu = false; + FLAGS_use_mkldnn = use_mkldnn; + *ThreadLocalRand::getSeed() = 1; + srand(1); + + Trainer trainer; + auto config = std::make_shared(configPath); + trainer.init(config, false); + auto gradientMachine = trainer.getGradientMachine(); + std::vector parameters = gradientMachine->getParameters(); + for (size_t i = 0; i < in.paraValues.size(); i++) { + parameters[i]->getBuf(PARAMETER_VALUE)->copyFrom(*in.paraValues[i]); + } + UpdateCallback simpleUpdate = [](Parameter* para) { + auto& grad = para->getBuf(PARAMETER_GRADIENT); + auto& value = para->getBuf(PARAMETER_VALUE); + real lr = 1e-2; + value->add(*grad, lr); + grad->zeroMem(); + }; + + vector outArgs; + gradientMachine->start(); + out.outValues.clear(); + out.paraValues.clear(); + for (size_t i = 0; i < iter; ++i) { + VLOG(MKLDNN_TESTS) << "runing iteration " << i; + gradientMachine->forward(in.inArgs[i], &outArgs, PASS_TRAIN); + // save forward result + for (size_t k = 0; k < outArgs.size(); k++) { + MatrixPtr value = Matrix::create(outArgs[k].value->getHeight(), + outArgs[k].value->getWidth(), + false, + false); + value->copyFrom(*outArgs[k].value); + out.outValues.push_back(value); + } + + // random backward input + for (size_t k = 0; k < outArgs.size(); k++) { + outArgs[k].grad->copyFrom(*in.outGrads[i][k]); + } + gradientMachine->backward(simpleUpdate); + } + gradientMachine->finish(); + + // save param value + for (size_t i = 0; i < in.paraValues.size(); i++) { + VectorPtr val = Vector::create( + parameters[i]->getBuf(PARAMETER_VALUE)->getSize(), false); + val->copyFrom(*parameters[i]->getBuf(PARAMETER_VALUE)); + out.paraValues.push_back(val); + } +} + +void MKLDNNTester::compareResult(DataOut& ref, DataOut& dnn, float eps) { + CHECK_EQ(ref.outValues.size(), dnn.outValues.size()); + CHECK_EQ(ref.paraValues.size(), dnn.paraValues.size()); + VLOG(MKLDNN_TESTS) << "compare value size: " << ref.outValues.size(); + for (size_t i = 0; i < ref.outValues.size(); i++) { + EXPECT_LE(fabs(compareMatrix(ref.outValues[i], dnn.outValues[i])), eps); + } + VLOG(MKLDNN_TESTS) << "compare param size: " << ref.outValues.size(); + for (size_t i = 0; i < ref.paraValues.size(); i++) { + 
EXPECT_LE(fabs(compareVector(ref.paraValues[i], dnn.paraValues[i])), eps); + } +} + +void MKLDNNTester::runBranchesTest(const std::string& configPath, + size_t iter, + float eps) { + DataIn in; + initArgument(in, configPath, iter); + DataOut outCpu, outDnn; + VLOG(MKLDNN_TESTS) << "runing cpu network"; + getOutResult(configPath, in, outCpu, false, iter); + VLOG(MKLDNN_TESTS) << "runing mkldnn network"; + getOutResult(configPath, in, outDnn, true, iter); + + compareResult(outCpu, outDnn, eps); +} + } // namespace paddle diff --git a/paddle/gserver/tests/MKLDNNTester.h b/paddle/gserver/tests/MKLDNNTester.h index 171d176ee757f1164c38d86273bdf9e5aefeda06..c385d1c72717d120211f167b5c5eb9a557da3714 100644 --- a/paddle/gserver/tests/MKLDNNTester.h +++ b/paddle/gserver/tests/MKLDNNTester.h @@ -33,6 +33,17 @@ class MKLDNNTester { NUM = 2, // Number of total }; + struct DataIn { + std::vector> inArgs; + std::vector> outGrads; + std::vector paraValues; + }; + + struct DataOut { + std::vector outValues; + std::vector paraValues; + }; + protected: std::vector configs_; vector layerNames_; @@ -47,8 +58,6 @@ protected: size_t iter_; /// whether to print out the details bool log_; - /// vlog level to print the matrix details datas - int lvl_; /// epsilon float eps_; /// input image size, default 1 @@ -59,7 +68,6 @@ public: iter_ = iter; eps_ = epsilon; log_ = false; - lvl_ = MKLDNN_ALL; } ~MKLDNNTester() {} @@ -70,11 +78,20 @@ public: size_t batchSize, size_t inputImgH = 1, size_t inputImgW = 1, + bool printDetails = false, size_t iter = 3, - float epsilon = 1e-4, - bool log = false, - int level = MKLDNN_ALL); - void setLogLevel(int lvl) { lvl_ = lvl; } + float epsilon = 1e-4); + static void runBranchesTest(const std::string& configPath, + size_t iter = 3, + float eps = 1e-4); + static void initArgument(DataIn& data, + const std::string& configPath, + size_t iter = 3); + static void getOutResult(const std::string& configPath, + DataIn& in, + DataOut& out, + bool use_mkldnn, + size_t iter = 3); private: void reset(const TestConfig& dnn, const TestConfig& ref, size_t batchSize); @@ -101,8 +118,9 @@ private: void saveWgt(const vector& from, vector& to); void restoreWgt(const vector& from, vector& to); - double compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2); - double compareVector(const VectorPtr& v1, const VectorPtr& v2); + static double compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2); + static double compareVector(const VectorPtr& v1, const VectorPtr& v2); + static void compareResult(DataOut& ref, DataOut& dnn, float eps = 1e-4); /** * Get delta percent @@ -111,11 +129,11 @@ private: * else return sum(abs(a-b)) / sum(abs(b)) * The return value should be smaller than eps when passing. */ - double getDelta(const real* d1, - const real* d2, - size_t len, - const float failRate = 1e-3, - const float thres = 0.1); + static double getDelta(const real* d1, + const real* d2, + size_t len, + const float failRate = 1e-3, + const float thres = 0.1); }; } // namespace paddle diff --git a/paddle/gserver/tests/mkldnn_branches_conv.conf b/paddle/gserver/tests/mkldnn_branches_conv.conf new file mode 100644 index 0000000000000000000000000000000000000000..2628509db43e6a5f69a4f5ea956bffdc2837e32a --- /dev/null +++ b/paddle/gserver/tests/mkldnn_branches_conv.conf @@ -0,0 +1,56 @@ +# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle.trainer_config_helpers import * + +settings(batch_size=16) +channels = get_config_arg("channels", int, 2) + +def two_conv(input, group_name): + out1 = img_conv_layer(input=input, + name=group_name+'_conv1', + filter_size=1, + num_filters=channels, + padding=0, + shared_biases=True, + act=ReluActivation()) + + out2 = img_conv_layer(input=input, + name=group_name+'_conv2', + filter_size=3, + num_filters=channels, + padding=1, + shared_biases=True, + act=ReluActivation()) + return out1, out2 + +data = data_layer(name ="input", size=channels*16*16) + +conv = img_conv_layer(input=data, + num_channels=channels, + filter_size=3, + num_filters=channels, + padding=1, + shared_biases=True, + act=ReluActivation()) + +a1, a2 = two_conv(input=conv, group_name='a') + +concat = concat_layer(input=[a1, a2]) + +b1, b2 = two_conv(input=conv, group_name='b') + +addto = addto_layer(input=[b1, b2]) + +outputs([concat, addto]) diff --git a/paddle/gserver/tests/mkldnn_branches_fc.conf b/paddle/gserver/tests/mkldnn_branches_fc.conf new file mode 100644 index 0000000000000000000000000000000000000000..fb85425c2b63c7604d636e2b0c5d20d91fb5de1b --- /dev/null +++ b/paddle/gserver/tests/mkldnn_branches_fc.conf @@ -0,0 +1,58 @@ +# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle.trainer_config_helpers import * + +settings(batch_size=16) +channels = get_config_arg("channels", int, 2) + +def two_fc(input, group_name): + out1 = fc_layer(input=input, + name=group_name+'_fc1', + size=channels, + bias_attr=False, + act=LinearActivation()) + + out2 = fc_layer(input=input, + name=group_name+'_fc2', + size=channels, + bias_attr=False, + act=LinearActivation()) + return out1, out2 + +data = data_layer(name ="input", size=channels*16*16) + +conv = img_conv_layer(input=data, + num_channels=channels, + filter_size=3, + num_filters=channels, + padding=1, + shared_biases=True, + act=LinearActivation()) + +pool = img_pool_layer(input=conv, + pool_size=3, + stride=2, + padding=1, + pool_type=AvgPooling()) + +a1, a2 = two_fc(input=pool, group_name='a') + +concat = concat_layer(input=[a1, a2]) + +b1, b2 = two_fc(input=pool, group_name='b') + +addto = addto_layer(input=[b1, b2]) + +outputs([concat, addto]) diff --git a/paddle/gserver/tests/mkldnn_branches_pool.conf b/paddle/gserver/tests/mkldnn_branches_pool.conf new file mode 100644 index 0000000000000000000000000000000000000000..ca17c74752ab0777a69f818d9f43275a6140cb4c --- /dev/null +++ b/paddle/gserver/tests/mkldnn_branches_pool.conf @@ -0,0 +1,60 @@ +# Copyright (c) 2017 PaddlePaddle Authors. 
All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle.trainer_config_helpers import * + +settings(batch_size=16) +channels = get_config_arg("channels", int, 2) + +def two_pool(input, group_name): + out1 = img_pool_layer(input=input, + name=group_name+'_pool1', + pool_size=3, + stride=2, + padding=0, + pool_type=MaxPooling()) + + out2 = img_pool_layer(input=input, + name=group_name+'_pool2', + pool_size=5, + stride=2, + padding=1, + pool_type=MaxPooling()) + return out1, out2 + +data = data_layer(name ="input", size=channels*16*16) + +conv = img_conv_layer(input=data, + num_channels=channels, + filter_size=3, + num_filters=channels, + padding=1, + shared_biases=True, + act=LinearActivation()) + +pool = img_pool_layer(input=conv, + pool_size=3, + stride=1, + padding=1, + pool_type=AvgPooling()) + +a1, a2 = two_pool(input=pool, group_name='a') + +concat = concat_layer(input=[a1, a2]) + +b1, b2 = two_pool(input=pool, group_name='b') + +addto = addto_layer(input=[b1, b2]) + +outputs([concat, addto]) diff --git a/paddle/gserver/tests/test_MKLDNN.cpp b/paddle/gserver/tests/test_MKLDNN.cpp index a70b2f17f4f1130322f3c50d244f70fdcf34468b..6cb4ca5e08eab5b979e404c9e09dcfec11086c22 100644 --- a/paddle/gserver/tests/test_MKLDNN.cpp +++ b/paddle/gserver/tests/test_MKLDNN.cpp @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include +#include #include #include #include "MKLDNNTester.h" @@ -40,12 +41,13 @@ DECLARE_bool(use_mkldnn); struct testFcDesc { int bs; int ic; - int oc; int ih, iw; // oh == ow == 1 + int oc; }; static void getMKLDNNFcConfig(TestConfig& cfg, const testFcDesc& pm) { cfg.layerConfig.set_type("mkldnn_fc"); + cfg.layerConfig.set_active_type("relu"); cfg.layerConfig.set_size(pm.oc); cfg.inputDefs.push_back( {INPUT_DATA, @@ -86,6 +88,7 @@ struct testConvDesc { static void getMKLDNNConvConfig(TestConfig& cfg, const testConvDesc& pm) { cfg.layerConfig.set_type("mkldnn_conv"); + cfg.layerConfig.set_active_type("relu"); cfg.layerConfig.set_num_filters(pm.oc); cfg.layerConfig.set_size(pm.oc * pm.oh * pm.ow); cfg.layerConfig.set_shared_biases(true); @@ -158,6 +161,7 @@ struct testPoolDesc { static void getMKLDNNPoolConfig(TestConfig& cfg, const testPoolDesc& pm) { cfg.layerConfig.set_type("mkldnn_pool"); + cfg.layerConfig.set_active_type("relu"); cfg.layerConfig.set_size(pm.ic * pm.oh * pm.ow); cfg.inputDefs.push_back( {INPUT_DATA, @@ -244,13 +248,26 @@ TEST(MKLDNNActivation, Activations) { } } -// TODO(TJ): add branch test +DECLARE_string(config_args); +TEST(MKLDNNLayer, branches) { + std::vector cases = {"conv", "pool", "fc"}; + for (auto name : cases) { + std::string config = "./gserver/tests/mkldnn_branches_" + name + ".conf"; + for (auto channels : {2, 32}) { + std::ostringstream oss; + oss << "channels=" << channels; + FLAGS_config_args = oss.str(); + MKLDNNTester::runBranchesTest(config); + } + } +} int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); FLAGS_use_gpu = false; FLAGS_use_mkldnn = true; initMain(argc, argv); + initPython(argc, argv); FLAGS_thread_local_rand_use_global_seed = true; srand(1); return RUN_ALL_TESTS(); diff --git a/paddle/gserver/tests/test_PyDataProvider2.py b/paddle/gserver/tests/test_PyDataProvider2.py index 2e6225519f4681238f4b40fb33764ead4a16b24a..0d0fe476ff5eac8bf8ad1c9fe09b32c1a8f73ebc 100644 --- a/paddle/gserver/tests/test_PyDataProvider2.py +++ b/paddle/gserver/tests/test_PyDataProvider2.py @@ -51,7 +51,10 @@ def test_sparse_non_value_no_seq(setting, filename): yield [(i + 1) * (j + 1) for j in xrange(10)] -@provider(input_types=[sparse_vector(30000, seq_type=SequenceType.NO_SEQUENCE)]) +@provider(input_types=[ + sparse_float_vector( + 30000, seq_type=SequenceType.NO_SEQUENCE) +]) def test_sparse_value_no_seq(setting, filename): for i in xrange(200): yield [((i + 1) * (j + 1), float(j) / float(i + 1)) for j in xrange(10)] diff --git a/paddle/math/MKLDNNMatrix.cpp b/paddle/math/MKLDNNMatrix.cpp index 0778bb63b7b3bca9b3d2647ca43dad72d783950a..21a8f73c3e650d4b3c3b86247594cd965f4ead35 100644 --- a/paddle/math/MKLDNNMatrix.cpp +++ b/paddle/math/MKLDNNMatrix.cpp @@ -18,7 +18,7 @@ using namespace mkldnn; // NOLINT namespace paddle { -MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m, memory::primitive_desc pd) { +MKLDNNMatrixPtr MKLDNNMatrix::create(memory::primitive_desc pd, MatrixPtr m) { memory::desc md = pd.desc(); size_t ndims = md.data.ndims; int* dims = md.data.dims; @@ -41,12 +41,12 @@ MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m, memory::primitive_desc pd) { return std::make_shared(cpuMatrix, pd); } -MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m, - memory::dims dims, +MKLDNNMatrixPtr MKLDNNMatrix::create(memory::dims dims, memory::format fmt, engine& eg, + MatrixPtr m, mkldnn::memory::data_type dtype) { - return create(m, memory::primitive_desc(memory::desc(dims, dtype, fmt), eg)); + return create(createPrimitiveDesc(dims, fmt, 
eg, dtype), m); } std::shared_ptr MKLDNNMatrix::createReorder(const MKLDNNMatrixPtr& src, diff --git a/paddle/math/MKLDNNMatrix.h b/paddle/math/MKLDNNMatrix.h index c843115eb9a5be50d6ff873f1510844228c9d89f..fe755d096da9713e39581a909e5d21aa93d69f0f 100644 --- a/paddle/math/MKLDNNMatrix.h +++ b/paddle/math/MKLDNNMatrix.h @@ -40,24 +40,37 @@ public: /** * Create MKLDNNMatrix from a MatrixPtr and memory primitive_desc */ - static MKLDNNMatrixPtr create(MatrixPtr m, mkldnn::memory::primitive_desc pd); + static MKLDNNMatrixPtr create(mkldnn::memory::primitive_desc pd, + MatrixPtr m = nullptr); /** * Create MKLDNNMatrix from a MatrixPtr and memory details info */ static MKLDNNMatrixPtr create( - MatrixPtr m, mkldnn::memory::dims dims, mkldnn::memory::format fmt, mkldnn::engine& eg, + MatrixPtr m = nullptr, mkldnn::memory::data_type dtype = mkldnn::memory::data_type::f32); + /** + * Create primitive descriptor. + * default with f32 dtype + */ + static mkldnn::memory::primitive_desc createPrimitiveDesc( + const mkldnn::memory::dims dims, + const mkldnn::memory::format& fmt, + const mkldnn::engine& eg, + const mkldnn::memory::data_type& dtype = mkldnn::memory::data_type::f32) { + return mkldnn::memory::primitive_desc(memory::desc(dims, dtype, fmt), eg); + } + /** * Create Memory descriptor. * default with any format and f32 dtype */ static mkldnn::memory::desc createMemoryDesc( - const mkldnn::memory::dims& dims, + const mkldnn::memory::dims dims, const mkldnn::memory::format& fmt = mkldnn::memory::format::any, const mkldnn::memory::data_type& dtype = mkldnn::memory::data_type::f32) { return mkldnn::memory::desc(dims, dtype, fmt); diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc index 5087c02385f7f37d78d134b739f3f22522977fb8..8e561528f0e7e6ff524fc51b4776efc4e5bd28cd 100644 --- a/paddle/memory/memory.cc +++ b/paddle/memory/memory.cc @@ -14,11 +14,6 @@ limitations under the License. 
*/ #include "paddle/memory/memory.h" -#include // for transform -#include // for memcpy -#include // for unique_ptr -#include // for call_once - #include "glog/logging.h" #include "paddle/memory/detail/buddy_allocator.h" @@ -32,19 +27,14 @@ namespace memory { using BuddyAllocator = detail::BuddyAllocator; -std::once_flag cpu_allocator_flag; -std::once_flag gpu_allocator_flag; - BuddyAllocator* GetCPUBuddyAllocator() { - static std::unique_ptr a{nullptr}; - - std::call_once(cpu_allocator_flag, [&]() { - a.reset(new BuddyAllocator(new detail::CPUAllocator, - platform::CpuMinChunkSize(), - platform::CpuMaxChunkSize())); - }); - - return a.get(); + static detail::BuddyAllocator* a = nullptr; + if (a == nullptr) { + a = new detail::BuddyAllocator(new detail::CPUAllocator, + platform::CpuMinChunkSize(), + platform::CpuMaxChunkSize()); + } + return a; } template <> @@ -65,35 +55,24 @@ size_t Used(platform::CPUPlace place) { #ifdef PADDLE_WITH_CUDA BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { - using BuddyAllocVec = std::vector; - static std::unique_ptr as{ - new BuddyAllocVec, [](BuddyAllocVec* p) { - std::for_each(p->begin(), p->end(), - [](BuddyAllocator* p) { delete p; }); - }}; - - // GPU buddy allocators - auto& allocators = *as.get(); - - // GPU buddy allocator initialization - std::call_once(gpu_allocator_flag, [&]() { + static BuddyAllocator** as = NULL; + if (as == NULL) { int gpu_num = platform::GetCUDADeviceCount(); - allocators.reserve(gpu_num); + as = new BuddyAllocator*[gpu_num]; for (int gpu = 0; gpu < gpu_num; gpu++) { platform::SetDeviceId(gpu); - allocators.emplace_back(new BuddyAllocator(new detail::GPUAllocator, - platform::GpuMinChunkSize(), - platform::GpuMaxChunkSize())); + as[gpu] = new BuddyAllocator(new detail::GPUAllocator, + platform::GpuMinChunkSize(), + platform::GpuMaxChunkSize()); } VLOG(3) << "\n\nNOTE: each GPU device use " << FLAGS_fraction_of_gpu_memory_to_use * 100 << "% of GPU memory.\n" << "You can set environment variable '" << platform::kEnvFractionGpuMemoryToUse << "' to change the fraction of GPU usage.\n\n"; - }); - + } platform::SetDeviceId(gpu_id); - return allocators[gpu_id]; + return as[gpu_id]; } template <> diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index 75fcc1cda165197fc4413efc6bbbc440088cb4cd..f97bc837dca09060c55cae6a5524c49cd69df28b 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -115,7 +115,8 @@ set(DEPS_OPS softmax_with_cross_entropy_op sum_op pool_op - pool_with_index_op) + pool_with_index_op + lstm_op) op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc @@ -126,6 +127,7 @@ op_library(softmax_with_cross_entropy_op DEPS cross_entropy softmax) op_library(sum_op DEPS net_op) op_library(pool_op DEPS pooling) op_library(pool_with_index_op DEPS pooling) +op_library(lstm_op DEPS sequence2batch lstm_compute) list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS}) foreach(src ${GENERAL_OPS}) diff --git a/paddle/operators/accuracy_op.cc b/paddle/operators/accuracy_op.cc index c5fb113e0f4455ec852fd48542ea2917398df5f3..e0a00ecaf04335800eab9e2e5a03628a2ce2ca8d 100644 --- a/paddle/operators/accuracy_op.cc +++ b/paddle/operators/accuracy_op.cc @@ -21,7 +21,6 @@ class AccuracyOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Inference"), "Input(Inference) of AccuracyOp should not be null."); @@ -70,5 +69,8 
@@ information, or not. But the output only shares the LoD with input `Inference`. namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(accuracy, ops::AccuracyOp, ops::AccuracyOpMaker); -REGISTER_OP_CPU_KERNEL(accuracy, - ops::AccuracyKernel); +REGISTER_OP_CPU_KERNEL( + accuracy, ops::AccuracyKernel, + ops::AccuracyKernel, + ops::AccuracyKernel, + ops::AccuracyKernel); diff --git a/paddle/operators/accuracy_op.cu b/paddle/operators/accuracy_op.cu index 0ca9ef941d4cb15619caea2b6baed197e4b15e5a..54e6ab99dc8c8ff1afbc636e6595cd67fb64eccf 100644 --- a/paddle/operators/accuracy_op.cu +++ b/paddle/operators/accuracy_op.cu @@ -21,9 +21,9 @@ namespace paddle { namespace operators { using platform::PADDLE_CUDA_NUM_THREADS; -template -__global__ void AccuracyCudaKernel(const int N, const int D, const int* Xdata, - const int* labeldata, float* accuracy) { +template +__global__ void AccuracyCudaKernel(const int N, const int D, const T* Xdata, + const T* labeldata, float* accuracy) { int count = 0; __shared__ int total[BlockSize]; @@ -57,8 +57,8 @@ class AccuracyOpCUDAKernel : public framework::OpKernel { auto* accuracy = ctx.Output("Accuracy"); // FIXME(typhoonzero): only support indices currently // if add support for output values, how to detect the data type? - const int* inference_data = inference->data(); - const int* label_data = label->data(); + const T* inference_data = inference->data(); + const T* label_data = label->data(); float* accuracy_data = accuracy->mutable_data(ctx.GetPlace()); size_t num_samples = inference->dims()[0]; @@ -69,7 +69,7 @@ class AccuracyOpCUDAKernel : public framework::OpKernel { return; } - AccuracyCudaKernel<<< + AccuracyCudaKernel<<< 1, PADDLE_CUDA_NUM_THREADS, 0, reinterpret_cast( ctx.device_context()) @@ -81,5 +81,7 @@ class AccuracyOpCUDAKernel : public framework::OpKernel { } // namespace operators } // namespace paddle -REGISTER_OP_GPU_KERNEL(accuracy, - paddle::operators::AccuracyOpCUDAKernel); +REGISTER_OP_GPU_KERNEL(accuracy, paddle::operators::AccuracyOpCUDAKernel, + paddle::operators::AccuracyOpCUDAKernel, + paddle::operators::AccuracyOpCUDAKernel, + paddle::operators::AccuracyOpCUDAKernel); diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index 84c3775b4fc2602e5df9bb454d21b318b8fda493..ee4f9b0ef29cc73907bc09fb6014850cb4e58a67 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -21,7 +21,6 @@ class ActivationOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { ctx->SetOutputDim("Y", ctx->GetInputDim("X")); ctx->ShareLoD("X", /*->*/ "Y"); @@ -32,7 +31,6 @@ class ActivationOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("Y")); } diff --git a/paddle/operators/adadelta_op.cc b/paddle/operators/adadelta_op.cc index cf1bca1658f3fb4cd14465791a0f94865871a120..24e419b532d97bc16ab96dad418d6e73c03f30a0 100644 --- a/paddle/operators/adadelta_op.cc +++ b/paddle/operators/adadelta_op.cc @@ -21,7 +21,6 @@ class AdadeltaOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Param"), 
"Input(Param) of AdadeltaOp should not be null."); diff --git a/paddle/operators/adagrad_op.cc b/paddle/operators/adagrad_op.cc index a17747efb79d76a6beffb27bfc03ee1b62c5618d..bc081f87dcab0dcd8ef329dcb1f66b627c82b4a2 100644 --- a/paddle/operators/adagrad_op.cc +++ b/paddle/operators/adagrad_op.cc @@ -21,7 +21,6 @@ class AdagradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Param"), "Input(Param) of AdagradOp should not be null."); diff --git a/paddle/operators/adam_op.cc b/paddle/operators/adam_op.cc index 293b37b7750427cb88efb6dfd5a02dcf7ede24ac..3572de06bd60f7979e3bfbf39856b04942ce81c0 100644 --- a/paddle/operators/adam_op.cc +++ b/paddle/operators/adam_op.cc @@ -21,7 +21,6 @@ class AdamOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Param"), "Input(Param) of AdamOp should not be null."); @@ -44,10 +43,6 @@ class AdamOp : public framework::OperatorWithKernel { "Output(Moment1Out) of AdamOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Moment2Out"), "Output(Moment2Out) of AdamOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Beta1PowOut"), - "Output(Beta1PowOut) of AdamOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Beta2PowOut"), - "Output(Beta2PowOut) of AdamOp should not be null."); auto lr_dims = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, @@ -73,8 +68,6 @@ class AdamOp : public framework::OperatorWithKernel { ctx->SetOutputDim("ParamOut", param_dims); ctx->SetOutputDim("Moment1Out", param_dims); ctx->SetOutputDim("Moment2Out", param_dims); - ctx->SetOutputDim("Beta1PowOut", beta1_pow_dims); - ctx->SetOutputDim("Beta2PowOut", beta2_pow_dims); } }; @@ -93,8 +86,6 @@ class AdamOpMaker : public framework::OpProtoAndCheckerMaker { AddOutput("ParamOut", "(Tensor) Output parameter"); AddOutput("Moment1Out", "(Tensor) Output first moment"); AddOutput("Moment2Out", "(Tensor) Output second moment"); - AddOutput("Beta1PowOut", "(Tensor) Output beta1 power accumulator"); - AddOutput("Beta2PowOut", "(Tensor) Output beta2 power accumulator"); AddAttr("beta1", "(float, default 0.9) " @@ -122,10 +113,8 @@ Adam updates: moment1_out = beta1 * moment1 + (1 − beta1) * grad moment2_out = beta2 * moment2 + (1 − beta2) * grad * grad -beta1_pow_out = beta1_pow * beta1 -beta2_pow_out = beta2_pow * beta2 learning_rate_t = learning_rate_t * - sqrt(1 - beta2_pow_out) / (1 - beta1_pow_out) + sqrt(1 - beta2_pow) / (1 - beta1_pow) param_out = param - learning_rate_t * moment1/ (sqrt(moment2) + epsilon) References: diff --git a/paddle/operators/adam_op.h b/paddle/operators/adam_op.h index 789c2f14b32478bf9ddc967fc5725bcf65ed2146..45938006db1231a7a134964d729df6ca114d4dbe 100644 --- a/paddle/operators/adam_op.h +++ b/paddle/operators/adam_op.h @@ -26,14 +26,10 @@ class AdamOpKernel : public framework::OpKernel { auto param_out_tensor = ctx.Output("ParamOut"); auto moment1_out_tensor = ctx.Output("Moment1Out"); auto moment2_out_tensor = ctx.Output("Moment2Out"); - auto beta1_pow_out_tensor = ctx.Output("Beta1PowOut"); - auto beta2_pow_out_tensor = ctx.Output("Beta2PowOut"); param_out_tensor->mutable_data(ctx.GetPlace()); moment1_out_tensor->mutable_data(ctx.GetPlace()); 
moment2_out_tensor->mutable_data(ctx.GetPlace()); - beta1_pow_out_tensor->mutable_data(ctx.GetPlace()); - beta2_pow_out_tensor->mutable_data(ctx.GetPlace()); float beta1 = ctx.Attr("beta1"); float beta2 = ctx.Attr("beta2"); @@ -56,18 +52,13 @@ class AdamOpKernel : public framework::OpKernel { auto param_out = framework::EigenVector::Flatten(*param_out_tensor); auto moment1_out = framework::EigenVector::Flatten(*moment1_out_tensor); auto moment2_out = framework::EigenVector::Flatten(*moment2_out_tensor); - auto beta1_pow_out = - framework::EigenVector::Flatten(*beta1_pow_out_tensor); - auto beta2_pow_out = - framework::EigenVector::Flatten(*beta2_pow_out_tensor); auto place = ctx.GetEigenDevice(); moment1_out.device(place) = beta1 * moment1 + (1 - beta1) * grad; moment2_out.device(place) = beta2 * moment2 + (1 - beta2) * grad.square(); - beta1_pow_out.device(place) = beta1_pow * beta1; - beta2_pow_out.device(place) = beta2_pow * beta2; + // All of these are tensors of 1 element - auto lr_t = lr * (1 - beta2_pow_out).sqrt() / (1 - beta1_pow_out); + auto lr_t = lr * (1 - beta2_pow).sqrt() / (1 - beta1_pow); // Eigen does not support automatic broadcast // Get dimensions of moment vector to broadcast lr_t Eigen::DSizes m_dsize(moment1_out_tensor->numel()); diff --git a/paddle/operators/adamax_op.cc b/paddle/operators/adamax_op.cc index 5cf727742c770f8f0d59cd49345c5c055722a56c..ff2565774115571166712b03c8990e5bf8de12a5 100644 --- a/paddle/operators/adamax_op.cc +++ b/paddle/operators/adamax_op.cc @@ -21,7 +21,6 @@ class AdamaxOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Param"), "Input(Param) of AdamaxOp should not be null."); @@ -42,8 +41,6 @@ class AdamaxOp : public framework::OperatorWithKernel { "Output(MomentOut) of AdamaxOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("InfNormOut"), "Output(InfNormOut) of AdamaxOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Beta1PowOut"), - "Output(Beta1PowOut) of AdamaxOp should not be null."); auto lr_dims = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, @@ -65,7 +62,6 @@ class AdamaxOp : public framework::OperatorWithKernel { ctx->SetOutputDim("ParamOut", param_dims); ctx->SetOutputDim("MomentOut", param_dims); ctx->SetOutputDim("InfNormOut", param_dims); - ctx->SetOutputDim("Beta1PowOut", beta1_pow_dims); } }; @@ -87,7 +83,6 @@ class AdamaxOpMaker : public framework::OpProtoAndCheckerMaker { AddOutput("InfNormOut", "(Tensor) " "Output exponentially weighted infinity norm"); - AddOutput("Beta1PowOut", "(Tensor) Output beta1 power accumulator"); AddAttr("beta1", "(float, default 0.9) " @@ -114,8 +109,7 @@ Adamax updates: moment_out = beta1 * moment + (1 - beta1) * grad inf_norm_out = max(beta2 * inf_norm + epsilon, abs(grad)) -beta1_pow_out = beta1_pow * beta1 -learning_rate_t = learning_rate/(1 - beta1_pow_out) +learning_rate_t = learning_rate/(1 - beta1_pow) param_out = param - learning_rate_t * moment_out/inf_norm_out The original paper does not have an epsilon attribute. 
diff --git a/paddle/operators/adamax_op.h b/paddle/operators/adamax_op.h index 9677b1bb786002aadfaeb571b2ba2e6aa2481ca5..2c99832ec08e9c1d9b5458c467d5238f9b1b3c37 100644 --- a/paddle/operators/adamax_op.h +++ b/paddle/operators/adamax_op.h @@ -26,12 +26,10 @@ class AdamaxOpKernel : public framework::OpKernel { auto param_out_tensor = ctx.Output("ParamOut"); auto moment_out_tensor = ctx.Output("MomentOut"); auto inf_norm_out_tensor = ctx.Output("InfNormOut"); - auto beta1_pow_out_tensor = ctx.Output("Beta1PowOut"); param_out_tensor->mutable_data(ctx.GetPlace()); moment_out_tensor->mutable_data(ctx.GetPlace()); inf_norm_out_tensor->mutable_data(ctx.GetPlace()); - beta1_pow_out_tensor->mutable_data(ctx.GetPlace()); float beta1 = ctx.Attr("beta1"); float beta2 = ctx.Attr("beta2"); @@ -53,15 +51,12 @@ class AdamaxOpKernel : public framework::OpKernel { auto moment_out = framework::EigenVector::Flatten(*moment_out_tensor); auto inf_norm_out = framework::EigenVector::Flatten(*inf_norm_out_tensor); - auto beta1_pow_out = - framework::EigenVector::Flatten(*beta1_pow_out_tensor); auto place = ctx.GetEigenDevice(); moment_out.device(place) = beta1 * moment + (1 - beta1) * grad; inf_norm_out.device(place) = grad.abs().cwiseMax((beta2 * inf_norm) + epsilon); - beta1_pow_out.device(place) = beta1_pow * beta1; - auto lr_t = lr / (1 - beta1_pow_out); + auto lr_t = lr / (1 - beta1_pow); Eigen::DSizes m_dsize(moment_out_tensor->numel()); param_out.device(place) = param - lr_t.broadcast(m_dsize) * (moment_out / inf_norm_out); diff --git a/paddle/operators/batch_norm_op.md b/paddle/operators/batch_norm_op.md new file mode 100644 index 0000000000000000000000000000000000000000..80948adf2b9047a9685dbdd90b2296b5a955f9c1 --- /dev/null +++ b/paddle/operators/batch_norm_op.md @@ -0,0 +1,134 @@
+# Batch Normalization
+
+## What is batch normalization
+
+Batch normalization is a frequently-used method in deep network training. It adjusts the mean and variance of a layer's output, which makes the data distribution easier for the next layer to learn from.
+
+The principle of batch normalization can be summarized in a simple function:
+
+```
+y = ((x - E[x]) / STD[x]) * scale + bias
+```
+
+`x` is a batch of output data of a certain layer. `E[x]` and `STD[x]` are the mean and standard deviation of `x`, respectively. `scale` and `bias` are two trainable parameters. Training the batch normalization layer amounts to learning the best values of `scale` and `bias`.
+
+In our design, we use a single operator (`batch_norm_op`) to implement the whole batch normalization in C++, and wrap it as a layer in Python.
+
+## Differences with normal operators
+
+`batch_norm_op` is a single operator. However, there are a few differences between `BatchNormOp` and normal operators, which we shall take into consideration in our design.
+
+1. `batch_norm_op` shall behave differently in training and inferencing. For example, during inferencing there is no batch data, so it is impossible to compute `E[x]` and `STD[x]`, and we have to use an `estimated_mean` and an `estimated_variance` instead. This requires our framework to be able to inform operators of the current running type (training/inferencing), so that operators can switch their behaviors.
+
+2. `batch_norm_op` shall have the ability to maintain `estimated_mean` and `estimated_variance` across mini-batches.
+In each mini-batch, `estimated_mean` is updated by the following equations:
+
+```
+if batch_id == 0
+  estimated_mean = E[x]
+else
+  estimated_mean = estimated_mean * momentum + (1.0 - momentum) * E[x]
+```
+
+The update of `estimated_variance` is similar. `momentum` is an attribute that controls how fast `estimated_mean` is updated.
+
+## Implementation
+
+Batch normalization is designed as a single operator in C++, and then wrapped as a layer in Python.
+
+### C++
+
+As most C++ operators do, `batch_norm_op` is defined by inputs, outputs, attributes and compute kernels.
+
+#### Inputs
+
+- `x`: The input data, which is generated by the previous layer.
+- `estimated_mean`: The estimated mean of all previous data batches. It is updated in each forward propagation and will be used in inferencing to take the role of `E[x]`.
+- `estimated_var`: The estimated standard deviation of all previous data batches. It is updated in each forward propagation and will be used in inferencing to take the role of `STD[x]`.
+- `scale`: trainable parameter 'scale'
+- `bias`: trainable parameter 'bias'
+
+#### Outputs
+
+- `y`: The output data.
+- `batch_mean`: The mean value of the batch data.
+- `batch_var`: The standard deviation value of the batch data.
+- `saved_mean`: `estimated_mean` updated with the current batch data. It is supposed to share its memory with the input `estimated_mean`.
+- `saved_var`: `estimated_var` updated with the current batch data. It is supposed to share its memory with the input `estimated_var`.
+
+#### Attributes
+
+- `is_infer`: *bool*. If true, run `batch_norm_op` in inferencing mode.
+- `use_global_est`: *bool*. If true, use `saved_mean` and `saved_var` instead of `E[x]` and `STD[x]` in training.
+- `epsilon`: *float*. The epsilon value to avoid division by zero.
+- `momentum`: *float*. The factor used in updating `estimated_mean` and `estimated_var`, as shown above.
+
+#### Kernels
+
+The following graph shows the training computation of `batch_norm_op`:
+
+*(figure omitted: the forward computation graph of `batch_norm_op` in training)*
+
+cuDNN provides APIs that cover this whole series of computations, so we can use them in our GPU kernel.
+
+### Python
+
+`batch_norm_op` is wrapped as a layer in Python:
+
+```python
+def batch_norm_layer(net,
+                     input,
+                     output,
+                     scale,
+                     bias,
+                     use_global_est = False,
+                     epsilon = 1e-6,
+                     momentum = 0.99):
+    mean_cache = scope.new_var(name = 'estimated_mean', trainable = False)
+    var_cache = scope.new_var(name = 'estimated_var', trainable = False)
+    batch_mean = scope.new_var(name = 'batch_mean')
+    batch_var = scope.new_var(name = 'batch_var')
+    batch_norm_op = Operator('batch_norm_op',
+                             x = input,
+                             estimated_mean = mean_cache,
+                             estimated_var = var_cache,
+                             scale = scale,
+                             bias = bias,
+                             y = output,
+                             batch_mean = batch_mean,
+                             batch_var = batch_var,
+                             saved_mean = mean_cache,
+                             saved_var = var_cache,
+                             is_infer = False,
+                             use_global_est = use_global_est,
+                             epsilon = epsilon,
+                             momentum = momentum)
+    net.append_op(batch_norm_op)
+    return output
+```
+
+Because the Python API has not been finalized, the code above can be regarded as pseudo code. There are a few key points we shall note:
+
+1. `estimated_mean` and `estimated_var` are assigned the same variables as `saved_mean` and `saved_var` respectively, so they share the same memory. The output mean and variance values (`saved_mean` and `saved_var`) of a certain batch become the inputs (`estimated_mean` and `estimated_var`) of the next batch.
+
+2. `is_infer` decides whether `batch_norm_op` runs in training mode or inferencing mode.
+However, a network may contain both training and inferencing parts, and users may switch `batch_norm_op`'s running mode in a Python `for` loop like this:
+
+```python
+for pass_id in range(PASS_NUM):
+    # ...
+    net.train()                 # run training model
+    if pass_id % 100 == 0:
+        net.infer(test_image)   # run inferencing model
+    # ...
+```
+
+`is_infer` is an attribute. Once an operator is created, its attributes cannot be changed. This suggests that we should maintain two `batch_norm_op`s in the model: one whose `is_infer` is `True` (we call it `infer_batch_norm_op`) and one whose `is_infer` is `False` (we call it `train_batch_norm_op`). They share all parameters and variables, but are placed in two different branches. That is to say, if a network contains a `batch_norm_op`, it will fork into two branches, one going through `train_batch_norm_op` and the other going through `infer_batch_norm_op`:
+
+*(figure omitted: the network forks into a `train_batch_norm_op` branch and an `infer_batch_norm_op` branch)*
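To make the behavior of the two branches concrete, here is a small, runnable NumPy sketch. It is not the PaddlePaddle operator or Python API (which is still undecided); `batch_norm_forward`, `stats`, and the other names are hypothetical and used only for illustration, and for brevity the sketch skips the special case of the very first batch. The `is_infer` flag plays the role of choosing between `train_batch_norm_op` and `infer_batch_norm_op`, while `stats`, `scale` and `bias` stand in for the variables the two branches share:

```python
import numpy as np

def batch_norm_forward(x, scale, bias, stats, is_infer,
                       momentum=0.99, epsilon=1e-6):
    """One forward pass of a (simplified) batch-norm layer."""
    if is_infer:
        # Inference branch: only read the running estimates.
        mean, std = stats['estimated_mean'], stats['estimated_std']
    else:
        # Training branch: use batch statistics and refresh the estimates,
        # mirroring estimated_mean = estimated_mean * momentum + (1 - momentum) * E[x].
        mean, std = x.mean(axis=0), x.std(axis=0)
        stats['estimated_mean'] = stats['estimated_mean'] * momentum + (1.0 - momentum) * mean
        stats['estimated_std'] = stats['estimated_std'] * momentum + (1.0 - momentum) * std
    return (x - mean) / (std + epsilon) * scale + bias

# Shared state, analogous to the parameters and caches shared by both branches.
stats = {'estimated_mean': np.zeros(4), 'estimated_std': np.ones(4)}
scale, bias = np.ones(4), np.zeros(4)

x = np.random.randn(8, 4)
y_train = batch_norm_forward(x, scale, bias, stats, is_infer=False)  # train branch
y_infer = batch_norm_forward(x, scale, bias, stats, is_infer=True)   # infer branch
```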
+ +Just like what is shown in the above graph, the net forks before `batch_norm_op` and will never merge again. All the operators after `batch_norm_op` will duplicate. + +When the net runs in training mode, the end of the left branch will be set as the running target, so the dependency tracking process will ignore right branch automatically. When the net runs in inferencing mode, the process is reversed. + +How to set a target is related to Python API design, so I will leave it here waiting for more discussions. diff --git a/paddle/operators/clip_op.cc b/paddle/operators/clip_op.cc index 3e9b0d82ba06a918d52124e791715277640fd4ff..f80204c6833d6436f2cf21610beea45b36787eea 100644 --- a/paddle/operators/clip_op.cc +++ b/paddle/operators/clip_op.cc @@ -21,15 +21,14 @@ class ClipOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of ClipOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of ClipOp should not be null."); auto x_dims = ctx->GetInputDim("X"); - auto max = Attr("max"); - auto min = Attr("min"); + auto max = ctx->Attrs().Get("max"); + auto min = ctx->Attrs().Get("min"); PADDLE_ENFORCE_LT(min, max, "max should be greater than min."); ctx->SetOutputDim("Out", x_dims); ctx->ShareLoD("X", /*->*/ "Out"); @@ -60,7 +59,6 @@ class ClipOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), diff --git a/paddle/operators/concat_op.cc b/paddle/operators/concat_op.cc index 235c4449ace79e87d209615199d6628f269961a9..e11e51b4583817ef50cd447dbcf4c7202a152422 100644 --- a/paddle/operators/concat_op.cc +++ b/paddle/operators/concat_op.cc @@ -23,7 +23,6 @@ class ConcatOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE_GE(ctx->Inputs("X").size(), 1UL, "Inputs(X) of ConcatOp should be empty.") @@ -82,7 +81,6 @@ class ConcatOpGrad : public framework::OperatorWithKernel { const framework::AttributeMap &attrs) : OperatorWithKernel(type, inputs, outputs, attrs) {} - protected: void InferShape(framework::InferShapeContext *ctx) const override { ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X")); } diff --git a/paddle/operators/conv2d_op.h b/paddle/operators/conv2d_op.h index 046f8f5faca2f08ddcbeafa92dc29f3e84b0b941..0621389a79eee6b5e75b1eab309b49f8aa4a97ca 100644 --- a/paddle/operators/conv2d_op.h +++ b/paddle/operators/conv2d_op.h @@ -44,7 +44,6 @@ class Conv2DOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override; }; @@ -52,7 +51,6 @@ class Conv2DOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override; }; @@ -110,17 +108,17 @@ class GemmConv2DKernel : public framework::OpKernel { int in_step = input_channels / groups; int out_step = output_channels / groups; for (int i = 0; i < batch_size; i++) { - Tensor 
in_batch = input->Slice(i, i + 1).Resize(input_shape); - Tensor out_batch = output->Slice(i, i + 1).Resize(output_matrix_shape); + Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape); + Tensor out_batch = output->Slice(i, i + 1).Resize(output_matrix_shape); for (int g = 0; g < groups; g++) { // im2col - Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step); + Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step); im2col(context.device_context(), in_slice, col, strides[0], strides[1], paddings[0], paddings[0], paddings[1], paddings[1]); // gemm - Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step); - Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step); + Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step); + Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step); math::matmul(context.device_context(), filter_slice, false, col_matrix, false, T(1.0), &out_slice, T(0.0)); } @@ -200,22 +198,20 @@ class GemmConvGrad2DKernel : public framework::OpKernel { for (int i = 0; i < batch_size; i++) { Tensor out_grad_batch = - output_grad->Slice(i, i + 1).Resize(output_matrix_shape); - Tensor in_grad_batch = - input_grad->Slice(i, i + 1).Resize(input_shape); + output_grad->Slice(i, i + 1).Resize(output_matrix_shape); + Tensor in_grad_batch = input_grad->Slice(i, i + 1).Resize(input_shape); for (int g = 0; g < groups; g++) { // gemm Tensor out_grad_slice = - out_grad_batch.Slice(g * out_step, (g + 1) * out_step); - Tensor filter_slice = - filter.Slice(g * out_step, (g + 1) * out_step); + out_grad_batch.Slice(g * out_step, (g + 1) * out_step); + Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step); math::matmul(context.device_context(), filter_slice, true, out_grad_slice, false, T(1.0), &col_matrix, T(0.0)); // col2im Tensor in_grad_slice = - in_grad_batch.Slice(g * in_step, (g + 1) * in_step); + in_grad_batch.Slice(g * in_step, (g + 1) * in_step); col2im(context.device_context(), in_grad_slice, col, strides[0], strides[1], paddings[0], paddings[0], paddings[1], paddings[1]); @@ -232,20 +228,20 @@ class GemmConvGrad2DKernel : public framework::OpKernel { for (int i = 0; i < batch_size; i++) { Tensor out_grad_batch = - output_grad->Slice(i, i + 1).Resize(output_matrix_shape); - Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape); + output_grad->Slice(i, i + 1).Resize(output_matrix_shape); + Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape); for (int g = 0; g < groups; g++) { // im2col Tensor out_grad_slice = - out_grad_batch.Slice(g * out_step, (g + 1) * out_step); - Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step); + out_grad_batch.Slice(g * out_step, (g + 1) * out_step); + Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step); im2col(context.device_context(), in_slice, col, strides[0], strides[1], paddings[0], paddings[0], paddings[1], paddings[1]); // gemm Tensor filter_grad_slice = - filter_grad_.Slice(g * out_step, (g + 1) * out_step); + filter_grad_.Slice(g * out_step, (g + 1) * out_step); math::matmul(context.device_context(), out_grad_slice, false, col_matrix, true, T(1.0), &filter_grad_slice, T(1.0)); diff --git a/paddle/operators/conv_shift_op.cc b/paddle/operators/conv_shift_op.cc index e1e321ed5fce6ce4e4089cc5c5e488a2cbad6c82..6156a2d6af9a010240449a7c944ec0caffc85189 100644 --- a/paddle/operators/conv_shift_op.cc +++ b/paddle/operators/conv_shift_op.cc @@ -27,7 +27,6 @@ class ConvShiftOp : public framework::OperatorWithKernel { public: 
using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should be not null."); @@ -54,7 +53,6 @@ class ConvShiftGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should be not null."); diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc index 2b4c4b9c45d22e9d0b124145b55cfaeeaf1583d5..55f69fb03ad69c94dc4ebb8edd651d84e06a5f46 100644 --- a/paddle/operators/cos_sim_op.cc +++ b/paddle/operators/cos_sim_op.cc @@ -23,7 +23,6 @@ class CosSimOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { // notnull check PADDLE_ENFORCE(ctx->HasInput("X"), @@ -97,7 +96,6 @@ class CosSimOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { // notnull check PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null."); diff --git a/paddle/operators/crop_op.cc b/paddle/operators/crop_op.cc index a1424993cc3076f2755164393c5429d79fc1ee78..a994d916761da3b44cb60bd4c1c767cd1987522f 100644 --- a/paddle/operators/crop_op.cc +++ b/paddle/operators/crop_op.cc @@ -24,7 +24,6 @@ class CropOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of CropOp should not be null."); @@ -114,7 +113,6 @@ class CropOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc index 708e80e96a0f007be1594d8b4781d1296d6b398d..a865991db3111d2a7cec9f7731b3c34876864299 100644 --- a/paddle/operators/cross_entropy_op.cc +++ b/paddle/operators/cross_entropy_op.cc @@ -21,7 +21,6 @@ class CrossEntropyOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); @@ -34,13 +33,13 @@ class CrossEntropyOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(x_dims[0], label_dims[0], "The 1st dimension of Input(X) and Input(Label) should " "be equal."); - if (ctx->Attrs().Get("softLabel")) { + if (ctx->Attrs().Get("soft_label")) { PADDLE_ENFORCE_EQ(x_dims[1], label_dims[1], - "If Attr(softLabel) == true, the 2nd dimension of " + "If Attr(soft_label) == true, the 2nd dimension of " "Input(X) and Input(Label) should be equal."); } else { PADDLE_ENFORCE_EQ(label_dims[1], 1, - "If Attr(softLabel) == false, the 2nd 
dimension of " + "If Attr(soft_label) == false, the 2nd dimension of " "Input(Label) should be 1."); } @@ -48,6 +47,7 @@ class CrossEntropyOp : public framework::OperatorWithKernel { ctx->ShareLoD("X", /*->*/ "Y"); } + protected: // CrossEntropy's data type just determined by "X" framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { @@ -59,7 +59,6 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); @@ -82,18 +81,19 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel { "be equal."); PADDLE_ENFORCE_EQ(dy_dims[1], 1, "The 2nd dimension of Input(Y@Grad) should be 1."); - if (ctx->Attrs().Get("softLabel")) { + if (ctx->Attrs().Get("soft_label")) { PADDLE_ENFORCE_EQ(x_dims[1], label_dims[1], - "When Attr(softLabel) == true, the 2nd dimension of " + "When Attr(soft_label) == true, the 2nd dimension of " "Input(X) and Input(Label) should be equal."); } else { PADDLE_ENFORCE_EQ(label_dims[1], 1, - "When Attr(softLabel) == false, the 2nd dimension of " + "When Attr(soft_label) == false, the 2nd dimension of " "Input(Label) should be 1."); } ctx->SetOutputDim(framework::GradVarName("X"), x_dims); } + protected: // CrossEntropy's data type just determined by "X" framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { @@ -115,15 +115,15 @@ class CrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker { "Label", "(Tensor, default Tensor), the ground truth which is " "a 2-D tensor. " - "When softLabel is set to false, `Label` is a Tensor with shape " + "When soft_label is set to false, `Label` is a Tensor with shape " "[N x 1]. " - "When softLabel is set to true, `Label` is a Tensor " + "When soft_label is set to true, `Label` is a Tensor " "with shape [N x K]."); AddOutput("Y", "(Tensor, default Tensor), a 2-D tensor " "with shape [N x 1]. The cross entropy loss."); AddAttr( - "softLabel", + "soft_label", "(bool, default false), a flag to indicate whether to interpretate " "the given labels as soft labels.") .SetDefault(false); @@ -133,12 +133,12 @@ CrossEntropy Operator. It supports both standard cross-entropy and soft-label cross-entropy loss computation. 
1) One-hot cross-entropy: - softLabel = false, Label[i, 0] indicates the class index for sample i: + soft_label = false, Label[i, 0] indicates the class index for sample i: Y[i] = -log(X[i, Label[i]]) 2) Soft-label cross-entropy: - softLabel = true, Label[i, j] indicates the soft label of class j + soft_label = true, Label[i, j] indicates the soft label of class j for sample i: Y[i] = \sum_j{-Label[i, j] * log(X[i, j])} diff --git a/paddle/operators/cross_entropy_op.cu b/paddle/operators/cross_entropy_op.cu index 5e2024e0ea9040b758e1cec4dbaa4b329bbb727e..c492dddb09a41e3731a211b4fa083e57ad780f42 100644 --- a/paddle/operators/cross_entropy_op.cu +++ b/paddle/operators/cross_entropy_op.cu @@ -56,7 +56,7 @@ class CrossEntropyOpCUDAKernel : public framework::OpKernel { y->mutable_data(ctx.GetPlace()); math::CrossEntropyFunctor()( - ctx.device_context(), y, x, label, ctx.Attr("softLabel")); + ctx.device_context(), y, x, label, ctx.Attr("soft_label")); } }; @@ -83,7 +83,7 @@ class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel { int block = 512; int grid = (batch_size * class_num + block - 1) / block; - if (ctx.Attr("softLabel")) { + if (ctx.Attr("soft_label")) { auto* label_data = label->data(); SoftCrossEntropyGradientKernel<<< grid, block, 0, reinterpret_cast( @@ -91,7 +91,8 @@ class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel { .stream()>>>(dx_data, dy_data, x_data, label_data, batch_size, class_num); } else { - math::SetConstant(ctx.device_context(), dx, 0); + math::SetConstant functor; + functor(ctx.device_context(), dx, 0); auto* label_data = label->data(); grid = (batch_size + block - 1) / block; CrossEntropyGradientKernel<<< diff --git a/paddle/operators/cross_entropy_op.h b/paddle/operators/cross_entropy_op.h index d2d321aa7ed8e32cc19d5a171beea34d36195b10..42f282103b5609e3c987fc4a83113f86532f74d6 100644 --- a/paddle/operators/cross_entropy_op.h +++ b/paddle/operators/cross_entropy_op.h @@ -38,7 +38,7 @@ class CrossEntropyOpKernel : public framework::OpKernel { y->mutable_data(ctx.GetPlace()); math::CrossEntropyFunctor()( - ctx.device_context(), y, x, labels, ctx.Attr("softLabel")); + ctx.device_context(), y, x, labels, ctx.Attr("soft_label")); } }; @@ -55,7 +55,7 @@ class CrossEntropyGradientOpKernel : public framework::OpKernel { T* dx_data = dx->mutable_data(ctx.GetPlace()); int class_num = x->dims()[1]; - if (ctx.Attr("softLabel")) { + if (ctx.Attr("soft_label")) { auto x_mat = EigenMatrix::From(*x); auto dy_mat = EigenMatrix::From(*dy); auto lbl_mat = EigenMatrix::From(*label); @@ -70,7 +70,8 @@ class CrossEntropyGradientOpKernel : public framework::OpKernel { const T* x_data = x->data(); const int* label_data = label->data(); - math::SetConstant(ctx.device_context(), dx, 0); + math::SetConstant functor; + functor(ctx.device_context(), dx, 0); for (int i = 0; i < batch_size; ++i) { PADDLE_ASSERT(label_data[i] >= 0 || label_data[i] < class_num); diff --git a/paddle/operators/decayed_adagrad_op.cc b/paddle/operators/decayed_adagrad_op.cc index 7f583f18c8c6ee5025f6525306f9323fb329b030..17b394aa07cb0c7ca6e085b61590ff052221b22c 100644 --- a/paddle/operators/decayed_adagrad_op.cc +++ b/paddle/operators/decayed_adagrad_op.cc @@ -21,7 +21,6 @@ class DecayedAdagradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Param"), "Input(Param) of DecayedAdagradOp should not be null."); diff --git 
a/paddle/operators/dropout_op.cc b/paddle/operators/dropout_op.cc index 708ccfa0bfc163ea75fcd9dbcb5ef6140b4b5f36..29858c90832bf116d07e43825eda5775a94beafb 100644 --- a/paddle/operators/dropout_op.cc +++ b/paddle/operators/dropout_op.cc @@ -23,7 +23,6 @@ class DropoutOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null."); PADDLE_ENFORCE_GE(ctx->Attrs().Get("dropout_prob"), 0); @@ -69,7 +68,6 @@ class DropoutOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE_EQ(ctx->Attrs().Get("is_training"), 1, "GradOp is only callable when is_training is true"); diff --git a/paddle/operators/dynamic_recurrent_op.cc b/paddle/operators/dynamic_recurrent_op.cc index 03f33e28d49fdaeccb9b6266359e0b41a1cb847f..a0b06ac1dc305bc899f9abaafcc980a6150ecda9 100644 --- a/paddle/operators/dynamic_recurrent_op.cc +++ b/paddle/operators/dynamic_recurrent_op.cc @@ -23,6 +23,7 @@ using framework::Scope; using framework::TensorArray; using framework::LoDTensor; using framework::Variable; +using framework::OperatorBase; using framework::DySeqMetaBatch; namespace detail { @@ -43,72 +44,72 @@ inline void CreateVariables(Scope& scope, * be reordered, but the RNN op should not change the `boot_state` as an input * variable's content. */ -template -inline void ReorderBootState(const DySeqMetaBatch& metas, - const LoDTensor& boot_state, LoDTensor* tensor, - const platform::Place& dst_place) { +inline void ReorderInitialState(const DySeqMetaBatch& metas, + const LoDTensor& boot_state, LoDTensor* tensor, + const platform::Place& dst_place) { for (size_t seq_id = 0; seq_id < metas.size(); seq_id++) { - auto slice = tensor->Slice(seq_id, seq_id + 1); + auto slice = tensor->Slice(seq_id, seq_id + 1); auto boot_slice = - boot_state.Slice(metas[seq_id].ori_idx, metas[seq_id].ori_idx + 1); + boot_state.Slice(metas[seq_id].ori_idx, metas[seq_id].ori_idx + 1); // TODO(superjom) pass in device context as an argument - slice.template CopyFrom(boot_slice, dst_place, - platform::CPUDeviceContext()); + slice.CopyFrom(boot_slice, dst_place, platform::CPUDeviceContext()); } } -} // namespace detail - -class DynamicRecurrentOpProtoAndCheckerMaker - : public framework::OpProtoAndCheckerMaker { - public: - DynamicRecurrentOpProtoAndCheckerMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - const auto& name = DynamicRecurrentOp::kArgName; - // inputs and outputs stored in proto - AddInput(name.inlinks, - "the inputs that need to be segmented for each step.") - .AsDuplicable(); - AddInput(name.boot_memories, "variables to initialize memories.") - .AsDuplicable(); - - AddOutput(name.outlinks, "the outputs that need to concated for all steps.") - .AsDuplicable(); - AddOutput(name.step_scopes, "step scopes"); - - // Attributes stored in AttributeMap - AddAttr>(name.pre_memories, - "names of pre-memories"); - AddAttr>(name.memories, "names of memories"); - - AddComment("This is a RNN operator for varience-length sequences."); +inline void RestoreInitialState(const DySeqMetaBatch& metas, + const LoDTensor& tensor, LoDTensor* boot_state, + const platform::Place& dst_place) { + for (size_t seq_id = 0; seq_id < metas.size(); seq_id++) { + 
auto slice = tensor.Slice(seq_id, seq_id + 1); + auto boot_slice = + boot_state->Slice(metas[seq_id].ori_idx, metas[seq_id].ori_idx + 1); + boot_slice.CopyFrom(slice, dst_place, platform::CPUDeviceContext()); } -}; +} -void DynamicRecurrentOp::Run(const Scope& scope, - const platform::DeviceContext& dev_ctx) const { - cache_.Init(kArgName, *this, scope, &arg_); +} // namespace detail + +// Implementation for forward propagation. +template <> +void RNNAlgorithm::Run( + const framework::Scope& scope, const framework::OperatorBase& op, + const platform::DeviceContext& dev_ctx) { + SetComputeMode(ComputeMode::kForward); + cache_.Init(kArgNames[mode_], op, scope, &dev_ctx, &arg_); SplitInputs(); CreateScopes(); WriteStepInputs(); InitStates(); WriteStepOutputs(); + RunSteps(); + ConcatOutputs(); +} - // call stepnet in all the time steps - for (size_t step = 0; step < cache_.num_steps; step++) { - auto& step_scope = cache_.GetScope(step); - stepnet_->Run(step_scope, dev_ctx); +// Implementation for backward propagation. +template <> +void RNNAlgorithm::Run( + const framework::Scope& scope, const framework::OperatorBase& op, + const platform::DeviceContext& dev_ctx) { + SetComputeMode(ComputeMode::kBackward); + cache_.Init(kArgNames[mode_], op, scope, &dev_ctx, &arg_); + SplitInputs(); + WriteStepInputs(); + InitStates(); + WriteStepOutputs(); + RunSteps(); + // copy boot-states' gradients back. + for (const auto& state : arg_.states) { + ExportInitialStateGradient(state); } ConcatOutputs(); } -void DynamicRecurrentOp::SplitInputs() const { +void RNNAlgorithm::SplitInputs() { // TODO(superjom) make level a config // TODO(superjom) check all the inputs has the same LoD int level = 0; - for (const auto& item : cache_.inlinks) { + for (const auto& item : cache_.inputs) { const auto& var = item.second; const auto& tensor = var->Get(); TensorArray& ta = step_inputs_[item.first]; @@ -125,8 +126,8 @@ void DynamicRecurrentOp::SplitInputs() const { } } -void DynamicRecurrentOp::WriteStepInputs() const { - for (const auto& item : cache_.inlinks) { +void RNNAlgorithm::WriteStepInputs() { + for (const auto& item : cache_.inputs) { auto ta_it = step_inputs_.find(item.first); PADDLE_ENFORCE(ta_it != step_inputs_.end(), "step_inputs_ not compatible with memory set"); @@ -138,20 +139,20 @@ void DynamicRecurrentOp::WriteStepInputs() const { if (var == nullptr) { var = step_scope.Var(item.first); } - var->GetMutable()->ShareDataWith(tensor); + var->GetMutable()->ShareDataWith(tensor); } } } -void DynamicRecurrentOp::WriteStepOutputs() const { +void RNNAlgorithm::WriteStepOutputs() { // initialize step outputs - for (const auto& item : cache_.outlinks) { + for (const auto& item : cache_.outputs) { step_outputs_.emplace(item.first, TensorArray()); } PADDLE_ENFORCE_GT(step_outputs_.size(), 0UL); } -void DynamicRecurrentOp::CreateScopes() const { +void RNNAlgorithm::CreateScopes() { PADDLE_ENFORCE_GT(cache_.num_steps, 0); // resize scopes size_t num_scopes_need_create = cache_.num_steps - cache_.scopes->size(); @@ -160,19 +161,19 @@ void DynamicRecurrentOp::CreateScopes() const { } // init temporary inputs - PADDLE_ENFORCE_NOT_NULL(stepnet_, "stepnet should be set first"); - std::vector memories; - std::vector pre_memories; - std::vector stepnet_outputs; - std::transform(arg_.memories.begin(), arg_.memories.end(), - std::back_inserter(memories), - [](const rnn::MemoryAttr& m) { return m.var; }); - std::transform(arg_.memories.begin(), arg_.memories.end(), - std::back_inserter(pre_memories), - [](const 
rnn::MemoryAttr& m) { return m.pre_var; }); - for (const auto& item : stepnet_->Outputs()) { + PADDLE_ENFORCE_NOT_NULL(step_unit_, "stepnet should be set first"); + std::vector states; + std::vector ex_states; + std::vector step_unit_outputs; + std::transform(arg_.states.begin(), arg_.states.end(), + std::back_inserter(states), + [](const rnn::StateAttr& m) { return m.var; }); + std::transform(arg_.states.begin(), arg_.states.end(), + std::back_inserter(ex_states), + [](const rnn::StateAttr& m) { return m.pre_var; }); + for (const auto& item : step_unit_->Outputs()) { for (const auto& var : item.second) { - stepnet_outputs.push_back(var); + step_unit_outputs.push_back(var); } } @@ -180,13 +181,13 @@ void DynamicRecurrentOp::CreateScopes() const { auto& scope = cache_.GetScope(step); detail::CreateVariables(scope, arg_.inlinks); detail::CreateVariables(scope, arg_.outlinks); - detail::CreateVariables(scope, memories); - detail::CreateVariables(scope, pre_memories); - detail::CreateVariables(scope, stepnet_outputs); + detail::CreateVariables(scope, states); + detail::CreateVariables(scope, ex_states); + detail::CreateVariables(scope, step_unit_outputs); } } -void DynamicRecurrentOp::ConcatOutputs() const { +void RNNAlgorithm::ConcatOutputs() { // TODO(superjom) transform this to a config int level = 0; for (size_t step = 0; step < cache_.num_steps; step++) { @@ -199,31 +200,45 @@ void DynamicRecurrentOp::ConcatOutputs() const { item.second.WriteShared(step, *tensor); } } - // the inlinks' lods should be the same, so randomly get one lod. + // the inputs' lods should be the same, so randomly get one lod. const auto& some_lod = cache_.scope->FindVar(arg_.inlinks.front())->Get().lod(); const auto& some_meta = dy_seq_metas_[arg_.inlinks.front()]; for (auto& item : step_outputs_) { auto tensor = item.second.Pack(level, some_meta, some_lod); - auto* output = cache_.outlinks[item.first]->GetMutable(); - const_cast(output)->ShareDataWith(tensor); + auto* output = cache_.outputs[item.first]->GetMutable(); + const_cast(output)->ShareDataWith(tensor); + } +} + +void RNNAlgorithm::RunSteps() { + if (IsBackward()) { + // call stepnet in all the time steps reversely + for (int step = cache_.num_steps - 1; step >= 0; step--) { + auto& step_scope = cache_.GetScope(step); + step_unit_->Run(step_scope, *cache_.dev_ctx); + } + } else { + for (size_t step = 0; step < cache_.num_steps; step++) { + auto& step_scope = cache_.GetScope(step); + step_unit_->Run(step_scope, *cache_.dev_ctx); + } } } -void DynamicRecurrentOp::InitStates() const { +void RNNAlgorithm::InitStates() { for (size_t step = 0; step < cache_.num_steps; step++) { - for (const auto& memory : arg_.memories) { - CreateState(memory, step); - LinkState(memory, step); + for (const auto& state : arg_.states) { + CreateState(state, step); + LinkState(state, step); } } } -void DynamicRecurrentOp::CreateState(const rnn::MemoryAttr& memory, - size_t step) const { +void RNNAlgorithm::CreateState(const rnn::StateAttr& state_attr, size_t step) { auto& scope = cache_.GetScope(step); - auto& state = *cache_.GetTensor(scope, memory.var); - auto& boot_state = *cache_.GetTensor(*cache_.scope, memory.boot_var); + auto& state = *cache_.GetTensor(scope, state_attr.var); + auto& boot_state = *cache_.GetTensor(*cache_.scope, state_attr.boot_var); size_t num_instances = step_inputs_[arg_.inlinks.front()].Read(step).dims()[0]; @@ -232,56 +247,79 @@ void DynamicRecurrentOp::CreateState(const rnn::MemoryAttr& memory, state.Resize(dims); 
state.mutable_data(platform::CPUPlace()); - states_[memory.var].WriteShared(step, state); + states_[state_attr.var].WriteShared(step, state); } -void DynamicRecurrentOp::LinkState(const rnn::MemoryAttr& memory, - size_t step) const { +void RNNAlgorithm::LinkState(const rnn::StateAttr& state, size_t step) { auto& scope = cache_.GetScope(step); - auto& state_pre = *cache_.GetTensor(scope, memory.pre_var); + auto& state_pre = *cache_.GetTensor(scope, state.pre_var); + + // process the first state's boot-state(the 0-step in forward mode or the + // last step in backward mode) + // Only forward mode need to link the boot-state to the `pre-state` in first + // time step. In backward mode, need to copy the gradient of `pre-state` in + // first time step to the gradient of `boot-state`. + if (step == 0 && IsForward()) { + LinkInitialState(state); + } else { + size_t num_instances = + step_inputs_[arg_.inlinks.front()].Read(step).dims()[0]; + auto* pre_state = cache_.GetTensor(cache_.GetScope(step - 1), state.var); + // shink and share from previous state + auto shrinked_pre_state = pre_state->Slice(0, num_instances); + state_pre.ShareDataWith(shrinked_pre_state); + } +} +void RNNAlgorithm::LinkInitialState(const rnn::StateAttr& state) { // all the step_inputs' metas should be the same, just randomly select one // and get the dyseq meta. const auto& some_meta = dy_seq_metas_[arg_.inlinks.front()]; - size_t num_instances = - step_inputs_[arg_.inlinks.front()].Read(step).dims()[0]; + auto& scope = cache_.GetScope(0); + auto& state_pre = *cache_.GetTensor(scope, state.pre_var); + auto* pre_state = cache_.GetTensor(*cache_.scope, state.boot_var); + pre_state->mutable_data(platform::CPUPlace()); + // allocate state + state_pre.Resize(pre_state->dims()); + state_pre.mutable_data(platform::CPUPlace()); + detail::ReorderInitialState(some_meta, *pre_state, &state_pre, + pre_state->place()); +} - LoDTensor* pre_state{nullptr}; - if (step == 0) { - pre_state = cache_.GetTensor(*cache_.scope, memory.boot_var); - pre_state->mutable_data(platform::CPUPlace()); - // allocate memory - state_pre.Resize(pre_state->dims()); - state_pre.mutable_data(platform::CPUPlace()); - detail::ReorderBootState(some_meta, *pre_state, &state_pre, - pre_state->place()); - } else { - pre_state = cache_.GetTensor(cache_.GetScope(step - 1), memory.var); - } +void RNNAlgorithm::ExportInitialStateGradient(const rnn::StateAttr& state) { + // all the step_inputs' metas should be the same, just randomly select one + // and get the dyseq meta. 
+ const auto& some_meta = dy_seq_metas_[arg_.inlinks.front()]; + auto& scope = cache_.GetScope(0); - // shink and share from previous state - auto shrinked_pre_state = pre_state->Slice(0, num_instances); - state_pre.ShareDataWith(shrinked_pre_state); + auto& state_pre = *cache_.GetTensor(scope, state.pre_var); + auto& pre_state = *cache_.GetTensor(*cache_.scope, state.boot_var); + pre_state.Resize(state_pre.dims()); + detail::RestoreInitialState(some_meta, state_pre, &pre_state, + pre_state.place()); } -void DynamicRecurrentOp::ArgCache::Init( - const rnn::ArgumentName& name, const paddle::framework::OperatorBase& op, - const paddle::framework::Scope& scope, rnn::Argument* arg) { +void RNNAlgorithm::ArgCache::Init(const rnn::ArgumentName& name, + const paddle::framework::OperatorBase& op, + const paddle::framework::Scope& scope, + platform::DeviceContext const* dev_ctx, + rnn::Argument* arg) { this->scope = &scope; InitArgument(name, op, arg); CacheScopes(scope, *arg); CacheInlinks(scope, arg->inlinks); CacheOutlinks(scope, arg->outlinks); + this->dev_ctx = dev_ctx; } -void DynamicRecurrentOp::ArgCache::InitArgument(const rnn::ArgumentName& name, - const OperatorBase& op, - rnn::Argument* arg) { +void RNNAlgorithm::ArgCache::InitArgument(const rnn::ArgumentName& name, + const OperatorBase& op, + rnn::Argument* arg) { rnn::InitArgument(name, arg, op, false /*is_grad*/); } -void DynamicRecurrentOp::ArgCache::CacheScopes(const Scope& scope, - const rnn::Argument& arg) { +void RNNAlgorithm::ArgCache::CacheScopes(const Scope& scope, + const rnn::Argument& arg) { auto scopes_var = scope.FindVar(arg.step_scopes); PADDLE_ENFORCE(scopes_var != nullptr, "the step_scopes output argument [%s] should be created first " @@ -290,45 +328,85 @@ void DynamicRecurrentOp::ArgCache::CacheScopes(const Scope& scope, this->scopes = scopes_var->GetMutable>(); } -void DynamicRecurrentOp::ArgCache::CacheInlinks( +void RNNAlgorithm::ArgCache::CacheInlinks( const Scope& scope, const std::vector& names) { for (auto name : names) { auto* var = GetVariable(scope, name); - inlinks[name] = var; + inputs[name] = var; } } -void DynamicRecurrentOp::ArgCache::CacheOutlinks( +void RNNAlgorithm::ArgCache::CacheOutlinks( const Scope& scope, const std::vector& names) { for (auto name : names) { auto* var = GetVariable(scope, name); - outlinks[name] = var; + outputs[name] = var; } } -Variable* DynamicRecurrentOp::ArgCache::GetVariable(const Scope& scope, - const std::string& name) { +Variable* RNNAlgorithm::ArgCache::GetVariable(const Scope& scope, + const std::string& name) { auto* var = scope.FindVar(name); PADDLE_ENFORCE_NOT_NULL(var, "variable [%s] not exist in scope", name); return var; } -LoDTensor* DynamicRecurrentOp::ArgCache::GetTensor( - const framework::Scope& scope, const std::string& name) { +LoDTensor* RNNAlgorithm::ArgCache::GetTensor(const framework::Scope& scope, + const std::string& name) { auto* var = GetVariable(scope, name); return var->GetMutable(); } -const rnn::ArgumentName DynamicRecurrentOp::kArgName{ - "step_net", "step_scopes", "inlinks", "outlinks", - "memories", "pre_memories", "boot_memories"}; +const std::array RNNAlgorithm::kArgNames{ + {rnn::ArgumentName{"step_unit", "step_scopes", "inputs", "outputs", + "states", "ex_states", "initial_states"}, + rnn::ArgumentName{"step_unit", "step_scopes@GRAD", "outputs@GRAD", + "inputs@GRAD", "states", "ex_states", + "initial_states@GRAD"}}}; + +void DynamicRecurrentOp::Run(const framework::Scope& scope, + const platform::DeviceContext& dev_ctx) const { + 
rnn.Run( + scope, *dynamic_cast(this), dev_ctx); +} void DynamicRecurrentGradientOp::Run( - const Scope& scope, const platform::DeviceContext& dev_ctx) const {} + const Scope& scope, const platform::DeviceContext& dev_ctx) const { + rnn.Run( + scope, *dynamic_cast(this), dev_ctx); +} + +class DynamicRecurrentOpProtoAndCheckerMaker + : public framework::OpProtoAndCheckerMaker { + public: + DynamicRecurrentOpProtoAndCheckerMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + const auto& name = + RNNAlgorithm::kArgNames[RNNAlgorithm::ComputeMode::kForward]; + // inputs and outputs stored in proto + AddInput(name.inlinks, + "the inputs that need to be segmented for each step.") + .AsDuplicable(); + AddInput(name.initial_states, "variables to initialize states.") + .AsDuplicable(); + + AddOutput(name.outlinks, "the outputs that need to concated for all steps.") + .AsDuplicable(); + AddOutput(name.step_scopes, "step scopes"); + + // Attributes stored in AttributeMap + AddAttr>(name.ex_states, "names of ex_states"); + AddAttr>(name.states, "names of states"); + + AddComment("This is a RNN operator for varience-length sequences."); + } +}; } // namespace operators } // namespace paddle -REGISTER_OP_WITHOUT_GRADIENT( - dynamic_recurrent, paddle::operators::DynamicRecurrentOp, - paddle::operators::DynamicRecurrentOpProtoAndCheckerMaker); +REGISTER_OP(dynamic_recurrent, paddle::operators::DynamicRecurrentOp, + paddle::operators::DynamicRecurrentOpProtoAndCheckerMaker, + dynamic_recurrent_grad, + paddle::operators::DynamicRecurrentGradientOp); diff --git a/paddle/operators/dynamic_recurrent_op.h b/paddle/operators/dynamic_recurrent_op.h index ec80a1c90eee3a655febe0dd3d6c67c16ec6c64b..5b0548c3a44c9f58838ecc567ee41a587883c26a 100644 --- a/paddle/operators/dynamic_recurrent_op.h +++ b/paddle/operators/dynamic_recurrent_op.h @@ -27,47 +27,39 @@ namespace paddle { namespace operators { -class DynamicRecurrentOp : public framework::OperatorBase { +class RNNAlgorithm { public: - static const rnn::ArgumentName kArgName; + enum ComputeMode { kForward = 0, kBackward = 1 }; + static const std::array kArgNames; using value_type = float; - DynamicRecurrentOp(const std::string& type, - const framework::VariableNameMap& inputs, - const framework::VariableNameMap& outputs, - const framework::AttributeMap& attrs) - : OperatorBase(type, inputs, outputs, attrs) {} - - DynamicRecurrentOp(const DynamicRecurrentOp& o) - : framework::OperatorBase( - static_cast(o)) { - // TODO(yuyang18): Implement copy ctor well. - PADDLE_THROW("Not implemented"); - } - - void Run(const framework::Scope& scope, - const platform::DeviceContext& dev_ctx) const override; - + /* + * Different `Run` method for forward and backward, `_` is just for template + * specifialization. + */ + template + void Run(const framework::Scope& scope, const framework::OperatorBase& op, + const platform::DeviceContext& dev_ctx); /* * Split the inputs(LoDTensors) to segments for each time step. */ - void SplitInputs() const; + void SplitInputs(); /* * Create step-scopes to store temporary outputs in each time steps. */ - void CreateScopes() const; + void CreateScopes(); /* * Link TensorArray steps to the corresponding variables located in * step-scopes. */ - void WriteStepInputs() const; + void WriteStepInputs(); /* * Write output of each step to the corresponding TensorArray. 
*/ - void WriteStepOutputs() const; + void WriteStepOutputs(); /* * Initialize the states, each state will have a corresponding pre-state, @@ -75,54 +67,83 @@ class DynamicRecurrentOp : public framework::OperatorBase { * pre-state in the first time step will be initialized with an zero tensor or * a tensor in parent scope if is provided. */ - void InitStates() const; + void InitStates(); /* * Create state variables for each time step. */ - void CreateState(const rnn::MemoryAttr& memory, size_t step) const; + void CreateState(const rnn::StateAttr& state, size_t step); /* * Link pre-state variable in current scope to the state variable in the - * previous time step (scope). + * previous time step (scope) by reference. + */ + void LinkState(const rnn::StateAttr& state, size_t step); + + /* + * Link the pre-state of the first time step to the `boot-state` in parent's + * scope. + */ + void LinkInitialState(const rnn::StateAttr& state); + + /* + * Copy the gradient from `pre-state` in the first step-scope to the + * `boot-state` in parent's scope. + */ + void ExportInitialStateGradient(const rnn::StateAttr& state); + + /* + * Calculate time steps. */ - void LinkState(const rnn::MemoryAttr& memory, size_t step) const; + void RunSteps(); /* * Concatenate outputs in each time step and generate a LoDTensor. */ - void ConcatOutputs() const; + void ConcatOutputs(); + + void SetComputeMode(ComputeMode mode) { mode_ = mode; } + bool IsForward() const { return mode_ == ComputeMode::kForward; } + bool IsBackward() const { return mode_ == ComputeMode::kBackward; } /* - * set a stepnet that is created according to a RecurrentOp's stepnet. + * set a step unit that is created according to a RecurrentOp's step unit. */ - void SetStepNet(std::unique_ptr net) { - PADDLE_ENFORCE_NOT_NULL(net); - stepnet_ = std::move(net); + void SetStepUnit(std::unique_ptr step_unit) { + PADDLE_ENFORCE_NOT_NULL(step_unit); + step_unit_ = std::move(step_unit); } - const OperatorBase& GetStepNet() const { return *stepnet_; } + const framework::OperatorBase& GetStepUnit() const { return *step_unit_; } const framework::TensorArray& state(const std::string& name) const { - return states_[name]; + auto it = states_.find(name); + PADDLE_ENFORCE(it != states_.end()); + return it->second; } const framework::TensorArray& step_input(const std::string& name) const { - return step_inputs_[name]; + auto it = step_inputs_.find(name); + PADDLE_ENFORCE(it != step_inputs_.end()); + return it->second; } const framework::TensorArray& step_output(const std::string& name) const { - return step_outputs_[name]; + auto it = step_outputs_.find(name); + PADDLE_ENFORCE(it != step_outputs_.end()); + return it->second; } protected: struct ArgCache { framework::Scope const* scope; std::vector* scopes; - std::map inlinks; - std::map outlinks; + std::map inputs; + std::map outputs; + platform::DeviceContext const* dev_ctx; size_t num_steps{0}; - void Init(const rnn::ArgumentName& name, const OperatorBase& op, - const framework::Scope& scope, rnn::Argument* arg); + void Init(const rnn::ArgumentName& name, const framework::OperatorBase& op, + const framework::Scope& scope, + platform::DeviceContext const* dev_ctx, rnn::Argument* arg); framework::Scope& GetScope(size_t index) { PADDLE_ENFORCE_LT(index, num_steps); @@ -133,8 +154,8 @@ class DynamicRecurrentOp : public framework::OperatorBase { const std::string& name); private: - void InitArgument(const rnn::ArgumentName& name, const OperatorBase& op, - rnn::Argument* arg); + void InitArgument(const 
rnn::ArgumentName& name, + const framework::OperatorBase& op, rnn::Argument* arg); void CacheScopes(const framework::Scope& scope, const rnn::Argument& arg); void CacheInlinks(const framework::Scope& scope, const std::vector& names); @@ -145,27 +166,49 @@ class DynamicRecurrentOp : public framework::OperatorBase { }; private: - std::unique_ptr stepnet_; - mutable std::map states_; - mutable std::map step_inputs_; - mutable std::map step_outputs_; - mutable std::map> - dy_seq_metas_; - mutable rnn::Argument arg_; - mutable ArgCache cache_; + std::unique_ptr step_unit_; + std::map states_; + std::map step_inputs_; + std::map step_outputs_; + std::map> dy_seq_metas_; + rnn::Argument arg_; + ArgCache cache_; + ComputeMode mode_{ComputeMode::kForward}; #ifdef PADDLE_WITH_TESTING - friend class DynamicRecurrentOpTestHelper; - FRIEND_TEST(DynamicRecurrentOpTestHelper, SplitInputs); - FRIEND_TEST(DynamicRecurrentOpTestHelper, CreateCache); - FRIEND_TEST(DynamicRecurrentOpTestHelper, CreateScopes); - FRIEND_TEST(DynamicRecurrentOpTestHelper, WriteStepInputs); - FRIEND_TEST(DynamicRecurrentOpTestHelper, WriteStepOutputs); - FRIEND_TEST(DynamicRecurrentOpTestHelper, InitStates); - FRIEND_TEST(DynamicRecurrentOpTestHelper, ConcatOutputs); + // test forward + friend class RNNAlgorithmTestHelper; + FRIEND_TEST(RNNAlgorithmTestHelper, SplitInputs); + FRIEND_TEST(RNNAlgorithmTestHelper, CreateCache); + FRIEND_TEST(RNNAlgorithmTestHelper, CreateScopes); + FRIEND_TEST(RNNAlgorithmTestHelper, WriteStepInputs); + FRIEND_TEST(RNNAlgorithmTestHelper, WriteStepOutputs); + FRIEND_TEST(RNNAlgorithmTestHelper, InitStates); + FRIEND_TEST(RNNAlgorithmTestHelper, ConcatOutputs); +// TODO(superjom) test backward #endif }; +class DynamicRecurrentOp : public framework::OperatorBase { + public: + DynamicRecurrentOp(const std::string& type, + const framework::VariableNameMap& inputs, + const framework::VariableNameMap& outputs, + const framework::AttributeMap& attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + + DynamicRecurrentOp(const DynamicRecurrentOp& o) + : framework::OperatorBase( + static_cast(o)) { + PADDLE_THROW("Not implemented"); + } + + void Run(const framework::Scope& scope, + const platform::DeviceContext& dev_ctx) const override; + + mutable RNNAlgorithm rnn; +}; + class DynamicRecurrentGradientOp : public framework::OperatorBase { public: DynamicRecurrentGradientOp(const std::string& type, @@ -174,8 +217,16 @@ class DynamicRecurrentGradientOp : public framework::OperatorBase { const framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) {} + DynamicRecurrentGradientOp(const DynamicRecurrentGradientOp& o) + : framework::OperatorBase( + static_cast(o)) { + PADDLE_THROW("Not implemented"); + } + void Run(const framework::Scope& scope, const platform::DeviceContext& dev_ctx) const override; + + mutable RNNAlgorithm rnn; }; } // namespace operators diff --git a/paddle/operators/dynamic_recurrent_op_test.cc b/paddle/operators/dynamic_recurrent_op_test.cc index 83a5ba36d9af2ef81ebcbb33e056de2e0b98cbc1..fff63efb24c70b7e864e2d5b011a22883c13dede 100644 --- a/paddle/operators/dynamic_recurrent_op_test.cc +++ b/paddle/operators/dynamic_recurrent_op_test.cc @@ -43,16 +43,16 @@ LoDTensor* CreateVar(Scope& scope, std::string name, framework::DDim dims, return tensor; } -class DynamicRecurrentOpTestHelper : public ::testing::Test { +class RNNAlgorithmTestHelper : public ::testing::Test { protected: - const rnn::ArgumentName argname = DynamicRecurrentOp::kArgName; + const rnn::ArgumentName 
argname = RNNAlgorithm::kArgNames[0]; virtual void SetUp() override { CreateGlobalVariables(); auto op_desc = CreateOpDesc(); - op = paddle::framework::OpRegistry::CreateOp(op_desc); - dop = dynamic_cast(op.get()); + op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); + dop = &(dynamic_cast(op.get())->rnn); InitCacheManually(); InitStepNet(); } @@ -63,20 +63,20 @@ class DynamicRecurrentOpTestHelper : public ::testing::Test { op_desc.set_type("dynamic_recurrent"); OpDescNewVar(argname.inlinks, {"in0"}, op_desc.add_inputs()); - OpDescNewVar(argname.boot_memories, {"boot_mem"}, op_desc.add_inputs()); + OpDescNewVar(argname.initial_states, {"boot_mem"}, op_desc.add_inputs()); OpDescNewVar(argname.step_scopes, {"step_scopes"}, op_desc.add_outputs()); OpDescNewVar(argname.outlinks, {"out0"}, op_desc.add_outputs()); - // set pre-memories + // set pre-states auto pre_memories = op_desc.mutable_attrs()->Add(); - pre_memories->set_name(argname.pre_memories); + pre_memories->set_name(argname.ex_states); pre_memories->set_type(paddle::framework::AttrType::STRINGS); auto pre_memories_item = pre_memories->add_strings(); *pre_memories_item = "mem@pre"; - // set memories + // set states auto memories = op_desc.mutable_attrs()->Add(); - memories->set_name(argname.memories); + memories->set_name(argname.states); memories->set_type(paddle::framework::AttrType::STRINGS); auto memories_item = memories->add_strings(); *memories_item = "mem"; @@ -113,32 +113,33 @@ class DynamicRecurrentOpTestHelper : public ::testing::Test { } void InitCacheManually() { - dop->cache_.Init(DynamicRecurrentOp::kArgName, *dop, scope, &dop->arg_); + dop->cache_.Init(RNNAlgorithm::kArgNames[0], *op, scope, &device_context, + &dop->arg_); } void InitStepNet() { std::unique_ptr stepnet{new NetOp}; dynamic_cast(stepnet.get()) ->AppendOp(std::unique_ptr(new TestOp( - "test", {{"inlinks", {"in0"}}, {"boot_memories", {"boot_mem"}}}, - {{"outlinks", {"out0"}}, {"step_scopes", {"step_scopes"}}}, {}))); - dop->SetStepNet(std::move(stepnet)); + "test", {{"inputs", {"in0"}}, {"initial_states", {"boot_mem"}}}, + {{"outputs", {"out0"}}, {"step_scopes", {"step_scopes"}}}, {}))); + dop->SetStepUnit(std::move(stepnet)); } protected: - DynamicRecurrentOp* dop; + RNNAlgorithm* dop; std::unique_ptr op; paddle::platform::CPUDeviceContext device_context; paddle::framework::Scope scope; }; -TEST_F(DynamicRecurrentOpTestHelper, CreateCache) { +TEST_F(RNNAlgorithmTestHelper, CreateCache) { const rnn::Argument& arg = dop->arg_; ASSERT_EQ(arg.inlinks.size(), 1UL); ASSERT_EQ(arg.outlinks.size(), 1UL); } -TEST_F(DynamicRecurrentOpTestHelper, SplitInputs) { +TEST_F(RNNAlgorithmTestHelper, SplitInputs) { dop->SplitInputs(); auto& in0_ta = dop->step_inputs_["in0"]; ASSERT_EQ(in0_ta.size(), 4UL); @@ -153,14 +154,14 @@ TEST_F(DynamicRecurrentOpTestHelper, SplitInputs) { EXPECT_EQ(batch3.dims()[0], 1); } -TEST_F(DynamicRecurrentOpTestHelper, CreateScopes) { +TEST_F(RNNAlgorithmTestHelper, CreateScopes) { dop->SplitInputs(); dop->CreateScopes(); ASSERT_EQ(dop->cache_.num_steps, 4UL); ASSERT_EQ(dop->cache_.scopes->size(), 4UL); } -TEST_F(DynamicRecurrentOpTestHelper, WriteStepInputs) { +TEST_F(RNNAlgorithmTestHelper, WriteStepInputs) { dop->SplitInputs(); dop->CreateScopes(); dop->WriteStepInputs(); @@ -173,7 +174,7 @@ TEST_F(DynamicRecurrentOpTestHelper, WriteStepInputs) { } } -TEST_F(DynamicRecurrentOpTestHelper, WriteStepOutputs) { +TEST_F(RNNAlgorithmTestHelper, WriteStepOutputs) { dop->SplitInputs(); dop->CreateScopes(); dop->WriteStepInputs(); @@ 
-187,11 +188,12 @@ TEST_F(DynamicRecurrentOpTestHelper, WriteStepOutputs) { } } -TEST_F(DynamicRecurrentOpTestHelper, ConcatOutputs) { +TEST_F(RNNAlgorithmTestHelper, ConcatOutputs) { // Let's leave this test to python unittest. } -TEST_F(DynamicRecurrentOpTestHelper, InitStates) { +TEST_F(RNNAlgorithmTestHelper, InitStates) { + dop->SetComputeMode(RNNAlgorithm::ComputeMode::kForward); dop->SplitInputs(); dop->CreateScopes(); dop->WriteStepInputs(); @@ -208,12 +210,6 @@ TEST_F(DynamicRecurrentOpTestHelper, InitStates) { auto* boot_state = scope.FindVar("boot_mem"); ASSERT_TRUE(boot_state != nullptr); - - if (step == 0) { - // check pre_state is a reference of boot_state - ASSERT_EQ(boot_state->Get().data(), - pre_state->Get().data()); - } } } diff --git a/paddle/operators/elementwise_op.h b/paddle/operators/elementwise_op.h index 66f1910a4738f737c7581e17729c201a7e3eaeae..fce4b24a22f40c9cc57738273a758d0d48ff5e91 100644 --- a/paddle/operators/elementwise_op.h +++ b/paddle/operators/elementwise_op.h @@ -23,7 +23,6 @@ class ElementwiseOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: using Tensor = framework::Tensor; void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), @@ -105,7 +104,6 @@ class ElementwiseOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; using Tensor = framework::Tensor; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null"); diff --git a/paddle/operators/elementwise_op_function.h b/paddle/operators/elementwise_op_function.h index 3eb97f60b59848d23bcd15ea1e3d2f21b721f6a4..488a35aafc8600bb8bb252fc3a5161c72a2f6df1 100644 --- a/paddle/operators/elementwise_op_function.h +++ b/paddle/operators/elementwise_op_function.h @@ -108,7 +108,7 @@ void ElementwiseCompute(const framework::ExecutionContext& ctx) { PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), "Rank of first input must >= rank of second input.") - if (x_dims == y_dims || product(y_dims) == 1) { + if (x_dims == y_dims) { functor f; f.template Run(x, y, z, ctx); return; @@ -174,12 +174,6 @@ void ElementwiseGradCompute(const framework::ExecutionContext& ctx) { return; } - if (product(y_dims) == 1) { - functor1 f; - f(place, x, y, out, dx, dy, dout); - return; - } - int axis = ctx.Attr("axis"); axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis); diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc index fa325bb28299afe24a67772473529fb76b9c73e1..0f1722a5383c80ff2ede0801d34f22a80fbc6e52 100644 --- a/paddle/operators/feed_op.cc +++ b/paddle/operators/feed_op.cc @@ -1,59 +1,72 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. */ + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ -#include "paddle/operators/feed_op.h" +#include "paddle/framework/feed_fetch_type.h" +#include "paddle/framework/op_registry.h" +#include "paddle/framework/operator.h" namespace paddle { namespace operators { - -class FeedOp : public framework::OperatorWithKernel { +class FeedOp : public framework::OperatorBase { public: - using framework::OperatorWithKernel::OperatorWithKernel; - - protected: - void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output should be not null."); - auto& shape = ctx->Attrs().Get>("dims"); - std::vector shape_int64(shape.size(), 0); - std::transform(shape.begin(), shape.end(), shape_int64.begin(), - [](int a) { return static_cast(a); }); - ctx->SetOutputDim("Out", framework::make_ddim(shape_int64)); - // TODO(qijun): need to handle LodTensor later - } + FeedOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + void Run(const framework::Scope &scope, + const platform::DeviceContext &dev_ctx) const override { + auto feed_var_name = Input("X"); + auto *feed_var = scope.FindVar(feed_var_name); + + PADDLE_ENFORCE(feed_var != nullptr, + "Cannot find feed_var in scope, feed_var_name is %s", + feed_var_name); + + auto out_name = this->Output("Out"); + auto *out_var = scope.FindVar(out_name); + PADDLE_ENFORCE(out_var != nullptr, + "Cannot find out_var in scope, out_var_name is %s", + out_name); + + auto col = Attr("col"); + + VLOG(3) << "Feed Var " << feed_var_name << "'s " << col << " column to var" + << out_name; - framework::DataType IndicateDataType( - const framework::ExecutionContext& ctx) const override { - return static_cast(Attr("dataType")); + auto &feed_list = feed_var->Get(); + auto &feed_item = feed_list.at(static_cast(col)); + auto *out_item = out_var->GetMutable(); + out_item->CopyFrom(feed_item, dev_ctx.GetPlace(), dev_ctx); + out_item->set_lod(feed_item.lod()); } }; -class FeedOpMaker : public framework::OpProtoAndCheckerMaker { +class FeedOpInfoMaker : public framework::OpProtoAndCheckerMaker { public: - FeedOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + FeedOpInfoMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddAttr("dataType", "output data type") - .SetDefault(framework::DataType::FP32); - AddAttr("col", "The col in global feed variable").SetDefault(0); - AddAttr>("dims", "The dimension of feed tensor."); - AddOutput("Out", "The output of feed op."); - AddComment(R"DOC(Feed data from global feed variable)DOC"); + AddInput("X", "The input of feed op"); + AddOutput("Out", "The output of feed op"); + AddComment("feed op, it should not be configured by users directly"); + AddAttr("col", "column of feed"); } }; } // namespace operators } // namespace paddle -namespace ops = paddle::operators; -REGISTER_OP_WITHOUT_GRADIENT(feed, ops::FeedOp, ops::FeedOpMaker); -REGISTER_OP_CPU_KERNEL(feed, ops::FeedKernel); +REGISTER_OPERATOR(feed, 
paddle::operators::FeedOp, + paddle::framework::EmptyGradOpMaker, + paddle::operators::FeedOpInfoMaker); diff --git a/paddle/operators/feed_op.h b/paddle/operators/feed_op.h deleted file mode 100644 index e756cd1842a4db3fbe17138a1133e7cb41d4809e..0000000000000000000000000000000000000000 --- a/paddle/operators/feed_op.h +++ /dev/null @@ -1,42 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" - -namespace paddle { -namespace operators { - -template -class FeedKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& ctx) const override { - framework::Tensor* out = ctx.Output("Out"); - out->mutable_data(ctx.GetPlace()); - framework::Variable* g_feed_variable = - framework::GetGlobalScope().FindVar("feed_value"); - const auto& tensors = - g_feed_variable->Get>(); - int col = ctx.template Attr("col"); - PADDLE_ENFORCE_GT(tensors.size(), static_cast(col)); - // TODO(qijun): - // check tensors[col].dims() with attribute, - // except the first dimenson. - out->CopyFrom(tensors[col], ctx.GetPlace(), ctx.device_context()); - } -}; - -} // namespace operators -} // namespace paddle diff --git a/paddle/operators/fetch_op.cc b/paddle/operators/fetch_op.cc index 90737c8c550ca18f03c6a9ad0d9323d0b4d0b96d..c1b3d66bac4c703ce78b247aadc2975bb146b5b0 100644 --- a/paddle/operators/fetch_op.cc +++ b/paddle/operators/fetch_op.cc @@ -1,52 +1,76 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
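To make the control flow of the new feed operator above easier to follow, here is a minimal, editorial driver sketch. It is not part of this patch; the variable names `"feed"` and `"x"` are hypothetical, and it assumes the `OpRegistry::CreateOp(type, inputs, outputs, attrs)` overload and `Scope::Var()` are the calls available in this revision. The fetch operator further below mirrors the same pattern in the opposite direction, copying a variable into the `col`-th slot of a `FeedFetchList`.

```cpp
// Editorial sketch only, not part of the patch.
#include "paddle/framework/feed_fetch_type.h"
#include "paddle/framework/op_registry.h"
#include "paddle/framework/scope.h"
#include "paddle/platform/device_context.h"

void FeedColumnZero() {
  paddle::framework::Scope scope;

  // The source variable holds a FeedFetchList (a vector of LoDTensors).
  auto* feed_list =
      scope.Var("feed")->GetMutable<paddle::framework::FeedFetchList>();
  feed_list->resize(1);
  // ... fill (*feed_list)[0] with the LoDTensor to be fed as column 0 ...

  scope.Var("x");  // destination variable of the feed op

  paddle::framework::AttributeMap attrs;
  attrs["col"] = 0;  // copy column 0 of the feed list

  auto feed_op = paddle::framework::OpRegistry::CreateOp(
      "feed", {{"X", {"feed"}}}, {{"Out", {"x"}}}, attrs);

  paddle::platform::CPUDeviceContext dev_ctx;
  feed_op->Run(scope, dev_ctx);  // "x" now holds a copy of (*feed_list)[0]
}
```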
*/ -#include "paddle/operators/fetch_op.h" +#include "paddle/framework/feed_fetch_type.h" +#include "paddle/framework/op_registry.h" namespace paddle { namespace operators { -class FetchOp : public framework::OperatorWithKernel { +class FetchOp : public framework::OperatorBase { public: - using framework::OperatorWithKernel::OperatorWithKernel; + FetchOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) {} - protected: - void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Input"), "Input should be not null."); - } + void Run(const framework::Scope &scope, + const platform::DeviceContext &dev_ctx) const override { + auto fetch_var_name = Input("X"); + auto *fetch_var = scope.FindVar(fetch_var_name); + PADDLE_ENFORCE(fetch_var != nullptr, + "Cannot find fetch variable in scope, fetch_var_name is %s", + fetch_var_name); + + auto out_name = this->Output("Out"); + auto *out_var = scope.FindVar(out_name); + PADDLE_ENFORCE(out_var != nullptr, + "Cannot find out_var in scope, out_var_name is %s", + out_name); + + auto col = static_cast(Attr("col")); + + auto *fetch_list = out_var->GetMutable(); + auto &src_item = fetch_var->Get(); - framework::DataType IndicateDataType( - const framework::ExecutionContext& ctx) const override { - return static_cast(Attr("dataType")); + if (col >= fetch_list->size()) { + fetch_list->resize(col + 1); + } + auto &dst_item = fetch_list->at(col); + + // FIXME(yuyang18): Should we assume the fetch operator always generate + // CPU outputs? + dst_item.CopyFrom(src_item, platform::CPUPlace(), dev_ctx); + + VLOG(3) << "Fetch variable " << fetch_var_name << " to " << out_name; } }; -class FetchOpMaker : public framework::OpProtoAndCheckerMaker { +class FetchOpInfoMaker : public framework::OpProtoAndCheckerMaker { public: - FetchOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + FetchOpInfoMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddAttr("dataType", "output data type") - .SetDefault(framework::DataType::FP32); - AddAttr("col", "The col in global fetch variable").SetDefault(0); - AddInput("Input", "The output of fetch op."); - AddComment(R"DOC(Fetch data to global fetch variable)DOC"); + AddInput("X", "The input of fetch op"); + AddOutput("Out", "The output of fetch op"); + AddComment("fetch op, it should not be configured by users directly"); + AddAttr("col", "column of fetch"); } }; - } // namespace operators } // namespace paddle -namespace ops = paddle::operators; -REGISTER_OP_WITHOUT_GRADIENT(fetch, ops::FetchOp, ops::FetchOpMaker); -REGISTER_OP_CPU_KERNEL(fetch, ops::FetchKernel); +REGISTER_OPERATOR(fetch, paddle::operators::FetchOp, + paddle::framework::EmptyGradOpMaker, + paddle::operators::FetchOpInfoMaker); diff --git a/paddle/operators/fetch_op.h b/paddle/operators/fetch_op.h deleted file mode 100644 index b2a6e95875054ca2cec51624c20a6c19490a9e88..0000000000000000000000000000000000000000 --- a/paddle/operators/fetch_op.h +++ /dev/null @@ -1,45 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" - -namespace paddle { -namespace operators { - -template -class FetchKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& ctx) const override { - const framework::Tensor* input = ctx.Input("Input"); - framework::Variable* g_fetch_variable = - framework::GetGlobalScope().FindVar("fetch_value"); - auto* tensors = - g_fetch_variable->GetMutable>(); - int col = ctx.template Attr("col"); - if (tensors->size() < static_cast(col + 1)) { - tensors->resize(col + 1); - } - PADDLE_ENFORCE_GT(tensors->size(), static_cast(col)); - (*tensors)[col].Resize(input->dims()); - (*tensors)[col].mutable_data(platform::CPUPlace()); - (*tensors)[col].CopyFrom(*input, platform::CPUPlace(), - ctx.device_context()); - // TODO(qijun): need to handle LodTensor later - } -}; - -} // namespace operators -} // namespace paddle diff --git a/paddle/operators/fill_constant_op.cc b/paddle/operators/fill_constant_op.cc index 65d03d5fa4426ef4229c1155753c67e59ce98857..0438d4d085f81d463253605b3aeca640a433a3b3 100644 --- a/paddle/operators/fill_constant_op.cc +++ b/paddle/operators/fill_constant_op.cc @@ -21,7 +21,6 @@ class FillConstantOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of FillConstantOp should not be null."); @@ -33,9 +32,10 @@ class FillConstantOp : public framework::OperatorWithKernel { ctx->SetOutputDim("Out", dims); } + protected: framework::DataType IndicateDataType( const framework::ExecutionContext &ctx) const override { - return static_cast(ctx.Attr("dataType")); + return static_cast(ctx.Attr("data_type")); } }; @@ -44,7 +44,7 @@ class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker { FillConstantOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : framework::OpProtoAndCheckerMaker(proto, op_checker) { - AddAttr("dataType", + AddAttr("data_type", "(int, default 5 (FP32)) " "Output data type") .SetDefault(framework::DataType::FP32); diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/operators/fill_zeros_like_op.cc index 4c70b9a36bb8f3a2184a8f2ac738378e9d3271e2..ed529ac40aaf179b35a9ab32e11ed7dbbe9289ba 100644 --- a/paddle/operators/fill_zeros_like_op.cc +++ b/paddle/operators/fill_zeros_like_op.cc @@ -21,7 +21,6 @@ class FillZerosLikeOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of FillZerosLikeOp should not be null."); diff --git a/paddle/operators/gather_op.cc b/paddle/operators/gather_op.cc index fb99c6c01670fcbacd012573b0679ac755638c57..f6c7f472da24a1a60c0d2538ae643bdc8e55b10f 100644 --- a/paddle/operators/gather_op.cc +++ b/paddle/operators/gather_op.cc @@ -22,7 +22,6 @@ class GatherOp : public framework::OperatorWithKernel { public: using 
framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of GatherOp should not be null."); @@ -40,6 +39,7 @@ class GatherOp : public framework::OperatorWithKernel { ctx->SetOutputDim("Out", output_dims); } + protected: framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { return framework::ToDataType(ctx.Input("X")->type()); @@ -50,11 +50,11 @@ class GatherGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } + protected: framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { return framework::ToDataType(ctx.Input("X")->type()); diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc index ca7fb385057dda4ee6f7dea7edc21d9ebbf6b48e..04dfdf7c48381240108cf924979764966599151f 100644 --- a/paddle/operators/gaussian_random_op.cc +++ b/paddle/operators/gaussian_random_op.cc @@ -42,7 +42,6 @@ class GaussianRandomOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of GaussianRandomOp should not be null."); @@ -57,9 +56,10 @@ class GaussianRandomOp : public framework::OperatorWithKernel { ctx->SetOutputDim("Out", framework::make_ddim(temp)); } + protected: framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { - return static_cast(Attr("data_type")); + return static_cast(ctx.Attr("data_type")); } }; diff --git a/paddle/operators/gru_unit_op.cc b/paddle/operators/gru_unit_op.cc index 24f84597cd7301af6521b8c1032e69569ba6f03a..72dd841c85ce9934a57263d10c366e679693c471 100644 --- a/paddle/operators/gru_unit_op.cc +++ b/paddle/operators/gru_unit_op.cc @@ -23,7 +23,6 @@ class GRUUnitOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Input"), "Input(%s) of GRUUnitOp should not be null.", "Input"); @@ -131,7 +130,6 @@ class GRUUnitGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Input"), "Input(%s) of GRUUnitGradOp should not be null.", "Input"); diff --git a/paddle/operators/images/batch_norm_fork.dot b/paddle/operators/images/batch_norm_fork.dot new file mode 100644 index 0000000000000000000000000000000000000000..4bc47713cba2cb23f1b34fffe6426ef10ac3a9df --- /dev/null +++ b/paddle/operators/images/batch_norm_fork.dot @@ -0,0 +1,25 @@ +digraph ImageBatchNormForkGragh { + subgraph cluster_before { + Prev [label="...", shape=plaintext]; + Rnn [label="rnn_op", shape=box]; + BatchNorm [label="batch_norm_op", shape=box]; + Fc [label="fc_op", shape=box]; + After [label="...", shape=plaintext]; + Prev -> Rnn -> BatchNorm -> Fc -> After; + label="original"; + } + + subgraph cluster_after { + Prev2 [label="...", shape=plaintext]; + Rnn2 [label="rnn_op", shape=box]; + BatchNorm2_1 [label="train_batch_norm_op", 
shape=box]; + BatchNorm2_2 [label="infer_batch_norm_op", shape=box]; + Fc2_1 [label="fc_op", shape=box]; + Fc2_2 [label="fc_op", shape=box]; + After2_1 [label="...", shape=plaintext]; + After2_2 [label="...", shape=plaintext]; + Prev2 -> Rnn2 -> BatchNorm2_1 -> Fc2_1 -> After2_1; + Rnn2 -> BatchNorm2_2 ->Fc2_2 ->After2_2 + label="forked"; + } +} diff --git a/paddle/operators/images/batch_norm_fork.png b/paddle/operators/images/batch_norm_fork.png new file mode 100644 index 0000000000000000000000000000000000000000..aded62bce5bc268b7a3ef4dc96c89fe21d6ea955 Binary files /dev/null and b/paddle/operators/images/batch_norm_fork.png differ diff --git a/paddle/operators/images/batch_norm_op_kernel.png b/paddle/operators/images/batch_norm_op_kernel.png new file mode 100644 index 0000000000000000000000000000000000000000..a99ce81ff3bf42880ebbd6a1297de3bf038e09b2 Binary files /dev/null and b/paddle/operators/images/batch_norm_op_kernel.png differ diff --git a/paddle/operators/increment_op.cc b/paddle/operators/increment_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..139392c691e00b2a94f46801f1cfc2018ce139f5 --- /dev/null +++ b/paddle/operators/increment_op.cc @@ -0,0 +1,75 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#include "paddle/operators/increment_op.h" + +namespace paddle { +namespace operators { + +class IncrementOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of IncrementOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of IncrementOp should not be null."); + ctx->SetOutputDim("Out", ctx->GetInputDim("X")); + ctx->ShareLoD("X", /*->*/ "Out"); + } +}; + +template +class IncrementOpMaker : public framework::OpProtoAndCheckerMaker { + public: + IncrementOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "(Tensor) The input tensor of increment operator"); + AddOutput("Out", "(Tensor) The output tensor of increment operator."); + AddComment(R"DOC(Increment operator + +The equation is: Out = X + step +)DOC"); + AddAttr("step", + "The step size by which the " + "input tensor will be incremented.") + .SetDefault(1.0); + } +}; + +class IncrementGradOpMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDescBind(); + grad_op->SetType("scale"); + grad_op->SetInput("X", OutputGrad("Out")); + grad_op->SetOutput("Out", InputGrad("X")); + grad_op->SetAttr("scale", 1.0f); + return std::unique_ptr(grad_op); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(increment, ops::IncrementOp, ops::IncrementOpMaker, + ops::IncrementGradOpMaker); +REGISTER_OP_CPU_KERNEL(increment, + ops::IncrementKernel); diff --git a/paddle/operators/increment_op.cu b/paddle/operators/increment_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..659c380d147a36650452bea23b30cbcf1ff516ee --- /dev/null +++ b/paddle/operators/increment_op.cu @@ -0,0 +1,19 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/increment_op.h" + +REGISTER_OP_GPU_KERNEL( + increment, + paddle::operators::IncrementKernel); diff --git a/paddle/operators/increment_op.h b/paddle/operators/increment_op.h new file mode 100644 index 0000000000000000000000000000000000000000..342e254fc453555c70923efbca02fdfd014af015 --- /dev/null +++ b/paddle/operators/increment_op.h @@ -0,0 +1,40 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
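A quick note on `IncrementGradOpMaker` above, since the mapping to a `scale` op may look surprising at first: `Out = X + step` with `step` a constant attribute, so the derivative of `Out` with respect to `X` is the identity and the backward pass only needs to forward the upstream gradient. Schematically (editorial summary, not patch content):

```cpp
// forward : Out    = X + step          // step is an attribute, dOut/dX = 1
// backward: X@GRAD = scale(Out@GRAD)   // scale = 1.0, i.e. a pass-through copy
```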
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { +template +class IncrementKernel : public framework::OpKernel { + public: + virtual void Compute(const framework::ExecutionContext& context) const { + auto* tensor = context.Output("Out"); + auto* in = context.Input("X"); + tensor->mutable_data(in->place()); + + auto step = static_cast(context.Attr("step")); + + auto eigen_out = framework::EigenVector::Flatten(*tensor); + auto eigen_in = framework::EigenVector::Flatten(*in); + auto& place = context.GetEigenDevice(); + eigen_out.device(place) = eigen_in + step; + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/lookup_table_op.cc b/paddle/operators/lookup_table_op.cc index 3f8d4ab85756ec007004e397a55ea60e2fa704ae..b88cd14d78f616b0e57386ab891dad1a872bfe65 100644 --- a/paddle/operators/lookup_table_op.cc +++ b/paddle/operators/lookup_table_op.cc @@ -21,7 +21,6 @@ class LookupTableOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("W"), "Input(W) of LookupTableOp should not be null."); @@ -37,6 +36,7 @@ class LookupTableOp : public framework::OperatorWithKernel { ctx->ShareLoD("Ids", /*->*/ "Out"); } + protected: framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { return framework::ToDataType(ctx.Input("W")->type()); @@ -69,12 +69,12 @@ class LookupTableOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { auto table_dims = ctx->GetInputDim("W"); ctx->SetOutputDim(framework::GradVarName("W"), table_dims); } + protected: framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { return framework::ToDataType(ctx.Input("W")->type()); diff --git a/paddle/operators/lstm_op.cc b/paddle/operators/lstm_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..0a089b7c2dc1e05224525bc4fe5399ec39036d01 --- /dev/null +++ b/paddle/operators/lstm_op.cc @@ -0,0 +1,226 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/lstm_op.h" + +namespace paddle { +namespace operators { + +class LSTMOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Input"), + "Input(Input) of LSTM should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Hidden"), + "Output(Hidden) of LSTM should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Cell"), + "Output(Cell) of LSTM should not be null."); + + auto x_dims = ctx->GetInputDim("Input"); + PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank must be 2."); + + if (ctx->HasInput("H0")) { + PADDLE_ENFORCE(ctx->HasInput("C0"), + "Input(Cell) and Input(Hidden) of LSTM should not " + "be null at the same time."); + auto h_dims = ctx->GetInputDim("H0"); + auto c_dims = ctx->GetInputDim("C0"); + PADDLE_ENFORCE(h_dims == c_dims, + "The dimension of Input(H0) and Input(C0) " + "should be the same."); + } + + int frame_size = x_dims[1] / 4; + auto w_dims = ctx->GetInputDim("Weight"); + PADDLE_ENFORCE_EQ(w_dims.size(), 2, + "The rank of Input(Weight) should be 2."); + PADDLE_ENFORCE_EQ(w_dims[0], frame_size, + "The first dimension of Input(Weight) " + "should be %d.", + frame_size); + PADDLE_ENFORCE_EQ(w_dims[1], 4 * frame_size, + "The second dimension of Input(Weight) " + "should be 4 * %d.", + frame_size); + auto b_dims = ctx->GetInputDim("Bias"); + PADDLE_ENFORCE_EQ(b_dims.size(), 2, "The rank of Input(Bias) should be 2."); + PADDLE_ENFORCE_EQ(b_dims[0], 1, + "The first dimension of Input(Bias) should be 1."); + if (ctx->Attrs().Get("usePeepholes")) { + PADDLE_ENFORCE_EQ(b_dims[1], 7 * frame_size, + "The second dimension of Input(Bias) should be " + "7 * %d if enable peepholes connection", + frame_size); + } else { + PADDLE_ENFORCE_EQ(b_dims[1], 4 * frame_size, + "The second dimension of Input(Bias) should be " + "4 * %d if disable peepholes connection", + frame_size); + } + ctx->SetOutputDim("Hidden", {x_dims[0], frame_size}); + ctx->SetOutputDim("Cell", {x_dims[0], frame_size}); + ctx->SetOutputDim("BatchGate", x_dims); + ctx->ShareLoD("Input", "Hidden"); + ctx->ShareLoD("Input", "Cell"); + } +}; + +class LSTMOpMaker : public framework::OpProtoAndCheckerMaker { + public: + LSTMOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Input", + "(LoDTensor) the first input is a LodTensor, which support " + "variable-time length input sequence. The underlying tensor in " + "this LoDTensor is a matrix with shape (T X 4D), where, T is the " + "total time steps in this mini-batch, D is the hidden size."); + AddInput("H0", + "(Tensor, optional) the initial hidden state is an optional " + "input. This is a tensor with shape (N x D), where N is the " + "batch size, D is the hidden size."); + AddInput("C0", + "(Tensor, optional) the initial cell state is an optional " + "input. This is a tensor with shape (N x D), where N is the " + "batch size. `H0` and `C0` can be NULL but only at the same time"); + AddInput("Weight", + "(Tensor) the learnable hidden-hidden weights." + " - The shape is (D x 4D), where D is the hidden size. " + " - Weight = {W_ch, W_ih, W_fh, W_oh}"); + AddInput("Bias", + "(Tensor) the learnable weights, which contains two parts: " + "input-hidden bias weight and peephole connections weight if " + "setting `usePeepholes` True. " + "1. `usePeepholes = False` " + " - The shape is (1 x 4D). 
" + " - Bias = {b_c, b_i, b_f, b_o}." + "2. `usePeepholes = True` " + " - The shape is (1 x 7D). " + " - Bias = {b_c, b_i, b_f, b_o, W_ic, W_fc, W_oc}."); + AddOutput("BatchGate", + "(LoDTensor) This LoDTensor contains input gate, forget gate " + "and output gate after the nonlinear computation. This " + "LoDTensor has the same shape with the reorganized input, which " + "was also be called batch input. The LoD size is 2. The first " + "LoD is the batch offsets and the second LoD contains the " + "indexes, which denote the position of reorganized sequence " + "in the raw input.") + .AsIntermediate(); + AddOutput("Hidden", + "(LoDTensor) the hidden state lod tensor of LSTM operator. " + "The shape and lod is the same with the `Input`."); + AddOutput("Cell", + "(LoDTensor) the cell state lod tensor of LSTM operator. " + "The shape and lod is the same with the `Input`."); + AddAttr("usePeepholes", + "(bool, defalut: True) " + "whether to enable diagonal/peephole connections.") + .SetDefault(true); + AddAttr("isReverse", + "(bool, defalut: False) " + "whether to compute reversed LSTM.") + .SetDefault(false); + AddAttr( + "gateActivation", + "(string, default: sigmoid)" + "The activation for input gate, forget gate and output " + "gate, `sigmoid` by default.") + .SetDefault("sigmoid"); + AddAttr("cellActivation", + "(string, default: tanh)" + "The activation for cell output, `tanh` by defalut.") + .SetDefault("tanh"); + AddAttr("candidateActivation", + "(string, default: tanh)" + "The activation for candidate hidden state, " + "`tanh` by default.") + .SetDefault("tanh"); + AddComment(R"DOC(Long-Short Term Memory (LSTM) Operator + +The defalut implementation is diagonal/peephole connection [1], the formula is +as follows + + i_t = \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + W_{ic}c_{t-1} + b_i) + + f_t = \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + W_{fc}c_{t-1} + b_f) + + \tilde{c_t} = act_g(W_{cx}x_t + W_{ch}h_{t-1} + b_c) + + o_t = \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + W_{oc}c_t + b_o) + + c_t = f_t ⊙ c_{t-1} + i_t ⊙ \tilde{c_t} + + h_t = o_t ⊙ act_h(c_t) + +where the W terms denote weight matrices (e.g. \f$W_{xi}\f$ is the matrix +of weights from the input gate to the input), \f$W_{ic}, W_{fc}, W_{oc}\f$ +are diagonal weight matrices for peephole connections. In our implenmention, +We use vectors to reprenset these diagonal weight matrices. The b terms +denote bias vectors (\f$b_i\f$ is the input gate bias vector), \f$\sigma\f$ +is the non-line actications, such as logistic sigmoid function, and +\f$i, f, o\f$ and \f$c\f$ are respectively the input gate, forget gate, +output gate and cell activation vectors, all of which are the same size as +the cell output activation vector \f$h\f$. + +The ⊙ is the element-wise product of the vectors, \f$act_g\f$ and \f$act_h\f$ +are the cell input and cell output activation functions, `tanh` is usually +used for them. \f$\tilde{c_t}\f$ is also called candidate hidden state, +which is computed based on the current input and the previous hidden state. + +Set `usePeepholes` False to disable peephole connection [2]. The formula +is omitted here. + +@note These \f$W_{xi}x_{t}, W_{xf}x_{t}, W_{xc}x_{t}, W_{xo}x_{t}\f$ +operations on the input x_{t} were NOT included in this operator. +Users can choose to use fully-connect operator before LSTM operator. + +[1] Hasim Sak, Andrew Senior, and Francoise Beaufays. Long short-term memory +recurrent neural network architectures for large scale acoustic modeling. +INTERSPEECH, 2014. + +[2] S. Hochreiter and J. Schmidhuber. 
Long Short-Term Memory. +Neural Computation, 9(8):1735-1780, 1997. + +)DOC"); + } +}; + +class LSTMGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Hidden")), + "Input(Hidden@GRAD) should not be null"); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Cell")), + "Input(Cell@GRAD) should not be null"); + ctx->SetOutputDim(framework::GradVarName("Weight"), + ctx->GetInputDim("Weight")); + ctx->SetOutputDim(framework::GradVarName("Bias"), ctx->GetInputDim("Bias")); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(lstm, ops::LSTMOp, ops::LSTMOpMaker, lstm_grad, ops::LSTMGradOp); +REGISTER_OP_CPU_KERNEL(lstm, ops::LSTMKernel, + ops::LSTMKernel); +REGISTER_OP_CPU_KERNEL(lstm_grad, + ops::LSTMGradKernel, + ops::LSTMGradKernel); diff --git a/paddle/operators/lstm_op.cu b/paddle/operators/lstm_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..9ad56941553bf19a56c25f41f76fe20dfa3a106f --- /dev/null +++ b/paddle/operators/lstm_op.cu @@ -0,0 +1,23 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/lstm_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(lstm, ops::LSTMKernel, + ops::LSTMKernel); +REGISTER_OP_GPU_KERNEL(lstm_grad, + ops::LSTMGradKernel, + ops::LSTMGradKernel); diff --git a/paddle/operators/lstm_op.h b/paddle/operators/lstm_op.h new file mode 100644 index 0000000000000000000000000000000000000000..0af5694c48fcb4437e3acd422606de013bb2e145 --- /dev/null +++ b/paddle/operators/lstm_op.h @@ -0,0 +1,139 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include "paddle/framework/op_registry.h" +#include "paddle/operators/math/lstm_compute.h" +#include "paddle/operators/math/math_function.h" +#include "paddle/operators/math/sequence2batch.h" + +namespace paddle { +namespace operators { + +using framework::LoDTensor; +using framework::Tensor; +template +using EigenMatrix = framework::EigenMatrix; + +template +class LSTMKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* input = ctx.Input("Input"); + auto* weight = ctx.Input("Weight"); + auto* bias = ctx.Input("Bias"); + + auto* batch_gate = ctx.Output("BatchGate"); + batch_gate->mutable_data(ctx.GetPlace()); + auto* hidden_out = ctx.Output("Hidden"); + hidden_out->mutable_data(ctx.GetPlace()); + auto* cell_out = ctx.Output("Cell"); + cell_out->mutable_data(ctx.GetPlace()); + + // Now the function ShareLoD in InferShape is not implemented. + // So copy LoD here. + ctx.ShareLoD("Input", "Hidden"); + ctx.ShareLoD("Input", "Cell"); + + bool is_reverse = ctx.Attr("isReverse"); + math::LoDTensor2BatchFunctor to_batch; + to_batch(ctx.device_context(), *input, *batch_gate, is_reverse); + + auto in_dims = input->dims(); + int frame_size = static_cast(in_dims[1] / 4); + framework::DDim dims({in_dims[0], frame_size}); + + if (bias) { + Eigen::array extents({{1, 4 * frame_size}}); + Eigen::array offsets({{0, 0}}); + auto b = EigenMatrix::From(*bias); + auto gate = EigenMatrix::From(*batch_gate); + gate.device(ctx.GetEigenDevice()) = + gate + + b.slice(offsets, extents) + .reshape(Eigen::array({{1, frame_size * 4}})) + .broadcast( + Eigen::array({{static_cast(in_dims[0]), 1}})); + } + + math::LstmMetaValue lstm_value; + T* bias_data = const_cast(bias->data()); + // the code style in LstmMetaValue will be updated later. 
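+    // Editorial note, not in the original patch: with usePeepholes enabled the
+    // (1 x 7D) Bias tensor is packed as [b_c | b_i | b_f | b_o | W_ic | W_fc | W_oc]
+    // (see the LSTMOpMaker comment above), so the three peephole vectors start at
+    // offset 4 * frame_size and are frame_size elements apart. The pointer
+    // arithmetic below simply aliases those three slices of the bias data.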
+ lstm_value.checkIg = bias_data + 4 * frame_size; + lstm_value.checkFg = lstm_value.checkIg + frame_size; + lstm_value.checkOg = lstm_value.checkFg + frame_size; + lstm_value.prevStateValue = nullptr; + + framework::LoDTensor batch_out, batch_cell, batch_cell_pre_act; + batch_out.mutable_data(dims, ctx.GetPlace()); + batch_cell.mutable_data(dims, ctx.GetPlace()); + batch_cell_pre_act.mutable_data(dims, ctx.GetPlace()); + + auto batch_starts = batch_gate->lod()[0]; + size_t num_batch = batch_starts.size() - 1; + auto gate_act = ctx.Attr("gateActivation"); + auto cell_act = ctx.Attr("cellActivation"); + auto cand_act = ctx.Attr("candidateActivation"); + + for (size_t n = 0; n < num_batch; n++) { + int bstart = static_cast(batch_starts[n]); + int bend = static_cast(batch_starts[n + 1]); + + Tensor gate_t = batch_gate->Slice(bstart, bend); + Tensor out_t = batch_out.Slice(bstart, bend); + Tensor cell_t = batch_cell.Slice(bstart, bend); + Tensor cell_pre_act_t = batch_cell_pre_act.Slice(bstart, bend); + + int cur_batch_size = bend - bstart; + + if (n != 0) { + int pre_h_start = static_cast(batch_starts[n - 1]); + int pre_h_end = pre_h_start + cur_batch_size; + auto pre_hidden_t = batch_out.Slice(pre_h_start, pre_h_end); + math::matmul(ctx.device_context(), pre_hidden_t, false, + *weight, false, static_cast(1.0), &gate_t, + static_cast(1.0)); + } + // else if : FIXME support the initial hidden and cell + + lstm_value.gateValue = gate_t.data(); + lstm_value.outputValue = out_t.data(); + lstm_value.stateValue = cell_t.data(); + lstm_value.stateActiveValue = cell_pre_act_t.data(); + math::LstmUnitFunctor::compute(ctx.device_context(), lstm_value, + frame_size, cur_batch_size, + gate_act, cell_act, cand_act); + lstm_value.prevStateValue = lstm_value.stateValue; + } + + math::Batch2LoDTensorFunctor to_seq; + batch_out.set_lod(batch_gate->lod()); + // restore the output hidden in LoDTensor from the batch hidden + to_seq(ctx.device_context(), batch_out, *hidden_out); + + batch_cell.set_lod(batch_gate->lod()); + // restore the output cell state in LoDTensor from the batch cell + to_seq(ctx.device_context(), batch_cell, *cell_out); + } +}; + +template +class LSTMGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override {} +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/lstm_unit_op.cc b/paddle/operators/lstm_unit_op.cc index 13a45ec246988558b0c090ab14e627b5b0e44532..5d63017208a55ec4bcc2e8d66f1ca2e1b84d4593 100644 --- a/paddle/operators/lstm_unit_op.cc +++ b/paddle/operators/lstm_unit_op.cc @@ -21,7 +21,6 @@ class LstmUnitOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of LSTM should not be null."); PADDLE_ENFORCE(ctx->HasInput("C_prev"), @@ -76,7 +75,6 @@ class LstmUnitGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("C")), "Input(C@GRAD) should not be null"); diff --git a/paddle/operators/lstm_unit_op.h b/paddle/operators/lstm_unit_op.h index a0ff498c1d3ed2aaa10f5473ef91de168c250649..625b1852c2f0eb2ed435f73fea251c40c614a7dd 100644 --- a/paddle/operators/lstm_unit_op.h +++ b/paddle/operators/lstm_unit_op.h @@ -19,7 +19,6 @@ 
namespace paddle { namespace operators { -using framework::LoDTensor; using framework::Tensor; template diff --git a/paddle/operators/margin_rank_loss_op.cc b/paddle/operators/margin_rank_loss_op.cc index 5be61dfec3bb58ab9b658cb59ab0dd49bb67d8cb..638a99addc2119e8f44648cc54b97bd8a892d2bc 100644 --- a/paddle/operators/margin_rank_loss_op.cc +++ b/paddle/operators/margin_rank_loss_op.cc @@ -21,7 +21,6 @@ class MarginRankLossOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { // input check PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null."); @@ -94,7 +93,6 @@ class MarginRankLossGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null."); PADDLE_ENFORCE(ctx->HasInput("X1"), "Input(X1) shouldn't be null."); diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index 1a2f623ce7917b1e60656743e699271eab9c7011..5598669ef96535b7d47150052b3841771c37c60b 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -1,18 +1,28 @@ +add_subdirectory(detail) + if(WITH_GPU) nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc im2col.cu DEPS cblas device_context operator) - nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) + nv_test(math_function_gpu_test SRCS math_function_test.cu DEPS math_function tensor) + nv_library(selected_rows_functor SRCS selected_rows_functor.cc selected_rows_functor.cu DEPS selected_rows math_function) + nv_test(selected_rows_functor_gpu_test SRCS selected_rows_functor_test.cu DEPS selected_rows_functor) nv_library(softmax SRCS softmax.cc softmax.cu DEPS operator) nv_library(cross_entropy SRCS cross_entropy.cc cross_entropy.cu DEPS operator) nv_library(pooling SRCS pooling.cc pooling.cu DEPS device_context) nv_library(vol2col SRCS vol2col.cc vol2col.cu DEPS device_context) + nv_library(sequence2batch SRCS sequence2batch.cc sequence2batch.cu DEPS device_context) + nv_library(lstm_compute SRCS lstm_compute.cc lstm_compute.cu DEPS device_context activation_functions) else() cc_library(math_function SRCS math_function.cc im2col.cc DEPS cblas device_context operator) - cc_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) + cc_library(selected_rows_functor SRCS selected_rows_functor.cc DEPS selected_rows math_function) cc_library(softmax SRCS softmax.cc DEPS operator) cc_library(cross_entropy SRCS cross_entropy.cc DEPS operator) cc_library(pooling SRCS pooling.cc DEPS device_context) cc_library(vol2col SRCS vol2col.cc DEPS device_context) + cc_library(sequence2batch SRCS sequence2batch.cc DEPS device_context) + cc_library(lstm_compute SRCS lstm_compute.cc DEPS device_context activation_functions) endif() +cc_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) +cc_test(selected_rows_functor_test SRCS selected_rows_functor_test.cc DEPS selected_rows_functor) cc_test(im2col_test SRCS im2col_test.cc DEPS math_function tensor) cc_test(vol2col_test SRCS vol2col_test.cc DEPS vol2col tensor) diff --git a/paddle/operators/math/cross_entropy.cu b/paddle/operators/math/cross_entropy.cu index 
367190e6b0682ec62550e869e2f04c3a2b2cbec3..db878129d650d663e187ecabb106eea0e39db6fa 100644 --- a/paddle/operators/math/cross_entropy.cu +++ b/paddle/operators/math/cross_entropy.cu @@ -22,8 +22,6 @@ namespace { template __global__ void CrossEntropyKernel(T* Y, const T* X, const int* label, const int N, const int D) { - // TOOD(qingqing) define CUDA_1D_KERNEL_LOOP macro in a common file. - // CUDA_1D_KERNEL_LOOP(i, N) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { PADDLE_ASSERT(label[i] >= 0 && label[i] < D); diff --git a/paddle/operators/math/detail/CMakeLists.txt b/paddle/operators/math/detail/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..49cf228de2204cb4888cf645a0cb68ed04cc3371 --- /dev/null +++ b/paddle/operators/math/detail/CMakeLists.txt @@ -0,0 +1,5 @@ +if(WITH_AVX) + cc_library(activation_functions SRCS hl_cpu_functions.cc hl_avx_functions.cc) +else() + cc_library(activation_functions SRCS hl_cpu_functions.cc) +endif() diff --git a/paddle/operators/math/detail/hl_activation_functions.h b/paddle/operators/math/detail/hl_activation_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..9d7d9914f0090bff17049038dfa2288d84f3dbda --- /dev/null +++ b/paddle/operators/math/detail/hl_activation_functions.h @@ -0,0 +1,188 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#ifndef HL_ACTIVATION_FUNCTIONS_H_ +#define HL_ACTIVATION_FUNCTIONS_H_ + +#include "hl_functions.h" +#include "paddle/operators/math/lstm_compute.h" + +/** + * Active functions: sigmoid, relu, tanh and linear. + */ +#define FLOAT_ACTIVE_FUNCTION \ + { \ + hppl::typef::sigmoid, hppl::typef::relu, hppl::typef::tanh, \ + hppl::typef::linear \ + } + +#define DOUBLE_ACTIVE_FUNCTION \ + { \ + hppl::typed::sigmoid, hppl::typed::relu, hppl::typed::tanh, \ + hppl::typed::linear \ + } + +#define AVX_ACTIVE_FUNCTION \ + { hppl::sigmoid, hppl::relu, hppl::tanh, hppl::linear } + +namespace hppl { + +using activation_mode_t = paddle::operators::math::activation_mode_t; + +/** + * Hppl supports sigmoid, relu, tanh, linear active functions + * for neural networks' forward and backward activation. 
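+ *
+ * Editorial note, not part of the patch: assuming activation_mode_t enumerates
+ * {sigmoid, relu, tanh, linear} in the same order as the FLOAT_ACTIVE_FUNCTION
+ * table above, a CPU-side caller could pick and apply an activation like this:
+ *
+ *   auto act = hppl::cpu::ForwardAct<float>()(mode);  // returns forward[mode]
+ *   float y = act(x);                                 // apply the activation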
+ */ +template +class Active { + public: + typedef T (*forward)(T); + typedef T (*backward)(T, T); +}; + +template +struct ForwardActType; + +template <> +struct ForwardActType { + using type = Active::forward; +}; + +template <> +struct ForwardActType { + using type = Active::forward; +}; + +template +struct BackwardActType; + +template <> +struct BackwardActType { + using type = Active::backward; +}; + +template <> +struct BackwardActType { + using type = Active::backward; +}; + +#ifdef __NVCC__ +namespace gpu { +static __device__ Active::forward forward[] = FLOAT_ACTIVE_FUNCTION; +static __device__ Active::backward backward[] = FLOAT_ACTIVE_FUNCTION; + +static __device__ Active::forward forward_d[] = DOUBLE_ACTIVE_FUNCTION; +static __device__ Active::backward backward_d[] = + DOUBLE_ACTIVE_FUNCTION; + +template +struct ForwardAct { + __device__ typename ForwardActType::type operator()( + activation_mode_t type); +}; + +template <> +struct ForwardAct { + __device__ ForwardActType::type operator()(activation_mode_t type) { + return forward[type]; + } +}; + +template <> +struct ForwardAct { + __device__ ForwardActType::type operator()(activation_mode_t type) { + return forward_d[type]; + } +}; + +template +struct BackwardAct { + __device__ typename BackwardActType::type operator()( + activation_mode_t type); +}; + +template <> +struct BackwardAct { + __device__ BackwardActType::type operator()(activation_mode_t type) { + return backward[type]; + } +}; + +template <> +struct BackwardAct { + __device__ BackwardActType::type operator()(activation_mode_t type) { + return backward_d[type]; + } +}; + +} // namespace gpu +#else +namespace cpu { +static Active::forward forward[] = FLOAT_ACTIVE_FUNCTION; +static Active::backward backward[] = FLOAT_ACTIVE_FUNCTION; + +static Active::forward forward_d[] = DOUBLE_ACTIVE_FUNCTION; +static Active::backward backward_d[] = DOUBLE_ACTIVE_FUNCTION; + +template +struct ForwardAct { + typename ForwardActType::type operator()(activation_mode_t type); +}; + +template <> +struct ForwardAct { + ForwardActType::type operator()(activation_mode_t type) { + return forward[type]; + } +}; + +template <> +struct ForwardAct { + ForwardActType::type operator()(activation_mode_t type) { + return forward_d[type]; + } +}; + +template +struct BackwardAct { + typename BackwardActType::type operator()(activation_mode_t type); +}; + +template <> +struct BackwardAct { + BackwardActType::type operator()(activation_mode_t type) { + return backward[type]; + } +}; + +template <> +struct BackwardAct { + BackwardActType::type operator()(activation_mode_t type) { + return backward_d[type]; + } +}; + +} // namespace cpu + +#ifdef __AVX__ +namespace avx { +static Active<__m256>::forward forward[] = AVX_ACTIVE_FUNCTION; +static Active<__m256>::backward backward[] = AVX_ACTIVE_FUNCTION; +} // namespace avx +#endif +#endif + +} // namespace hppl + +#endif // HL_ACTIVATION_FUNCTIONS_H_ diff --git a/paddle/operators/math/detail/hl_avx_functions.cc b/paddle/operators/math/detail/hl_avx_functions.cc new file mode 100644 index 0000000000000000000000000000000000000000..415bac5d93ee00244d072b0998c6941b14d4f8d8 --- /dev/null +++ b/paddle/operators/math/detail/hl_avx_functions.cc @@ -0,0 +1,70 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "hl_functions.h" +// TODO(qingqing) refine this dependence +#include "paddle/cuda/src/avx_mathfun.h" + +namespace hppl { + +__m256 exp(__m256 a) { return exp256_ps(a); } + +__m256 relu(const __m256 a) { + __m256 tmp = _mm256_set1_ps(0.0f); + return _mm256_max_ps(a, tmp); +} + +__m256 sigmoid(const __m256 a) { + __m256 max = _mm256_set1_ps(SIGMOID_THRESHOLD_MAX); + __m256 min = _mm256_set1_ps(SIGMOID_THRESHOLD_MIN); + __m256 tmp = _mm256_max_ps(a, min); + tmp = _mm256_min_ps(tmp, max); + tmp = _mm256_sub_ps(_mm256_set1_ps(0.0f), tmp); + tmp = exp(tmp); + tmp = _mm256_add_ps(_mm256_set1_ps(1.0f), tmp); + tmp = _mm256_div_ps(_mm256_set1_ps(1.0f), tmp); + return tmp; +} + +__m256 tanh(const __m256 a) { + __m256 max = _mm256_set1_ps(EXP_MAX_INPUT); + __m256 tmp = _mm256_mul_ps(_mm256_set1_ps(-2.0f), a); + tmp = _mm256_min_ps(tmp, max); + tmp = exp(tmp); + return _mm256_sub_ps(_mm256_div_ps(_mm256_set1_ps(2.0f), + _mm256_add_ps(_mm256_set1_ps(1.0f), tmp)), + _mm256_set1_ps(1.0f)); +} + +__m256 linear(const __m256 a) { return a; } + +__m256 relu(const __m256 a, const __m256 b) { + return _mm256_mul_ps( + a, _mm256_and_ps(_mm256_cmp_ps(b, _mm256_set1_ps(0.0f), _CMP_GT_OS), + _mm256_set1_ps(1.0f))); +} + +__m256 sigmoid(const __m256 a, const __m256 b) { + return _mm256_mul_ps(_mm256_mul_ps(a, b), + _mm256_sub_ps(_mm256_set1_ps(1.0f), b)); +} + +__m256 tanh(const __m256 a, const __m256 b) { + return _mm256_mul_ps( + a, _mm256_sub_ps(_mm256_set1_ps(1.0f), _mm256_mul_ps(b, b))); +} + +__m256 linear(const __m256 a, const __m256 b) { return a; } +} // namespace hppl diff --git a/paddle/operators/math/detail/hl_avx_functions.h b/paddle/operators/math/detail/hl_avx_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..35f4eabb4c07c6cc9d2edded02e5b6290b1232f8 --- /dev/null +++ b/paddle/operators/math/detail/hl_avx_functions.h @@ -0,0 +1,32 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#ifndef HL_AVX_FUNCTIONS_H_ +#define HL_AVX_FUNCTIONS_H_ + +#include + +namespace hppl { +__m256 relu(const __m256 a); +__m256 sigmoid(const __m256 a); +__m256 tanh(const __m256 a); +__m256 linear(const __m256 a); + +__m256 relu(const __m256 a, const __m256 b); +__m256 sigmoid(const __m256 a, const __m256 b); +__m256 tanh(const __m256 a, const __m256 b); +__m256 linear(const __m256 a, const __m256 b); +} // namespace hppl + +#endif // HL_AVX_FUNCTIONS_H_ diff --git a/paddle/operators/math/detail/hl_cpu_functions.cc b/paddle/operators/math/detail/hl_cpu_functions.cc new file mode 100644 index 0000000000000000000000000000000000000000..21ec78f9629af0e4673a56517d76ac6734f57db8 --- /dev/null +++ b/paddle/operators/math/detail/hl_cpu_functions.cc @@ -0,0 +1,89 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "hl_functions.h" + +namespace hppl { +namespace typef { + +float relu(const float a) { + return a > static_cast(0.0) ? a : static_cast(0.0); +} + +float sigmoid(const float a) { + const float min = SIGMOID_THRESHOLD_MIN; + const float max = SIGMOID_THRESHOLD_MAX; + float tmp = (a < min) ? min : ((a > max) ? max : a); + return static_cast(1.0) / (static_cast(1.0) + exp(-tmp)); +} + +float tanh(const float a) { + float tmp = -2.0 * a; + tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp; + return (2.0 / (1.0 + exp(tmp))) - 1.0; +} + +float linear(const float a) { return a; } + +float relu(const float a, const float b) { return a * (b > 0.0 ? 1.0 : 0.0); } + +float sigmoid(const float a, const float b) { + return a * b * (static_cast(1) - b); +} + +float tanh(const float a, const float b) { + return a * (static_cast(1) - b * b); +} + +float linear(const float a, const float b) { return a; } + +} // namespace typef + +namespace typed { +double relu(const double a) { + return a > static_cast(0.0) ? a : static_cast(0.0); +} + +double sigmoid(const double a) { + const double min = SIGMOID_THRESHOLD_MIN; + const double max = SIGMOID_THRESHOLD_MAX; + double tmp = (a < min) ? min : ((a > max) ? max : a); + return static_cast(1.0) / (static_cast(1.0) + exp(-tmp)); +} + +double tanh(const double a) { + double tmp = -2.0 * a; + tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp; + return (2.0 / (1.0 + exp(tmp))) - 1.0; +} + +double linear(const double a) { return a; } + +double relu(const double a, const double b) { + return a * (b > 0.0 ? 
1.0 : 0.0); +} + +double sigmoid(const double a, const double b) { + return a * b * (static_cast<double>(1) - b); +} + +double tanh(const double a, const double b) { + return a * (static_cast<double>(1) - b * b); +} + +double linear(const double a, const double b) { return a; } + +} // namespace typed +} // namespace hppl diff --git a/paddle/operators/math/detail/hl_functions.h b/paddle/operators/math/detail/hl_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..3e2f0c9ee6d3ae2ed598c4d5f09b85b7d61fdd51 --- /dev/null +++ b/paddle/operators/math/detail/hl_functions.h @@ -0,0 +1,71 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#ifndef HL_FUNCTIONS_H_ +#define HL_FUNCTIONS_H_ + +/** + * sigmoid threshold minimum + */ +#define SIGMOID_THRESHOLD_MIN -40.0 + +/** + * sigmoid threshold maximum + */ +#define SIGMOID_THRESHOLD_MAX 13.0 + +/** + * The maximum input value for exp, used to avoid overflow problems. + * Currently it is only used by the tanh function. + */ +#define EXP_MAX_INPUT 40.0 + +#ifndef __NVCC__ +namespace hppl { +namespace typef { +float relu(const float a); +float sigmoid(const float a); +float tanh(const float a); +float linear(const float a); + +float relu(const float a, const float b); +float sigmoid(const float a, const float b); +float tanh(const float a, const float b); +float linear(const float a, const float b); + +} // namespace typef + +namespace typed { +double relu(const double a); +double sigmoid(const double a); +double tanh(const double a); +double linear(const double a); + +double relu(const double a, const double b); +double sigmoid(const double a, const double b); +double tanh(const double a, const double b); +double linear(const double a, const double b); +} // namespace typed + +} // namespace hppl + +#ifdef __AVX__ +#include "hl_avx_functions.h" +#endif + +#else +#include "hl_gpu_functions.h" +#endif + +#endif // HL_FUNCTIONS_H_ diff --git a/paddle/operators/math/detail/hl_gpu_functions.h b/paddle/operators/math/detail/hl_gpu_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..72f2204e7b2cfdba1367b51e3731dde11fb292d6 --- /dev/null +++ b/paddle/operators/math/detail/hl_gpu_functions.h @@ -0,0 +1,93 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#ifndef HL_GPU_FUNCTIONS_CUH_ +#define HL_GPU_FUNCTIONS_CUH_ + +#include "hl_base.h" + +namespace hppl { +namespace typef { + +__device__ static float relu(const float a) { return a > 0.0f ? a : 0.0f; } + +__device__ static float sigmoid(const float a) { + const float min = SIGMOID_THRESHOLD_MIN; + const float max = SIGMOID_THRESHOLD_MAX; + float tmp = (a < min) ? min : ((a > max) ? max : a); + return __fdividef(1.0f, 1.0f + __expf(-tmp)); +} + +__device__ static float tanh(const float a) { + float tmp = -2.0 * a; + tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp; + return __fdividef(2.0f, (1.0f + __expf(tmp))) - 1.0f; +} + +__device__ static float linear(const float a) { return a; } + +__device__ static float relu(const float a, const float b) { + return a * (b > 0.0f ? 1.0f : 0.0f); +} + +__device__ static float sigmoid(const float a, const float b) { + return a * b * (1.0f - b); +} + +__device__ static float tanh(const float a, const float b) { + return a * (1.0f - b * b); +} + +__device__ static float linear(const float a, const float b) { return a; } + +} // namespace typef + +namespace typed { + +__device__ static double relu(const double a) { return a > 0.0 ? a : 0.0; } + +__device__ static double sigmoid(const double a) { + const double min = SIGMOID_THRESHOLD_MIN; + const double max = SIGMOID_THRESHOLD_MAX; + double tmp = (a < min) ? min : ((a > max) ? max : a); + return 1.0 / (1.0 + exp(-tmp)); +} + +__device__ static double tanh(const double a) { + double tmp = -2.0 * a; + tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp; + return (2.0 / (1.0 + exp(tmp))) - 1.0; +} + +__device__ static double linear(const double a) { return a; } + +__device__ static double relu(const double a, const double b) { + return a * (b > 0.0 ? 1.0 : 0.0); +} + +__device__ static double sigmoid(const double a, const double b) { + return a * b * (1 - b); +} + +__device__ static double tanh(const double a, const double b) { + return a * (1.0 - b * b); +} + +__device__ static double linear(const double a, const double b) { return a; } + +} // namespace typed + +} // namespace hppl + +#endif // HL_GPU_FUNCTIONS_CUH_ diff --git a/paddle/operators/math/detail/lstm_cpu_kernel.h b/paddle/operators/math/detail/lstm_cpu_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..74d51d7bc9b91f4c8088384d77183131f57aafab --- /dev/null +++ b/paddle/operators/math/detail/lstm_cpu_kernel.h @@ -0,0 +1,310 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include +#include "paddle/operators/math/detail/hl_activation_functions.h" +#include "paddle/operators/math/lstm_compute.h" + +namespace paddle { +namespace operators { +namespace math { +namespace detail { + +#ifndef __NVCC__ + +template +void naive_lstm_forward_one_sequence(Op op, LstmMetaValue value, + int frameSize, + activation_mode_t active_node, + activation_mode_t active_gate, + activation_mode_t active_state) { + T rValueIn; + T rValueIg; + T rValueFg; + T rValueOg; + T rCheckI; + T rCheckF; + T rCheckO; + T rState; + T rPrevState = 0; + T rStateAtv; + T rOut; + + T *valueIn = value.gateValue; + T *valueIg = value.gateValue + frameSize; + T *valueFg = value.gateValue + frameSize * 2; + T *valueOg = value.gateValue + frameSize * 3; + + for (int i = 0; i < frameSize; i++) { + rValueIn = valueIn[i]; + rValueIg = valueIg[i]; + rValueFg = valueFg[i]; + rValueOg = valueOg[i]; + rCheckI = value.checkIg[i]; + rCheckF = value.checkFg[i]; + rCheckO = value.checkOg[i]; + + if (value.prevStateValue) { + rPrevState = value.prevStateValue[i]; + } + + hppl::cpu::ForwardAct act; + op(rValueIn, rValueIg, rValueFg, rValueOg, rPrevState, rState, rStateAtv, + rOut, rCheckI, rCheckF, rCheckO, act(active_node), act(active_gate), + act(active_state)); + + valueIn[i] = rValueIn; + valueIg[i] = rValueIg; + valueFg[i] = rValueFg; + valueOg[i] = rValueOg; + value.stateValue[i] = rState; + value.stateActiveValue[i] = rStateAtv; + value.outputValue[i] = rOut; + } +} + +template +void naive_lstm_backward_one_sequence(Op op, LstmMetaValue value, + LstmMetaGrad grad, int frameSize, + activation_mode_t active_node, + activation_mode_t active_gate, + activation_mode_t active_state) { + T rValueIn; + T rValueIg; + T rValueFg; + T rValueOg; + T rGradIn; + T rGradIg; + T rGradFg; + T rGradOg; + T rPrevState = 0; + T rPrevStateGrad; + T rState; + T rStateGrad; + T rStateAtv; + T rOutputGrad; + T rCheckI; + T rCheckF; + T rCheckO; + T rCheckIGrad; + T rCheckFGrad; + T rCheckOGrad; + + T *valueIn = value.gateValue; + T *valueIg = value.gateValue + frameSize; + T *valueFg = value.gateValue + frameSize * 2; + T *valueOg = value.gateValue + frameSize * 3; + T *gradIn = grad.gateGrad; + T *gradIg = grad.gateGrad + frameSize; + T *gradFg = grad.gateGrad + frameSize * 2; + T *gradOg = grad.gateGrad + frameSize * 3; + + for (int i = 0; i < frameSize; i++) { + rValueIn = valueIn[i]; + rValueIg = valueIg[i]; + rValueFg = valueFg[i]; + rValueOg = valueOg[i]; + rCheckI = value.checkIg[i]; + rCheckF = value.checkFg[i]; + rCheckO = value.checkOg[i]; + rState = value.stateValue[i]; + rStateAtv = value.stateActiveValue[i]; + rOutputGrad = grad.outputGrad[i]; + rStateGrad = grad.stateGrad[i]; + if (value.prevStateValue) { + rPrevState = value.prevStateValue[i]; + } + + hppl::cpu::BackwardAct act; + op(rValueIn, rValueIg, rValueFg, rValueOg, rGradIn, rGradIg, rGradFg, + rGradOg, rPrevState, rPrevStateGrad, rState, rStateGrad, rStateAtv, + rOutputGrad, rCheckI, rCheckF, rCheckO, rCheckIGrad, rCheckFGrad, + rCheckOGrad, act(active_node), act(active_gate), act(active_state)); + + gradIn[i] = rGradIn; + gradIg[i] = rGradIg; + gradFg[i] = rGradFg; + gradOg[i] = rGradOg; + grad.stateGrad[i] = rStateGrad; + + if (grad.prevStateGrad) grad.prevStateGrad[i] = rPrevStateGrad; + if (value.prevStateValue) { + if (grad.checkIgGrad) grad.checkIgGrad[i] += rCheckIGrad; + if (grad.checkFgGrad) grad.checkFgGrad[i] += rCheckFGrad; + } + if (grad.checkOgGrad) grad.checkOgGrad[i] += rCheckOGrad; + } +} + +template +void 
avx_lstm_forward_one_sequence(Op op, LstmMetaValue value, int frameSize, + activation_mode_t active_node, + activation_mode_t active_gate, + activation_mode_t active_state) { +#ifdef __AVX__ + __m256 rValueIn; + __m256 rValueIg; + __m256 rValueFg; + __m256 rValueOg; + __m256 rCheckI; + __m256 rCheckF; + __m256 rCheckO; + __m256 rState; + __m256 rPrevState = _mm256_set1_ps(0.0f); + __m256 rStateAtv; + __m256 rOut; + + __m256 *valueIn = (__m256 *)value.gateValue; + __m256 *valueIg = (__m256 *)(value.gateValue + frameSize); + __m256 *valueFg = (__m256 *)(value.gateValue + frameSize * 2); + __m256 *valueOg = (__m256 *)(value.gateValue + frameSize * 3); + + for (int i = 0; i < frameSize / 8; i++) { + rValueIn = valueIn[i]; + rValueIg = valueIg[i]; + rValueFg = valueFg[i]; + rValueOg = valueOg[i]; + rCheckI = ((__m256 *)value.checkIg)[i]; + rCheckF = ((__m256 *)value.checkFg)[i]; + rCheckO = ((__m256 *)value.checkOg)[i]; + + if (value.prevStateValue) { + rPrevState = ((__m256 *)value.prevStateValue)[i]; + } + + op(rValueIn, rValueIg, rValueFg, rValueOg, rPrevState, rState, rStateAtv, + rOut, rCheckI, rCheckF, rCheckO, hppl::avx::forward[active_node], + hppl::avx::forward[active_gate], hppl::avx::forward[active_state]); + + valueIn[i] = rValueIn; + valueIg[i] = rValueIg; + valueFg[i] = rValueFg; + valueOg[i] = rValueOg; + ((__m256 *)value.stateValue)[i] = rState; + ((__m256 *)value.stateActiveValue)[i] = rStateAtv; + ((__m256 *)value.outputValue)[i] = rOut; + } +#endif +} + +template +void avx_lstm_backward_one_sequence(Op op, LstmMetaValue value, + LstmMetaGrad grad, int frameSize, + activation_mode_t active_node, + activation_mode_t active_gate, + activation_mode_t active_state) { +#ifdef __AVX__ + __m256 rValueIn; + __m256 rValueIg; + __m256 rValueFg; + __m256 rValueOg; + __m256 rGradIn; + __m256 rGradIg; + __m256 rGradFg; + __m256 rGradOg; + __m256 rPrevState = _mm256_set1_ps(0.0f); + __m256 rPrevStateGrad; + __m256 rStateGrad; + __m256 rState; + __m256 rStateAtv; + __m256 rOutputGrad; + __m256 rCheckI; + __m256 rCheckF; + __m256 rCheckO; + __m256 rCheckIGrad; + __m256 rCheckFGrad; + __m256 rCheckOGrad; + + __m256 *valueIn = (__m256 *)value.gateValue; + __m256 *valueIg = (__m256 *)(value.gateValue + frameSize); + __m256 *valueFg = (__m256 *)(value.gateValue + frameSize * 2); + __m256 *valueOg = (__m256 *)(value.gateValue + frameSize * 3); + __m256 *gradIn = (__m256 *)grad.gateGrad; + __m256 *gradIg = (__m256 *)(grad.gateGrad + frameSize); + __m256 *gradFg = (__m256 *)(grad.gateGrad + frameSize * 2); + __m256 *gradOg = (__m256 *)(grad.gateGrad + frameSize * 3); + + for (int i = 0; i < frameSize / 8; i++) { + rValueIn = valueIn[i]; + rValueIg = valueIg[i]; + rValueFg = valueFg[i]; + rValueOg = valueOg[i]; + rCheckI = ((__m256 *)value.checkIg)[i]; + rCheckF = ((__m256 *)value.checkFg)[i]; + rCheckO = ((__m256 *)value.checkOg)[i]; + rState = ((__m256 *)value.stateValue)[i]; + rStateAtv = ((__m256 *)value.stateActiveValue)[i]; + rOutputGrad = ((__m256 *)grad.outputGrad)[i]; + rStateGrad = ((__m256 *)grad.stateGrad)[i]; + if (value.prevStateValue) { + rPrevState = ((__m256 *)value.prevStateValue)[i]; + } + + op(rValueIn, rValueIg, rValueFg, rValueOg, rGradIn, rGradIg, rGradFg, + rGradOg, rPrevState, rPrevStateGrad, rState, rStateGrad, rStateAtv, + rOutputGrad, rCheckI, rCheckF, rCheckO, rCheckIGrad, rCheckFGrad, + rCheckOGrad, hppl::avx::backward[active_node], + hppl::avx::backward[active_gate], hppl::avx::backward[active_state]); + + gradIn[i] = rGradIn; + gradIg[i] = rGradIg; + gradFg[i] = 
rGradFg; + gradOg[i] = rGradOg; + ((__m256 *)grad.stateGrad)[i] = rStateGrad; + + if (grad.prevStateGrad) ((__m256 *)grad.prevStateGrad)[i] = rPrevStateGrad; + if (value.prevStateValue) { + if (grad.checkIgGrad) ((__m256 *)grad.checkIgGrad)[i] += rCheckIGrad; + if (grad.checkFgGrad) ((__m256 *)grad.checkFgGrad)[i] += rCheckFGrad; + } + if (grad.checkOgGrad) ((__m256 *)grad.checkOgGrad)[i] += rCheckOGrad; + } +#endif +} + +template +void cpu_lstm_forward(Op op, LstmMetaValue value, int frameSize, + activation_mode_t active_node, + activation_mode_t active_gate, + activation_mode_t active_state) { + if (Op::avx && !(frameSize & (8 - 1)) && (std::is_same::value)) { + avx_lstm_forward_one_sequence(op, value, frameSize, active_node, + active_gate, active_state); + } else { + naive_lstm_forward_one_sequence(op, value, frameSize, active_node, + active_gate, active_state); + } +} + +template +void cpu_lstm_backward(Op op, LstmMetaValue value, LstmMetaGrad grad, + int frameSize, activation_mode_t active_node, + activation_mode_t active_gate, + activation_mode_t active_state) { + if (Op::avx && !(frameSize & (8 - 1)) && (std::is_same::value)) { + avx_lstm_backward_one_sequence(op, value, grad, frameSize, active_node, + active_gate, active_state); + } else { + naive_lstm_backward_one_sequence(op, value, grad, frameSize, active_node, + active_gate, active_state); + } +} + +#endif + +} // namespace detail +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/detail/lstm_gpu_kernel.h b/paddle/operators/math/detail/lstm_gpu_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..9573eaefb6a9d678ef70f2e2bffdc6a3011b21ea --- /dev/null +++ b/paddle/operators/math/detail/lstm_gpu_kernel.h @@ -0,0 +1,256 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include +#include "paddle/operators/math/detail/hl_activation_functions.h" +#include "paddle/operators/math/lstm_compute.h" +#include "paddle/platform/cuda_helper.h" +#include "paddle/platform/device_context.h" + +#include + +namespace paddle { +namespace operators { +namespace math { +namespace detail { + +/* + * threads(framePerBlock, batchPerBlock) + * grid(frameBlocks, batchBlocks) + */ +template +__global__ void KeLstmForward(Op op, LstmMetaValue value, int frameSize, + int batchSize, activation_mode_t active_node, + activation_mode_t active_gate, + activation_mode_t active_state) { + const int frameIdx = blockIdx.x * blockDim.x + threadIdx.x; + if (frameIdx >= frameSize) return; + + int batchIdx = 0; + if (isBatch) { + batchIdx = blockIdx.y * blockDim.y + threadIdx.y; + if (batchIdx >= batchSize) return; + value.gateValue += batchIdx * frameSize * 4; + value.outputValue += batchIdx * frameSize; + value.stateValue += batchIdx * frameSize; + value.stateActiveValue += batchIdx * frameSize; + } + + T rState; + T rPrevState = 0; + T rStateAtv; + T rOut; + T rValueIn; + T rValueIg; + T rValueFg; + T rValueOg; + T rCheckI = value.checkIg[frameIdx]; + T rCheckF = value.checkFg[frameIdx]; + T rCheckO = value.checkOg[frameIdx]; + + rValueIn = value.gateValue[frameIdx]; + rValueIg = value.gateValue[frameIdx + frameSize]; + rValueFg = value.gateValue[frameIdx + frameSize * 2]; + rValueOg = value.gateValue[frameIdx + frameSize * 3]; + + if (value.prevStateValue) { + if (isBatch) value.prevStateValue += batchIdx * frameSize; + rPrevState = value.prevStateValue[frameIdx]; + } + + hppl::gpu::ForwardAct act; + op(rValueIn, rValueIg, rValueFg, rValueOg, rPrevState, rState, rStateAtv, + rOut, rCheckI, rCheckF, rCheckO, act(active_node), act(active_gate), + act(active_state)); + + value.gateValue[frameIdx] = rValueIn; + value.gateValue[frameIdx + frameSize] = rValueIg; + value.gateValue[frameIdx + frameSize * 2] = rValueFg; + value.gateValue[frameIdx + frameSize * 3] = rValueOg; + + value.stateValue[frameIdx] = rState; + value.stateActiveValue[frameIdx] = rStateAtv; + value.outputValue[frameIdx] = rOut; +} + +/* + * threads(framePerBlock, batchPerBlock) + * grid(frameBlocks, batchBlocks) + */ +template +__global__ void KeLstmBackward(Op op, LstmMetaValue value, + LstmMetaGrad grad, int frameSize, + int batchSize, activation_mode_t active_node, + activation_mode_t active_gate, + activation_mode_t active_state) { + const int frameIdx = blockIdx.x * blockDim.x + threadIdx.x; + if (frameIdx >= frameSize) return; + + int batchIdx = 0; + if (isBatch) { + batchIdx = blockIdx.y * blockDim.y + threadIdx.y; + if (batchIdx >= batchSize) return; + value.gateValue += batchIdx * frameSize * 4; + value.stateValue += batchIdx * frameSize; + value.stateActiveValue += batchIdx * frameSize; + grad.gateGrad += batchIdx * frameSize * 4; + grad.stateGrad += batchIdx * frameSize; + grad.outputGrad += batchIdx * frameSize; + } + + T rValueIn; + T rValueIg; + T rValueFg; + T rValueOg; + T rGradIn; + T rGradIg; + T rGradFg; + T rGradOg; + T rPrevState = 0; + T rPrevStateGrad; + T rState; + T rStateGrad; + T rStateAtv; + T rOutputGrad; + T rCheckI = value.checkIg[frameIdx]; + T rCheckF = value.checkFg[frameIdx]; + T rCheckO = value.checkOg[frameIdx]; + T rCheckIGrad; + T rCheckFGrad; + T rCheckOGrad; + + rValueIn = value.gateValue[frameIdx]; + rValueIg = value.gateValue[frameIdx + frameSize]; + rValueFg = value.gateValue[frameIdx + frameSize * 2]; + rValueOg = value.gateValue[frameIdx + frameSize * 3]; + 
rState = value.stateValue[frameIdx]; + rStateAtv = value.stateActiveValue[frameIdx]; + rOutputGrad = grad.outputGrad[frameIdx]; + rStateGrad = grad.stateGrad[frameIdx]; + + if (value.prevStateValue) { + if (isBatch) value.prevStateValue += batchIdx * frameSize; + rPrevState = value.prevStateValue[frameIdx]; + } + + hppl::gpu::BackwardAct act; + op(rValueIn, rValueIg, rValueFg, rValueOg, rGradIn, rGradIg, rGradFg, rGradOg, + rPrevState, rPrevStateGrad, rState, rStateGrad, rStateAtv, rOutputGrad, + rCheckI, rCheckF, rCheckO, rCheckIGrad, rCheckFGrad, rCheckOGrad, + act(active_node), act(active_gate), act(active_state)); + + grad.gateGrad[frameIdx] = rGradIn; + grad.gateGrad[frameIdx + frameSize] = rGradIg; + grad.gateGrad[frameIdx + frameSize * 2] = rGradFg; + grad.gateGrad[frameIdx + frameSize * 3] = rGradOg; + grad.stateGrad[frameIdx] = rStateGrad; + if (grad.prevStateGrad) { + if (isBatch) grad.prevStateGrad += batchIdx * frameSize; + grad.prevStateGrad[frameIdx] = rPrevStateGrad; + } + + if (isBatch) { + if (value.prevStateValue) { + if (grad.checkIgGrad) + paddle::platform::CudaAtomicAdd(grad.checkIgGrad + frameIdx, + rCheckIGrad); + if (grad.checkFgGrad) + paddle::platform::CudaAtomicAdd(grad.checkFgGrad + frameIdx, + rCheckFGrad); + } + if (grad.checkOgGrad) + paddle::platform::CudaAtomicAdd(grad.checkOgGrad + frameIdx, rCheckOGrad); + } else { + if (value.prevStateValue) { + if (grad.checkIgGrad) grad.checkIgGrad[frameIdx] += rCheckIGrad; + if (grad.checkFgGrad) grad.checkFgGrad[frameIdx] += rCheckFGrad; + } + if (grad.checkOgGrad) grad.checkOgGrad[frameIdx] += rCheckOGrad; + } +} + +template +void gpu_lstm_forward(const platform::DeviceContext& context, Op op, + LstmMetaValue value, int frameSize, int batchSize, + activation_mode_t active_node, + activation_mode_t active_gate, + activation_mode_t active_state) { + dim3 threads; + dim3 grid; + if (batchSize == 1) { + int framePerBlock = frameSize <= 1024 ? frameSize : 1024; + int frameBlocks = (frameSize + 1024 - 1) / 1024; + threads = dim3(framePerBlock, 1); + grid = dim3(frameBlocks, 1); + } else { + /* framePerBlock = 32 batchPerBlock = 32 */ + threads = dim3(32, 32); + grid = dim3((frameSize + 32 - 1) / 32, (batchSize + 32 - 1) / 32); + } + + auto stream = + reinterpret_cast(context).stream(); + if (batchSize == 1) { + KeLstmForward<<>>( + op, value, frameSize, batchSize, active_node, active_gate, + active_state); + } else { + KeLstmForward<<>>( + op, value, frameSize, batchSize, active_node, active_gate, + active_state); + } +} + +template +void gpu_lstm_backward(const platform::DeviceContext& context, Op op, + LstmMetaValue value, LstmMetaGrad grad, + int frameSize, int batchSize, + activation_mode_t active_node, + activation_mode_t active_gate, + activation_mode_t active_state) { + dim3 threads; + dim3 grid; + if (batchSize == 1) { + int framePerBlock = frameSize <= 1024 ? 
frameSize : 1024; + int frameBlocks = (frameSize + 1024 - 1) / 1024; + threads = dim3(framePerBlock, 1); + grid = dim3(frameBlocks, 1); + } else { + /* framePerBlock = 32 batchPerBlock = 32 */ + threads = dim3(32, 32); + grid = dim3((frameSize + 32 - 1) / 32, (batchSize + 32 - 1) / 32); + } + + auto stream = + reinterpret_cast(context).stream(); + if (batchSize == 1) { + KeLstmBackward<<>>( + op, value, grad, frameSize, batchSize, active_node, active_gate, + active_state); + } else { + KeLstmBackward<<>>( + op, value, grad, frameSize, batchSize, active_node, active_gate, + active_state); + } +} + +} // namespace detail +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/detail/lstm_kernel.h b/paddle/operators/math/detail/lstm_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..6f3ead2397d5131b4468d0ad288513cedb289594 --- /dev/null +++ b/paddle/operators/math/detail/lstm_kernel.h @@ -0,0 +1,138 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/detail/hl_activation_functions.h" +#include "paddle/platform/hostdevice.h" + +#include + +namespace paddle { +namespace operators { +namespace math { +namespace detail { + +namespace forward { + +template +class lstm { + public: + HOSTDEVICE void operator()(T &valueIn, T &valueIg, T &valueFg, T &valueOg, + T &prevState, T &state, T &stateAtv, T &output, + T &checkI, T &checkF, T &checkO, + typename hppl::ForwardActType::type actInput, + typename hppl::ForwardActType::type actGate, + typename hppl::ForwardActType::type actState) { + valueIn = actInput(valueIn); + valueIg = actGate(valueIg + prevState * checkI); + valueFg = actGate(valueFg + prevState * checkF); + state = valueIn * valueIg + prevState * valueFg; + valueOg = actGate(valueOg + state * checkO); + stateAtv = actState(state); + output = valueOg * stateAtv; + } +#ifndef __NVCC__ +#ifndef __AVX__ // If not compiled with AVX instructs. 
Disable AVX by default + static const bool avx = false; +#else + // Only float support AVX optimization + static const bool avx = std::is_same::value; + + HOSTDEVICE void operator()(__m256 &valueIn, __m256 &valueIg, __m256 &valueFg, + __m256 &valueOg, __m256 &prevState, __m256 &state, + __m256 &stateAtv, __m256 &output, __m256 &checkI, + __m256 &checkF, __m256 &checkO, + hppl::Active<__m256>::forward actInput, + hppl::Active<__m256>::forward actGate, + hppl::Active<__m256>::forward actState) { + valueIn = actInput(valueIn); + valueIg = actGate(_mm256_add_ps(valueIg, _mm256_mul_ps(prevState, checkI))); + valueFg = actGate(_mm256_add_ps(valueFg, _mm256_mul_ps(prevState, checkF))); + state = _mm256_add_ps(_mm256_mul_ps(valueIn, valueIg), + _mm256_mul_ps(prevState, valueFg)); + valueOg = actGate(_mm256_add_ps(valueOg, _mm256_mul_ps(state, checkO))); + stateAtv = actState(state); + output = _mm256_mul_ps(valueOg, stateAtv); + } +#endif +#endif +}; + +} // namespace forward + +namespace backward { + +template +class lstm { + public: + HOSTDEVICE void operator()(T &valueIn, T &valueIg, T &valueFg, T &valueOg, + T &gradIn, T &gradIg, T &gradFg, T &gradOg, + T &prevState, T &prevStateGrad, T &state, + T &stateGrad, T &stateAtv, T &outputGrad, + T &checkI, T &checkF, T &checkO, T &checkIGrad, + T &checkFGrad, T &checkOGrad, + typename hppl::BackwardActType::type actInput, + typename hppl::BackwardActType::type actGate, + typename hppl::BackwardActType::type actState) { + gradOg = actGate(outputGrad * stateAtv, valueOg); + stateGrad += actState(outputGrad * valueOg, stateAtv) + gradOg * checkO; + gradIn = actInput(stateGrad * valueIg, valueIn); + gradIg = actGate(stateGrad * valueIn, valueIg); + gradFg = actGate(stateGrad * prevState, valueFg); + prevStateGrad = gradIg * checkI + gradFg * checkF + stateGrad * valueFg; + checkIGrad = gradIg * prevState; + checkFGrad = gradFg * prevState; + checkOGrad = gradOg * state; + } +#ifndef __NVCC__ +#ifndef __AVX__ // If not compiled with AVX instructs. 
Disable AVX by default + static const bool avx = false; +#else + // Only float support AVX optimization + static const bool avx = std::is_same::value; + HOSTDEVICE void operator()(__m256 &valueIn, __m256 &valueIg, __m256 &valueFg, + __m256 &valueOg, __m256 &gradIn, __m256 &gradIg, + __m256 &gradFg, __m256 &gradOg, __m256 &prevState, + __m256 &prevStateGrad, __m256 &state, + __m256 &stateGrad, __m256 &stateAtv, + __m256 &outputGrad, __m256 &checkI, __m256 &checkF, + __m256 &checkO, __m256 &checkIGrad, + __m256 &checkFGrad, __m256 &checkOGrad, + hppl::Active<__m256>::backward actInput, + hppl::Active<__m256>::backward actGate, + hppl::Active<__m256>::backward actState) { + gradOg = actGate(_mm256_mul_ps(outputGrad, stateAtv), valueOg); + stateGrad = _mm256_add_ps( + actState(_mm256_mul_ps(outputGrad, valueOg), stateAtv), stateGrad); + stateGrad = _mm256_add_ps(_mm256_mul_ps(gradOg, checkO), stateGrad); + gradIn = actInput(_mm256_mul_ps(stateGrad, valueIg), valueIn); + gradIg = actGate(_mm256_mul_ps(stateGrad, valueIn), valueIg); + gradFg = actGate(_mm256_mul_ps(stateGrad, prevState), valueFg); + prevStateGrad = _mm256_add_ps(_mm256_mul_ps(gradIg, checkI), + _mm256_mul_ps(gradFg, checkF)); + prevStateGrad = + _mm256_add_ps(_mm256_mul_ps(stateGrad, valueFg), prevStateGrad); + checkIGrad = _mm256_mul_ps(gradIg, prevState); + checkFGrad = _mm256_mul_ps(gradFg, prevState); + checkOGrad = _mm256_mul_ps(gradOg, state); + } +#endif +#endif +}; + +} // namespace backward + +} // namespace detail +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/im2col_test.cc b/paddle/operators/math/im2col_test.cc index 6dfa61649d9a8200b9f20c1e898b07385d037222..10a7e61f5ba71e250a386e528dc2dca1aed8e19c 100644 --- a/paddle/operators/math/im2col_test.cc +++ b/paddle/operators/math/im2col_test.cc @@ -70,7 +70,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - input.CopyFrom(input_tmp, *place, *context); + input.CopyFrom(input_tmp, *place, *context); } output_cfo.mutable_data( {1, filter_size, filter_size, output_height, output_width}, *place); @@ -97,8 +97,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { out_cfo_ptr = output_cfo.data(); } else { - output_tmp.CopyFrom(output_cfo, paddle::platform::CPUPlace(), - *context); + output_tmp.CopyFrom(output_cfo, paddle::platform::CPUPlace(), *context); out_cfo_ptr = output_tmp.data(); } for (int i = 0; i < 6; ++i) { @@ -109,8 +108,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { out_ocf_ptr = output_ocf.data(); } else { - output_tmp.CopyFrom(output_ocf, paddle::platform::CPUPlace(), - *context); + output_tmp.CopyFrom(output_ocf, paddle::platform::CPUPlace(), *context); out_ocf_ptr = output_tmp.data(); } for (int i = 0; i < 6; ++i) { diff --git a/paddle/operators/math/lstm_compute.cc b/paddle/operators/math/lstm_compute.cc new file mode 100644 index 0000000000000000000000000000000000000000..0febf8e3b70111d12f858cf6259a2801a42d9a90 --- /dev/null +++ b/paddle/operators/math/lstm_compute.cc @@ -0,0 +1,82 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/lstm_compute.h" +#include "paddle/operators/math/detail/lstm_cpu_kernel.h" +#include "paddle/operators/math/detail/lstm_kernel.h" + +namespace paddle { +namespace operators { +namespace math { + +template +struct LstmUnitFunctor { + static void compute(const platform::DeviceContext& context, + LstmMetaValue value, int frame_size, int batch_size, + const std::string& gate_act, const std::string& cell_act, + const std::string& cand_act) { + for (int b = 0; b < batch_size; b++) { + detail::cpu_lstm_forward(detail::forward::lstm(), value, frame_size, + ActiveType(cand_act), ActiveType(gate_act), + ActiveType(cell_act)); + value.gateValue += frame_size * 4; + value.stateValue += frame_size; + value.stateActiveValue += frame_size; + value.outputValue += frame_size; + if (value.prevStateValue) { + value.prevStateValue += frame_size; + } + } + } +}; + +template +struct LstmUnitGradFunctor { + static void compute(const platform::DeviceContext& context, + LstmMetaValue value, LstmMetaGrad grad, + int frame_size, int batch_size, + const std::string& gate_act, const std::string& cell_act, + const std::string& cand_act) { + for (int b = 0; b < batch_size; b++) { + detail::cpu_lstm_backward(detail::backward::lstm(), value, grad, + frame_size, ActiveType(cand_act), + ActiveType(gate_act), ActiveType(cell_act)); + + value.gateValue += frame_size * 4; + value.stateValue += frame_size; + value.stateActiveValue += frame_size; + value.outputValue += frame_size; + if (value.prevStateValue) { + value.prevStateValue += frame_size; + } + + grad.gateGrad += frame_size * 4; + grad.stateGrad += frame_size; + grad.stateActiveGrad += frame_size; + grad.outputGrad += frame_size; + if (grad.prevStateGrad) { + grad.prevStateGrad += frame_size; + } + } + } +}; + +template class LstmUnitFunctor; +template class LstmUnitFunctor; +template class LstmUnitGradFunctor; +template class LstmUnitGradFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/lstm_compute.cu b/paddle/operators/math/lstm_compute.cu new file mode 100644 index 0000000000000000000000000000000000000000..b2122f2a5c08a6d9d53293833177f0ba2c3ab860 --- /dev/null +++ b/paddle/operators/math/lstm_compute.cu @@ -0,0 +1,55 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/math/detail/lstm_gpu_kernel.h" +#include "paddle/operators/math/detail/lstm_kernel.h" +#include "paddle/operators/math/lstm_compute.h" + +namespace paddle { +namespace operators { +namespace math { + +template +struct LstmUnitFunctor { + static void compute(const platform::DeviceContext& context, + LstmMetaValue value, int frame_size, int batch_size, + const std::string& gate_act, const std::string& cell_act, + const std::string& cand_act) { + detail::gpu_lstm_forward(context, detail::forward::lstm(), value, + frame_size, batch_size, ActiveType(cand_act), + ActiveType(gate_act), ActiveType(cell_act)); + } +}; + +template +struct LstmUnitGradFunctor { + static void compute(const platform::DeviceContext& context, + LstmMetaValue value, LstmMetaGrad grad, + int frame_size, int batch_size, + const std::string& gate_act, const std::string& cell_act, + const std::string& cand_act) { + detail::gpu_lstm_backward(context, detail::backward::lstm(), value, grad, + frame_size, batch_size, ActiveType(cand_act), + ActiveType(gate_act), ActiveType(cell_act)); + } +}; + +template class LstmUnitFunctor; +template class LstmUnitFunctor; +template class LstmUnitGradFunctor; +template class LstmUnitGradFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/lstm_compute.h b/paddle/operators/math/lstm_compute.h new file mode 100644 index 0000000000000000000000000000000000000000..28d2c6fd3b0d8143da90c37f241072e37397f98b --- /dev/null +++ b/paddle/operators/math/lstm_compute.h @@ -0,0 +1,91 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include "paddle/platform/device_context.h" +#include "paddle/platform/enforce.h" + +namespace paddle { +namespace operators { +namespace math { + +typedef enum { + HL_ACTIVATION_SIGMOID = 0, + HL_ACTIVATION_RELU = 1, + HL_ACTIVATION_TANH = 2, + HL_ACTIVATION_LINEAR = 3, + HL_ACTIVATION_END +} activation_mode_t; + +template +struct LstmMetaValue { + T *gateValue; + T *prevStateValue; + T *stateValue; + T *stateActiveValue; + T *outputValue; + T *checkIg; + T *checkFg; + T *checkOg; +}; + +template +struct LstmMetaGrad { + T *gateGrad; + T *prevStateGrad; + T *stateGrad; + T *stateActiveGrad; + T *outputGrad; + T *checkIgGrad; + T *checkFgGrad; + T *checkOgGrad; +}; + +inline activation_mode_t ActiveType(const std::string &type) { + if (type == "sigmoid") { + return HL_ACTIVATION_SIGMOID; + } else if (type == "relu") { + return HL_ACTIVATION_RELU; + } else if (type == "tanh") { + return HL_ACTIVATION_TANH; + } else if (type == "linear" || type == "identity" || type == "") { + return HL_ACTIVATION_LINEAR; + } else { + PADDLE_THROW("Do not support activation type."); + } +} + +template +class LstmUnitFunctor { + public: + static void compute(const platform::DeviceContext &context, + LstmMetaValue value, int frame_size, int batch_size, + const std::string &gate_act, const std::string &cell_act, + const std::string &cand_act); +}; + +template +class LstmUnitGradFunctor { + public: + static void compute(const platform::DeviceContext &context, + LstmMetaValue value, LstmMetaGrad grad, + int frame_size, int batch_size, + const std::string &gate_act, const std::string &cell_act, + const std::string &cand_act); +}; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/math_function.cc b/paddle/operators/math/math_function.cc index ba653afa2cb175ae2e5e21088b6dc7ba76a6018f..aad1357598c629a4edfe0ad9b23f0241093a2522 100644 --- a/paddle/operators/math/math_function.cc +++ b/paddle/operators/math/math_function.cc @@ -130,6 +130,89 @@ void matmul( matrix_b.data(), beta, matrix_out->data()); } +#ifdef PADDLE_USE_MKLML +// Use cblas_{s,d}gemm_batched if available: Run with 1 group of size batchSize. +template <> +void batched_gemm( + const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA, + const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, + const float alpha, const float* A, const float* B, const float beta, + float* C, const int batchCount, const int strideA, const int strideB) { + int lda = (transA == CblasNoTrans) ? K : M; + int ldb = (transB == CblasNoTrans) ? N : K; + int ldc = N; + auto a_array = std::vector(batchCount); + auto b_array = std::vector(batchCount); + auto c_array = std::vector(batchCount); + for (int k = 0; k < batchCount; ++k) { + a_array[k] = &A[k * strideA]; + b_array[k] = &B[k * strideB]; + c_array[k] = &C[k * M * N]; + } + cblas_sgemm_batch(CblasRowMajor, &transA, &transB, &M, &N, &K, &alpha, + a_array.data(), &lda, b_array.data(), &ldb, &beta, + c_array.data(), &ldc, 1 /* group_count */, &batchCount); +} + +template <> +void batched_gemm( + const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA, + const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, + const double alpha, const double* A, const double* B, const double beta, + double* C, const int batchCount, const int strideA, const int strideB) { + int lda = (transA == CblasNoTrans) ? K : M; + int ldb = (transB == CblasNoTrans) ? 
N : K; + int ldc = N; + auto a_array = std::vector(batchCount); + auto b_array = std::vector(batchCount); + auto c_array = std::vector(batchCount); + for (int k = 0; k < batchCount; ++k) { + a_array[k] = &A[k * strideA]; + b_array[k] = &B[k * strideB]; + c_array[k] = &C[k * M * N]; + } + cblas_dgemm_batch(CblasRowMajor, &transA, &transB, &M, &N, &K, &alpha, + a_array.data(), &lda, b_array.data(), &ldb, &beta, + c_array.data(), &ldc, 1 /* group_count */, &batchCount); +} +#else +// The below is a naive but correct serial implementation that just loops +// over the batch dimension. This is a fallback for when the batched gemm +// functions of Intel MKL are not available. In the future, this computation +// should be parallelized. +template <> +void batched_gemm( + const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA, + const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, + const float alpha, const float* A, const float* B, const float beta, + float* C, const int batchCount, const int strideA, const int strideB) { + for (int k = 0; k < batchCount; ++k) { + const float* Ak = &A[k * strideA]; + const float* Bk = &B[k * strideB]; + float* Ck = &C[k * M * N]; + gemm(context, transA, transB, M, N, K, alpha, Ak, + Bk, beta, Ck); + } +} + +template <> +void batched_gemm( + const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA, + const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, + const double alpha, const double* A, const double* B, const double beta, + double* C, const int batchCount, const int strideA, const int strideB) { + for (int k = 0; k < batchCount; ++k) { + const double* Ak = &A[k * strideA]; + const double* Bk = &B[k * strideB]; + double* Ck = &C[k * M * N]; + gemm(context, transA, transB, M, N, K, alpha, + Ak, Bk, beta, Ck); + } +} +#endif + +template struct SetConstant; + } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/math/math_function.cu b/paddle/operators/math/math_function.cu index 649f1f352c2a4a5ebaa0cb00ffb2e4de8aa4961a..5583683c6e12b88ba81015aef9161913de261ef2 100644 --- a/paddle/operators/math/math_function.cu +++ b/paddle/operators/math/math_function.cu @@ -155,6 +155,56 @@ void matmul( matrix_b.data(), beta, matrix_out->data()); } +template <> +void batched_gemm( + const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA, + const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, + const float alpha, const float* A, const float* B, const float beta, + float* C, const int batchCount, const int strideA, const int strideB) { + // Note that cublas follows fortran order, so the order is different from + // the cblas convention. + int lda = (transA == CblasNoTrans) ? K : M; + int ldb = (transB == CblasNoTrans) ? N : K; + int ldc = N; + cublasOperation_t cuTransA = + (transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; + cublasOperation_t cuTransB = + (transB == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; + const int strideC = M * N; + + PADDLE_ENFORCE(platform::dynload::cublasSgemmStridedBatched( + reinterpret_cast(context) + .cublas_handle(), + cuTransB, cuTransA, N, M, K, &alpha, B, ldb, strideB, A, lda, strideA, + &beta, C, ldc, strideC, batchCount)); +} + +template <> +void batched_gemm( + const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA, + const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, + const double alpha, const double* A, const double* B, const double beta, + double* C, const int batchCount, const int strideA, const int strideB) { + // Note that cublas follows fortran order, so the order is different from + // the cblas convention. + int lda = (transA == CblasNoTrans) ? K : M; + int ldb = (transB == CblasNoTrans) ? N : K; + int ldc = N; + cublasOperation_t cuTransA = + (transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; + cublasOperation_t cuTransB = + (transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; + const int strideC = M * N; + + PADDLE_ENFORCE(platform::dynload::cublasDgemmStridedBatched( + reinterpret_cast(context) + .cublas_handle(), + cuTransB, cuTransA, N, M, K, &alpha, B, ldb, strideB, A, lda, strideA, + &beta, C, ldc, strideC, batchCount)); +} + +template struct SetConstant; + } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/math/math_function.h b/paddle/operators/math/math_function.h index 473eff4d198ca9b17b6af8eebd6dfe39d49d138d..9777ebfd156709a370be2cb4ba0077ac7c6735fb 100644 --- a/paddle/operators/math/math_function.h +++ b/paddle/operators/math/math_function.h @@ -63,7 +63,7 @@ namespace math { // Support continuous memory now // If transA = N, and transB = N -// Then matrixA: M * K, matrixB: K * N matrixC : M * N +// Then matrixA: M * K, matrixB: K * N, matrixC : M * N // For more detailed info, please refer to // http://www.netlib.org/lapack/explore-html/d4/de2/sgemm_8f.html template @@ -85,12 +85,23 @@ void matmul(const platform::DeviceContext& context, const framework::Tensor& matrix_b, bool trans_b, T alpha, framework::Tensor* matrix_out, T beta); +// Batched gemm template -void SetConstant(const platform::DeviceContext& context, - framework::Tensor* tensor, T num) { - auto t = framework::EigenVector::Flatten(*tensor); - t.device(*context.GetEigenDevice()) = t.constant(static_cast(num)); -} +void batched_gemm(const platform::DeviceContext& context, + const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, + const int M, const int N, const int K, const T alpha, + const T* A, const T* B, const T beta, T* C, + const int batchCount, const int strideA, const int strideB); + +template +struct SetConstant { + void operator()(const platform::DeviceContext& context, + framework::Tensor* tensor, T num) { + auto t = framework::EigenVector::Flatten(*tensor); + t.device(*context.GetEigenDevice()) = + t.constant(static_cast(num)); + } +}; } // namespace math } // namespace operators diff --git a/paddle/operators/math/math_function_test.cc b/paddle/operators/math/math_function_test.cc index c87d200c3aa5a9336c0f73d3a8bb88d2e9eafbab..3b9f92e7ae5f34dd0fb1ba8fb0c67ff5ae1628c4 100644 --- a/paddle/operators/math/math_function_test.cc +++ b/paddle/operators/math/math_function_test.cc @@ -1,185 +1,6 @@ #include "paddle/operators/math/math_function.h" #include "gtest/gtest.h" -#ifdef PADDLE_WITH_CUDA -TEST(math_function, notrans_mul_trans) { - paddle::framework::Tensor input1; - paddle::framework::Tensor input1_gpu; - paddle::framework::Tensor input2_gpu; - 
paddle::framework::Tensor out_gpu; - paddle::framework::Tensor out; - - auto* cpu_place = new paddle::platform::CPUPlace(); - float* input1_ptr = input1.mutable_data({2, 3}, *cpu_place); - float arr[6] = {0, 1, 2, 3, 4, 5}; - memcpy(input1_ptr, arr, 6 * sizeof(float)); - - auto* gpu_place = new paddle::platform::GPUPlace(0); - paddle::platform::CUDADeviceContext context(*gpu_place); - - input1_gpu.CopyFrom(input1, *gpu_place, context); - input2_gpu.CopyFrom(input1, *gpu_place, context); - - out_gpu.mutable_data({2, 2}, *gpu_place); - - paddle::operators::math::matmul( - context, input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0); - - out.CopyFrom(out_gpu, *cpu_place, context); - - float* out_ptr = out.data(); - context.Wait(); - EXPECT_EQ(out_ptr[0], 5); - EXPECT_EQ(out_ptr[1], 14); - EXPECT_EQ(out_ptr[2], 14); - EXPECT_EQ(out_ptr[3], 50); - delete gpu_place; -} - -TEST(math_function, trans_mul_notrans) { - paddle::framework::Tensor input1; - paddle::framework::Tensor input1_gpu; - paddle::framework::Tensor input2_gpu; - paddle::framework::Tensor out_gpu; - paddle::framework::Tensor out; - - auto* cpu_place = new paddle::platform::CPUPlace(); - float* input1_ptr = input1.mutable_data({2, 3}, *cpu_place); - float arr[6] = {0, 1, 2, 3, 4, 5}; - memcpy(input1_ptr, arr, 6 * sizeof(float)); - - auto* gpu_place = new paddle::platform::GPUPlace(0); - paddle::platform::CUDADeviceContext context(*gpu_place); - - input1_gpu.CopyFrom(input1, *gpu_place, context); - input2_gpu.CopyFrom(input1, *gpu_place, context); - - out_gpu.mutable_data({3, 3}, *gpu_place); - - paddle::operators::math::matmul( - context, input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0); - - out.CopyFrom(out_gpu, *cpu_place, context); - - float* out_ptr = out.data(); - context.Wait(); - EXPECT_EQ(out_ptr[0], 9); - EXPECT_EQ(out_ptr[1], 12); - EXPECT_EQ(out_ptr[2], 15); - EXPECT_EQ(out_ptr[3], 12); - EXPECT_EQ(out_ptr[4], 17); - EXPECT_EQ(out_ptr[5], 22); - EXPECT_EQ(out_ptr[6], 15); - EXPECT_EQ(out_ptr[7], 22); - EXPECT_EQ(out_ptr[8], 29); - delete gpu_place; -} - -TEST(math_function, gemm_notrans_cublas) { - paddle::framework::Tensor input1; - paddle::framework::Tensor input2; - paddle::framework::Tensor input3; - paddle::framework::Tensor input1_gpu; - paddle::framework::Tensor input2_gpu; - paddle::framework::Tensor input3_gpu; - - int m = 2; - int n = 3; - int k = 3; - auto* cpu_place = new paddle::platform::CPUPlace(); - float* input1_ptr = input1.mutable_data({2, 3}, *cpu_place); - float arr1[6] = {0, 1, 2, 3, 4, 5}; - memcpy(input1_ptr, arr1, 6 * sizeof(float)); - float* input2_ptr = input2.mutable_data({3, 4}, *cpu_place); - float arr2[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; - memcpy(input2_ptr, arr2, 12 * sizeof(float)); - float* input3_ptr = input3.mutable_data({2, 4}, *cpu_place); - float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7}; - memcpy(input3_ptr, arr3, 8 * sizeof(float)); - - auto* gpu_place = new paddle::platform::GPUPlace(0); - paddle::platform::CUDADeviceContext context(*gpu_place); - - input1_gpu.CopyFrom(input1, *gpu_place, context); - input2_gpu.CopyFrom(input2, *gpu_place, context); - input3_gpu.CopyFrom(input3, *gpu_place, context); - float* a = input1_gpu.data(); - float* b = input2_gpu.data(); - float* c = input3_gpu.mutable_data(*gpu_place); - - paddle::operators::math::gemm( - context, false, false, m, n, k, 1, a, 3, b + 1, 4, 1, c + 1, 4); - - input3.CopyFrom(input3_gpu, *cpu_place, context); - - // numpy code: - // a = np.arange(6).reshape(2, 3) - // b = np.arange(12).reshape(3, 4)[:, 1:] - // c = 
np.arange(8).reshape(2, 4)[:, 1:] - // out = np.arange(8).reshape(2, 4) - // out[:, 1:] = np.dot(a, b) + c - context.Wait(); - EXPECT_EQ(input3_ptr[0], 0); - EXPECT_EQ(input3_ptr[1], 24); - EXPECT_EQ(input3_ptr[2], 28); - EXPECT_EQ(input3_ptr[3], 32); - EXPECT_EQ(input3_ptr[4], 4); - EXPECT_EQ(input3_ptr[5], 73); - EXPECT_EQ(input3_ptr[6], 86); - EXPECT_EQ(input3_ptr[7], 99); - delete gpu_place; -} - -TEST(math_function, gemm_trans_cublas) { - paddle::framework::Tensor input1; - paddle::framework::Tensor input2; - paddle::framework::Tensor input3; - paddle::framework::Tensor input1_gpu; - paddle::framework::Tensor input2_gpu; - paddle::framework::Tensor input3_gpu; - - int m = 2; - int n = 3; - int k = 3; - auto* cpu_place = new paddle::platform::CPUPlace(); - float* input1_ptr = input1.mutable_data({2, 3}, *cpu_place); - float arr1[6] = {0, 1, 2, 3, 4, 5}; - memcpy(input1_ptr, arr1, 6 * sizeof(float)); - float* input2_ptr = input2.mutable_data({4, 3}, *cpu_place); - float arr2[12] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11}; - memcpy(input2_ptr, arr2, 12 * sizeof(float)); - float* input3_ptr = input3.mutable_data({2, 4}, *cpu_place); - float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7}; - memcpy(input3_ptr, arr3, 8 * sizeof(float)); - - auto* gpu_place = new paddle::platform::GPUPlace(0); - paddle::platform::CUDADeviceContext context(*gpu_place); - - input1_gpu.CopyFrom(input1, *gpu_place, context); - input2_gpu.CopyFrom(input2, *gpu_place, context); - input3_gpu.CopyFrom(input3, *gpu_place, context); - float* a = input1_gpu.data(); - float* b = input2_gpu.data(); - float* c = input3_gpu.mutable_data(*gpu_place); - - paddle::operators::math::gemm( - context, false, true, m, n, k, 1, a, 3, b + 3, 3, 1, c + 1, 4); - - input3.CopyFrom(input3_gpu, *cpu_place, context); - context.Wait(); - - EXPECT_EQ(input3_ptr[0], 0); - EXPECT_EQ(input3_ptr[1], 24); - EXPECT_EQ(input3_ptr[2], 28); - EXPECT_EQ(input3_ptr[3], 32); - EXPECT_EQ(input3_ptr[4], 4); - EXPECT_EQ(input3_ptr[5], 73); - EXPECT_EQ(input3_ptr[6], 86); - EXPECT_EQ(input3_ptr[7], 99); - delete gpu_place; -} -#endif - TEST(math_function, gemm_notrans_cblas) { paddle::framework::Tensor input1; paddle::framework::Tensor input2; @@ -253,15 +74,15 @@ TEST(math_function, zero) { auto* cpu_place = new paddle::platform::CPUPlace(); float* t = tensor.mutable_data({2, 2}, *cpu_place); paddle::platform::CPUDeviceContext context(*cpu_place); - paddle::operators::math::SetConstant( - context, &tensor, 0); + paddle::operators::math::SetConstant + functor; + functor(context, &tensor, 0); EXPECT_EQ(t[0], 0); EXPECT_EQ(t[1], 0); EXPECT_EQ(t[2], 0); EXPECT_EQ(t[3], 0); - paddle::operators::math::SetConstant( - context, &tensor, 1); + functor(context, &tensor, 1); EXPECT_EQ(t[0], 1); EXPECT_EQ(t[1], 1); diff --git a/paddle/operators/math/math_function_test.cu b/paddle/operators/math/math_function_test.cu new file mode 100644 index 0000000000000000000000000000000000000000..8b22c71552a65044cbd02441fb35c1eafe0173dc --- /dev/null +++ b/paddle/operators/math/math_function_test.cu @@ -0,0 +1,179 @@ +#include "gtest/gtest.h" +#include "paddle/operators/math/math_function.h" + +TEST(math_function, notrans_mul_trans) { + paddle::framework::Tensor input1; + paddle::framework::Tensor input1_gpu; + paddle::framework::Tensor input2_gpu; + paddle::framework::Tensor out_gpu; + paddle::framework::Tensor out; + + auto* cpu_place = new paddle::platform::CPUPlace(); + float* input1_ptr = input1.mutable_data({2, 3}, *cpu_place); + float arr[6] = {0, 1, 2, 3, 4, 5}; + memcpy(input1_ptr, arr, 
6 * sizeof(float)); + + auto* gpu_place = new paddle::platform::GPUPlace(0); + paddle::platform::CUDADeviceContext context(*gpu_place); + + input1_gpu.CopyFrom(input1, *gpu_place, context); + input2_gpu.CopyFrom(input1, *gpu_place, context); + + out_gpu.mutable_data({2, 2}, *gpu_place); + + paddle::operators::math::matmul( + context, input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0); + + out.CopyFrom(out_gpu, *cpu_place, context); + + float* out_ptr = out.data(); + context.Wait(); + EXPECT_EQ(out_ptr[0], 5); + EXPECT_EQ(out_ptr[1], 14); + EXPECT_EQ(out_ptr[2], 14); + EXPECT_EQ(out_ptr[3], 50); + delete gpu_place; +} + +TEST(math_function, trans_mul_notrans) { + paddle::framework::Tensor input1; + paddle::framework::Tensor input1_gpu; + paddle::framework::Tensor input2_gpu; + paddle::framework::Tensor out_gpu; + paddle::framework::Tensor out; + + auto* cpu_place = new paddle::platform::CPUPlace(); + float* input1_ptr = input1.mutable_data({2, 3}, *cpu_place); + float arr[6] = {0, 1, 2, 3, 4, 5}; + memcpy(input1_ptr, arr, 6 * sizeof(float)); + + auto* gpu_place = new paddle::platform::GPUPlace(0); + paddle::platform::CUDADeviceContext context(*gpu_place); + + input1_gpu.CopyFrom(input1, *gpu_place, context); + input2_gpu.CopyFrom(input1, *gpu_place, context); + + out_gpu.mutable_data({3, 3}, *gpu_place); + + paddle::operators::math::matmul( + context, input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0); + + out.CopyFrom(out_gpu, *cpu_place, context); + + float* out_ptr = out.data(); + context.Wait(); + EXPECT_EQ(out_ptr[0], 9); + EXPECT_EQ(out_ptr[1], 12); + EXPECT_EQ(out_ptr[2], 15); + EXPECT_EQ(out_ptr[3], 12); + EXPECT_EQ(out_ptr[4], 17); + EXPECT_EQ(out_ptr[5], 22); + EXPECT_EQ(out_ptr[6], 15); + EXPECT_EQ(out_ptr[7], 22); + EXPECT_EQ(out_ptr[8], 29); + delete gpu_place; +} + +TEST(math_function, gemm_notrans_cublas) { + paddle::framework::Tensor input1; + paddle::framework::Tensor input2; + paddle::framework::Tensor input3; + paddle::framework::Tensor input1_gpu; + paddle::framework::Tensor input2_gpu; + paddle::framework::Tensor input3_gpu; + + int m = 2; + int n = 3; + int k = 3; + auto* cpu_place = new paddle::platform::CPUPlace(); + float* input1_ptr = input1.mutable_data({2, 3}, *cpu_place); + float arr1[6] = {0, 1, 2, 3, 4, 5}; + memcpy(input1_ptr, arr1, 6 * sizeof(float)); + float* input2_ptr = input2.mutable_data({3, 4}, *cpu_place); + float arr2[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; + memcpy(input2_ptr, arr2, 12 * sizeof(float)); + float* input3_ptr = input3.mutable_data({2, 4}, *cpu_place); + float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7}; + memcpy(input3_ptr, arr3, 8 * sizeof(float)); + + auto* gpu_place = new paddle::platform::GPUPlace(0); + paddle::platform::CUDADeviceContext context(*gpu_place); + + input1_gpu.CopyFrom(input1, *gpu_place, context); + input2_gpu.CopyFrom(input2, *gpu_place, context); + input3_gpu.CopyFrom(input3, *gpu_place, context); + float* a = input1_gpu.data(); + float* b = input2_gpu.data(); + float* c = input3_gpu.mutable_data(*gpu_place); + + paddle::operators::math::gemm( + context, false, false, m, n, k, 1, a, 3, b + 1, 4, 1, c + 1, 4); + + input3.CopyFrom(input3_gpu, *cpu_place, context); + + // numpy code: + // a = np.arange(6).reshape(2, 3) + // b = np.arange(12).reshape(3, 4)[:, 1:] + // c = np.arange(8).reshape(2, 4)[:, 1:] + // out = np.arange(8).reshape(2, 4) + // out[:, 1:] = np.dot(a, b) + c + context.Wait(); + EXPECT_EQ(input3_ptr[0], 0); + EXPECT_EQ(input3_ptr[1], 24); + EXPECT_EQ(input3_ptr[2], 28); + EXPECT_EQ(input3_ptr[3], 
32); + EXPECT_EQ(input3_ptr[4], 4); + EXPECT_EQ(input3_ptr[5], 73); + EXPECT_EQ(input3_ptr[6], 86); + EXPECT_EQ(input3_ptr[7], 99); + delete gpu_place; +} + +TEST(math_function, gemm_trans_cublas) { + paddle::framework::Tensor input1; + paddle::framework::Tensor input2; + paddle::framework::Tensor input3; + paddle::framework::Tensor input1_gpu; + paddle::framework::Tensor input2_gpu; + paddle::framework::Tensor input3_gpu; + + int m = 2; + int n = 3; + int k = 3; + auto* cpu_place = new paddle::platform::CPUPlace(); + float* input1_ptr = input1.mutable_data({2, 3}, *cpu_place); + float arr1[6] = {0, 1, 2, 3, 4, 5}; + memcpy(input1_ptr, arr1, 6 * sizeof(float)); + float* input2_ptr = input2.mutable_data({4, 3}, *cpu_place); + float arr2[12] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11}; + memcpy(input2_ptr, arr2, 12 * sizeof(float)); + float* input3_ptr = input3.mutable_data({2, 4}, *cpu_place); + float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7}; + memcpy(input3_ptr, arr3, 8 * sizeof(float)); + + auto* gpu_place = new paddle::platform::GPUPlace(0); + paddle::platform::CUDADeviceContext context(*gpu_place); + + input1_gpu.CopyFrom(input1, *gpu_place, context); + input2_gpu.CopyFrom(input2, *gpu_place, context); + input3_gpu.CopyFrom(input3, *gpu_place, context); + float* a = input1_gpu.data(); + float* b = input2_gpu.data(); + float* c = input3_gpu.mutable_data(*gpu_place); + + paddle::operators::math::gemm( + context, false, true, m, n, k, 1, a, 3, b + 3, 3, 1, c + 1, 4); + + input3.CopyFrom(input3_gpu, *cpu_place, context); + context.Wait(); + + EXPECT_EQ(input3_ptr[0], 0); + EXPECT_EQ(input3_ptr[1], 24); + EXPECT_EQ(input3_ptr[2], 28); + EXPECT_EQ(input3_ptr[3], 32); + EXPECT_EQ(input3_ptr[4], 4); + EXPECT_EQ(input3_ptr[5], 73); + EXPECT_EQ(input3_ptr[6], 86); + EXPECT_EQ(input3_ptr[7], 99); + delete gpu_place; +} diff --git a/paddle/operators/math/matmul.h b/paddle/operators/math/matmul.h new file mode 100644 index 0000000000000000000000000000000000000000..6ba9a0ba9a70bd938f9362179990ab68fa3186ba --- /dev/null +++ b/paddle/operators/math/matmul.h @@ -0,0 +1,124 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/operators/math/math_function.h" + +namespace paddle { +namespace operators { +namespace math { + +// Implements the logic of numpy matmul: +// https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html +// +// but allowing also for a, b to be transposed +// +// Both a & b can be 1- to 3-dimensional. Higher rank tensors are not supported +// yet. 
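For reference, a minimal sketch of how `MatMulFunctor` is meant to be invoked from an operator kernel, based on the `operator()` signature defined just below. The `<Place, T>` template arguments are an assumption (the angle-bracketed parameters did not survive in this extract), inferred from the `gemm`/`batched_gemm` helpers the functor wraps:

```cpp
#include "paddle/operators/math/matmul.h"

// Illustrative only; assumes MatMulFunctor is parameterized as <Place, T>.
// `out` is assumed to be pre-allocated via mutable_data, as in matmul_op.h.
void RunMatMul(const paddle::platform::DeviceContext& ctx,
               const paddle::framework::Tensor& a,
               const paddle::framework::Tensor& b,
               paddle::framework::Tensor* out) {
  paddle::operators::math::MatMulFunctor<paddle::platform::CPUPlace, float>
      matmul;
  // out = 1.0f * (a x b^T) + 0.0f * out, following the numpy.matmul-style
  // shape rules described in the comment above.
  matmul(ctx, a, /*trans_a=*/false, b, /*trans_b=*/true, 1.0f, out, 0.0f);
}
```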
+template +class MatMulFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& a, bool trans_a, + const framework::Tensor& b, bool trans_b, T alpha, + framework::Tensor* out, T beta) { + auto dim_a = a.dims(); + auto dim_b = b.dims(); + + PADDLE_ENFORCE(a.place() == b.place() && b.place() == out->place(), + "Tensors must all be in the same place."); + PADDLE_ENFORCE_GE(dim_a.size(), 1, + "Input tensor a must be at least 1-dimensional."); + PADDLE_ENFORCE_GE(dim_b.size(), 1, + "Input tensor b must be at least 1-dimensional."); + PADDLE_ENFORCE_LE(dim_a.size(), 3, + "Input tensor a must be at most 3-dimensional."); + PADDLE_ENFORCE_LE(dim_b.size(), 3, + "Input tensor b must be at most 3-dimensional."); + + int M = 0, N = 0, kA = 0, kB = 0, batchCountA = 0, batchCountB = 0, + strideA = 0, strideB = 0; + + switch (dim_a.size()) { + case 1: + // similar to np.matmul: + // prepend dimension 1 (no transpose) or append dimension 1 (transpose) + M = trans_a ? dim_a[0] : 1; + kA = trans_a ? 1 : dim_a[0]; + break; + case 2: + M = trans_a ? dim_a[1] : dim_a[0]; + kA = trans_a ? dim_a[0] : dim_a[1]; + break; + case 3: + batchCountA = dim_a[0]; + M = trans_a ? dim_a[2] : dim_a[1]; + kA = trans_a ? dim_a[1] : dim_a[2]; + strideA = M * kA; + break; + default: + assert(false); + } + + switch (dim_b.size()) { + case 1: + // similar to np.matmul: + // append dimension 1 (no transpose) or prepend dimension 1 (transpose) + kB = trans_b ? 1 : dim_b[0]; + N = trans_b ? dim_b[0] : 1; + break; + case 2: + kB = trans_b ? dim_b[1] : dim_b[0]; + N = trans_b ? dim_b[0] : dim_b[1]; + break; + case 3: + batchCountB = dim_b[0]; + kB = trans_b ? dim_b[2] : dim_b[1]; + N = trans_b ? dim_b[1] : dim_b[2]; + strideB = kB * N; + break; + default: + assert(false); + } + + PADDLE_ENFORCE_EQ( + kA, kB, + "First matrix's width must be equal with second matrix's height."); + if (batchCountA && batchCountB) { + PADDLE_ENFORCE_EQ( + batchCountA, batchCountB, + "When input tensors a and b are both batched, they must have the " + "same batch dimension."); + } + int batchCount = std::max(batchCountA, batchCountB); + + CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans; + CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans; + + if (!batchCount) { + // regular matrix multiplication + gemm(context, transA, transB, M, N, kA, alpha, a.data(), + b.data(), beta, out->data()); + } else { + // batched matrix multiplication + batched_gemm(context, transA, transB, M, N, kA, alpha, + a.data(), b.data(), beta, out->data(), + batchCount, strideA, strideB); + } + } +}; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/selected_rows_functor.cc b/paddle/operators/math/selected_rows_functor.cc new file mode 100644 index 0000000000000000000000000000000000000000..f2305ea16913e927dca17e5a80201368f03ca253 --- /dev/null +++ b/paddle/operators/math/selected_rows_functor.cc @@ -0,0 +1,114 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/selected_rows_functor.h" +#include "paddle/operators/math/math_function.h" + +namespace paddle { +namespace operators { +namespace math { +template +struct SelectedRowsAdd { + void operator()(const platform::DeviceContext& context, + const framework::SelectedRows& input1, + const framework::SelectedRows& input2, + framework::SelectedRows* output) { + auto in1_height = input1.height(); + PADDLE_ENFORCE_EQ(in1_height, input2.height()); + output->set_height(in1_height); + + auto& in1_rows = input1.rows(); + auto& in2_rows = input2.rows(); + std::vector out_rows; + out_rows.reserve(in1_rows.size() + in2_rows.size()); + + // concat rows + out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end()); + out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end()); + output->set_rows(out_rows); + + auto* out_value = output->mutable_value(); + auto& in1_value = input1.value(); + auto& in2_value = input2.value(); + + auto in1_row_numel = in1_value.numel() / in1_rows.size(); + PADDLE_ENFORCE_EQ(in1_row_numel, in2_value.numel() / in2_rows.size()); + PADDLE_ENFORCE_EQ(in1_row_numel, out_value->numel() / out_rows.size()); + + auto in1_place = input1.place(); + PADDLE_ENFORCE(platform::is_cpu_place(in1_place)); + auto in2_place = input2.place(); + PADDLE_ENFORCE(platform::is_cpu_place(in2_place)); + auto out_place = context.GetPlace(); + PADDLE_ENFORCE(platform::is_cpu_place(out_place)); + + auto* out_data = out_value->data(); + auto* in1_data = in1_value.data(); + memory::Copy(boost::get(out_place), out_data, + boost::get(in1_place), in1_data, + in1_value.numel() * sizeof(T)); + + auto* in2_data = in2_value.data(); + memory::Copy(boost::get(out_place), + out_data + in1_value.numel(), + boost::get(in2_place), in2_data, + in2_value.numel() * sizeof(T)); + } +}; + +template struct SelectedRowsAdd; + +template +struct SelectedRowsAddTensor { + void operator()(const platform::DeviceContext& context, + const framework::SelectedRows& input1, + const framework::Tensor& input2, framework::Tensor* output) { + auto in1_height = input1.height(); + auto in2_dims = input2.dims(); + auto out_dims = output->dims(); + PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]); + PADDLE_ENFORCE_EQ(in1_height, out_dims[0]); + + auto& in1_value = input1.value(); + auto& in1_rows = input1.rows(); + + int64_t in1_row_numel = in1_value.numel() / in1_rows.size(); + PADDLE_ENFORCE_EQ(in1_row_numel, input2.numel() / in1_height); + PADDLE_ENFORCE_EQ(in1_row_numel, output->numel() / in1_height); + + SetConstant functor; + functor(context, output, 0.0); + + auto* in1_data = in1_value.data(); + auto* out_data = output->data(); + + for (size_t i = 0; i < in1_rows.size(); i++) { + for (int64_t j = 0; j < in1_row_numel; j++) { + out_data[in1_rows[i] * in1_row_numel + j] += + in1_data[i * in1_row_numel + j]; + } + } + + auto out_eigen = framework::EigenVector::Flatten(*output); + auto in2_eigen = framework::EigenVector::Flatten(input2); + out_eigen.device(*context.GetEigenDevice()) = + out_eigen + in2_eigen; + } +}; + +template struct SelectedRowsAddTensor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/selected_rows_functor.cu b/paddle/operators/math/selected_rows_functor.cu new file mode 100644 index 0000000000000000000000000000000000000000..ea149ebbc12beeab43a2047372352ba769959307 --- /dev/null +++ 
b/paddle/operators/math/selected_rows_functor.cu @@ -0,0 +1,142 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/math_function.h" +#include "paddle/operators/math/selected_rows_functor.h" +#include "paddle/platform/cuda_helper.h" + +namespace paddle { +namespace operators { +namespace math { +template +struct SelectedRowsAdd { + void operator()(const platform::DeviceContext& context, + const framework::SelectedRows& input1, + const framework::SelectedRows& input2, + framework::SelectedRows* output) { + auto in1_height = input1.height(); + PADDLE_ENFORCE_EQ(in1_height, input2.height()); + output->set_height(in1_height); + + auto& in1_rows = input1.rows(); + auto& in2_rows = input2.rows(); + std::vector out_rows; + out_rows.reserve(in1_rows.size() + in2_rows.size()); + + // concat rows + out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end()); + out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end()); + output->set_rows(out_rows); + + auto* out_value = output->mutable_value(); + auto& in1_value = input1.value(); + auto& in2_value = input2.value(); + + auto in1_row_numel = in1_value.numel() / in1_rows.size(); + PADDLE_ENFORCE_EQ(in1_row_numel, in2_value.numel() / in2_rows.size()); + PADDLE_ENFORCE_EQ(in1_row_numel, out_value->numel() / out_rows.size()); + + auto* out_data = out_value->data(); + auto* in1_data = in1_value.data(); + + auto in1_place = input1.place(); + PADDLE_ENFORCE(platform::is_gpu_place(in1_place)); + auto in2_place = input2.place(); + PADDLE_ENFORCE(platform::is_gpu_place(in2_place)); + auto out_place = context.GetPlace(); + PADDLE_ENFORCE(platform::is_gpu_place(out_place)); + + memory::Copy( + boost::get(out_place), out_data, + boost::get(in1_place), in1_data, + in1_value.numel() * sizeof(T), + reinterpret_cast(context).stream()); + + auto* in2_data = in2_value.data(); + memory::Copy( + boost::get(out_place), out_data + in1_value.numel(), + boost::get(in2_place), in2_data, + in2_value.numel() * sizeof(T), + reinterpret_cast(context).stream()); + } +}; + +template struct SelectedRowsAdd; + +namespace { +template +__global__ void SelectedRowsAddTensorKernel(const T* selected_rows, + const int64_t* rows, T* tensor_out, + int64_t row_numel, int block_size) { + const int ty = blockIdx.y; + int tid = threadIdx.x; + + selected_rows += ty * row_numel; + tensor_out += rows[ty] * row_numel; + + for (int index = tid; index < row_numel; index += block_size) { + // Since index in rows of SelectedRows can be duplicate, we can not use + // tensor_out[index] += selected_rows[index]; Instead, we have to use + // AtomicAdd to avoid concurrent write error. 
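// e.g. with rows = {0, 4, 0}, the blocks handling the first and the third
// selected row both scatter into tensor_out row 0; a plain "+=" on those
// addresses could interleave the read-modify-write and drop one of the
// contributions, while CudaAtomicAdd serializes the updates.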
+ paddle::platform::CudaAtomicAdd(tensor_out + index, selected_rows[index]); + } +} +} // namespace + +template +struct SelectedRowsAddTensor { + void operator()(const platform::DeviceContext& context, + const framework::SelectedRows& input1, + const framework::Tensor& input2, framework::Tensor* output) { + auto in1_height = input1.height(); + auto in2_dims = input2.dims(); + auto out_dims = output->dims(); + PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]); + PADDLE_ENFORCE_EQ(in1_height, out_dims[0]); + + auto& in1_value = input1.value(); + auto& in1_rows = input1.rows(); + + int64_t in1_row_numel = in1_value.numel() / in1_rows.size(); + PADDLE_ENFORCE_EQ(in1_row_numel, input2.numel() / in1_height); + PADDLE_ENFORCE_EQ(in1_row_numel, output->numel() / in1_height); + + auto* in1_data = in1_value.data(); + auto* in2_data = input2.data(); + auto* out_data = output->data(); + + SetConstant functor; + functor(context, output, 0.0); + + int block_size = 256; + dim3 threads(block_size, 1); + dim3 grid(1, in1_rows.size()); + SelectedRowsAddTensorKernel< + T><<(context) + .stream()>>>(in1_data, in1_rows.data(), out_data, + in1_row_numel, block_size); + + auto out_eigen = framework::EigenVector::Flatten(*output); + auto in2_eigen = framework::EigenVector::Flatten(input2); + out_eigen.device(*context.GetEigenDevice()) = + out_eigen + in2_eigen; + } +}; + +template struct SelectedRowsAddTensor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/selected_rows_functor.h b/paddle/operators/math/selected_rows_functor.h new file mode 100644 index 0000000000000000000000000000000000000000..53ab240ca600cd4a817afa2c19fb8d9427c6f3da --- /dev/null +++ b/paddle/operators/math/selected_rows_functor.h @@ -0,0 +1,41 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#pragma once +#include "paddle/framework/selected_rows.h" +#include "paddle/platform/device_context.h" + +namespace paddle { +namespace operators { +namespace math { + +// SelectedRows + SelectedRows will simplely concat value and rows. +// The real computation happens in dealing with LoDTensor. +template +struct SelectedRowsAdd { + void operator()(const platform::DeviceContext& context, + const framework::SelectedRows& input1, + const framework::SelectedRows& input2, + framework::SelectedRows* output); +}; + +template +struct SelectedRowsAddTensor { + void operator()(const platform::DeviceContext& context, + const framework::SelectedRows& input1, + const framework::Tensor& input2, framework::Tensor* output); +}; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/selected_rows_functor_test.cc b/paddle/operators/math/selected_rows_functor_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..4f7760cb713b6bf58c82f38fb043d7d53d82710a --- /dev/null +++ b/paddle/operators/math/selected_rows_functor_test.cc @@ -0,0 +1,106 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. 
All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/selected_rows_functor.h" +#include "gtest/gtest.h" +#include "paddle/operators/math/math_function.h" + +TEST(selected_rows_functor, cpu_add) { + using namespace paddle::framework; + using namespace paddle::platform; + using namespace paddle::operators::math; + + CPUPlace cpu_place; + CPUDeviceContext ctx(cpu_place); + SetConstant functor; + int64_t height = 10; + int64_t row_numel = 10; + + std::vector rows1{0, 4, 7}; + std::unique_ptr selected_rows1{new SelectedRows(rows1, height)}; + auto* in1_value = selected_rows1->mutable_value(); + in1_value->mutable_data( + make_ddim({static_cast(rows1.size()), row_numel}), cpu_place); + functor(ctx, in1_value, 1.0); + + std::vector rows2{0, 5, 7, 9}; + std::unique_ptr selected_rows2{new SelectedRows(rows2, height)}; + auto* in2_value = selected_rows2->mutable_value(); + in2_value->mutable_data( + make_ddim({static_cast(rows2.size()), row_numel}), cpu_place); + functor(ctx, in2_value, 2.0); + + std::unique_ptr output{new SelectedRows()}; + auto* out_value = output->mutable_value(); + + // simplely concat two SelectedRows + out_value->mutable_data(make_ddim({7, 10}), cpu_place); + + SelectedRowsAdd add_functor; + add_functor(ctx, *selected_rows1, *selected_rows2, output.get()); + + auto out_height = output->height(); + EXPECT_EQ(out_height, height); + + auto& out_rows = output->rows(); + + // input1 rows + EXPECT_EQ(out_rows[0], 0); + EXPECT_EQ(out_rows[1], 4); + EXPECT_EQ(out_rows[2], 7); + // input2 rows + EXPECT_EQ(out_rows[3], 0); + EXPECT_EQ(out_rows[4], 5); + EXPECT_EQ(out_rows[5], 7); + EXPECT_EQ(out_rows[6], 9); + + auto* out_data = output->value().data(); + // input1 value + EXPECT_EQ(out_data[0 * row_numel + 0], 1.0); + EXPECT_EQ(out_data[0 * row_numel + 8], 1.0); + EXPECT_EQ(out_data[1 * row_numel + 1], 1.0); + EXPECT_EQ(out_data[2 * row_numel + 6], 1.0); + // input2 value + EXPECT_EQ(out_data[3 * row_numel + 3], 2.0); + EXPECT_EQ(out_data[3 * row_numel + 8], 2.0); + EXPECT_EQ(out_data[4 * row_numel + 4], 2.0); + EXPECT_EQ(out_data[5 * row_numel + 7], 2.0); + EXPECT_EQ(out_data[6 * row_numel + 9], 2.0); + + std::unique_ptr tensor1{new Tensor()}; + tensor1->mutable_data(make_ddim({height, row_numel}), cpu_place); + functor(ctx, tensor1.get(), 3.0); + + std::unique_ptr tensor2{new Tensor()}; + tensor2->mutable_data(make_ddim({height, row_numel}), cpu_place); + + SelectedRowsAddTensor add_tensor_functor; + add_tensor_functor(ctx, *output, *tensor1, tensor2.get()); + + auto* tensor2_data = tensor2->data(); + // row0: 1.0 + 2.0 + 3.0 + EXPECT_EQ(tensor2_data[0 * row_numel + 0], 6.0); + // row1: 3.0 + EXPECT_EQ(tensor2_data[1 * row_numel + 1], 3.0); + // row4 : 1.0 + 3.0 + EXPECT_EQ(tensor2_data[4 * row_numel + 6], 4.0); + // row5: 2.0 + 3.0 + EXPECT_EQ(tensor2_data[5 * row_numel + 7], 5.0); + // row6: 3.0 + EXPECT_EQ(tensor2_data[6 * row_numel + 1], 3.0); + // row7: 1.0 + 2.0 + 3.0 + EXPECT_EQ(tensor2_data[7 * row_numel + 3], 6.0); + // row9: 2.0 + 3.0 + 
EXPECT_EQ(tensor2_data[9 * row_numel + 6], 5.0); +} diff --git a/paddle/operators/math/selected_rows_functor_test.cu b/paddle/operators/math/selected_rows_functor_test.cu new file mode 100644 index 0000000000000000000000000000000000000000..69607c5afc46921c08ce278bf164e5bed7b446f8 --- /dev/null +++ b/paddle/operators/math/selected_rows_functor_test.cu @@ -0,0 +1,115 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "gtest/gtest.h" +#include "paddle/operators/math/math_function.h" +#include "paddle/operators/math/selected_rows_functor.h" + +TEST(selected_rows_functor, gpu_add) { + using namespace paddle::framework; + using namespace paddle::platform; + using namespace paddle::operators::math; + + GPUPlace gpu_place(0); + CPUPlace cpu_place; + CUDADeviceContext ctx(gpu_place); + SetConstant functor; + int64_t height = 10; + int64_t row_numel = 10; + + std::vector rows1{0, 4, 7}; + std::unique_ptr selected_rows1{new SelectedRows(rows1, height)}; + auto* in1_value = selected_rows1->mutable_value(); + in1_value->mutable_data( + make_ddim({static_cast(rows1.size()), row_numel}), gpu_place); + functor(ctx, in1_value, 1.0); + + std::vector rows2{0, 5, 7, 9}; + std::unique_ptr selected_rows2{new SelectedRows(rows2, height)}; + auto* in2_value = selected_rows2->mutable_value(); + in2_value->mutable_data( + make_ddim({static_cast(rows2.size()), row_numel}), gpu_place); + functor(ctx, in2_value, 2.0); + + std::unique_ptr output{new SelectedRows()}; + auto* out_value = output->mutable_value(); + + // simplely concat two SelectedRows + out_value->mutable_data(make_ddim({7, 10}), gpu_place); + + SelectedRowsAdd add_functor; + add_functor(ctx, *selected_rows1, *selected_rows2, output.get()); + + auto out_height = output->height(); + EXPECT_EQ(out_height, height); + + auto& out_rows = output->rows(); + + // input1 rows + EXPECT_EQ(out_rows[0], 0); + EXPECT_EQ(out_rows[1], 4); + EXPECT_EQ(out_rows[2], 7); + // input2 rows + EXPECT_EQ(out_rows[3], 0); + EXPECT_EQ(out_rows[4], 5); + EXPECT_EQ(out_rows[5], 7); + EXPECT_EQ(out_rows[6], 9); + + Tensor out_cpu; + out_cpu.CopyFrom(*out_value, cpu_place, ctx); + ctx.Wait(); + + auto* out_cpu_data = out_cpu.data(); + // input1 value + EXPECT_EQ(out_cpu_data[0 * row_numel + 0], 1.0); + EXPECT_EQ(out_cpu_data[0 * row_numel + 8], 1.0); + EXPECT_EQ(out_cpu_data[1 * row_numel + 1], 1.0); + EXPECT_EQ(out_cpu_data[2 * row_numel + 6], 1.0); + // input2 value + EXPECT_EQ(out_cpu_data[3 * row_numel + 3], 2.0); + EXPECT_EQ(out_cpu_data[3 * row_numel + 8], 2.0); + EXPECT_EQ(out_cpu_data[4 * row_numel + 4], 2.0); + EXPECT_EQ(out_cpu_data[5 * row_numel + 7], 2.0); + EXPECT_EQ(out_cpu_data[6 * row_numel + 9], 2.0); + + std::unique_ptr tensor1{new Tensor()}; + tensor1->mutable_data(make_ddim({height, row_numel}), gpu_place); + functor(ctx, tensor1.get(), 3.0); + + std::unique_ptr tensor2{new Tensor()}; + tensor2->mutable_data(make_ddim({height, row_numel}), gpu_place); + + SelectedRowsAddTensor add_tensor_functor; + 
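// output holds the concatenated rows {0, 4, 7, 0, 5, 7, 9}; adding it into
// the dense tensor1 (filled with 3.0) exercises the duplicate-row scatter
// path on the GPU, where rows 0 and 7 each accumulate contributions from
// both inputs.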
add_tensor_functor(ctx, *output, *tensor1, tensor2.get()); + + Tensor tensor2_cpu; + tensor2_cpu.CopyFrom(*tensor2, cpu_place, ctx); + ctx.Wait(); + + auto* tensor2_cpu_data = tensor2_cpu.data(); + // row0: 1.0 + 2.0 + 3.0 + EXPECT_EQ(tensor2_cpu_data[0 * row_numel + 0], 6.0); + // row1: 3.0 + EXPECT_EQ(tensor2_cpu_data[1 * row_numel + 1], 3.0); + // row4 : 1.0 + 3.0 + EXPECT_EQ(tensor2_cpu_data[4 * row_numel + 6], 4.0); + // row5: 2.0 + 3.0 + EXPECT_EQ(tensor2_cpu_data[5 * row_numel + 7], 5.0); + // row6: 3.0 + EXPECT_EQ(tensor2_cpu_data[6 * row_numel + 1], 3.0); + // row7: 1.0 + 2.0 + 3.0 + EXPECT_EQ(tensor2_cpu_data[7 * row_numel + 3], 6.0); + // row9: 2.0 + 3.0 + EXPECT_EQ(tensor2_cpu_data[9 * row_numel + 6], 5.0); +} diff --git a/paddle/operators/math/sequence2batch.cc b/paddle/operators/math/sequence2batch.cc new file mode 100644 index 0000000000000000000000000000000000000000..10c6e105b950b9d510e7a14828d72531e8eb0028 --- /dev/null +++ b/paddle/operators/math/sequence2batch.cc @@ -0,0 +1,61 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/sequence2batch.h" + +namespace paddle { +namespace operators { +namespace math { + +template +class CopyMatrixRowsFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::LoDTensor& src, const size_t* index, + framework::LoDTensor& dst, bool is_src_index) { + auto src_dims = src.dims(); + auto dst_dims = dst.dims(); + PADDLE_ENFORCE_EQ(src_dims.size(), 2UL, + "The src must be matrix with rank 2."); + PADDLE_ENFORCE_EQ(dst_dims.size(), 2UL, + "The dst must be matrix with rank 2."); + PADDLE_ENFORCE_EQ(src_dims[1], dst_dims[1], + "The width of src and dst must be same."); + auto height = dst_dims[0]; + auto width = dst_dims[1]; + auto* src_data = src.data(); + auto* dst_data = dst.data(); + for (int i = 0; i < height; ++i) { + if (is_src_index) { + memcpy(dst_data + i * width, src_data + index[i] * width, + width * sizeof(T)); + } else { + memcpy(dst_data + index[i] * width, src_data + i * width, + width * sizeof(T)); + } + } + } +}; + +template class CopyMatrixRowsFunctor; +template class CopyMatrixRowsFunctor; + +template class LoDTensor2BatchFunctor; +template class LoDTensor2BatchFunctor; +template class Batch2LoDTensorFunctor; +template class Batch2LoDTensorFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/sequence2batch.cu b/paddle/operators/math/sequence2batch.cu new file mode 100644 index 0000000000000000000000000000000000000000..4f349946785171e6c59b22163ba76791c7244f88 --- /dev/null +++ b/paddle/operators/math/sequence2batch.cu @@ -0,0 +1,78 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/sequence2batch.h" + +namespace paddle { +namespace operators { +namespace math { + +template +__global__ void CopyMatrixRowsKernel(const T* src, T* dst, const size_t* index, + int64_t height, int64_t width, + bool is_src_index) { + int idx = threadIdx.x; + int idy = threadIdx.y; + int id = blockIdx.x + idy * GridDimX; + while (id < height) { + int src_idx = is_src_index ? index[id] : id; + int dst_idx = is_src_index ? id : index[id]; + const T* src_data = src + src_idx * width; + T* dst_data = dst + dst_idx * width; + for (int i = idx; i < width; i += BlockDimX) { + dst_data[i] = src_data[i]; + } + id += BlockDimY * GridDimX; + } +} + +template +class CopyMatrixRowsFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::LoDTensor& src, const size_t* index, + framework::LoDTensor& dst, bool is_src_index) { + auto src_dims = src.dims(); + auto dst_dims = dst.dims(); + PADDLE_ENFORCE_EQ(src_dims.size(), 2, + "The src must be matrix with rank 2."); + PADDLE_ENFORCE_EQ(dst_dims.size(), 2, + "The dst must be matrix with rank 2."); + PADDLE_ENFORCE_EQ(src_dims[1], dst_dims[1], + "The width of src and dst must be same."); + auto height = dst_dims[0]; + auto width = dst_dims[1]; + auto* src_data = src.data(); + auto* dst_data = dst.data(); + + dim3 threads(128, 8); + dim3 grid(8, 1); + auto stream = + reinterpret_cast(context).stream(); + CopyMatrixRowsKernel<<>>( + src_data, dst_data, index, height, width, is_src_index); + } +}; + +template class CopyMatrixRowsFunctor; +template class CopyMatrixRowsFunctor; + +template class LoDTensor2BatchFunctor; +template class LoDTensor2BatchFunctor; +template class Batch2LoDTensorFunctor; +template class Batch2LoDTensorFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/sequence2batch.h b/paddle/operators/math/sequence2batch.h new file mode 100644 index 0000000000000000000000000000000000000000..03cd018e46e90c9bbe689c9686377e0e998ee513 --- /dev/null +++ b/paddle/operators/math/sequence2batch.h @@ -0,0 +1,148 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/lod_tensor.h" +#include "paddle/framework/tensor.h" +#include "paddle/platform/device_context.h" + +namespace paddle { +namespace operators { +namespace math { + +template +class CopyMatrixRowsFunctor { + public: + // If is_src_index is true, + // copy the indexed rows of input src to the output dst. 
+ // If is_src_index is false, + // copy the input src to the indexed rows of output dst. + // The indexed rows are based on the input index. + void operator()(const platform::DeviceContext& context, + const framework::LoDTensor& src, const size_t* index, + framework::LoDTensor& dst, bool is_src_index); +}; + +template +class LoDTensor2BatchFunctor { + // Calculate the length of each sequence and + // sort sequence index by the length. + // example: sequences = {s0, s1, s2} + // s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2 + // seq_info[3] = {(4, 5, 1), (0, 4, 0), (9, 3, 2)} + // + struct SeqInfo { + SeqInfo(int start, int length, int seq_idx) + : start(start), length(length), seq_idx(seq_idx) {} + int start; + int length; + int seq_idx; + }; + + public: + void operator()(const platform::DeviceContext& context, + const framework::LoDTensor& lod_tensor, + framework::LoDTensor& batch, bool is_reverse) const { + auto lods = lod_tensor.lod(); + PADDLE_ENFORCE_EQ(lods.size(), 1UL, "Only support one level sequence now."); + auto lod = lods[0]; + + std::vector seq_info; + for (size_t seq_id = 0; seq_id < lod.size() - 1; ++seq_id) { + int length = lod[seq_id + 1] - lod[seq_id]; + seq_info.emplace_back(lod[seq_id], length, seq_id); + } + + std::sort(seq_info.begin(), seq_info.end(), + [](SeqInfo a, SeqInfo b) { return a.length > b.length; }); + + // calculate the start position of each batch + // (numBatch equal the maxLength of sequences) + // example: sequences = {s0, s1, s2} + // s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2 + // num_batch = 5, + // batchIndex = {b0, b1, b2, b3, b4} + // b0: 1 0 2, b1: 1 0 2, b2: 1 0 2, b3: 1 0, b4: 1 + // batch_start_positions[6] = {0, 3, 6, 9, 11, 12} + // batch_start_positions[0] = len(b0) + // batch_start_positions[1] = len(b0) + len(b1) + // batch_start_positions[2] = len(b0) + len(b1) + len(b2) + // ... + // seq2batch_idx[12] = {4, 0, 9, + // 5, 1, 10, + // 6, 2, 11, + // 7, 3, + // 8} + // The batch number represents batch size after rearranging the + // input LodTensor. It is also the maximum length of input sequence. + + paddle::framework::LoD batch_lods; + batch_lods.emplace_back(std::vector{0}); + batch_lods.emplace_back(std::vector{0}); + + // batch_lods[0] is the start positions for batch LoDTensor + int num_batch = seq_info[0].length; + batch_lods[0].resize(static_cast(num_batch + 1)); + // batch_lods[1] is the raw index in the input LoDTensor + auto dims = lod_tensor.dims(); + batch_lods[1].resize(static_cast(dims[0])); + + size_t* batch_starts = batch_lods[0].data(); + size_t* seq2batch_idx = batch_lods[1].data(); + batch_starts[0] = 0; + for (size_t n = 0; n < num_batch; n++) { + auto batch_id = static_cast(batch_starts[n]); + for (size_t i = 0; i < seq_info.size(); ++i) { + size_t seq_len = seq_info[i].length; + int start = seq_info[i].start; + if (n < seq_len) { + seq2batch_idx[batch_id] = + is_reverse ? 
start + seq_len - 1 - n : start + n; + batch_id++; + } else { + break; + } + } + batch_starts[n + 1] = static_cast(batch_id); + } + batch.set_lod(batch_lods); + + CopyMatrixRowsFunctor to_batch; + to_batch(context, lod_tensor, seq2batch_idx, batch, true); + } +}; + +template +class Batch2LoDTensorFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::LoDTensor& batch, + framework::LoDTensor& lod_tensor) const { + auto in_lod = batch.lod(); + PADDLE_ENFORCE_EQ(in_lod.size(), 2UL, + "The LoD size of input `batch` should be 2."); + auto out_lod = lod_tensor.lod()[0]; + auto num = out_lod[out_lod.size() - 1]; + PADDLE_ENFORCE_EQ(num, lod_tensor.dims()[0]); + PADDLE_ENFORCE_EQ(num, in_lod[1].size()); + PADDLE_ENFORCE_EQ(num, batch.dims()[0]); + CopyMatrixRowsFunctor to_seq; + size_t* index = in_lod[1].data(); + to_seq(context, batch, index, lod_tensor, false); + } +}; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/vol2col_test.cc b/paddle/operators/math/vol2col_test.cc index 2d69218843a69497b5b501d4297f2ec5ab26a844..74590d17cd0f974f830e760d85daef8ab5318a43 100644 --- a/paddle/operators/math/vol2col_test.cc +++ b/paddle/operators/math/vol2col_test.cc @@ -78,7 +78,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - input.CopyFrom(input_tmp, *place, *context); + input.CopyFrom(input_tmp, *place, *context); } output.mutable_data({1, filter_size, filter_size, filter_size, output_depth, output_height, output_width}, @@ -93,7 +93,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { out_cfo_ptr = output.data(); } else { - output_tmp.CopyFrom(output, paddle::platform::CPUPlace(), *context); + output_tmp.CopyFrom(output, paddle::platform::CPUPlace(), *context); out_cfo_ptr = output_tmp.data(); } @@ -107,7 +107,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - input.CopyFrom(input_tmp, *place, *context); + input.CopyFrom(input_tmp, *place, *context); } paddle::operators::math::Col2VolFunctor col2vol; @@ -118,7 +118,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { in_ptr = input.data(); } else { - input_tmp.CopyFrom(input, paddle::platform::CPUPlace(), *context); + input_tmp.CopyFrom(input, paddle::platform::CPUPlace(), *context); in_ptr = input_tmp.data(); } diff --git a/paddle/operators/matmul_op.cc b/paddle/operators/matmul_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..5ecbee3b413617e3a5523d9a32e72bc08bd316c5 --- /dev/null +++ b/paddle/operators/matmul_op.cc @@ -0,0 +1,208 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/matmul_op.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; + +class MatMulOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext* context) const override { + PADDLE_ENFORCE(context->HasInput("X"), + "Input(X) of MatMulOp should not be null."); + PADDLE_ENFORCE(context->HasInput("Y"), + "Input(Y) of MatMulOp should not be null."); + PADDLE_ENFORCE(context->HasOutput("Out"), + "Output(Out) of MatMulOp should not be null."); + + auto dim_x = context->GetInputDim("X"); + auto dim_y = context->GetInputDim("Y"); + bool transpose_x = context->Attrs().Get("transpose_X"); + bool transpose_y = context->Attrs().Get("transpose_Y"); + + PADDLE_ENFORCE_GE(dim_x.size(), 1, + "Input tensor X must be at least 1-dimensional."); + PADDLE_ENFORCE_GE(dim_y.size(), 1, + "Input tensor Y must be at least 1-dimensional."); + PADDLE_ENFORCE_LE(dim_x.size(), 3, + "Input tensor X must be at most 3-dimensional."); + PADDLE_ENFORCE_LE(dim_y.size(), 3, + "Input tensor Y must be at most 3-dimensional."); + + int M = 0, N = 0, KX = 0, KY = 0, batchCountX = 0, batchCountY = 0; + bool remove_initial_dim = false, remove_final_dim = false; + + switch (dim_x.size()) { + case 1: + if (transpose_x) { + M = dim_x[0]; + KX = 1; + } else { + M = 1; + KX = dim_x[0]; + remove_initial_dim = true; + } + break; + case 2: + M = transpose_x ? dim_x[1] : dim_x[0]; + KX = transpose_x ? dim_x[0] : dim_x[1]; + break; + case 3: + batchCountX = dim_x[0]; + M = transpose_x ? dim_x[2] : dim_x[1]; + KX = transpose_x ? dim_x[1] : dim_x[2]; + break; + default: + assert(false); + } + + switch (dim_y.size()) { + case 1: + if (transpose_y) { + N = dim_y[0]; + KY = 1; + } else { + N = 1; + KY = dim_y[0]; + remove_final_dim = true; + } + break; + case 2: + KY = transpose_y ? dim_y[1] : dim_y[0]; + N = transpose_y ? dim_y[0] : dim_y[1]; + break; + case 3: + batchCountY = dim_y[0]; + KY = transpose_y ? dim_y[2] : dim_y[1]; + N = transpose_y ? dim_y[1] : dim_y[2]; + break; + default: + assert(false); + } + + PADDLE_ENFORCE_EQ( + KX, KY, + "First matrix's width must be equal with second matrix's height."); + if (batchCountX && batchCountY) { + PADDLE_ENFORCE_EQ( + batchCountX, batchCountY, + "When Input(X) and Input(Y) are both three dimensional, they " + "must have the same batch dimension."); + } + int batchCount = std::max(batchCountX, batchCountY); + + std::vector dim_out; + if (batchCount) { + dim_out.push_back(batchCount); + } + if (!remove_initial_dim) { + dim_out.push_back(M); + } + if (!remove_final_dim) { + dim_out.push_back(N); + } + if (dim_out.size() == 0) { + // We don't support 0-dimensional Tensors (scalars), so instead + // treat the output as a Tensor of shape (1, ) in this case. + dim_out.push_back(1); + } + context->SetOutputDim("Out", framework::make_ddim(dim_out)); + context->ShareLoD("X", /*->*/ "Out"); + } +}; + +class MatMulOpMaker : public framework::OpProtoAndCheckerMaker { + public: + MatMulOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "The first input of MatMul op"); + AddInput("Y", "The second input of MatMul op"); + AddOutput("Out", "The output of MatMul op"); + AddAttr("transpose_X", + R"DOC(If true, use the transpose of `X`. + )DOC") + .SetDefault(false); + AddAttr("transpose_Y", + R"DOC(If true, use the transpose of `Y`. 
+ )DOC") + .SetDefault(false); + AddComment(R"DOC( +The MatMul operator is used to perform (batched) matrix multiplication +over the last two dimensions of the input tensors `X` and `Y`. + +If a transpose flag is specified, the last two dimensions of the +tensor are transposed. If the tensor is rank-1 of shape [D], then +for `X` it is treated as [1, D] in nontransposed form and as [D, 1] +in transposed form, whereas for `Y` it is the opposite: It is treated +as [D, 1] in nontransposed form and as [1, D] in transposed form. + +Examples without transpose: +- X: [K], Y: [K] => Out: [1] +- X: [K], Y: [K, N] => Out: [N] +- X: [B, M, K], Y: [K] => Out: [B, M] +- X: [M, K], Y: [B, K, N] => Out: [B, M, N] +- X: [B, M, K], Y: [B, K, N] => Out: [B, M, N] + +The behavior is designed to be similar to the `numpy.matmul` function. +The differences are: +- Currently only rank 1 to rank 3 input tensors are supported. +- We add `transpose_X` and `transpose_Y` flags. + +Both the input `X` and `Y` can carry the LoD (Level of Details) information, +or not. But the output only shares the LoD with input `X`. +)DOC"); + } +}; + +class MatMulOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext* context) const override { + PADDLE_ENFORCE(context->HasInput("X"), "Input(X) should not be null"); + PADDLE_ENFORCE(context->HasInput("Y"), "Input(Y) should not be null"); + PADDLE_ENFORCE(context->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) should not be null"); + auto x_dims = context->GetInputDim("X"); + auto y_dims = context->GetInputDim("Y"); + + auto x_grad_name = framework::GradVarName("X"); + auto y_grad_name = framework::GradVarName("Y"); + + if (context->HasOutput(x_grad_name)) { + context->SetOutputDim(x_grad_name, x_dims); + } + if (context->HasOutput(y_grad_name)) { + context->SetOutputDim(y_grad_name, y_dims); + } + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(matmul, ops::MatMulOp, ops::MatMulOpMaker, matmul_grad, + ops::MatMulOpGrad); +REGISTER_OP_CPU_KERNEL(matmul, + ops::MatMulKernel); +REGISTER_OP_CPU_KERNEL( + matmul_grad, ops::MatMulGradKernel); diff --git a/paddle/operators/matmul_op.cu b/paddle/operators/matmul_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..b7e66382f00445b087e14103e7a148d450b37405 --- /dev/null +++ b/paddle/operators/matmul_op.cu @@ -0,0 +1,21 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#include "paddle/operators/matmul_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(matmul, + ops::MatMulKernel); +REGISTER_OP_GPU_KERNEL( + matmul_grad, ops::MatMulGradKernel); diff --git a/paddle/operators/matmul_op.h b/paddle/operators/matmul_op.h new file mode 100644 index 0000000000000000000000000000000000000000..5ce30740c90b5cd0bd4f8ab183cf985ed5d827c1 --- /dev/null +++ b/paddle/operators/matmul_op.h @@ -0,0 +1,228 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include "paddle/framework/op_registry.h" +#include "paddle/operators/math/matmul.h" +#include "paddle/operators/transpose_op.h" + +namespace paddle { +namespace operators { +namespace matmul_detail { + +using Tensor = framework::Tensor; +using DDim = framework::DDim; +using framework::make_ddim; +using framework::vectorize; + +template +class MatMulKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + const Tensor& x = *context.Input("X"); + const Tensor& y = *context.Input("Y"); + Tensor* out = context.Output("Out"); + out->mutable_data(context.GetPlace()); + bool transpose_x = context.Attr("transpose_X"); + bool transpose_y = context.Attr("transpose_Y"); + + math::MatMulFunctor()(context.device_context(), x, transpose_x, y, + transpose_y, T(1), out, T(0)); + } +}; + +template +inline Tensor Reshape(const Tensor& input, const DDim& dims) { + Tensor output; + output.ShareDataWith(input); + output.Resize(dims); + return output; +} + +// Reshape a rank-3 tensor from P x M x N to (P * M) x N. +// Identity op if the tensor is not of rank 3. +template +Tensor CombineBatchAndM(const Tensor& input) { + Tensor output; + output.ShareDataWith(input); + auto in_dims = input.dims(); + if (in_dims.size() == 3) { + std::vector out_dims = {in_dims[0] * in_dims[1], in_dims[2]}; + output.Resize(make_ddim(out_dims)); + } + return output; +} + +// Reshape a rank-3 tensor from P x M x N to M x (P * N). +// (Warning: This requires transposing data and writes into new memory.) +// Identity op if the tensor is not of rank 3. +template +Tensor CombineBatchAndN(const framework::ExecutionContext& context, + const Tensor& input) { + Tensor output; + auto in_dims = input.dims(); + if (in_dims.size() == 3) { + output.Resize(in_dims); + output.mutable_data(context.GetPlace()); + EigenTranspose(context, input, output, {1, 0, 2}); + std::vector out_dims = {in_dims[1], in_dims[0] * in_dims[2]}; + output.Resize(make_ddim(out_dims)); + } else { + output.ShareDataWith(input); + } + return output; +} + +// Using dimensional constraints on matrix multiplication, it is +// straight-forward to check the following table for when X and Y +// are both matrices. 
+// +// transpose_X | False | True | False | True +// transpose_Y | False | False | True | True +// -----------+----------+----------+----------+----------- +// dX = | dOut Y^T | Y dOut^T | dOut Y | Y^T dOut^T +// dY = | X^T dOut | X dOut | dOut^T X | dOut^T X^T +// +// When X is a vector of size K, we treat it instead as a matrix of shape +// (1, K). Similarly, when Y is a vector of size K, we treat it instead as +// a matrix of shape (K, 1). +// +// When X and Y are both 3-dimensional tensors, then the first dimension +// the batch dimension can be ignored and the exact same formulas apply +// as for two matrices. +// +// Finally, when, e.g., X is a 3-dimensional tensor but Y is a matrix, we end +// up with formulas like +// +// dY_{ij} = \sum_{p, m} X_{pmi} dOut_{pmj} +// +// To handle this sort of scenario, we reshape X : P x M x K, dOut: P x M x N +// to X: (P * M) x K, dOut: (P * M) x N. +template +class MatMulGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + const Tensor& x = *context.Input("X"); + const Tensor& y = *context.Input("Y"); + const Tensor& dout = *context.Input(framework::GradVarName("Out")); + Tensor* dx = context.Output(framework::GradVarName("X")); + Tensor* dy = context.Output(framework::GradVarName("Y")); + bool transpose_x = context.Attr("transpose_X"); + bool transpose_y = context.Attr("transpose_Y"); + + std::vector x_dims = vectorize(x.dims()); + std::vector y_dims = vectorize(y.dims()); + + // If X is a vector, reshape it to a matrix. + if (x_dims.size() == 1) { + x_dims.insert(x_dims.begin(), 1); + } + + // If Y is a vector, reshape it to a matrix. + if (y_dims.size() == 1) { + y_dims.push_back(1); + } + + // Fix the dOut dimensions. + int M = 0, N = 0, batchCountX = 0, batchCountY = 0; + + switch (x_dims.size()) { + case 2: + M = transpose_x ? x_dims[1] : x_dims[0]; + break; + case 3: + batchCountX = x_dims[0]; + M = transpose_x ? x_dims[2] : x_dims[1]; + break; + default: + assert(false); + } + + switch (y_dims.size()) { + case 2: + N = transpose_y ? y_dims[0] : y_dims[1]; + break; + case 3: + batchCountY = y_dims[0]; + N = transpose_y ? y_dims[1] : y_dims[2]; + break; + default: + assert(false); + } + if (batchCountX && batchCountY) { + PADDLE_ENFORCE_EQ( + batchCountX, batchCountY, + "When Input(X) and Input(Y) are both three dimensional, they " + "must have the same batch dimension."); + } + int batchCount = std::max(batchCountX, batchCountY); + std::vector dout_dims = {M, N}; + if (batchCount) { + dout_dims.insert(dout_dims.begin(), batchCount); + } + Tensor X = Reshape(x, make_ddim(x_dims)); + Tensor Y = Reshape(y, make_ddim(y_dims)); + Tensor dOut = Reshape(dout, make_ddim(dout_dims)); + + if (dx) { + dx->mutable_data(context.GetPlace()); + const Tensor& dOut_for_dX = + (x_dims.size() == 2 && y_dims.size() == 3) + ? CombineBatchAndN(context, dOut) + : dOut; + if (x_dims.size() == 2 && y_dims.size() == 3) { + Y = transpose_y ? CombineBatchAndM(Y) + : CombineBatchAndN(context, Y); + } + if (transpose_x) { + math::MatMulFunctor()(context.device_context(), Y, + transpose_y, dOut_for_dX, transpose_x, + T(1), dx, T(0)); + } else { + math::MatMulFunctor()(context.device_context(), dOut_for_dX, + transpose_x, Y, !transpose_y, T(1), dx, + T(0)); + } + } + + if (dy) { + dy->mutable_data(context.GetPlace()); + const Tensor& dOut_for_dY = (y_dims.size() == 2 && x_dims.size() == 3) + ? 
CombineBatchAndM(dOut) + : dOut; + if (y_dims.size() == 2 && x_dims.size() == 3) { + X = transpose_x ? CombineBatchAndN(context, X) + : CombineBatchAndM(X); + dOut = CombineBatchAndM(dOut); + } + if (transpose_y) { + math::MatMulFunctor()(context.device_context(), dOut_for_dY, + transpose_y, X, transpose_x, T(1), dy, + T(0)); + } else { + math::MatMulFunctor()(context.device_context(), X, + !transpose_x, dOut_for_dY, transpose_y, + T(1), dy, T(0)); + } + } + } +}; +} // namespace matmul_detail + +using matmul_detail::MatMulKernel; +using matmul_detail::MatMulGradKernel; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc index 441543049faf96c8fbc14e84448a348c3287c1e2..9556fdf73151eeb947b4f1aee63e131ac6aa76e6 100644 --- a/paddle/operators/mean_op.cc +++ b/paddle/operators/mean_op.cc @@ -21,7 +21,6 @@ class MeanOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of MeanOp should not be null."); @@ -46,7 +45,6 @@ class MeanGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index d7fd2f901b766c3105a5ea8847fb8a3fe456a945..f7943e99acc5975d077f2319b6f678cfc693c1f3 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -25,7 +25,6 @@ class MinusOp : public framework::OperatorWithKernel { const framework::AttributeMap &attrs) : OperatorWithKernel(type, inputs, outputs, attrs) {} - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of MinusOp should not be null."); diff --git a/paddle/operators/modified_huber_loss_op.cc b/paddle/operators/modified_huber_loss_op.cc index 6522327fdcf646a60df76909d049341b5a9021c9..7b9e9528952d552a69ffe6a628672901c5c1a7fd 100644 --- a/paddle/operators/modified_huber_loss_op.cc +++ b/paddle/operators/modified_huber_loss_op.cc @@ -21,7 +21,6 @@ class ModifiedHuberLossOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "X must be initialized."); PADDLE_ENFORCE(ctx->HasInput("Y"), "Y must be initialized."); @@ -73,7 +72,6 @@ class ModifiedHuberLossGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "X must be initialized."); PADDLE_ENFORCE(ctx->HasInput("Y"), "Y must be initialized."); diff --git a/paddle/operators/momentum_op.cc b/paddle/operators/momentum_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..2d4d6f13720f0e6888edbddcb3243116506227ba --- /dev/null +++ b/paddle/operators/momentum_op.cc @@ -0,0 +1,99 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/momentum_op.h" + +namespace paddle { +namespace operators { + +class MomentumOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Param"), + "Input(param) of Momentum should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Grad"), + "Input(grad) of Momentum should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Velocity"), + "Input(velocity) of Momentum should not be null."); + PADDLE_ENFORCE(ctx->HasInput("LearningRate"), + "Input(LearningRate) of Momentum should not be null."); + + PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), + "Output(ParamOut) of Momentum should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("VelocityOut"), + "Output(VelocityOut) of Momentum should not be null."); + + auto param_dim = ctx->GetInputDim("Param"); + PADDLE_ENFORCE_EQ( + param_dim, ctx->GetInputDim("Grad"), + "Param and Grad input of MomentumOp should have the same dimension."); + PADDLE_ENFORCE_EQ( + param_dim, ctx->GetInputDim("Velocity"), + "Param and Velocity of MomentumOp should have the same dimension."); + PADDLE_ENFORCE_EQ(framework::product(ctx->GetInputDim("LearningRate")), 1, + "Learning_rate should be a scalar"); + + ctx->SetOutputDim("ParamOut", param_dim); + ctx->SetOutputDim("VelocityOut", param_dim); + } +}; + +class MomentumOpMaker : public framework::OpProtoAndCheckerMaker { + public: + MomentumOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Param", + "(Tensor, default Tensor) " + "Input parameter that has to be updated"); + AddInput("Grad", + "(Tensor, default Tensor) " + "Input gradient of the parameter"); + AddInput("Velocity", + "(Tensor, default Tensor) " + "Input velocity (corresponding to the parameter) " + "that has to be updated"); + AddInput("LearningRate", + "(Tensor, default Tensor) " + "Input learning rate"); + + AddOutput("ParamOut", "(Tensor) Output updated parameter"); + AddOutput("VelocityOut", "(Tensor) Output updated velocity"); + + AddAttr("mu", "(float) Momentum coefficient"); + AddAttr("useNesterov", "(bool) Use Nesterov Momentum") + .SetDefault(false); + AddComment(R"DOC( + +Momentum Algorithm with a flag for Nesterov Momentum (momentum). + +velocity = mu * velocity + gradient +if (use_nesterov): + param = param - gradient * learning_rate + mu * velocity * learning_rate +else: + param = param - learning_rate * velocity + +)DOC"); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(momentum, ops::MomentumOp, ops::MomentumOpMaker); +REGISTER_OP_CPU_KERNEL( + momentum, ops::MomentumOpKernel); diff --git a/paddle/operators/momentum_op.cu b/paddle/operators/momentum_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..efc24e795e05951024009f0b3258769c352df344 --- /dev/null +++ b/paddle/operators/momentum_op.cu @@ -0,0 +1,20 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/momentum_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL( + momentum, ops::MomentumOpKernel); diff --git a/paddle/operators/momentum_op.h b/paddle/operators/momentum_op.h new file mode 100644 index 0000000000000000000000000000000000000000..e6d6d1da3df9f7e43a93fcc2e12658a01a491f81 --- /dev/null +++ b/paddle/operators/momentum_op.h @@ -0,0 +1,62 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +template +class MomentumOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto param_out = ctx.Output("ParamOut"); + auto velocity_out = ctx.Output("VelocityOut"); + auto param = ctx.Input("Param"); + auto velocity = ctx.Input("Velocity"); + auto grad = ctx.Input("Grad"); + auto learning_rate = ctx.Input("LearningRate"); + + param_out->mutable_data(ctx.GetPlace()); + velocity_out->mutable_data(ctx.GetPlace()); + + float mu = ctx.Attr("mu"); + bool use_nesterov = ctx.Attr("useNesterov"); + + auto p_out = framework::EigenVector::Flatten(*param_out); + auto v_out = framework::EigenVector::Flatten(*velocity_out); + + auto p = framework::EigenVector::Flatten(*param); + auto v = framework::EigenVector::Flatten(*velocity); + auto g = framework::EigenVector::Flatten(*grad); + auto lr = framework::EigenVector::Flatten(*learning_rate); + + auto place = ctx.GetEigenDevice(); + + Eigen::DSizes grad_dsize(grad->numel()); + + v_out.device(place) = v * mu + g; + if (use_nesterov) { + p_out.device(place) = p - g * lr.broadcast(grad_dsize) + + v_out * mu * lr.broadcast(grad_dsize); + } else { + p_out.device(place) = p - lr.broadcast(grad_dsize) * v_out; + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc index ec0683d8875a945746b676a1f859e614af557441..065800f250d8b35a626060bac271e1bce6bb784b 100644 --- a/paddle/operators/mul_op.cc +++ b/paddle/operators/mul_op.cc @@ -23,7 +23,6 @@ class MulOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of MulOp should not be null."); 
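The momentum kernel above implements the update spelled out in the DOC string of `momentum_op.cc`. A scalar, framework-free sketch of that rule (illustrative only, not Paddle code):

```cpp
// Scalar sketch of the momentum update documented above; not Paddle code.
#include <cstdio>

int main() {
  double param = 1.0, velocity = 0.0;
  const double grad = 0.2, lr = 0.1, mu = 0.9;
  const bool use_nesterov = false;

  velocity = mu * velocity + grad;                    // velocity update
  if (use_nesterov) {
    // Nesterov variant, exactly as written in the DOC string above.
    param = param - grad * lr + mu * velocity * lr;
  } else {
    param = param - lr * velocity;                    // plain momentum
  }
  std::printf("param=%g velocity=%g\n", param, velocity);
  return 0;
}
```

With `useNesterov` unset the parameter simply moves against the accumulated velocity; the Nesterov branch combines the raw gradient with the freshly updated velocity, matching the Eigen expressions in `MomentumOpKernel`.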
PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) of MulOp should not be null."); @@ -96,7 +95,6 @@ class MulOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null"); @@ -106,10 +104,10 @@ class MulOpGrad : public framework::OperatorWithKernel { auto y_dims = ctx->GetInputDim("Y"); auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); - auto x_mat_dims = - framework::flatten_to_2d(x_dims, Attr("x_num_col_dims")); - auto y_mat_dims = - framework::flatten_to_2d(y_dims, Attr("y_num_col_dims")); + auto x_mat_dims = framework::flatten_to_2d( + x_dims, ctx->Attrs().Get("x_num_col_dims")); + auto y_mat_dims = framework::flatten_to_2d( + y_dims, ctx->Attrs().Get("y_num_col_dims")); PADDLE_ENFORCE_EQ( x_mat_dims[0], out_dims[0], diff --git a/paddle/operators/mul_op.h b/paddle/operators/mul_op.h index 684b1ea0c0c8ddabc9809cc05ed985e0cc250955..3f3e77595b701d428a728fc4727dd3ff4abee45f 100644 --- a/paddle/operators/mul_op.h +++ b/paddle/operators/mul_op.h @@ -36,12 +36,12 @@ class MulKernel : public framework::OpKernel { Tensor* z = context.Output("Out"); const Tensor x_matrix = x->dims().size() > 2 - ? framework::ReshapeToMatrix( + ? framework::ReshapeToMatrix( *x, context.template Attr("x_num_col_dims")) : *x; const Tensor y_matrix = y->dims().size() > 2 - ? framework::ReshapeToMatrix( + ? framework::ReshapeToMatrix( *y, context.template Attr("y_num_col_dims")) : *y; @@ -59,30 +59,30 @@ class MulGradKernel : public framework::OpKernel { int y_num_col_dims = ctx.template Attr("y_num_col_dims"); const Tensor* x = ctx.Input("X"); const Tensor* y = ctx.Input("Y"); - const Tensor x_matrix = - x->dims().size() > 2 ? framework::ReshapeToMatrix(*x, x_num_col_dims) - : *x; - const Tensor y_matrix = - y->dims().size() > 2 ? framework::ReshapeToMatrix(*y, y_num_col_dims) - : *y; + const Tensor x_matrix = x->dims().size() > 2 + ? framework::ReshapeToMatrix(*x, x_num_col_dims) + : *x; + const Tensor y_matrix = y->dims().size() > 2 + ? framework::ReshapeToMatrix(*y, y_num_col_dims) + : *y; const Tensor* dout = ctx.Input(framework::GradVarName("Out")); Tensor* dx = ctx.Output(framework::GradVarName("X")); Tensor* dy = ctx.Output(framework::GradVarName("Y")); if (dx) { dx->mutable_data(ctx.GetPlace()); - Tensor dx_matrix = dx->dims().size() > 2 ? framework::ReshapeToMatrix( - *dx, x_num_col_dims) - : *dx; + Tensor dx_matrix = dx->dims().size() > 2 + ? framework::ReshapeToMatrix(*dx, x_num_col_dims) + : *dx; // dx = dout * y'. dx: M x K, dout : M x N, y : K x N math::matmul(ctx.device_context(), *dout, false, y_matrix, true, 1, &dx_matrix, 0); } if (dy) { dy->mutable_data(ctx.GetPlace()); - Tensor dy_matrix = dy->dims().size() > 2 ? framework::ReshapeToMatrix( - *dy, y_num_col_dims) - : *dy; + Tensor dy_matrix = dy->dims().size() > 2 + ? framework::ReshapeToMatrix(*dy, y_num_col_dims) + : *dy; // dy = x' * dout. 
dy K x N, dout : M x N, x : M x K math::matmul(ctx.device_context(), x_matrix, true, *dout, false, 1, &dy_matrix, 0); diff --git a/paddle/operators/multiplex_op.cc b/paddle/operators/multiplex_op.cc index 051051b051961c6da064bd9319460b3f41cea3e8..4d86769026e4b3e3040bdcb3bc6dc2edea58b4b0 100644 --- a/paddle/operators/multiplex_op.cc +++ b/paddle/operators/multiplex_op.cc @@ -23,7 +23,6 @@ class MultiplexOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Ids"), "Input(Ids) shouldn't be null."); PADDLE_ENFORCE(!ctx->Inputs("X").empty(), @@ -51,6 +50,7 @@ class MultiplexOp : public framework::OperatorWithKernel { ctx->SetOutputDim("Out", in_dim); } + protected: framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { return framework::ToDataType(ctx.MultiInput("X")[0]->type()); @@ -89,7 +89,6 @@ class MultiplexGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(!ctx->Inputs("X").empty(), "Input(X) should not be null."); PADDLE_ENFORCE(!ctx->Outputs(framework::GradVarName("X")).empty(), @@ -105,6 +104,7 @@ class MultiplexGradOp : public framework::OperatorWithKernel { ctx->SetOutputsDim(framework::GradVarName("X"), d_ins); } + protected: framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { return framework::ToDataType(ctx.MultiInput("X")[0]->type()); diff --git a/paddle/operators/multiplex_op.cu b/paddle/operators/multiplex_op.cu index 10cb0e005f483abe91b4ee862ea5b48305ec08c7..143a14fef5783f8ed085d4c4ce2afb3b190d0600 100644 --- a/paddle/operators/multiplex_op.cu +++ b/paddle/operators/multiplex_op.cu @@ -33,8 +33,7 @@ class MultiplexGPUKernel : public framework::OpKernel { auto cols = ins[0]->numel() / rows; // copy index to cpu Tensor index_t_cpu; - index_t_cpu.CopyFrom(*ids, platform::CPUPlace(), - ctx.device_context()); + index_t_cpu.CopyFrom(*ids, platform::CPUPlace(), ctx.device_context()); auto* index = index_t_cpu.data(); auto stream = reinterpret_cast( ctx.device_context()) @@ -71,8 +70,7 @@ class MultiplexGradGPUKernel : public framework::OpKernel { auto cols = ins[0]->numel() / rows; // copy index to cpu Tensor index_t_cpu; - index_t_cpu.CopyFrom(*ids, platform::CPUPlace(), - ctx.device_context()); + index_t_cpu.CopyFrom(*ids, platform::CPUPlace(), ctx.device_context()); auto* index = index_t_cpu.data(); auto stream = reinterpret_cast( diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md index 379385dc5d914101c7b5c9494f9383b6cf6a9b79..5a216907950100070ba57176c382eb659effb293 100644 --- a/paddle/operators/name_convention.md +++ b/paddle/operators/name_convention.md @@ -11,7 +11,7 @@ When defining an operator in Paddle, a corresponding [OpProtoMaker](https://gith - If an operator's Input/Output are tensors in math, not match to any meaningful words, input name should starts from `X`. e.g. `X`, `Y`, and output name should starts from `Out`. e.g. `Out`. This rule intends making operators which have few inputs/outputs unified. - Attribute. - - Attribute name follows the **camelCase**. e.g. `x`, `y`, `axis`, `rowwiseMatrix`. Also, attribute name prefers to meaningful English words. + - Attribute name follows the **snake_case**. e.g. 
`x`, `y`, `axis`, `rowwise_matrix`. Also, attribute name prefers to meaningful English words. - Comments. - Input/Output/Attr comment follow the format of **(type,default value) usage**, corresponding to which type it can be and how it will be used in the operator. e.g. Attribute in Accumulator`"gamma" `,`(float, default 1.0) Accumulation multiplier`. diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 2f26ada85e477a6e5cd24c881f6369548331e8d0..73a0b8baff530840ddd0d4c65cd4c060ab18e401 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -23,7 +23,6 @@ class PadOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of PadOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), @@ -97,7 +96,6 @@ class PadOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc index c6d9aae13322ebcc9ebbe394d9b9831bd67fe632..a326839c0f9ad14b8fd2aac596f21c7dd2539cd7 100644 --- a/paddle/operators/pool_op.cc +++ b/paddle/operators/pool_op.cc @@ -29,7 +29,7 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const { auto in_x_dims = ctx->GetInputDim("X"); - std::string pooling_type = ctx->Attrs().Get("poolingType"); + std::string pooling_type = ctx->Attrs().Get("pooling_type"); std::vector ksize = ctx->Attrs().Get>("ksize"); std::vector strides = ctx->Attrs().Get>("strides"); std::vector paddings = ctx->Attrs().Get>("paddings"); @@ -37,7 +37,7 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const { PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5, "Pooling intput should be 4-D or 5-D tensor."); - if (ctx->Attrs().Get("globalPooling")) { + if (ctx->Attrs().Get("global_pooling")) { ksize.resize(static_cast(in_x_dims.size()) - 2); for (size_t i = 0; i < ksize.size(); ++i) ksize[i] = static_cast(in_x_dims[i + 2]); @@ -80,23 +80,23 @@ Pool2dOpMaker::Pool2dOpMaker(framework::OpProto *proto, "the number of channels, H and W is the height and " "width of feature."); - AddAttr("poolingType", - "PoolingType of pooling operator." + AddAttr("pooling_type", + "Pooling_type of pooling operator." "Str constant equal to 'max' or 'avg'.") .InEnum({"max", "avg"}); AddAttr>( "ksize", "The pooling window size(height, width) of pooling operator." - "If globalPooling = true, ksize is ignored and need not be " + "If global_pooling = true, ksize is ignored and need not be " "specified."); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) AddAttr( - "globalPooling", - "Whether to use the globalPooling." + "global_pooling", + "Whether to use the global_pooling." "Bool constant equal to false or true." "Default false." - "If globalPooling = true, ksize is ignored and need not be specified.") + "If global_pooling = true, ksize is ignored and need not be specified.") .SetDefault(false); AddAttr>("strides", "The strides(height, width) of pooling window." 
@@ -146,7 +146,7 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto, "the number of channels, D, H and W is the depth, height and " "width of feature."); - AddAttr("poolingType", + AddAttr("pooling_type", "PoolingType of pooling operator." "Str constant equal to 'max' or 'avg'.") .InEnum({"max", "avg"}); @@ -154,15 +154,15 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto, AddAttr>( "ksize", "The pooling window size(depth, height, width) of pooling operator." - "If globalPooling = true, ksize is ignored and need not be " + "If global_pooling = true, ksize is ignored and need not be " "specified."); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) AddAttr( - "globalPooling", - "Whether to use the globalPooling." + "global_pooling", + "Whether to use the global_pooling." "Bool constant equal to false or true." "Default false." - "If globalPooling = true, ksize is ignored and need not be specified.") + "If global_pooling = true, ksize is ignored and need not be specified.") .SetDefault(false); AddAttr>("strides", "Strides(depth, height, width) of pooling operator." diff --git a/paddle/operators/pool_op.h b/paddle/operators/pool_op.h index e5016d573dde0a9c8a90cddf14f68706b69fade5..ada956501918cc92a2d30ebb8d0c42453acd2839 100644 --- a/paddle/operators/pool_op.h +++ b/paddle/operators/pool_op.h @@ -28,7 +28,6 @@ class PoolOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override; }; @@ -36,7 +35,6 @@ class PoolOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override; }; @@ -59,11 +57,11 @@ class PoolKernel : public framework::OpKernel { const Tensor* in_x = context.Input("X"); Tensor* out = context.Output("Out"); - std::string pooling_type = context.Attr("poolingType"); + std::string pooling_type = context.Attr("pooling_type"); std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); - if (context.Attr("globalPooling")) { + if (context.Attr("global_pooling")) { for (size_t i = 0; i < ksize.size(); ++i) { ksize[i] = static_cast(in_x->dims()[i + 2]); } @@ -119,12 +117,12 @@ class PoolGradKernel : public framework::OpKernel { context.Input(framework::GradVarName("Out")); Tensor* in_x_grad = context.Output(framework::GradVarName("X")); - std::string pooling_type = context.Attr("poolingType"); + std::string pooling_type = context.Attr("pooling_type"); std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); - if (context.Attr("globalPooling")) { + if (context.Attr("global_pooling")) { for (size_t i = 0; i < ksize.size(); ++i) ksize[i] = static_cast(in_x->dims()[i + 2]); } diff --git a/paddle/operators/pool_with_index_op.cc b/paddle/operators/pool_with_index_op.cc index 005ee886934b193064cc739638398b3535db9274..29d0322a27b71fe8d335703e228969c084f5139f 100644 --- a/paddle/operators/pool_with_index_op.cc +++ b/paddle/operators/pool_with_index_op.cc @@ -27,7 +27,6 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "X(Input) 
of Pooling should not be null."); @@ -45,7 +44,7 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5, "Pooling intput should be 4-D or 5-D tensor."); - if (ctx->Attrs().Get("globalPooling")) { + if (ctx->Attrs().Get("global_pooling")) { ksize.resize(static_cast(in_x_dims.size()) - 2); for (size_t i = 0; i < ksize.size(); ++i) ksize[i] = static_cast(in_x_dims[i + 2]); @@ -72,7 +71,6 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Mask"), "Input(Mask) must not be null."); PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null."); @@ -108,15 +106,15 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr>( "ksize", "The pooling window size(height, width) of pooling operator." - "If globalPooling = true, ksize is ignored and need not be " + "If global_pooling = true, ksize is ignored and need not be " "specified."); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) AddAttr( - "globalPooling", - "Whether to use the globalPooling." + "global_pooling", + "Whether to use the global_pooling." "Bool constant equal to false or true." "Default false." - "If globalPooling = true, ksize is ignored and need not be specified.") + "If global_pooling = true, ksize is ignored and need not be specified.") .SetDefault(false); AddAttr>("strides", "The strides(height, width) of pooling window." @@ -179,15 +177,15 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr>( "ksize", "The pooling window size(depth, height, width) of pooling operator." - "If globalPooling = true, ksize is ignored and need not be " + "If global_pooling = true, ksize is ignored and need not be " "specified."); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) AddAttr( - "globalPooling", - "Whether to use the globalPooling." + "global_pooling", + "Whether to use the global_pooling." "Bool constant equal to false or true." "Default false." 
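For reference, the `global_pooling` attribute handled above simply overrides `ksize` with the input's spatial dimensions; a small standalone sketch of that behaviour (variable names are illustrative, not Paddle code):

```cpp
// Sketch of the global_pooling behaviour described above: when the flag is
// set, ksize is overwritten with the input's spatial dimensions (NCHW here).
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> in_x_dims = {8, 16, 32, 32};  // NCHW input
  std::vector<int> ksize = {3, 3};               // ignored when global_pooling
  const bool global_pooling = true;

  if (global_pooling) {
    ksize.resize(in_x_dims.size() - 2);
    for (size_t i = 0; i < ksize.size(); ++i) ksize[i] = in_x_dims[i + 2];
  }
  std::printf("ksize = %d x %d\n", ksize[0], ksize[1]);  // prints 32 x 32
  return 0;
}
```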
- "If globalPooling = true, ksize is ignored and need not be specified.") + "If global_pooling = true, ksize is ignored and need not be specified.") .SetDefault(false); AddAttr>( "strides", diff --git a/paddle/operators/pool_with_index_op.h b/paddle/operators/pool_with_index_op.h index 01b961ca8295f723bea7335e43ec5ab100dfc65c..455c453efcd15bf0150bbd3de83d50729f338b4b 100644 --- a/paddle/operators/pool_with_index_op.h +++ b/paddle/operators/pool_with_index_op.h @@ -35,7 +35,7 @@ class MaxPoolWithIndexKernel : public framework::OpKernel { std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); - if (context.Attr("globalPooling")) { + if (context.Attr("global_pooling")) { for (size_t i = 0; i < ksize.size(); ++i) { ksize[i] = static_cast(in_x->dims()[i + 2]); } @@ -70,7 +70,7 @@ class MaxPoolWithIndexGradKernel : public framework::OpKernel { std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); - if (context.Attr("globalPooling")) { + if (context.Attr("global_pooling")) { for (size_t i = 0; i < ksize.size(); ++i) { ksize[i] = static_cast(in_x_grad->dims()[i + 2]); } diff --git a/paddle/operators/prelu_op.cc b/paddle/operators/prelu_op.cc index 166fe2682422c61a264c88b8683752ee323e940e..eef2e34eaacf59b9adacb343e9a0091ebabeaea3 100644 --- a/paddle/operators/prelu_op.cc +++ b/paddle/operators/prelu_op.cc @@ -25,7 +25,6 @@ class PReluOp : public framework::OperatorWithKernel { const framework::AttributeMap &attrs) : OperatorWithKernel(type, inputs, outputs, attrs) {} - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasInput("Alpha"), "Input(Alpha) should not be null"); @@ -62,7 +61,6 @@ class PReluGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null."); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), diff --git a/paddle/operators/proximal_gd_op.cc b/paddle/operators/proximal_gd_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..e4b014b9f5866ec0791cba9b3998b1734066eeeb --- /dev/null +++ b/paddle/operators/proximal_gd_op.cc @@ -0,0 +1,93 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/proximal_gd_op.h" + +namespace paddle { +namespace operators { + +class ProximalGDOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Param"), + "Input(Param) of ProximalGDOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Grad"), + "Input(Grad) of ProximalGDOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("LearningRate"), + "Input(LearningRate) of ProximalGDOp should not be null."); + + PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), + "Output(ParamOut) of ProximalGDOp should not be null."); + + auto param_dim = ctx->GetInputDim("Param"); + PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"), + "Two input of ProximalGD Op's dimension must be same."); + + auto lr_dim = ctx->GetInputDim("LearningRate"); + PADDLE_ENFORCE_EQ(framework::product(lr_dim), 1, + "Learning Rate should be a scalar."); + + ctx->SetOutputDim("ParamOut", param_dim); + } +}; + +class ProximalGDOpMaker : public framework::OpProtoAndCheckerMaker { + public: + ProximalGDOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Param", + "(Tensor, default Tensor) " + "Input parameter value that has to be updated."); + AddInput("Grad", + "(Tensor, default Tensor) " + "Input gradient of the parameter."); + AddInput("LearningRate", + "(Tensor, default Tensor) " + "The learning rate should be a tensor of size 1."); + + AddOutput("ParamOut", "(Tensor) Output updated parameter value."); + + AddAttr("l1", + "(float, default 0.0) " + "L1 regularization strength.") + .SetDefault(0.0f); + AddAttr("l2", + "(float, default 0.0)" + "L2 regularization strength.") + .SetDefault(0.0f); + AddComment(R"DOC( + +Optimizer that implements the proximal gradient descent algorithm. + +prox_param = param - learning_rate * grad +param = sign(prox_param) / (1 + learning_rate * l2) * + max { |prox_param| - learning_rate * l1 , 0 } + +The paper that proposed Proximal Gradient Descent: +(http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting.pdf) +)DOC"); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(proximal_gd, ops::ProximalGDOp, + ops::ProximalGDOpMaker); +REGISTER_OP_CPU_KERNEL( + proximal_gd, ops::ProximalGDOpKernel); diff --git a/paddle/operators/proximal_gd_op.cu b/paddle/operators/proximal_gd_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..26f4ebaa0f43620fee7ece2d71755be94a0e01a5 --- /dev/null +++ b/paddle/operators/proximal_gd_op.cu @@ -0,0 +1,19 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +You may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
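A scalar, framework-free sketch of the proximal gradient descent update documented in `proximal_gd_op.cc` above (illustrative only; the variable names are not part of this patch):

```cpp
// Scalar sketch of the proximal gradient descent update documented above;
// not Paddle code.
#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  double param = 0.5;
  const double grad = 2.0, lr = 0.1, l1 = 0.05, l2 = 0.01;

  const double prox_param = param - lr * grad;  // plain SGD step
  const double shrunk =
      std::max(std::fabs(prox_param) - lr * l1, 0.0);        // L1 soft-threshold
  const double sign = (prox_param > 0) - (prox_param < 0);
  param = sign / (1.0 + lr * l2) * shrunk;                   // L2 shrinkage
  std::printf("param=%g\n", param);
  return 0;
}
```

The `l1` term acts as a soft threshold on the plain SGD step and the `l2` term shrinks the result, mirroring the Eigen expression in `ProximalGDOpKernel`.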
*/ + +#define EIGEN_USE_GPU +#include "paddle/operators/proximal_gd_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL( + proximal_gd, ops::ProximalGDOpKernel); diff --git a/paddle/operators/proximal_gd_op.h b/paddle/operators/proximal_gd_op.h new file mode 100644 index 0000000000000000000000000000000000000000..bebda0204173ec5c3ec9a7a9da6fb623171f4cea --- /dev/null +++ b/paddle/operators/proximal_gd_op.h @@ -0,0 +1,64 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +template +using EigenVector = framework::EigenVector; + +template +class ProximalGDOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* param_out = ctx.Output("ParamOut"); + + param_out->mutable_data(ctx.GetPlace()); + + auto grad = ctx.Input("Grad"); + + auto l1 = static_cast(ctx.Attr("l1")); + auto l2 = static_cast(ctx.Attr("l2")); + + auto p = EigenVector::Flatten(*ctx.Input("Param")); + auto g = EigenVector::Flatten(*grad); + auto lr = EigenVector::Flatten(*ctx.Input("LearningRate")); + + auto p_out = EigenVector::Flatten(*param_out); + auto place = ctx.GetEigenDevice(); + + Eigen::DSizes grad_dsize(grad->numel()); + + auto prox_param = p - lr.broadcast(grad_dsize) * g; + if (l1 > 0) { + p_out.device(place) = + prox_param.sign() * + (((prox_param.abs() - (lr * l1).broadcast(grad_dsize)) + .cwiseMax(T(0.0))) / + (1.0 + (lr * l2).broadcast(grad_dsize))); + } else { + p_out.device(place) = + prox_param / (1.0 + (lr * l2).broadcast(grad_dsize)); + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/rank_loss_op.cc b/paddle/operators/rank_loss_op.cc index e0abbc4db1723e297c313d50fc1f9cbb77acb9f3..17ef2b1d01bd37abf2ece97ed0a307c2f1bf7e6f 100644 --- a/paddle/operators/rank_loss_op.cc +++ b/paddle/operators/rank_loss_op.cc @@ -24,7 +24,6 @@ class RankLossOp : public framework::OperatorWithKernel { const framework::AttributeMap &attrs) : OperatorWithKernel(type, inputs, outputs, attrs) {} - protected: void InferShape(framework::InferShapeContext *ctx) const override { // input check PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null"); @@ -89,7 +88,6 @@ class RankLossGradOp : public framework::OperatorWithKernel { const framework::AttributeMap &attrs) : OperatorWithKernel(type, inputs, outputs, attrs) {} - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null."); PADDLE_ENFORCE(ctx->HasInput("Left"), "Input(Left) shouldn't be null."); diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index e3d08378c2f29fa5d84c24ae7cebfcb0e7a53b25..40303e3adf4db7e8336ed72667fe69afa56c3f69 100644 --- a/paddle/operators/recurrent_op.cc +++ 
b/paddle/operators/recurrent_op.cc @@ -42,7 +42,7 @@ void RecurrentAlgorithm::Run(const Scope& scope, for (size_t step_id = 0; step_id < seq_len; step_id++) { if (step_id > 0) { - rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1); + rnn::LinkMemories(step_scopes, arg_->states, step_id, -1); } (*stepnet_)->Run(*step_scopes[step_id], dev_ctx); } @@ -59,7 +59,8 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope, // Now all variables in scope must be created outside of op. PADDLE_ENFORCE_NOT_NULL(stepnet_); - PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(), "stepnet_ op has no outputs"); + PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(), + "step_unit_ op has no outputs"); if (seq_len > step_scopes->size()) { for (size_t i = step_scopes->size(); i < seq_len; ++i) { @@ -86,7 +87,7 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope, } void RecurrentAlgorithm::InitMemories(Scope* step_scope) const { - for (auto& attr : arg_->memories) { + for (auto& attr : arg_->states) { auto* pre_mem = step_scope->Var(attr.pre_var)->GetMutable(); PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr, "memory [%s]'s boot variable [%s] not exists", attr.var, @@ -95,17 +96,17 @@ void RecurrentAlgorithm::InitMemories(Scope* step_scope) const { step_scope->FindVar(attr.boot_var)->GetMutable(); pre_mem->Resize(boot_mem->dims()); PADDLE_ENFORCE_EQ(pre_mem->dims().size(), 2); - pre_mem->ShareDataWith(*boot_mem); + pre_mem->ShareDataWith(*boot_mem); } } const rnn::ArgumentName RecurrentOp::kArgName{ - "step_net", "step_scopes", "inlinks", "outlinks", - "memories", "pre_memories", "boot_memories"}; + "step_net", "step_scopes", "inputs", "outputs", + "states", "ex_states", "initial_states"}; const rnn::ArgumentName RecurrentGradientOp::kArgName{ - "step_net", "step_scopes@GRAD", "outlinks@GRAD", "inlinks@GRAD", - "memories", "pre_memories", "boot_memories@GRAD"}; + "step_net", "step_scopes@GRAD", "outputs@GRAD", "inputs@GRAD", + "states", "ex_states", "initial_states@GRAD"}; RecurrentOp::RecurrentOp(const std::string& type, const framework::VariableNameMap& inputs, @@ -127,7 +128,7 @@ class RecurrentAlgorithmProtoAndCheckerMaker AddInput(name.inlinks, "the inputs that need to be segmented for each step.") .AsDuplicable(); - AddInput(name.boot_memories, "variables to initialize memories.") + AddInput(name.initial_states, "variables to initialize states.") .AsDuplicable(); AddOutput(name.outlinks, "the outputs that need to concated for all steps.") @@ -135,9 +136,8 @@ class RecurrentAlgorithmProtoAndCheckerMaker AddOutput(name.step_scopes, "step scopes"); // Attributes stored in AttributeMap - AddAttr>(name.pre_memories, - "names of pre-memories"); - AddAttr>(name.memories, "names of memories"); + AddAttr>(name.ex_states, "names of pre-states"); + AddAttr>(name.states, "names of states"); AddComment("This is a recurrent group operator."); } @@ -152,7 +152,7 @@ void RecurrentGradientAlgorithm::Run( rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len); for (int step_id = seq_len - 1; step_id >= 0; --step_id) { if (static_cast(step_id) != seq_len - 1) { - rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1); + rnn::LinkMemories(step_scopes, arg_->states, step_id, 1); } (*stepnet_)->Run(*step_scopes[step_id], dev_ctx); } @@ -162,7 +162,7 @@ void RecurrentGradientAlgorithm::Run( void RecurrentGradientAlgorithm::LinkBootMemoryGradients( Scope* step_scope) const { - for (auto& attr : arg_->memories) { + for (auto& attr : arg_->states) { PADDLE_ENFORCE(step_scope->FindVar(attr.var) != 
nullptr, "memory variable [%s] does not exists", attr.var); PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr, @@ -171,7 +171,7 @@ void RecurrentGradientAlgorithm::LinkBootMemoryGradients( auto* boot_mem_grad = step_scope->Var(attr.boot_var)->GetMutable(); boot_mem_grad->Resize(mem_grad->dims()); - boot_mem_grad->ShareDataWith(*mem_grad); + boot_mem_grad->ShareDataWith(*mem_grad); } } diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc index 005f88b57cb47b2c00f9a81da8afa49439665a22..46f66a1370a35593d1911fc9b3ce76beb38c0956 100644 --- a/paddle/operators/reduce_op.cc +++ b/paddle/operators/reduce_op.cc @@ -13,6 +13,7 @@ limitations under the License. */ #include "paddle/operators/reduce_op.h" +#include "paddle/operators/net_op.h" namespace paddle { namespace operators { @@ -23,7 +24,6 @@ class ReduceOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of ReduceOp should not be null."); @@ -57,7 +57,6 @@ class ReduceGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null."); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), @@ -161,6 +160,66 @@ class ReduceMinOpMaker : public ReduceOpMaker { } }; +class NormOp : public NetOp { + public: + NormOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : NetOp(type, inputs, outputs, attrs) { + PADDLE_ENFORCE_NE(Input("X"), framework::kEmptyVarName, + "Input(X) of NormOp should not be null."); + PADDLE_ENFORCE_NE(Output("AbsOut"), framework::kEmptyVarName, + "Output(AbsOut) of NormOp should not be null."); + PADDLE_ENFORCE_NE(Output("PowOut"), framework::kEmptyVarName, + "Output(PowOut) of NormOp should not be null."); + PADDLE_ENFORCE_NE(Output("SumOut"), framework::kEmptyVarName, + "Output(SumOut) of NormOp should not be null."); + PADDLE_ENFORCE_NE(Output("Out"), framework::kEmptyVarName, + "Output(Out) of NormOp should not be null."); + auto dim = Attr("dim"); + auto keep_dim = Attr("keep_dim"); + auto p = Attr("p"); + PADDLE_ENFORCE_GT(p, 0, "Order of the norm should be positive."); + AppendOp(framework::OpRegistry::CreateOp("abs", {{"X", {Input("X")}}}, + {{"Y", {Output("AbsOut")}}}, {})); + AppendOp(framework::OpRegistry::CreateOp("pow", {{"X", {Output("AbsOut")}}}, + {{"Y", {Output("PowOut")}}}, + {{"factor", p}})); + framework::AttributeMap sum_attr; + sum_attr["dim"] = dim; + sum_attr["keep_dim"] = keep_dim; + AppendOp(framework::OpRegistry::CreateOp( + "reduce_sum", {{"X", {Output("PowOut")}}}, + {{"Out", {Output("SumOut")}}}, sum_attr)); + AppendOp(framework::OpRegistry::CreateOp( + "pow", {{"X", {Output("SumOut")}}}, {{"Y", {Output("Out")}}}, + {{"factor", static_cast(1. 
/ p)}})); + CompleteAddOp(false); + } +}; + +class NormOpMaker : public ReduceOpMaker { + public: + NormOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : ReduceOpMaker(proto, op_checker) { + AddOutput("AbsOut", + "(Tensor) The intermediate output of Norm operator, " + "saving the absolute value of the input tensor X.") + .AsIntermediate(); + AddOutput("PowOut", + "(Tensor) The intermediate output of Norm operator, " + "saving the p-th power of the output tensor AbsOut.") + .AsIntermediate(); + AddOutput("SumOut", + "(Tensor) the intermediate output of Norm operator, " + "saving the sum of PowOut reduced on the given dimension.") + .AsIntermediate(); + AddAttr("p", "(float, default 2) The order of Norm.").SetDefault(2); + SetComment("Norm", "vector p-norm"); + AddComment(comment_); + } +}; + } // namespace operators } // namespace paddle @@ -178,6 +237,8 @@ REGISTER_OP(reduce_max, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_max_grad, REGISTER_OP(reduce_min, ops::ReduceOp, ops::ReduceMinOpMaker, reduce_min_grad, ops::ReduceGradOp); +REGISTER_OP_WITHOUT_GRADIENT(norm, ops::NormOp, ops::NormOpMaker); + #define REGISTER_REDUCE_CPU_KERNEL(reduce_type, functor, grad_functor) \ REGISTER_OP_CPU_KERNEL( \ reduce_type, \ diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index 3cd54930a0919b7c6aad624b442eb15167b4c354..a8eb8d45eec214842ee756a260127b9d0aacb0f4 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -25,7 +25,6 @@ class ReshapeOp : public framework::OperatorWithKernel { const framework::AttributeMap &attrs) : OperatorWithKernel(type, inputs, outputs, attrs) {} - protected: void InferShape(framework::InferShapeContext *ctx) const override { // input check PADDLE_ENFORCE(ctx->HasInput("X"), @@ -93,7 +92,6 @@ class ReshapeGradOp : public framework::OperatorWithKernel { const framework::AttributeMap &attrs) : OperatorWithKernel(type, inputs, outputs, attrs) {} - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) shouldn't be null."); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), diff --git a/paddle/operators/reshape_op.h b/paddle/operators/reshape_op.h index 3ba4611458fda0aa2f234c29d27086cd6f5742cc..c89cdf8cab9f209667c5e09b521b8f6e30f202fd 100644 --- a/paddle/operators/reshape_op.h +++ b/paddle/operators/reshape_op.h @@ -33,7 +33,7 @@ class ReshapeKernel : public framework::OpKernel { std::transform(shape.begin(), shape.end(), shape_int64.begin(), [](int a) { return static_cast(a); }); auto out_dims = framework::make_ddim(shape_int64); - out->CopyFrom(*in, ctx.GetPlace(), ctx.device_context()); + out->CopyFrom(*in, ctx.GetPlace(), ctx.device_context()); out->Resize(out_dims); } }; @@ -47,7 +47,7 @@ class ReshapeGradKernel : public framework::OpKernel { d_x->mutable_data(ctx.GetPlace()); auto in_dims = d_x->dims(); - d_x->CopyFrom(*d_out, ctx.GetPlace(), ctx.device_context()); + d_x->CopyFrom(*d_out, ctx.GetPlace(), ctx.device_context()); d_x->Resize(in_dims); } }; diff --git a/paddle/operators/rmsprop_op.cc b/paddle/operators/rmsprop_op.cc index ada6f2bc3cd5de0424139144d8c0696158c319d9..fd5567a365c4c843de3b8aec7fa77164f16644a4 100644 --- a/paddle/operators/rmsprop_op.cc +++ b/paddle/operators/rmsprop_op.cc @@ -21,7 +21,6 @@ class RmspropOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { 
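The `NormOp` above is composed entirely of existing ops: abs, pow(p), reduce_sum along `dim`, and a final pow(1/p). A minimal standalone sketch of the same p-norm computation for a flat vector (plain C++, not Paddle code):

```cpp
// Sketch of the p-norm that NormOp above composes out of abs, pow,
// reduce_sum and pow(1/p); plain C++, not Paddle code.
#include <cmath>
#include <cstdio>
#include <vector>

double PNorm(const std::vector<double>& x, double p) {
  double sum = 0.0;
  for (double v : x) {
    sum += std::pow(std::fabs(v), p);  // abs -> pow -> reduce_sum
  }
  return std::pow(sum, 1.0 / p);       // final pow(1/p)
}

int main() {
  std::vector<double> x = {3.0, -4.0};
  std::printf("l2 norm = %g\n", PNorm(x, 2.0));  // prints 5
  return 0;
}
```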
PADDLE_ENFORCE(ctx->HasInput("Param"), "Input(Param) of RmspropOp should not be null."); diff --git a/paddle/operators/rnn/recurrent_op_utils.cc b/paddle/operators/rnn/recurrent_op_utils.cc index 30b8ddeb5bc4220e261a5c37ac195b0348fef936..ee61ea300c33722471189d06eb09f67a083d2a4d 100644 --- a/paddle/operators/rnn/recurrent_op_utils.cc +++ b/paddle/operators/rnn/recurrent_op_utils.cc @@ -36,14 +36,14 @@ void SegmentInputs(const std::vector& step_scopes, LoDTensor* input = input_var->GetMutable(); f::DDim dims = input->dims(); PADDLE_ENFORCE_EQ(static_cast(dims[0]), seq_len, - "all the inlinks be the same length"); + "all the inputs be the same length"); f::DDim step_dims = slice_ddim(dims, 1, dims.size()); for (size_t j = 0; j < seq_len; j++) { Tensor* step_input = step_scopes[j]->Var(inlinks[i])->GetMutable(); // The input of operators of each step is Tensor here. // Maybe need to modify Slice function. - *step_input = input->Slice(j, j + 1); + *step_input = input->Slice(j, j + 1); step_input->Resize(step_dims); } } @@ -71,14 +71,14 @@ void ConcatOutputs(const std::vector& step_scopes, step_scopes[j]->FindVar(outlinks[i])->GetMutable(); // TODO(luotao02) data type and platform::DeviceContext() should set // correctly - (output->Slice(j, j + 1)) - .CopyFrom(*step_output, platform::CPUPlace(), ctx); + (output->Slice(j, j + 1)) + .CopyFrom(*step_output, platform::CPUPlace(), ctx); } } } void LinkMemories(const std::vector& scopes, - const std::vector& memories, + const std::vector& memories, const size_t step_id, const int offset) { PADDLE_ENFORCE_LT(step_id, scopes.size(), "step [%d] is out of range of step scopes' size [%d]", @@ -95,7 +95,7 @@ void LinkMemories(const std::vector& scopes, auto* mem = scope->FindVar(attr.pre_var)->GetMutable(); auto* linked_mem = linked_scope->FindVar(attr.var)->GetMutable(); mem->Resize(linked_mem->dims()); - mem->ShareDataWith(*linked_mem); + mem->ShareDataWith(*linked_mem); } } @@ -106,26 +106,26 @@ void InitArgument(const ArgumentName& name, Argument* arg, arg->inlinks = op.Inputs(name.inlinks); arg->outlinks = op.Outputs(name.outlinks); - auto& boot_memories = - is_grad ? op.Outputs(name.boot_memories) : op.Inputs(name.boot_memories); + auto& boot_memories = is_grad ? 
op.Outputs(name.initial_states) + : op.Inputs(name.initial_states); // attributes - auto& memories = op.Attr>(name.memories); - auto& pre_memories = op.Attr>(name.pre_memories); + auto& memories = op.Attr>(name.states); + auto& pre_memories = op.Attr>(name.ex_states); PADDLE_ENFORCE(memories.size() == boot_memories.size(), - "the size of memories, boot_memories don't match:%d,%d", + "the size of states, initial_states don't match:%d,%d", memories.size(), boot_memories.size()); PADDLE_ENFORCE(pre_memories.size() == boot_memories.size(), - "the size of pre_memories, boot_memories don't match:%d,%d", + "the size of ex_states, initial_states don't match:%d,%d", pre_memories.size(), boot_memories.size()); - PADDLE_ENFORCE(memories.size() > 0, "more than 1 memories should be set"); + PADDLE_ENFORCE(memories.size() > 0, "more than 1 states should be set"); for (size_t i = 0; i < memories.size(); ++i) { - rnn::MemoryAttr mem_attr; + rnn::StateAttr mem_attr; mem_attr.var = memories[i]; mem_attr.pre_var = pre_memories[i]; mem_attr.boot_var = boot_memories[i]; - (arg->memories).push_back(mem_attr); + (arg->states).push_back(mem_attr); } } diff --git a/paddle/operators/rnn/recurrent_op_utils.h b/paddle/operators/rnn/recurrent_op_utils.h index fe173edb24ad015b9546546565027358f9b93476..fb0e158e07745d58c6211d33e385b324e492b95e 100644 --- a/paddle/operators/rnn/recurrent_op_utils.h +++ b/paddle/operators/rnn/recurrent_op_utils.h @@ -31,7 +31,7 @@ using Scope = framework::Scope; * boot memories in father scope. Other attributes are copied from Op's proto * attributes. */ -struct MemoryAttr { +struct StateAttr { // name of current state variable std::string var; // name of previous step's state variable @@ -46,7 +46,7 @@ struct Argument { std::string step_scopes; std::vector inlinks; std::vector outlinks; - std::vector memories; + std::vector states; }; struct ArgumentName { @@ -54,9 +54,9 @@ struct ArgumentName { std::string step_scopes; std::string inlinks; std::string outlinks; - std::string memories; // the memory name - std::string pre_memories; // the previous memory name - std::string boot_memories; // the boot memory name + std::string states; // the memory name + std::string ex_states; // the previous memory name + std::string initial_states; // the boot memory name }; /** @@ -74,7 +74,7 @@ void ConcatOutputs(const std::vector& step_scopes, const size_t seq_len, const platform::DeviceContext& ctx); void LinkMemories(const std::vector& step_scopes, - const std::vector& memories, const size_t step_id, + const std::vector& memories, const size_t step_id, const int offset); void InitArgument(const ArgumentName& name, Argument* arg, diff --git a/paddle/operators/scale_op.cc b/paddle/operators/scale_op.cc index ac297da6b72250eaa5eb6ea2177fbcc7a6c1ec56..7f1a21bea72992307a05d50e7a0600ee763dd813 100644 --- a/paddle/operators/scale_op.cc +++ b/paddle/operators/scale_op.cc @@ -25,7 +25,6 @@ class ScaleOp : public framework::OperatorWithKernel { const framework::AttributeMap &attrs) : OperatorWithKernel(type, inputs, outputs, attrs) {} - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of ScaleOp should not be null."); @@ -56,7 +55,6 @@ class ScaleGradMaker : public framework::SingleGradOpDescMaker { public: using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; - protected: std::unique_ptr Apply() const override { auto *grad_op = new framework::OpDescBind(); grad_op->SetType("scale"); diff --git a/paddle/operators/scatter_op.cc 
b/paddle/operators/scatter_op.cc index fbea01a8db007a5b8cfe17f545f1cce6ecff3afb..62e6c70b4513fdfab1c563b6b23f36292fb6486a 100644 --- a/paddle/operators/scatter_op.cc +++ b/paddle/operators/scatter_op.cc @@ -22,7 +22,6 @@ class ScatterOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Ref"), "Input(Ref) of ScatterOp should not be null."); @@ -49,6 +48,7 @@ class ScatterOp : public framework::OperatorWithKernel { ctx->SetOutputDim("Out", ref_dims); } + protected: framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { return framework::ToDataType(ctx.Input("Ref")->type()); @@ -59,13 +59,13 @@ class ScatterGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { ctx->SetOutputDim(framework::GradVarName("Updates"), ctx->GetInputDim("Updates")); ctx->SetOutputDim(framework::GradVarName("Ref"), ctx->GetInputDim("Ref")); } + protected: framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { return framework::ToDataType(ctx.Input("Ref")->type()); diff --git a/paddle/operators/scatter_op.cu b/paddle/operators/scatter_op.cu index 06f4d759447b6dcd28b50576dfc246fc466d9336..3b32ae2fb77a5d3d4c558742ec469c74d15eee07 100644 --- a/paddle/operators/scatter_op.cu +++ b/paddle/operators/scatter_op.cu @@ -30,7 +30,7 @@ class ScatterOpCUDAKernel : public framework::OpKernel { auto *Updates = ctx.Input("Updates"); auto *Out = ctx.Output("Out"); - Out->ShareDataWith(*Ref); + Out->ShareDataWith(*Ref); GPUScatterAssign(ctx.device_context(), *Updates, *Index, Out); } @@ -48,7 +48,7 @@ class ScatterGradOpCUDAKernel : public framework::OpKernel { auto *dOut = ctx.Input(framework::GradVarName("Out")); // In place gradient: dRef = dO - dRef->ShareDataWith(*dOut); + dRef->ShareDataWith(*dOut); dUpdates->mutable_data(ctx.GetPlace()); // Gradient by Gather: dUpdates = dO[Index] GPUGather(ctx.device_context(), *dOut, *Index, dUpdates); diff --git a/paddle/operators/scatter_op.h b/paddle/operators/scatter_op.h index 6101219006414e4865f676e3ca5d2a88949ad17a..1a4f6f99bfe36cd0de2d4f2af3f6054571d8f188 100644 --- a/paddle/operators/scatter_op.h +++ b/paddle/operators/scatter_op.h @@ -35,7 +35,7 @@ class ScatterOpKernel : public framework::OpKernel { auto *Out = ctx.Output("Out"); // In place output: Out = Ref, Out[Index] += Updates - Out->ShareDataWith(*Ref); + Out->ShareDataWith(*Ref); // Apply ScatterUpdate: Out[index] += Updates[:] ScatterAssign(ctx.device_context(), *Updates, *Index, Out); } @@ -53,7 +53,7 @@ class ScatterGradientOpKernel : public framework::OpKernel { auto *dOut = ctx.Input(framework::GradVarName("Out")); // In place gradient: dRef = dO - dRef->ShareDataWith(*dOut); + dRef->ShareDataWith(*dOut); dUpdates->mutable_data(ctx.GetPlace()); // Gradient by Gather: dUpdates += dO[Index] CPUGather(ctx.device_context(), *dOut, *Index, dUpdates); diff --git a/paddle/operators/sequence_concat_op.cc b/paddle/operators/sequence_concat_op.cc index 287fb1942e4a2b17f6d51c9a6b7f6fb71fbaa601..1fce96cdfe20fc3ab33a3cd00e9a03833c9b94f8 100644 --- a/paddle/operators/sequence_concat_op.cc +++ b/paddle/operators/sequence_concat_op.cc @@ -21,7 +21,6 @@ class SequenceConcatOp : public framework::OperatorWithKernel { public: using 
framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInputs("X"), "Inputs(X) of SequenceConcatOp should not be null."); @@ -105,7 +104,6 @@ class SequenceConcatGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "The gradient of Out should not be null."); diff --git a/paddle/operators/sequence_concat_op.h b/paddle/operators/sequence_concat_op.h index a197a05bbb881806b24f9dcce5282a4d972e3adc..6adf96120c99f9b84a1ff947058e65ac3ddff1d4 100644 --- a/paddle/operators/sequence_concat_op.h +++ b/paddle/operators/sequence_concat_op.h @@ -87,16 +87,16 @@ class SequenceConcatOpKernel : public framework::OpKernel { auto out_lod_level = out_lod[level]; for (size_t i = 0; i < out_lod_level.size() - 1; ++i) { - Tensor out_t = out->Slice(static_cast(out_lod_level[i]), - static_cast(out_lod_level[i + 1])); + Tensor out_t = out->Slice(static_cast(out_lod_level[i]), + static_cast(out_lod_level[i + 1])); auto out_stride = framework::stride(out_t.dims()); size_t offset = 0; for (size_t j = 0; j < n; ++j) { auto in_lod_level = ins[j]->lod()[level]; auto in_stride = framework::stride(ins[j]->dims()); - Tensor in_t = ins[j]->Slice(static_cast(in_lod_level[i]), - static_cast(in_lod_level[i + 1])); + Tensor in_t = ins[j]->Slice(static_cast(in_lod_level[i]), + static_cast(in_lod_level[i + 1])); size_t axis_dim = in_t.dims()[axis]; StridedMemcpy(ctx.device_context(), in_t.data(), in_stride, in_t.dims(), out_stride, out_t.data() + offset); @@ -130,8 +130,8 @@ class SequenceConcatGradOpKernel : public framework::OpKernel { for (size_t i = 0; i < out_lod_level.size() - 1; ++i) { Tensor out_grad_t = - out_grad->Slice(static_cast(out_lod_level[i]), - static_cast(out_lod_level[i + 1])); + out_grad->Slice(static_cast(out_lod_level[i]), + static_cast(out_lod_level[i + 1])); auto out_grad_stride = framework::stride(out_grad_t.dims()); size_t offset = 0; @@ -139,8 +139,8 @@ class SequenceConcatGradOpKernel : public framework::OpKernel { auto x_grad_lod_level = x_grads[j]->lod()[level]; auto x_grad_stride = framework::stride(x_grads[j]->dims()); Tensor x_grad_t = - x_grads[j]->Slice(static_cast(x_grad_lod_level[i]), - static_cast(x_grad_lod_level[i + 1])); + x_grads[j]->Slice(static_cast(x_grad_lod_level[i]), + static_cast(x_grad_lod_level[i + 1])); size_t axis_dim = x_grad_t.dims()[axis]; StridedMemcpy(ctx.device_context(), out_grad_t.data() + offset, out_grad_stride, out_grad_t.dims(), x_grad_stride, diff --git a/paddle/operators/sequence_pool_op.cc b/paddle/operators/sequence_pool_op.cc index 06c00d31eac9ff09084db951ce2929850f9d14c2..e3f5d509a85537669237b8fd0ed44efe8abb6874 100644 --- a/paddle/operators/sequence_pool_op.cc +++ b/paddle/operators/sequence_pool_op.cc @@ -21,7 +21,6 @@ class SequencePoolOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of SequencePoolOp should not be null."); @@ -36,11 +35,10 @@ class SequencePoolOpMaker : public framework::OpProtoAndCheckerMaker { SequencePoolOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", - "A float LoDTensor, 
the variable-length input of SequencePoolOp"); - AddOutput( - "Out", - "A float LoDTensor, the variable-length output of SequencePoolOp."); + AddInput("X", "(LoDTensor), the variable-length input of SequencePoolOp"); + AddOutput("Out", + "(Tensor), output of SequencePoolOp, which does not contain LoD " + "information."); AddAttr( "strategy", "(int, default AVERAGE) the pooling strategy of SequencePoolOp.") @@ -49,13 +47,13 @@ class SequencePoolOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( SequencePoolOp pools features of all time-steps of each instance. - For a mini-batch of 3 variable lengths sentences, containing 2, 3, and 2 time-steps: + For a mini-batch of 3 variable-length sentences, containing 2, 3, and 2 time-steps: - Assume X is a [7,M,N] float LoDTensor, and X->lod()[0] = [0, 2, 5, 7]. + Assume X is a [7,M,N] LoDTensor, and X->lod()[0] = [0, 2, 5, 7], 7=2+3+2. Besides, for the sake of simplicity, we assume M=1 and N=1, and the value of X = [[1, 3], [2, 4, 6], [5, 1]]. - Thus, Out is a [3,1,1] float LoDTensor, but Out->lod() is nullptr. + Thus, Out is a [3,1,1] Tensor without LoD information. And for different strategy, the value of Out is as follows: - AVERAGE: [2, 4, 3], where 2=(1+3)/2, 4=(2+4+6)/3, 3=(5+1)/2 @@ -73,7 +71,6 @@ class SequencePoolGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Gradient of Out should not be null."); diff --git a/paddle/operators/sequence_pool_op.h b/paddle/operators/sequence_pool_op.h index 752d714125578b2d1f926765b183495ec5cc203e..0de6cafe9ca83f09636a69b5579d19afde1c73b5 100644 --- a/paddle/operators/sequence_pool_op.h +++ b/paddle/operators/sequence_pool_op.h @@ -15,6 +15,7 @@ limitations under the License. 
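To make the SequencePoolOp DOC example above concrete, here is a standalone sketch that pools each LoD segment of a flat input, covering the AVERAGE/SUM strategies already listed as well as the SQRT/FIRST/LAST strategies added in `sequence_pool_op.h` below (illustrative only, not Paddle code):

```cpp
// Sketch of per-sequence pooling as described above: pool each LoD segment
// of a flat input independently; illustrative, not Paddle code.
#include <cmath>
#include <cstdio>
#include <numeric>
#include <vector>

enum class Strategy { kAverage, kSum, kSqrt, kFirst, kLast };

std::vector<double> SequencePool(const std::vector<double>& x,
                                 const std::vector<size_t>& lod, Strategy s) {
  std::vector<double> out;
  for (size_t i = 0; i + 1 < lod.size(); ++i) {
    const double h = static_cast<double>(lod[i + 1] - lod[i]);
    const double sum =
        std::accumulate(x.begin() + lod[i], x.begin() + lod[i + 1], 0.0);
    switch (s) {
      case Strategy::kAverage: out.push_back(sum / h); break;
      case Strategy::kSum:     out.push_back(sum); break;
      case Strategy::kSqrt:    out.push_back(sum / std::sqrt(h)); break;
      case Strategy::kFirst:   out.push_back(x[lod[i]]); break;
      case Strategy::kLast:    out.push_back(x[lod[i + 1] - 1]); break;
    }
  }
  return out;
}

int main() {
  // Same toy data as the DOC above: lod = [0, 2, 5, 7], X = [1,3, 2,4,6, 5,1].
  std::vector<double> x = {1, 3, 2, 4, 6, 5, 1};
  std::vector<size_t> lod = {0, 2, 5, 7};
  auto avg = SequencePool(x, lod, Strategy::kAverage);
  std::printf("%g %g %g\n", avg[0], avg[1], avg[2]);  // prints 2 4 3
  return 0;
}
```

Running it on the DOC's toy data reproduces the AVERAGE result [2, 4, 3].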
*/ #pragma once #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" +#include "paddle/operators/math/math_function.h" namespace paddle { namespace operators { @@ -63,9 +64,9 @@ class SequencePoolKernel : public framework::OpKernel { out->mutable_data(context.GetPlace()); auto place = context.GetEigenDevice(); for (int i = 0; i < static_cast(lod_level_0.size()) - 1; ++i) { - Tensor in_t = in->Slice(static_cast(lod_level_0[i]), - static_cast(lod_level_0[i + 1])); - Tensor out_t = out->Slice(i, i + 1); + Tensor in_t = in->Slice(static_cast(lod_level_0[i]), + static_cast(lod_level_0[i + 1])); + Tensor out_t = out->Slice(i, i + 1); int64_t h = static_cast(lod_level_0[i + 1] - lod_level_0[i]); auto in_e = EigenMatrix::From(in_t, framework::make_ddim({h, w})); auto out_e = EigenVector::Flatten(out_t); @@ -77,6 +78,16 @@ class SequencePoolKernel : public framework::OpKernel { case SUM: out_e.device(place) = in_e.sum(Eigen::array({{0}})); break; + case SQRT: + out_e.device(place) = in_e.sum(Eigen::array({{0}})) / + std::sqrt(static_cast(h)); + break; + case LAST: + out_e.device(place) = in_e.chip(h - 1, 0); + break; + case FIRST: + out_e.device(place) = in_e.chip(0, 0); + break; default: PADDLE_THROW("unsupported pooling strategy"); } @@ -98,11 +109,16 @@ class SequencePoolGradKernel : public framework::OpKernel { int64_t w = in->numel() / dims[0]; in_g->mutable_data(context.GetPlace()); + if (strategy == LAST || strategy == FIRST) { + // set X@Grad be zero at first when strategy is LAST/FIRST + math::SetConstant functor; + functor(context.device_context(), in_g, 0); + } auto place = context.GetEigenDevice(); for (int i = 0; i < static_cast(lod.size()) - 1; ++i) { - auto in_g_t = in_g->Slice(static_cast(lod[i]), - static_cast(lod[i + 1])); - auto out_g_t = out_g->Slice(i, i + 1); + auto in_g_t = + in_g->Slice(static_cast(lod[i]), static_cast(lod[i + 1])); + auto out_g_t = out_g->Slice(i, i + 1); int64_t h = static_cast(lod[i + 1] - lod[i]); auto in_g_e = EigenMatrix::From(in_g_t, {h, w}); auto out_g_e = EigenMatrix::From(out_g_t, {1, w}); @@ -115,6 +131,16 @@ class SequencePoolGradKernel : public framework::OpKernel { case SUM: in_g_e.device(place) = (out_g_e).broadcast(bcast); break; + case SQRT: + in_g_e.device(place) = + (out_g_e / std::sqrt(static_cast(h))).broadcast(bcast); + break; + case LAST: + in_g_e.chip(h - 1, 0).device(place) = out_g_e; + break; + case FIRST: + in_g_e.chip(0, 0).device(place) = out_g_e; + break; default: PADDLE_THROW("unsupported pooling strategy"); } diff --git a/paddle/operators/sequence_softmax_op.cc b/paddle/operators/sequence_softmax_op.cc index ea217ba4597c24ec323c6509f097874d977fcf81..c891ab1fdcbb167453462c45b00b4632e663dd0e 100644 --- a/paddle/operators/sequence_softmax_op.cc +++ b/paddle/operators/sequence_softmax_op.cc @@ -21,7 +21,6 @@ class SequenceSoftmaxOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of SequenceSoftmaxOp should not be null."); @@ -66,7 +65,6 @@ class SequenceSoftmaxGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Out"), "Input(Out) of SequenceSoftmaxGradOp should not be null."); diff --git a/paddle/operators/sequence_softmax_op.h 
b/paddle/operators/sequence_softmax_op.h index 96d87c404d217280d74bd088e7a23f539ef6e7ce..3eb1e2844dff6ac94e86dcf4586bb51bc33adbec 100644 --- a/paddle/operators/sequence_softmax_op.h +++ b/paddle/operators/sequence_softmax_op.h @@ -46,8 +46,8 @@ class SequenceSoftmaxKernel : public framework::OpKernel { for (int i = 0; i < static_cast(lod[level].size()) - 1; ++i) { int start_pos = static_cast(lod[level][i]); int end_pos = static_cast(lod[level][i + 1]); - Tensor x_i = x->Slice(start_pos, end_pos); - Tensor out_i = out->Slice(start_pos, end_pos); + Tensor x_i = x->Slice(start_pos, end_pos); + Tensor out_i = out->Slice(start_pos, end_pos); // Reshape from (end_pos - start_pos) x 1UL to 1UL x (end_pos - start_pos) framework::DDim dims_i = framework::make_ddim({1UL, end_pos - start_pos}); @@ -75,9 +75,9 @@ class SequenceSoftmaxGradKernel : public framework::OpKernel { int start_pos = static_cast(lod[level][i]); int end_pos = static_cast(lod[level][i + 1]); - Tensor out_i = out->Slice(start_pos, end_pos); - Tensor out_grad_i = out_grad->Slice(start_pos, end_pos); - Tensor x_grad_i = x_grad->Slice(start_pos, end_pos); + Tensor out_i = out->Slice(start_pos, end_pos); + Tensor out_grad_i = out_grad->Slice(start_pos, end_pos); + Tensor x_grad_i = x_grad->Slice(start_pos, end_pos); // Reshape from (end_pos - start_pos) x 1UL to 1UL x (end_pos - start_pos) framework::DDim dims_i = framework::make_ddim({1UL, end_pos - start_pos}); diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc index 2a6a162a02507fa42c5ecbba59384f9c32aba7a9..2acb96d1b4f5903ff6c57b10e7621c8adaf73171 100644 --- a/paddle/operators/sgd_op.cc +++ b/paddle/operators/sgd_op.cc @@ -21,8 +21,7 @@ class SGDOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: - void InferShape(framework::InferShapeContext *ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Param"), "Input(Param) of SGDOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Grad"), @@ -36,15 +35,15 @@ class SGDOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, "Learning rate should have 1 element"); auto param_dim = ctx->GetInputDim("Param"); - PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"), - "Two input of SGD Op's dimension must be same."); + // TODO(qijun): check dimensions of Param and Grad at complie + // and run time. 
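The TODO above replaces the old `PADDLE_ENFORCE_EQ` on the Param/Grad shapes: once Grad may arrive as a `SelectedRows`, its value tensor covers only a subset of Param's rows, so an unconditional shape equality no longer holds. A hedged sketch of what a dense-only check could look like follows; `CheckSgdDims` is a hypothetical helper, not part of this patch.

```cpp
#include <cstdint>
#include <stdexcept>
#include <vector>

// Hypothetical helper (not in the patch): a dense LoDTensor gradient must
// match Param element-for-element, while a SelectedRows gradient only has to
// agree on the row width, so it is skipped here and validated row-wise later.
void CheckSgdDims(const std::vector<int64_t>& param_dims,
                  const std::vector<int64_t>& grad_dims, bool grad_is_dense) {
  if (!grad_is_dense) return;
  if (param_dims != grad_dims) {
    throw std::invalid_argument("Param and dense Grad dims must be equal");
  }
}
```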
ctx->SetOutputDim("ParamOut", param_dim); } }; class SGDOpMaker : public framework::OpProtoAndCheckerMaker { public: - SGDOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + SGDOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Param", "Input parameter"); AddInput("LearningRate", "Learning rate of SGD"); @@ -59,6 +58,38 @@ param_out = param - learning_rate * grad; )DOC"); } }; + +template +struct SparseSGDFunctor { + void operator()(const platform::DeviceContext& context, + const framework::SelectedRows& input, + const framework::Tensor& learning_rate, + framework::Tensor* output) { + auto in_height = input.height(); + auto out_dims = output->dims(); + PADDLE_ENFORCE_EQ(in_height, out_dims[0]); + + auto& in_value = input.value(); + auto& in_rows = input.rows(); + + int64_t in_row_numel = in_value.numel() / in_rows.size(); + PADDLE_ENFORCE_EQ(in_row_numel, output->numel() / in_height); + + auto* in_data = in_value.data(); + auto* out_data = output->data(); + auto* lr = learning_rate.data(); + + for (size_t i = 0; i < in_rows.size(); i++) { + for (int64_t j = 0; j < in_row_numel; j++) { + out_data[in_rows[i] * in_row_numel + j] -= + lr[0] * in_data[i * in_row_numel + j]; + } + } + } +}; + +template struct SparseSGDFunctor; + } // namespace operators } // namespace paddle diff --git a/paddle/operators/sgd_op.cu b/paddle/operators/sgd_op.cu index f5ba6d3c29f8dfbfdea4fbf2c3d5fd7f5b358666..106f9b746ba6614d8fa68b677c47ec04ed26fb81 100644 --- a/paddle/operators/sgd_op.cu +++ b/paddle/operators/sgd_op.cu @@ -14,6 +14,66 @@ #define EIGEN_USE_GPU #include "paddle/operators/sgd_op.h" +#include "paddle/platform/cuda_helper.h" + +namespace paddle { +namespace operators { + +namespace { +template +__global__ void SparseSGDFunctorKernel(const T* selected_rows, + const int64_t* rows, + const T* learning_rate, T* tensor_out, + int64_t row_numel, int block_size) { + const int ty = blockIdx.y; + int tid = threadIdx.x; + + selected_rows += ty * row_numel; + tensor_out += rows[ty] * row_numel; + + for (int index = tid; index < row_numel; index += block_size) { + // Since index in rows of SelectedRows can be duplicate, we have to use + // Atomic Operation to avoid concurrent write error. 
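The comment above states the key constraint on the CUDA path: the same row index may occur several times in `rows`, so different threads can target the same output row and the update has to go through `CudaAtomicAdd`. The snippet below is only a serial CPU analogue with toy data (plain arrays rather than `SelectedRows`/`Tensor`), showing how duplicated rows accumulate.

```cpp
#include <cstdio>
#include <vector>

int main() {
  const float lr = 0.1f;
  // Toy sparse gradient: two of the three selected rows are the same row 0.
  std::vector<int> rows = {0, 2, 0};
  std::vector<float> grad = {1.0f, 2.0f, 3.0f};  // one scalar per selected row
  std::vector<float> param = {10.0f, 10.0f, 10.0f, 10.0f};

  // Serial accumulation: duplicates simply add up. On the GPU the same effect
  // needs an atomic add, because the duplicated rows may be written by
  // different threads concurrently.
  for (size_t i = 0; i < rows.size(); ++i) {
    param[rows[i]] -= lr * grad[i];
  }
  std::printf("%g %g %g %g\n", param[0], param[1], param[2], param[3]);
  // row 0 gets both updates: 10 - 0.1*1 - 0.1*3 = 9.6; row 2: 10 - 0.1*2 = 9.8
  return 0;
}
```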
+ paddle::platform::CudaAtomicAdd( + tensor_out + index, -1.0 * learning_rate[0] * selected_rows[index]); + } +} +} // namespace + +template +struct SparseSGDFunctor { + void operator()(const platform::DeviceContext& context, + const framework::SelectedRows& input, + const framework::Tensor& learning_rate, + framework::Tensor* output) { + auto in_height = input.height(); + auto out_dims = output->dims(); + PADDLE_ENFORCE_EQ(in_height, out_dims[0]); + + auto& in_value = input.value(); + auto& in_rows = input.rows(); + + int64_t in_row_numel = in_value.numel() / in_rows.size(); + PADDLE_ENFORCE_EQ(in_row_numel, output->numel() / in_height); + + auto* in_data = in_value.data(); + auto* out_data = output->data(); + + int block_size = 256; + dim3 threads(block_size, 1); + dim3 grid(1, in_rows.size()); + SparseSGDFunctorKernel< + T><<(context) + .stream()>>>(in_data, in_rows.data(), learning_rate.data(), + out_data, in_row_numel, block_size); + } +}; + +template struct SparseSGDFunctor; + +} // namespace operators +} // namespace paddle namespace ops = paddle::operators; REGISTER_OP_GPU_KERNEL(sgd, diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h index 26f4012f258771794c736dbfad4af174b017f410..78b595fc6c63d775b627f23cafa9458f1dadd4e5 100644 --- a/paddle/operators/sgd_op.h +++ b/paddle/operators/sgd_op.h @@ -15,31 +15,53 @@ limitations under the License. */ #pragma once #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" +#include "paddle/framework/selected_rows.h" namespace paddle { namespace operators { +template +struct SparseSGDFunctor { + void operator()(const platform::DeviceContext& context, + const framework::SelectedRows& input, + const framework::Tensor& learning_rate, + framework::Tensor* output); +}; + template class SGDOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto param = ctx.Input("Param"); - auto grad = ctx.Input("Grad"); - auto param_out = ctx.Output("ParamOut"); - auto learning_rate = ctx.Input("LearningRate"); + auto* param = ctx.Input("Param"); + auto* param_out = ctx.Output("ParamOut"); + auto* learning_rate = ctx.Input("LearningRate"); - param_out->mutable_data(ctx.GetPlace()); + auto* grad_var = ctx.InputVar("Grad"); + // Actually, all tensors are LoDTensor except SelectedRows. + if (grad_var->IsType()) { + param_out->mutable_data(ctx.GetPlace()); + auto* grad = ctx.Input("Grad"); - auto p = framework::EigenVector::Flatten(*param); - auto g = framework::EigenVector::Flatten(*grad); - auto o = framework::EigenVector::Flatten(*param_out); - auto lr = framework::EigenVector::Flatten(*learning_rate); - auto place = ctx.GetEigenDevice(); + auto p = framework::EigenVector::Flatten(*param); + auto g = framework::EigenVector::Flatten(*grad); + auto o = framework::EigenVector::Flatten(*param_out); + auto lr = framework::EigenVector::Flatten(*learning_rate); + auto place = ctx.GetEigenDevice(); - Eigen::DSizes grad_dsize(grad->numel()); - o.device(place) = p - lr.broadcast(grad_dsize) * g; + Eigen::DSizes grad_dsize(grad->numel()); + o.device(place) = p - lr.broadcast(grad_dsize) * g; + } else if (grad_var->IsType()) { + // TODO(qijun): In Sparse SGD operator, in-place update is enforced. + // This manual optimization brings difficulty to track data dependency. + // It's better to find a more elegant solution. 
+ PADDLE_ENFORCE_EQ(param, param_out); + auto* grad = ctx.Input("Grad"); + SparseSGDFunctor functor; + functor(ctx.device_context(), *grad, *learning_rate, param_out); + } else { + PADDLE_THROW("Unsupported Variable Type of Grad"); + } } }; - } // namespace operators } // namespace paddle diff --git a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc index b6653e1cc754bd626f04c28c42ce59f83c2fc87f..e781c8db208464cb94d94d1914e50f5aba3db2c6 100644 --- a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc +++ b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc @@ -23,7 +23,6 @@ class SigmoidCrossEntropyWithLogitsOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); PADDLE_ENFORCE(ctx->HasInput("Labels"), @@ -52,7 +51,6 @@ class SigmoidCrossEntropyWithLogitsGradOp public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); PADDLE_ENFORCE(ctx->HasInput("Labels"), diff --git a/paddle/operators/smooth_l1_loss_op.cc b/paddle/operators/smooth_l1_loss_op.cc index 91391dc945be3d3d94966339bf9232eb21dcc38f..a4f0f37764667c43d48c6aa7646d61cdf4f3fd2d 100644 --- a/paddle/operators/smooth_l1_loss_op.cc +++ b/paddle/operators/smooth_l1_loss_op.cc @@ -21,7 +21,6 @@ class SmoothL1LossOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "X must be initialized."); PADDLE_ENFORCE(ctx->HasInput("Y"), "Y must be initialized."); @@ -93,7 +92,6 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { auto in_dims = ctx->GetInputDim("X"); auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc index 4c131ed44de1c1567193c7cabff6ddf2dc63d9be..00fd0b32a9b3c0dd9fedf7b7621b1f15e5c4ce93 100644 --- a/paddle/operators/softmax_op.cc +++ b/paddle/operators/softmax_op.cc @@ -21,7 +21,6 @@ class SoftmaxOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of SoftmaxOp should not be null."); @@ -68,7 +67,6 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should be not null."); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")), diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc index 5431a1657c300d9c60100616ca7ea2e1196824ec..942fbb42df8bb90b86bd097832a15b320a857750 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/operators/softmax_with_cross_entropy_op.cc @@ -46,7 +46,7 @@ class SoftmaxWithCrossEntropyOpMaker "(Tensor, 
default: Tensor), A 2-D tensor. The cross " "entropy loss with shape [N x 1]."); AddAttr( - "softLabel", + "soft_label", "(bool, default: false), A flag to indicate whether to interpretate " "the given labels as soft labels.") .SetDefault(false); @@ -82,7 +82,6 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Logits"), "Input(Logits) should be not null."); @@ -100,13 +99,13 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(labels_dims.size(), 2UL, "The labels should be a 2-D tensor."); - if (ctx->Attrs().Get("softLabel")) { + if (ctx->Attrs().Get("soft_label")) { PADDLE_ENFORCE_EQ(logits_dims[1], labels_dims[1], - "If Attr(softLabel) == true, the 2nd dimension of " + "If Attr(soft_label) == true, the 2nd dimension of " "Input(X) and Input(Label) should be equal."); } else { PADDLE_ENFORCE_EQ(labels_dims[1], 1UL, - "If Attr(softLabel) == false, the 2nd dimension of " + "If Attr(soft_label) == false, the 2nd dimension of " "Input(Label) should be 1."); } @@ -117,6 +116,7 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel { ctx->ShareLoD("Logits", /*->*/ "Loss"); } + protected: framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { return framework::ToDataType(ctx.Input("Logits")->type()); @@ -127,7 +127,6 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Loss")), "Input(Loss@Grad) should not be null."); @@ -142,13 +141,13 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(labels_dims.size(), 2UL, "The labels should be a 2-D tensor."); - if (ctx->Attrs().Get("softLabel")) { + if (ctx->Attrs().Get("soft_label")) { PADDLE_ENFORCE_EQ(softmax_dims[1], labels_dims[1], - "When Attr(softLabel) == true, the 2nd dimension of " + "When Attr(soft_label) == true, the 2nd dimension of " "Input(X) and Input(Label) should be equal."); } else { PADDLE_ENFORCE_EQ(labels_dims[1], 1UL, - "When Attr(softLabel) == false, the 2nd dimension of " + "When Attr(soft_label) == false, the 2nd dimension of " "Input(Label) should be 1."); } @@ -156,6 +155,7 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { ctx->GetInputDim("Softmax")); } + protected: framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { return framework::ToDataType( diff --git a/paddle/operators/softmax_with_cross_entropy_op.cu b/paddle/operators/softmax_with_cross_entropy_op.cu index 2bc53ecf871eb1800a920ba85e8eac31d7037efe..68ac2b0ea36dda55ac1161eecb80f03178b4f303 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cu +++ b/paddle/operators/softmax_with_cross_entropy_op.cu @@ -70,7 +70,7 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel { logits, softmax); math::CrossEntropyFunctor()( context.device_context(), loss, softmax, labels, - context.Attr("softLabel")); + context.Attr("soft_label")); } }; @@ -85,7 +85,7 @@ class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel { context.Input(framework::GradVarName("Loss"))->data(); Tensor* logit_grad = 
context.Output(framework::GradVarName("Logits")); - logit_grad->ShareDataWith(*context.Input("Softmax")); + logit_grad->ShareDataWith(*context.Input("Softmax")); T* logit_grad_data = logit_grad->data(); const int batch_size = logit_grad->dims()[0]; @@ -93,7 +93,7 @@ class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel { int block = 512; int grid = (batch_size * class_num + block - 1) / block; - if (context.Attr("softLabel")) { + if (context.Attr("soft_label")) { const T* label_data = labels->data(); SoftCrossEntropyGradientKernel<<< grid, block, 0, reinterpret_cast( diff --git a/paddle/operators/softmax_with_cross_entropy_op.h b/paddle/operators/softmax_with_cross_entropy_op.h index cffd422f1827b646a8abcd881fdcb5455e6a663a..01027cf63fc1010a226346609d583af0b400ecbb 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.h +++ b/paddle/operators/softmax_with_cross_entropy_op.h @@ -44,7 +44,7 @@ class SoftmaxWithCrossEntropyKernel : public framework::OpKernel { logits, softmax); math::CrossEntropyFunctor()( context.device_context(), loss, softmax, labels, - context.Attr("softLabel")); + context.Attr("soft_label")); } }; @@ -57,10 +57,10 @@ class SoftmaxWithCrossEntropyGradKernel : public framework::OpKernel { const Tensor* labels = context.Input("Label"); Tensor* logit_grad = context.Output(framework::GradVarName("Logits")); - logit_grad->ShareDataWith(*context.Input("Softmax")); + logit_grad->ShareDataWith(*context.Input("Softmax")); const int class_num = logit_grad->dims()[1]; - if (context.Attr("softLabel")) { + if (context.Attr("soft_label")) { auto out_grad_mat = EigenMatrix::From(*out_grad); auto logit_grad_mat = EigenMatrix::From(*logit_grad); auto lbl_mat = EigenMatrix::From(*labels); diff --git a/paddle/operators/split_op.cc b/paddle/operators/split_op.cc index d5dd4df2a2a6f424421ae7f117c26acff08d37d0..4a6c50f7970208b0f4141aa057bd0db715fb6152 100644 --- a/paddle/operators/split_op.cc +++ b/paddle/operators/split_op.cc @@ -23,7 +23,6 @@ class SplitOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of SplitOp should not be null."); diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc index cce4e527c36bc0f4735165ff88ebba85b00eb898..e360c19b47eae7fc32ae66f9e4e3873bff211b04 100644 --- a/paddle/operators/squared_l2_distance_op.cc +++ b/paddle/operators/squared_l2_distance_op.cc @@ -21,7 +21,6 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of SquaredL2DistanceOp should not be null."); @@ -85,7 +84,6 @@ class SquaredL2DistanceGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Gradient of Out should not be null"); diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc index 573487b83590c132d5a4379a4b2762fbc16c04bc..5214a8413e8f7b957015985496fe8fb4b4f8b323 100644 --- a/paddle/operators/sum_op.cc +++ b/paddle/operators/sum_op.cc @@ -21,7 +21,6 @@ class SumOp : public framework::OperatorWithKernel { public: using 
framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInputs("X"), "Inputs(X) should not be null"); auto x_dims = ctx->GetInputsDim("X"); diff --git a/paddle/operators/top_k_op.cc b/paddle/operators/top_k_op.cc index c95481991229dd40e162094ccbf15de8ab627c8b..d5c2c91a5fb0f639ea84d13e27de8271218da54f 100644 --- a/paddle/operators/top_k_op.cc +++ b/paddle/operators/top_k_op.cc @@ -21,7 +21,6 @@ class TopkOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of TopkOp should not be null."); diff --git a/paddle/operators/transpose_op.cc b/paddle/operators/transpose_op.cc index 1101bbe3efe3d313400663ade561063da4e34dfd..d785e57c830439ad80005d9a3d4bb77faf1ae1b9 100644 --- a/paddle/operators/transpose_op.cc +++ b/paddle/operators/transpose_op.cc @@ -23,7 +23,6 @@ class TransposeOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null"); @@ -92,7 +91,6 @@ class TransposeOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc index 75928f1ec818ab028ea06cfa72273fb99430c3c8..39b53948e3cc58ff1d0ab481143b066b1a2fae16 100644 --- a/paddle/operators/uniform_random_op.cc +++ b/paddle/operators/uniform_random_op.cc @@ -46,7 +46,6 @@ class UniformRandomOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of UniformRandomOp should not be null."); @@ -54,18 +53,19 @@ class UniformRandomOp : public framework::OperatorWithKernel { PADDLE_ENFORCE( ctx->Attrs().Get("min") < ctx->Attrs().Get("max"), "uniform_random's min must less then max"); - auto& dims = ctx->Attrs().Get>("dims"); + auto& shape = ctx->Attrs().Get>("shape"); std::vector temp; - temp.reserve(dims.size()); - for (auto dim : dims) { + temp.reserve(shape.size()); + for (auto dim : shape) { temp.push_back(static_cast(dim)); } ctx->SetOutputDim("Out", framework::make_ddim(temp)); } + protected: framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { - return static_cast(Attr("data_type")); + return static_cast(ctx.Attr("data_type")); } }; @@ -78,7 +78,7 @@ class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC(Uniform random operator. Used to initialize tensor with uniform random generator. 
)DOC"); - AddAttr>("dims", "the dimension of random tensor"); + AddAttr>("shape", "the dimension of random tensor"); AddAttr("min", "Minimum value of uniform random").SetDefault(-1.0f); AddAttr("max", "Maximun value of uniform random").SetDefault(1.0f); AddAttr("seed", diff --git a/paddle/parameter/FirstOrderOptimizer.h b/paddle/parameter/FirstOrderOptimizer.h index 895e8d6a63d1fad0ee7a6f5647402435d418b2f1..f157188a4f736319ea187052b90a17f8be9e9edb 100644 --- a/paddle/parameter/FirstOrderOptimizer.h +++ b/paddle/parameter/FirstOrderOptimizer.h @@ -265,6 +265,10 @@ public: addParameterType(PARAMETER_SECOND_MOMENTUM); } + virtual void startBatch(int64_t numSamplesProcessed) { + learningRate_ = calcLearningRate(numSamplesProcessed, pass_); + } + virtual void finishBatch() { ++step_; } virtual void update(const VectorPtr vecs[], diff --git a/paddle/platform/dynload/cublas.h b/paddle/platform/dynload/cublas.h index 9d8343c0b5e200b390ccda760f09816959952e9d..6b64539b0a9a4d535a53447fbcc0e458f3ac9129 100644 --- a/paddle/platform/dynload/cublas.h +++ b/paddle/platform/dynload/cublas.h @@ -77,6 +77,10 @@ extern void *cublas_dso_handle; __macro(cublasDgemmBatched); \ __macro(cublasCgemmBatched); \ __macro(cublasZgemmBatched); \ + __macro(cublasSgemmStridedBatched); \ + __macro(cublasDgemmStridedBatched); \ + __macro(cublasCgemmStridedBatched); \ + __macro(cublasZgemmStridedBatched); \ __macro(cublasSgetrfBatched); \ __macro(cublasSgetriBatched); \ __macro(cublasDgetrfBatched); \ diff --git a/paddle/pserver/LightNetwork.cpp b/paddle/pserver/LightNetwork.cpp index 4203f2616456244df616ee2109436ab7caef9741..0e8e5a83a47bee3436450e6bf7db5e26dc037016 100644 --- a/paddle/pserver/LightNetwork.cpp +++ b/paddle/pserver/LightNetwork.cpp @@ -49,6 +49,11 @@ DEFINE_int32(sock_recv_buf_size, 1024 * 1024 * 40, "restrict sock recv buff size"); +/// reasonable sock_listen_queue_size can control maximum pending connections. 
+DEFINE_int32(sock_listen_queue_size, + 1024, + "listen queue size when pserver listen a TCP port"); + namespace paddle { /** @@ -129,7 +134,7 @@ SocketServer::SocketServer(const std::string &addr, int port, int rdmaCpu) if (rdmaCpu == -1) { tcpRdma_ = F_TCP; socket_ = 0; - maxPendingConnections_ = 100; + maxPendingConnections_ = FLAGS_sock_listen_queue_size; } else { tcpRdma_ = F_RDMA; rdmaCpu_ = rdmaCpu; diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc index b360b05d16c9a1c135fa56cb37919dece8f16788..405ac544e10f19a33399a649f76699fefc3d49b9 100644 --- a/paddle/pybind/protobuf.cc +++ b/paddle/pybind/protobuf.cc @@ -100,21 +100,11 @@ using namespace paddle::framework; // NOLINT // Bind Methods void BindProgramDesc(py::module &m) { py::class_(m, "ProgramDesc", "") - .def_static("instance", - []() -> ProgramDescBind * { - return &ProgramDescBind::Instance(&GetProgramDesc()); - }, - py::return_value_policy::reference) - .def_static("__create_program_desc__", - []() -> ProgramDescBind * { - // Only used for unit-test - auto *prog_desc = new ProgramDesc; - auto *block = prog_desc->mutable_blocks()->Add(); - block->set_idx(0); - block->set_parent_idx(-1); - return &ProgramDescBind::Instance(prog_desc); - }, - py::return_value_policy::reference) + .def(py::init<>()) + .def("__init__", + [](ProgramDescBind &self, const ProgramDescBind &other) { + new (&self) ProgramDescBind(other); + }) .def("append_block", &ProgramDescBind::AppendBlock, py::return_value_policy::reference) .def("append_backward", @@ -163,6 +153,11 @@ void BindBlockDesc(py::module &m) { return self.Var(name); }, py::return_value_policy::reference) + .def("has_var", + [](BlockDescBind &self, py::bytes byte_name) { + std::string name = byte_name; + return self.HasVar(name); + }) .def("find_var", [](BlockDescBind &self, py::bytes byte_name) { std::string name = byte_name; @@ -171,8 +166,8 @@ void BindBlockDesc(py::module &m) { py::return_value_policy::reference) .def("all_vars", &BlockDescBind::AllVars, py::return_value_policy::reference) - .def("all_ops", &BlockDescBind::AllOps, - py::return_value_policy::reference) + .def("op_size", &BlockDescBind::OpSize) + .def("op", &BlockDescBind::Op, py::return_value_policy::reference) .def("serialize_to_string", [](BlockDescBind &block_desc) -> py::bytes { const BlockDesc *desc = block_desc.Proto(); PADDLE_ENFORCE(desc->IsInitialized(), @@ -211,20 +206,25 @@ void BindVarDsec(py::module &m) { .def("set_lod_level", &VarDescBind::SetLoDLevel) .def("type", &VarDescBind::GetType) .def("set_type", &VarDescBind::SetType) - .def("serialize_to_string", [](VarDescBind &var_desc) -> py::bytes { - const VarDesc *desc = var_desc.Proto(); - PADDLE_ENFORCE(desc->IsInitialized(), - "VarDesc has not been initialized."); - std::string res; - PADDLE_ENFORCE( - desc->SerializeToString(&res), - "Serialize VarDesc Error. This could be a bug of Paddle."); - return res; - }); + .def("serialize_to_string", + [](VarDescBind &var_desc) -> py::bytes { + const VarDesc *desc = var_desc.Proto(); + PADDLE_ENFORCE(desc->IsInitialized(), + "VarDesc has not been initialized."); + std::string res; + PADDLE_ENFORCE( + desc->SerializeToString(&res), + "Serialize VarDesc Error. 
This could be a bug of Paddle."); + return res; + }) + .def("persistable", &VarDescBind::Persistable) + .def("set_persistable", &VarDescBind::SetPersistable); py::enum_(var_desc, "VarType", "") .value("LOD_TENSOR", VarDesc::LOD_TENSOR) - .value("SELECTED_ROWS", VarDesc::SELECTED_ROWS); + .value("SELECTED_ROWS", VarDesc::SELECTED_ROWS) + .value("FEED_MINIBATCH", VarDesc::FEED_MINIBATCH) + .value("FETCH_LIST", VarDesc::FETCH_LIST); } void BindOpDesc(py::module &m) { diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index afc80b25b185509739edcd0d06817aa1d507a8ec..26b793a4bbf5df7a2635838a6c6a8264ca8ebb67 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -16,7 +16,10 @@ limitations under the License. */ #include "paddle/framework/backward.h" #include "paddle/framework/executor.h" +#include "paddle/framework/feed_fetch_method.h" +#include "paddle/framework/framework.pb.h" #include "paddle/framework/lod_tensor.h" +#include "paddle/framework/selected_rows.h" #include "paddle/framework/tensor_array.h" #include "paddle/operators/cond_op.h" #include "paddle/operators/dynamic_recurrent_op.h" @@ -81,10 +84,12 @@ PYBIND11_PLUGIN(core) { .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) + .def("set", PyCPUTensorSetFromArray) #ifdef PADDLE_WITH_CUDA .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) + .def("set", PyCUDATensorSetFromArray) #endif .def("shape", [](Tensor &self) { return vectorize(self.dims()); }) .def("set_float_element", TensorSetElement) @@ -108,6 +113,7 @@ PYBIND11_PLUGIN(core) { new (&instance) LoDTensor(new_lod); #endif }) + .def("__init__", [](LoDTensor &instance) { new (&instance) LoDTensor(); }) .def("set_lod", [](LoDTensor &self, const std::vector> &lod) { #ifndef PADDLE_WITH_CUDA @@ -138,6 +144,40 @@ PYBIND11_PLUGIN(core) { #endif }); + py::class_(m, "SelectedRows") + .def("__init__", + [](SelectedRows &instance) { new (&instance) SelectedRows(); }) + .def("__init__", + [](SelectedRows &instance, const std::vector rows, + const int64_t &height) { + new (&instance) SelectedRows(rows, height); + }) + .def("get_tensor", + [](SelectedRows &self) { return self.mutable_value(); }, + py::return_value_policy::reference) + .def("set_height", &SelectedRows::set_height) + .def("height", &SelectedRows::height) + .def("set_rows", + [](SelectedRows &self, std::vector rows) { +#ifndef PADDLE_WITH_CUDA + self.set_rows(rows); +#else + Vector new_rows(rows); + self.set_rows(new_rows); +#endif + }) + .def("rows", [](SelectedRows &self) { +#ifndef PADDLE_WITH_CUDA + return self.rows(); +#else + auto rows = self.rows(); + std::vector new_rows; + new_rows.reserve(rows.size()); + std::copy(rows.begin(), rows.end(), std::back_inserter(new_rows)); + return new_rows; +#endif + }); + py::class_(m, "Variable", R"DOC(Variable Class. All parameter, weight, gradient are variables in Paddle. @@ -158,6 +198,11 @@ All parameter, weight, gradient are variables in Paddle. return self.GetMutable(); }, py::return_value_policy::reference) + .def("get_selected_rows", + [](Variable &self) -> SelectedRows * { + return self.GetMutable(); + }, + py::return_value_policy::reference) .def("get_net", [](Variable &self) -> operators::NetOp * { return self.GetMutable(); @@ -222,6 +267,17 @@ All parameter, weight, gradient are variables in Paddle. 
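`SelectedRows` becomes constructible and inspectable from Python through the `set_rows`/`set_height`/`get_tensor` bindings above. For orientation, the sketch below shows the conceptual layout such an object represents; it is a simplified stand-in, not the real `framework::SelectedRows`, whose value block is an actual `Tensor`.

```cpp
#include <cstdint>
#include <vector>

// Conceptual picture of what the Python-visible SelectedRows holds: a logical
// [height x width] tensor stored as the indices of its occupied rows plus a
// compact value block with one stored row per index. get_tensor() in the
// binding returns that compact block, not the expanded tensor.
struct SelectedRowsSketch {
  int64_t height = 0;         // rows of the logical dense tensor
  int64_t width = 0;          // columns per row
  std::vector<int64_t> rows;  // indices of the rows actually stored
  std::vector<float> value;   // rows.size() * width values, row-major

  // Purely illustrative: expand to the dense form, zero-filling missing rows.
  std::vector<float> ToDense() const {
    std::vector<float> dense(static_cast<size_t>(height * width), 0.0f);
    for (size_t i = 0; i < rows.size(); ++i)
      for (int64_t j = 0; j < width; ++j)
        dense[static_cast<size_t>(rows[i] * width + j)] = value[i * width + j];
    return dense;
  }
};
```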
.def(py::init<>()) .def("__str__", string::to_string); + py::class_(m, "Place") + .def(py::init<>()) + .def("set_place", + [](platform::Place &self, const platform::CPUPlace &cpu_place) { + self = cpu_place; + }) + .def("set_place", + [](platform::Place &self, const platform::GPUPlace &gpu_place) { + self = gpu_place; + }); + py::class_(m, "Operator") .def_static("create", [](py::bytes protobin) { @@ -231,7 +287,7 @@ All parameter, weight, gradient are variables in Paddle. PADDLE_ENFORCE(desc.IsInitialized(), "User OpDesc is not initialized, reason %s", desc.InitializationErrorString()); - return OpRegistry::CreateOp(desc); + return OpRegistry::CreateOp(desc, nullptr); }) .def("backward", [](const OperatorBase &forwardOp, @@ -335,7 +391,7 @@ All parameter, weight, gradient are variables in Paddle. PADDLE_ENFORCE(desc.IsInitialized(), "User OpDesc is not initialized, reason %s", desc.InitializationErrorString()); - auto rnn_op = OpRegistry::CreateOp(desc); + auto rnn_op = OpRegistry::CreateOp(desc, nullptr); return static_cast(rnn_op.release()); }) .def("set_stepnet", [](operators::RecurrentOp &self, @@ -353,22 +409,22 @@ All parameter, weight, gradient are variables in Paddle. PADDLE_ENFORCE(desc.IsInitialized(), "User OpDesc is not initialized, reason %s", desc.InitializationErrorString()); - auto rnn_op = OpRegistry::CreateOp(desc); + auto rnn_op = OpRegistry::CreateOp(desc, nullptr); return static_cast( rnn_op.release()); }) - .def("set_stepnet", + .def("set_step_unit", [](operators::DynamicRecurrentOp &self, const operators::NetOp &net) - -> void { self.SetStepNet(net.Clone()); }) + -> void { self.rnn.SetStepUnit(net.Clone()); }) .def("get_state", [](operators::DynamicRecurrentOp &self, const std::string &name) - -> const TensorArray & { return self.state(name); }) + -> const TensorArray & { return self.rnn.state(name); }) .def("get_step_input", [](operators::DynamicRecurrentOp &self, const std::string &name) - -> const TensorArray & { return self.step_input(name); }) + -> const TensorArray & { return self.rnn.step_input(name); }) .def("get_step_output", [](operators::DynamicRecurrentOp &self, const std::string &name) - -> const TensorArray & { return self.step_output(name); }); + -> const TensorArray & { return self.rnn.step_output(name); }); // cond_op py::class_(m, "CondOp") @@ -380,7 +436,7 @@ All parameter, weight, gradient are variables in Paddle. PADDLE_ENFORCE(desc.IsInitialized(), "User OpDesc is not initialized, reason %s", desc.InitializationErrorString()); - auto cond_op = OpRegistry::CreateOp(desc); + auto cond_op = OpRegistry::CreateOp(desc, nullptr); return static_cast(cond_op.release()); }) .def("set_truenet", @@ -394,21 +450,24 @@ All parameter, weight, gradient are variables in Paddle. 
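`Place` above becomes a first-class Python object that can be re-pointed at either a `CPUPlace` or a `GPUPlace` through the two `set_place` overloads. The snippet below is only a rough stand-in for that idea, a tagged kind plus a device id; the real `platform::Place` is a variant-style type, not this struct.

```cpp
#include <string>

// Illustrative sketch of a place that can be switched between CPU and GPU,
// mirroring the two set_place overloads bound above. Not the Paddle type.
struct PlaceSketch {
  enum class Kind { kCPU, kGPU } kind = Kind::kCPU;
  int device = 0;  // meaningful for GPU only

  void SetCPU() { kind = Kind::kCPU; device = 0; }
  void SetGPU(int dev) { kind = Kind::kGPU; device = dev; }

  std::string DebugString() const {
    if (kind == Kind::kCPU) return "CPUPlace";
    return "GPUPlace(" + std::to_string(device) + ")";
  }
};
```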
py::class_(m, "Executor") .def(py::init &>()) - .def("run", - [](Executor &self, const ProgramDesc &program_desc, int block_id) { - framework::Scope &global_scope = GetGlobalScope(); - self.Run(program_desc, &global_scope, block_id); - }); + .def("run", [](Executor &self, ProgramDescBind *program_bind, + Scope *scope, int block_id) { + self.Run(*program_bind->Proto(), scope, block_id); + }); m.def("unique_integer", UniqueIntegerGenerator); m.def("is_compile_gpu", IsCompileGPU); + m.def("set_feed_variable", framework::SetFeedVariable); + m.def("get_fetch_variable", framework::GetFetchVariable); BindProgramDesc(m); BindBlockDesc(m); BindVarDsec(m); BindOpDesc(m); + m.def("op_support_gpu", OpSupportGPU); + return m.ptr(); } } // namespace pybind diff --git a/paddle/scripts/cluster_train_v2/fabric/conf.py b/paddle/scripts/cluster_train_v2/fabric/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..e96503d093a4317df7bb006043eb42098f51b6f5 --- /dev/null +++ b/paddle/scripts/cluster_train_v2/fabric/conf.py @@ -0,0 +1,39 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +HOSTS = [ + "root@10.1.9.7", + "root@10.1.18.7", + "root@10.1.32.9", +] +''' +workspace configuration +''' +#root dir for workspace, can be set as any director with real user account +ROOT_DIR = "/root" +''' +network configuration +''' +#pserver nics +PADDLE_NIC = "eth0" +#pserver port +PADDLE_PORT = 7164 +#pserver ports num +PADDLE_PORTS_NUM = 1 +#pserver sparse ports num +PADDLE_PORTS_NUM_FOR_SPARSE = 1 +#trainer whether use gpu +PADDLE_USE_GPU = "False" +#environments setting for all processes in cluster job +LD_LIBRARY_PATH = "/usr/local/cuda/lib64:/usr/lib64" diff --git a/paddle/scripts/cluster_train_v2/fabric/docker_cluster/Dockerfile b/paddle/scripts/cluster_train_v2/fabric/docker_cluster/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..6606c01265af1fa8009e67906a3dbbe5c95ebc0d --- /dev/null +++ b/paddle/scripts/cluster_train_v2/fabric/docker_cluster/Dockerfile @@ -0,0 +1,11 @@ +FROM docker.paddlepaddlehub.com/paddle:0.10.0rc2 +RUN apt-get update && apt-get install -y openssh-server +RUN mkdir /var/run/sshd + +RUN echo 'root:root' |chpasswd + +RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config +RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config + +EXPOSE 22 +CMD ["/usr/sbin/sshd", "-D"] diff --git a/paddle/scripts/cluster_train_v2/fabric/docker_cluster/ssh_servers.yaml b/paddle/scripts/cluster_train_v2/fabric/docker_cluster/ssh_servers.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0784b2d1b8785796f94fff1607643218564fc126 --- /dev/null +++ b/paddle/scripts/cluster_train_v2/fabric/docker_cluster/ssh_servers.yaml @@ -0,0 +1,23 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: ssh-servers +spec: + replicas: 3 + template: + metadata: + labels: + app: ssh-servers + spec: + containers: + - name: ssh-servers + image: 
docker.paddlepaddlehub.com/paddlessh + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 500m + memory: 1Gi + ports: + - containerPort: 22 diff --git a/paddle/scripts/cluster_train_v2/fabric/run.sh b/paddle/scripts/cluster_train_v2/fabric/run.sh new file mode 100644 index 0000000000000000000000000000000000000000..f6324bcb136803ebc30e69bcdaa2f8725cb0ccba --- /dev/null +++ b/paddle/scripts/cluster_train_v2/fabric/run.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +python paddle.py \ + --job_dispatch_package="/root/wuyi/fabric_submit/workspace" \ + --dot_period=10 \ + --ports_num_for_sparse=1 \ + --log_period=50 \ + --num_passes=5 \ + --trainer_count=2 \ + --saving_period=1 \ + --local=0 \ + --config=./trainer_config.py \ + --save_dir=./output \ + --use_gpu=0 diff --git a/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/Dockerfile b/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..1a2d19e823541750830fcaa25f65b2f8e1ea2b49 --- /dev/null +++ b/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/Dockerfile @@ -0,0 +1,43 @@ +# Build this image: docker build -t mpi . +# + +FROM paddledev/paddle:0.10.0rc3 + +ENV DEBIAN_FRONTEND noninteractive + +RUN apt-get update -y && \ + apt-get upgrade -y && \ + apt-get install -y openssh-server zip unzip vim sudo \ +gcc gfortran openmpi-checkpoint binutils wget curl git openmpi-bin openmpi-common libopenmpi-dev && \ +pip install mpi4py numpy virtualenv scipy matplotlib lxml sqlalchemy suds ipython obspy && \ +mkdir /var/run/sshd && \ +echo 'root:tutorial' | chpasswd && \ +sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/' /etc/ssh/sshd_config && \ +# SSH login fix. Otherwise user is kicked off after login +sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd && \ +echo "export VISIBLE=now" >> /etc/profile && \ +adduser --disabled-password --gecos "" tutorial && \ +echo "tutorial ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers && \ +mkdir /home/tutorial/.ssh/ + +ENV HOME /home/tutorial +ENV NOTVISIBLE "in users profile" + +# ------------------------------------------------------------ +# Set-Up SSH with our Github deploy key +# ------------------------------------------------------------ + +ADD ssh/config /home/tutorial/.ssh/config +ADD ssh/id_rsa.mpi /home/tutorial/.ssh/id_rsa +ADD ssh/id_rsa.mpi.pub /home/tutorial/.ssh/id_rsa.pub +ADD ssh/id_rsa.mpi.pub /home/tutorial/.ssh/authorized_keys + +#--------------------------------------------------------------- +#LD_LIBRARY_PATH +#--------------------------------------------------------------- + +RUN export LD_LIBRARY_PATH=/usr/lib/openmpi/lib/ + +WORKDIR /home/tutorial +EXPOSE 22 +CMD ["/usr/sbin/sshd", "-D"] diff --git a/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/head.yaml b/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/head.yaml new file mode 100644 index 0000000000000000000000000000000000000000..34835e5eb8d7cb92ad3cf7758a47c9e565a7dcf6 --- /dev/null +++ b/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/head.yaml @@ -0,0 +1,25 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: mpi-header + labels: + app: mpi-header +spec: + replicas: 1 + template: + metadata: + labels: + app: mpi-header + spec: + containers: + - image: typhoon1986/paddle-openmpi + name : mpi-header + resources: + limits: + cpu: 500m + memory: 2Gi + requests: + cpu: 500m + memory: 2Gi + ports: + - containerPort: 22 diff --git 
a/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/mpi-nodes.yaml b/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/mpi-nodes.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2fd5cb4d44a25efac68dd8c9195dea9fd8f84a26 --- /dev/null +++ b/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/mpi-nodes.yaml @@ -0,0 +1,26 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: mpi-nodes + labels: + app: mpi-nodes +spec: + replicas: 3 + template: + metadata: + labels: + app: mpi-nodes + spec: + containers: + - image: typhoon1986/paddle-openmpi + name : mpi-nodes + resources: + limits: + cpu: 500m + memory: 2Gi + requests: + cpu: 500m + memory: 2Gi + ports: + - containerPort: 22 + imagePullPolicy: Always diff --git a/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/ssh/config b/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/ssh/config new file mode 100644 index 0000000000000000000000000000000000000000..a9ecad07c39e4a9d6f0572d6cbf77795d99681f2 --- /dev/null +++ b/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/ssh/config @@ -0,0 +1 @@ +StrictHostKeyChecking no diff --git a/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/ssh/id_rsa.mpi b/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/ssh/id_rsa.mpi new file mode 100644 index 0000000000000000000000000000000000000000..23768343edf5258cf525523d471f67071a24f5de --- /dev/null +++ b/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/ssh/id_rsa.mpi @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA7PWLZmgdJ508dD15T6+xqGDvL9Ehzo9SgsnN6xJ+qpUvvOi4 +1axW0AqR4MnPTg/uuvk+x4tUpuufOW4w22UTGjsdvmIVWa9ujLtcRiN3YPY+SU+Y +O5FfqKg7r/hBn+/GMcSoffwSs7vVgmhBBnp/mJh2O1cOAFZEe98/47mbg3/kHBAk +36NOQktaU3l48B38EhBTnjWfcEGm1HcTRPFxXV5Wiko6ZhKFEuHcTVKng4ROtUqE +mgHyI0aB7TAxg4na0ejItsYWEPWGeDOw6ms/4MwylxNosWzHFPW9p4zgLCLNr+b6 +bDDfYKjXZflAuTQtQhLmJUwD9uuYLAijpSE2fQIDAQABAoIBADgcgRET8Gt0CV/B +OtvKz/f+VEVvcWD3gWNlJDTZIVOFllNWjIZUlA4ZoqenQkbK8Q4nfV1FOht4yjCQ +TlN1oMtiWk297i5Zo4UBzPzy4w774I39oh/g8dT/WXr2/5s+7SDV38xNh6Q2A34o +79T35wUcfUrZ93/O7dKjb/6d8hx2FMha0wVKqY4lmG1lQE3bbx3kakec0PdvU5kO +YHKlpqj3pMR7CpMa+4yL/iXFwWYmnK+uu+zw7JR7PwvH1CzrnvW438wjQ1QmYbSx +mHHOE89X67Lsl5hn81qYWBhpwAlBwi1qscsE0cV9GcFyKqWFqZsj5coM9u3CRfvy +lrWe1OUCgYEA+LBUFEd3Hxs4sFiYElJ8R9SAs1udaqPvAl01hTEijJLfYlMMVs/y +rgNN7j22zjDak2f8QdyMJZX7EZdRmdYcHO0csYOwbYvalzcnwk+U3mxmdD3r4xSo +DSvkJ70fogAqUlcVIg2re6fCmZVJQTvMQYTVEM8zQomJRt/Lb2esSfsCgYEA8+zv +44aToe8uqiDs4w8guRW7LCDkTw4z4IVo9JUibIaPjaAs5bZEBXSB43EEywXCR75H +fML0rU1PVvKh1rqcvZdVzm+XMWVr3asPk0sapaiHaTcmyZvJRDxxqbLFp0zRP1T6 +cCtXNFdHWU4KiuKrUi6cDyOKchpfkSZa4seiT+cCgYB+n4FgBfdQPlMB70oW4irn +g/q32CjxuGCk6oKqu5bkzo+xB6obtavSEFqouIGQwO056tNVUY+GP7Rjg5GH663K +yKw4cl3tmS0Gm43B8TVSfw03mKO3rrfWZQe5eCFYIg9qd26KNT2gK435FzsCXQkm +PxUhhu6JrW/ZR2/U3Iur6wKBgADrWLAb1ryagSuE+j+U1AO+kDkHWrTtkcZ72jxp +v3p3O11GSEUJXdJDcSXhTCpTuDq6/dv7hB6PFwh126RKicKxKlKf2wsFndV1Cpb8 +hnovW2tLGOtTmfuW2rrQAKyzvmolsNfxYd/BoHQ2thV16z1hDZeFA8WQUeHjKh6G +sBbrAoGATdtQlaUxx4izua6k02ihkxx/cRYwDl2N8UDvDBHokS7vJFMX8b8NpsGg +zMElnqSpu/pe/0UG7N2MtPF6uyMcX8AZzzcsRkiMkDvWJzYt8Jpf+Eyd/uryF+Yv +yrXaOEY83tm6x/fny5ZaZmk8lNth7bfWywuTMkZLX3fYpWtIeE4= +-----END RSA PRIVATE KEY----- diff --git a/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/ssh/id_rsa.mpi.pub b/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/ssh/id_rsa.mpi.pub new file mode 100644 index 0000000000000000000000000000000000000000..015f2b42e71920e00de090cbb1108d9a12ed5f0c --- /dev/null +++ 
b/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/ssh/id_rsa.mpi.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDs9YtmaB0nnTx0PXlPr7GoYO8v0SHOj1KCyc3rEn6qlS+86LjVrFbQCpHgyc9OD+66+T7Hi1Sm6585bjDbZRMaOx2+YhVZr26Mu1xGI3dg9j5JT5g7kV+oqDuv+EGf78YxxKh9/BKzu9WCaEEGen+YmHY7Vw4AVkR73z/juZuDf+QcECTfo05CS1pTeXjwHfwSEFOeNZ9wQabUdxNE8XFdXlaKSjpmEoUS4dxNUqeDhE61SoSaAfIjRoHtMDGDidrR6Mi2xhYQ9YZ4M7Dqaz/gzDKXE2ixbMcU9b2njOAsIs2v5vpsMN9gqNdl+UC5NC1CEuYlTAP265gsCKOlITZ9 oweidner@peahi diff --git a/paddle/scripts/cluster_train_v2/openmpi/start_mpi_train.sh b/paddle/scripts/cluster_train_v2/openmpi/start_mpi_train.sh new file mode 100644 index 0000000000000000000000000000000000000000..c645495448f9844de5ae9024b6a0f41452522765 --- /dev/null +++ b/paddle/scripts/cluster_train_v2/openmpi/start_mpi_train.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# General trainning configurations + +NICS=eth0 +PADDLE_INIT_PORT=7164 +PADDLE_INIT_PORTS_NUM=1 +PADDLE_INIT_PORTS_NUM_FOR_SPARSE=1 +PADDLE_INIT_PSERVERS=$(cat machines | sed -e ':a' -e 'N' -e '$!ba' -e 's/\n/,/g') +PADDLE_INIT_USE_GPU=False + +PADDLE_INIT_NUM_GRADIENT_SERVERS=${OMPI_COMM_WORLD_SIZE} +PADDLE_INIT_TRAINER_ID=${OMPI_COMM_WORLD_RANK} +PADDLE_CLUSTER_TRAIN=True + +env + +# start pserver +stdbuf -oL nohup paddle pserver --port=$PADDLE_INIT_PORT --ports_num=$PADDLE_INIT_PORTS_NUM \ + --ports_num_for_sparse=$PADDLE_INIT_PORTS_NUM_FOR_SPARSE --nics=$NICS \ + --comment=paddle_cluster_pserver \ + --num_gradient_servers=$PADDLE_INIT_NUM_GRADIENT_SERVERS &> logs/pserver.log & + +# start trainer +# NOTE: train.py will use the above environment variables as configuration +python train.py &> logs/train.log + +# kill background pservers when train finishes +ps -ef | grep pserver | awk '{print $2}' | xargs kill diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index 2ac455d771bf78377ce4ee7d921393d3b3958e3c..a08716c5a559def54bb7b989f250b489f6a805a2 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -141,10 +141,17 @@ RUN sed -i '${APT_MIRROR}' /etc/apt/sources.list EOF fi +if [[ ${WITH_GPU} == "ON" ]]; then + NCCL_DEPS="apt-get install -y libnccl-dev &&" +else + NCCL_DEPS="" +fi + cat >> /paddle/build/Dockerfile <