diff --git a/README.md b/README.md index 577528e7aaf45ce002467590ec66b19afb145920..d06375a444dd65675bdd75baccf8445c1638a87c 100644 --- a/README.md +++ b/README.md @@ -37,6 +37,7 @@ Please refer to our [release announcement](https://github.com/PaddlePaddle/Paddl - Optimized math operations through SSE/AVX intrinsics, BLAS libraries (e.g. MKL, OpenBLAS, cuBLAS) or customized CPU/GPU kernels. + - Optimized CNN networks through MKL-DNN library. - Highly optimized recurrent networks which can handle **variable-length** sequence without padding. - Optimized local and distributed training for models with high dimensional diff --git a/benchmark/cluster/README.md b/benchmark/cluster/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b619613ea7a5b6e940ec735314e8e47338b2c600 --- /dev/null +++ b/benchmark/cluster/README.md @@ -0,0 +1,78 @@ +# Cluster Training Benchmark + +## Setup + +- Platform + - Kubernetes: v1.6.2 + - Linux Kernel: v3.10.0 + +- Resource + - CPU: 10 Cores per Pod + - Memory: 5GB per Pod + +- Docker Image + + We use different base Docker Image to run the benchmark on Kubernetes: + - PaddlePaddle v2: paddlepaddle/paddle:0.11.0 + - PaddlePaddle Fluid: paddlepaddle/paddle:[commit-id] + - TensorFlow: tensorflow/tensorflow:1.5.0-rc0 + +- Model + vgg16 is used in this benchmark. + +## Cases + +- Variable + - Batch Size of training data. + - PServer count of the training job. + - The number of trainers. + +- Invariant + - The resource of trainer/pserver Pod. + +### Measure the Performance for Different Batch Size + +- PServer Count: 40 +- Trainer Count: 100 +- Metrics: mini-batch / sec + +| Batch Size | 32 | 64 | 128 | 256 | +| -- | -- | -- | -- | -- | +| PaddlePaddle Fluid | - | - | - | - | +| PaddlePaddle v2 | - | - | - | - | +| TensorFlow | - | - | - | - | + +### Measure the Performance for Different PServer Count + +- Trainer Count: 100 +- Batch Size: 64 +- Metrics: mini-batch / sec + +| PServer Count | 10 | 20 | 40 | 60 | +| -- | -- | -- | -- | -- | +| PaddlePaddle Fluid | - | - | - | - | +| PaddlePaddle v2 | - | - | - | - | +| TensorFlow | - | - | - | - | + +### Measure Parallel Efficiency By Increasing Trainer Count + +- PServer Count: 20 +- Batch Size: 64 +- Metrics: + +$S = \div(T1, TN)$ + +which S is the ratio of T1 over TN, training time of 1 and N trainers. +The parallel efficiency is: + +$E = \div(S, N)$ + +| Trainer Counter | 1 | 10 | 20 | 30 | 40 | 50 | 60 | 70 | 80 | 90 | 100 | +| -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | +| PaddlePaddle Fluid | - | - | - | - | - | - | - | - | - | - | - | +| PaddlePaddle v2 | - | - | - | - | - | - | - | - | - | - | - | - | +| TensorFlow | - | - | - | - | - | - | - | - | - | - | - | - | - | + +## Reproduce the benchmark + +TODO diff --git a/doc/api/v2/fluid.rst b/doc/api/v2/fluid.rst index 43fc19dc492bbc119f2356034b81c65e443db2fa..5f15cad2b530dfb3702357b3c26885ac2a7b7beb 100644 --- a/doc/api/v2/fluid.rst +++ b/doc/api/v2/fluid.rst @@ -15,4 +15,4 @@ Fluid fluid/param_attr.rst fluid/profiler.rst fluid/regularizer.rst - + fluid/io.rst diff --git a/doc/api/v2/fluid/io.rst b/doc/api/v2/fluid/io.rst new file mode 100644 index 0000000000000000000000000000000000000000..67f68c4e9e16b379207b8de114cdf769e056f78e --- /dev/null +++ b/doc/api/v2/fluid/io.rst @@ -0,0 +1,10 @@ +=========== +IO +=========== + + + +is_parameter +----------- +.. 
autofunction:: paddle.v2.fluid.io.is_parameter + :noindex: diff --git a/doc/api/v2/fluid/layers.rst b/doc/api/v2/fluid/layers.rst index a7c8670f66cc7f319e41155211ead2d89126117f..24bdf08fffd176a799fd12680f4651bb4bd0c9a9 100644 --- a/doc/api/v2/fluid/layers.rst +++ b/doc/api/v2/fluid/layers.rst @@ -38,6 +38,16 @@ elementwise_add .. autofunction:: paddle.v2.fluid.layers.elementwise_add :noindex: +elementwise_sub +--------------- +.. autofunction:: paddle.v2.fluid.layers.elementwise_sub + :noindex: + +elementwise_mul +--------------- +.. autofunction:: paddle.v2.fluid.layers.elementwise_mul + :noindex: + elementwise_div --------------- .. autofunction:: paddle.v2.fluid.layers.elementwise_div @@ -348,3 +358,132 @@ reduce_min .. autofunction:: paddle.v2.fluid.layers.reduce_min :noindex: +logsigmoid +---------- +.. autofunction:: paddle.v2.fluid.layers.logsigmoid + :noindex: + +exp +--- +.. autofunction:: paddle.v2.fluid.layers.exp + :noindex: + +relu +---- +.. autofunction:: paddle.v2.fluid.layers.relu + :noindex: + +tanh +---- +.. autofunction:: paddle.v2.fluid.layers.tanh + :noindex: + +tanh_shrink +----------- +.. autofunction:: paddle.v2.fluid.layers.tanh_shrink + :noindex: + +softshrink +---------- +.. autofunction:: paddle.v2.fluid.layers.softshrink + :noindex: + +sqrt +---- +.. autofunction:: paddle.v2.fluid.layers.sqrt + :noindex: + +abs +---- +.. autofunction:: paddle.v2.fluid.layers.abs + :noindex: + +ceil +---- +.. autofunction:: paddle.v2.fluid.layers.ceil + :noindex: + +floor +----- +.. autofunction:: paddle.v2.fluid.layers.floor + :noindex: + +round +----- +.. autofunction:: paddle.v2.fluid.layers.round + :noindex: + +reciprocal +---------- +.. autofunction:: paddle.v2.fluid.layers.reciprocal + :noindex: + +log +--- +.. autofunction:: paddle.v2.fluid.layers.log + :noindex: + +square +------ +.. autofunction:: paddle.v2.fluid.layers.square + :noindex: + +softplus +-------- +.. autofunction:: paddle.v2.fluid.layers.softplus + :noindex: + +softsign +--------- +.. autofunction:: paddle.v2.fluid.layers.softsign + :noindex: + +brelu +----- +.. autofunction:: paddle.v2.fluid.layers.brelu + :noindex: + +leaky_relu +---------- +.. autofunction:: paddle.v2.fluid.layers.leaky_relu + :noindex: + +soft_relu +--------- +.. autofunction:: paddle.v2.fluid.layers.soft_relu + :noindex: + +elu +---- +.. autofunction:: paddle.v2.fluid.layers.elu + :noindex: + +relu6 +----- +.. autofunction:: paddle.v2.fluid.layers.relu6 + :noindex: + +pow +---- +.. autofunction:: paddle.v2.fluid.layers.pow + :noindex: + +hard_shrink +----------- +.. autofunction:: paddle.v2.fluid.layers.hard_shrink + :noindex: + +thresholded_relu +---------------- +.. autofunction:: paddle.v2.fluid.layers.thresholded_relu + :noindex: + +hard_sigmoid +------------- +.. autofunction:: paddle.v2.fluid.layers.hard_sigmoid + :noindex: + +swish +------ +.. autofunction:: paddle.v2.fluid.layers.swish + :noindex: diff --git a/doc/design/block.md b/doc/design/block.md index fab7f2dc481ae51aa982164dc5048d90fcdc2b0b..907a2def557fd472ac4d679c73447bd9107d1190 100644 --- a/doc/design/block.md +++ b/doc/design/block.md @@ -202,8 +202,8 @@ This `OpDesc` value is in the `ops` field of the `BlockDesc` value representing During the generation of the Protobuf message, the Block should store VarDesc (the Protobuf message which describes Variable) and OpDesc (the Protobuf message which describes Operator). -VarDesc in a block should have its name scope to avoid local variables affect parent block's name scope. 
-Child block's name scopes should inherit the parent's so that OpDesc in child block can reference a VarDesc that stored in parent block. For example: +VarDesc in a block should have its name scope to avoid local variables affecting parent block's name scope. +Child block's name scopes should inherit the parent's so that OpDesc in child block can reference a VarDesc that is stored in the parent block. For example: ```python a = pd.Variable(shape=[20, 20]) diff --git a/doc/design/operator_kernel_type.md b/doc/design/operator_kernel_type.md index aa82e96bf79319f1a57e2ad58aa9826e57be6470..f86e6b7a564ed23f2bddbec25da1c110014f941d 100644 --- a/doc/design/operator_kernel_type.md +++ b/doc/design/operator_kernel_type.md @@ -1,6 +1,6 @@ # Design Doc: The Keys of Operator Kernel Type ## Problem -An operator can have different kernel implementations, and each operator will have a map to store the related kernels. Fluid uses `OpKernelType` as a key to identify a unique Kernel. Before an operator runs, an certain kernel must be chosen by a key of `OpKernelType`. Currently, `OpKernelType` is defined as follows: +An operator can have different kernel implementations, and each operator will have a map to store the related kernels. Fluid uses `OpKernelType` as a key to identify a unique kernel. Before an operator runs, a certain type of kernel must be chosen via a key of `OpKernelType`. Currently, `OpKernelType` is defined as follows: ```cpp struct OpKernelType { @@ -10,13 +10,13 @@ struct OpKernelType { ``` For more details, please refer to [codes](https://github.com/PaddlePaddle/Paddle/blob/2d5ec16bc8a09fb8e0f62c89b116b0cd1d333907/paddle/framework/operator.h#L348-L374) in github. -It contains two keys, `Place` and `DataType`. And these two keys will be hashed to a unique key to represent a certain type of kernel. However, these two keys are not enough. We need a more complete representation of `OpKernelType`. +It contains two keys, `Place` and `DataType`. And these two keys will be hashed to a unique key to represent a certain type of kernel. However, these two keys do not provide enough information. We need a more complete representation of `OpKernelType`. -We often implement a kernel of an operator with some computing library in certain device(place). Please remind that computing library and device are not one-to-one corresponding. A device can have a lot of computing libraries and a computing library can also support several devices. +We often implement a kernel of an operator with some computing library on certain device(place). Please note that computing library and device do not have a one-to-one correspondence. A device can have a lot of computing libraries and a computing library can also support different devices. -For example, Eigen library can support Nvidia GPU/AMD GPU/CPU. And MKLDNN library can support Intel CPU/Intel FPGA. Both `Place` and `Library` should be a key of `OpKernelType`. +For example, Eigen library supports Nvidia GPU/AMD GPU/CPU and MKLDNN library supports Intel CPU/Intel FPGA. Both `Place` and `Library` should be a key of `OpKernelType`. -It's obvious that different DataTypes, like fp64/fp32/int8 will have different kernels. But the data layout of a Tensor will also lead to different implementation. Please refer to the batch norm operator [kernels](https://github.com/PaddlePaddle/Paddle/blob/a948fac4d0ad7e0412d373b8aabeb711c2899563/paddle/operators/batch_norm_op.cc#L180-L209). Data Layout should also be taken into consideration. 
+Different DataTypes, such as fp64/fp32/int8, will obviously have different kernels. But different data layout of a Tensor will also lead to different implementations. Please refer to the batch norm operator [kernels](https://github.com/PaddlePaddle/Paddle/blob/a948fac4d0ad7e0412d373b8aabeb711c2899563/paddle/operators/batch_norm_op.cc#L180-L209) as an example. Data layout should also be taken into consideration. ## Solution @@ -31,17 +31,17 @@ struct OpKernelType { }; ``` -Following is the details: +The details are as follows: ### Place -`Place` is defined as follows: +`Place` is defined as: ```cpp typedef boost::variant Place; ``` -`Place` is to represent the device memory where data is locating. +`Place` represents the device memory where data is located. ### Library @@ -52,10 +52,10 @@ One operator kernel is usually implemented based on one library. `Library` is de enum Library { Plain, MKLDNN, CUDNN }; ``` -We use `Plain` enumerator to represent default library. Since most operators in Fluid are implemented based on `Eigen` library, we take `Eigen` library as the `Plain` enumerator. -A library usually has a corresponding `DeviceContext` which contains some handles needed by computation. Fluid now have two default DeviceContexts in CPU and CUDA, `CPUDeviceContext` and `CUDADeviceContext`. `CPUDeviceContext` contains a Eigen library handle and `CDUADeviceContext` contains a Eigen library handle and cuBLAS handle. +We use `Plain` enumerator to represent default library. Since most operators in Fluid are implemented based on the `Eigen` library, we take `Eigen` library as the `Plain` enumerator. +A library usually has a corresponding `DeviceContext` which contains some handles needed for computation. Fluid now has two default DeviceContexts for CPU and CUDA, namely, `CPUDeviceContext` and `CUDADeviceContext`. `CPUDeviceContext` contains an Eigen library handle and `CDUADeviceContext` contains an Eigen library handle and a cuBLAS handle. -If we want to support new Library, a new enumerator need to be added to `Library` and a new corresponding `LibraryDeviceContext` will be created. +If we want to support new library, a new enumerator need to be added to `Library` and a corresponding new `LibraryDeviceContext` need to be created. ### DataType @@ -67,15 +67,15 @@ If we want to support new Library, a new enumerator need to be added to `Library Actually, a Tensor is a view of a block of memory. Besides a pointer to the memory, we also have to get some other descriptions of this block of memory, such as shape(ddim), stride, and layout. -Different layout leads to different implementation of operator kernel. There are mainly 4 principles we have to follow to support layout in our fluid framework. +Different layout leads to different implementation of the operator kernel. There are mainly 4 principles we have to follow to support layout in our Fluid framework. -- We take layout as a data member of Tensor. Layout is actually a enum variable. If fluid is built with MKLDNN, then, the memory format in MKLDNN will be added into this enum variable too. +- We take layout as a data member of Tensor. Layout is actually a enum variable. If Fluid is built with MKLDNN, then the memory format in MKLDNN will also be added into this enum variable. -- Users have to set layout for input data. And some operators like fill_constant/random, also have to set layout of generating data. Of course, we can have some default layout, like NCHW. +- Users have to set layout for input data. 
And some operators like fill_constant/random, also have to set layout for generating data. Of course, we can have some default layout, like NCHW. -- The inference of Layout is at run-time, not compile-time. +- The inference of Layout is at run-time, not at compile-time. -- Every operator have to implement different kernels for different layouts. Let's take MKLDNN as an example, if we want to implement a MKLDNN convolution operator, we have to realize all the kernels for different layout, list at [here](http://01org.github.io/mkl-dnn/structmkldnn_1_1memory.html). And we will have a special macro to do registering kernels for MKLDNN operators. +- Every operator has to implement different kernels for different layouts. Let's take MKLDNN as an example. If we want to implement an MKLDNN convolution operator, we have to implement all the kernels for different layouts, which are listed [here](http://01org.github.io/mkl-dnn/structmkldnn_1_1memory.html). And we will have a special macro to register kernels for MKLDNN operators. `Layout` is also defined as a enum variable: diff --git a/doc/design/var_desc.md b/doc/design/var_desc.md index 0b2958c1b10ef6a6ce51aa75f61e15a7f2d94b3f..89fa95326c5c4909137544c6b5fd574e1281abe2 100644 --- a/doc/design/var_desc.md +++ b/doc/design/var_desc.md @@ -1,12 +1,12 @@ ## Background -PaddlePaddle divides the description of neural network computation graph into two stages: compile time and runtime. +PaddlePaddle divides the description of neural network computation into two stages: compile time and runtime. At compile time, the neural network computation is described as a `ProgramDesc` whereas at runtime an `Executor` interprets the `ProgramDesc` to compute the operations. -PaddlePaddle use proto message to describe compile time graph because +PaddlePaddle use proto message to describe compile time program because -1. Computation graph should be able to be saved to a file. -1. In distributed training, the graph will be serialized and send to multiple workers. +1. The computation program description must be serializable and saved in a file. +1. During distributed training, the sreialized program will be sent to multiple workers. It should also be possible to break the program into different components, each of which can be executed on different workers. -The computation graph is constructed by Data Node and Operation Node. The concept to represent them is in the table below. +The computation `Program` consists of nested `Blocks`. Each `Block` will consist of data(i.e. `Variable`) and `Operations`. The concept to represent them is in the table below. | |compile time|runtime| |---|---|---| diff --git a/doc/getstarted/build_and_install/build_from_source_cn.rst b/doc/getstarted/build_and_install/build_from_source_cn.rst index 41ac07ca5674d2c121baba77c58226ad328cd681..71904dc41ed0d946867d890cc585e1b88450ca8c 100644 --- a/doc/getstarted/build_and_install/build_from_source_cn.rst +++ b/doc/getstarted/build_and_install/build_from_source_cn.rst @@ -32,6 +32,16 @@ PaddlePaddle主要使用 `CMake `_ 以及GCC, G++作为编译 pip install build/python/dist/*.whl +如果机器中已经安装过PaddlePaddle,有两种方法: + +.. code-block:: bash + + 1. 先卸载之前的版本,再重新安装 + pip uninstall paddlepaddle + pip install build/python/dist/*.whl + + 2. 直接升级到更新的版本 + pip install build/python/dist/*.whl -U .. 
_run_test: diff --git a/doc/getstarted/build_and_install/build_from_source_en.rst b/doc/getstarted/build_and_install/build_from_source_en.rst index 92211aee8c3bc0ae6e1a38311d40ddf92117cac7..27f73b2e2c029b41d514e1612912ed1c335605b6 100644 --- a/doc/getstarted/build_and_install/build_from_source_en.rst +++ b/doc/getstarted/build_and_install/build_from_source_en.rst @@ -36,6 +36,16 @@ machine or copy it to the target machine. pip install build/python/dist/*.whl +If the machine has installed PaddlePaddle before, there are two methods: + +.. code-block:: bash + + 1. uninstall and reinstall + pip uninstall paddlepaddle + pip install build/python/dist/*.whl + + 2. upgrade directly + pip install build/python/dist/*.whl -U .. _run_test: diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md index 3109d72001f13a38a93b9ca39d3f8525c8cea9f1..92996585674b46f45549b972b9f295503b1c7f8c 100644 --- a/doc/howto/dev/new_op_cn.md +++ b/doc/howto/dev/new_op_cn.md @@ -24,7 +24,7 @@ - `framework::OperatorWithKernel`:继承自OperatorBase,Op有计算函数,称作有Kernel。 - `class OpProtoAndCheckerMaker`:描述该Op的输入、输出、属性、注释,主要用于Python API接口生成 -依据是否包含kernel,可以将Op分为两种:包含Kernel的Op和不包含kernel的Op,前者Op的定义继承自`OperatorBase`,后者继承自`OperatorWithKernel`。本教程主要介绍带Kernel的Op如何写,简单总结Op需要包含的内容如下: +依据是否包含kernel,可以将Op分为两种:包含Kernel的Op和不包含kernel的Op,前者Op的定义继承自`OperatorWithKernel`,后者继承自`OperatorBase`。本教程主要介绍带Kernel的Op如何写,简单总结Op需要包含的内容如下: 内容 | 定义位置 diff --git a/doc/howto/index_cn.rst b/doc/howto/index_cn.rst index ccd909770253bb85dbc8a5a2560594076c2f68b0..e0c69f7a6a4043abe999af6c8dd2555178b68424 100644 --- a/doc/howto/index_cn.rst +++ b/doc/howto/index_cn.rst @@ -9,6 +9,7 @@ usage/cmd_parameter/index_cn.rst usage/cluster/cluster_train_cn.md + usage/capi/index_cn.rst 开发标准 -------- diff --git a/doc/howto/usage/capi/compile_paddle_lib_cn.md b/doc/howto/usage/capi/compile_paddle_lib_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..ac5ecffe2ea8ddc3703a32e9a0a8ee83bbe5dd14 --- /dev/null +++ b/doc/howto/usage/capi/compile_paddle_lib_cn.md @@ -0,0 +1,122 @@ +## 编译 PaddlePaddle 预测库 + +### 概述 + +使用 C-API 进行预测依赖于将 PaddlePaddle 核心代码编译成链接库,只需在编译时需配制下面这些编译选项: + +必须配置选项: +- `WITH_C_API`,必须配置为`ON`。 + +推荐配置选项: +- `WITH_PYTHON`,推荐配置为`OFF` +- `WITH_SWIG_PY`,推荐配置为`OFF` +- `WITH_GOLANG`,推荐设置为`OFF` + +可选配置选项: +- `WITH_GPU`,可配置为`ON/OFF` +- `WITH_MKL`,可配置为`ON/OFF` + +对推荐配置中的选项建议按照设置,以避免链接不必要的库。其它可选编译选项按需进行设定。 + +下面的代码片段从github拉取最新代码,配制编译选项(需要将PADDLE_ROOT替换为PaddlePaddle预测库的安装路径): + +```shell +PADDLE_ROOT=/path/of/capi +git clone https://github.com/PaddlePaddle/Paddle.git +cd Paddle +mkdir build +cd build +cmake -DCMAKE_INSTALL_PREFIX=$PADDLE_ROOT \ + -DCMAKE_BUILD_TYPE=Release \ + -DWITH_C_API=ON \ + -DWITH_SWIG_PY=OFF \ + -DWITH_GOLANG=OFF \ + -DWITH_PYTHON=OFF \ + -DWITH_MKL=OFF \ + -DWITH_GPU=OFF \ + .. 
+``` + +执行上述代码生成Makefile文件后,执行:`make && make install`。成功编译后,使用C-API所需的依赖(包括:(1)编译出的PaddlePaddle预测库和头文件;(2)第三方链接库和头文件)均会存放于`PADDLE_ROOT`目录中。 + +编译成功后在 `PADDLE_ROOT` 下会看到如下目录结构(包括了编译出的PaddlePaddle头文件和链接库,以及第三方依赖链接库和头文件(如果需要,由链接方式决定)): + +```text +├── include +│   └── paddle +│   ├── arguments.h +│   ├── capi.h +│   ├── capi_private.h +│   ├── config.h +│   ├── error.h +│   ├── gradient_machine.h +│   ├── main.h +│   ├── matrix.h +│   ├── paddle_capi.map +│   └── vector.h +├── lib +│   ├── libpaddle_capi_engine.a +│   ├── libpaddle_capi_layers.a +│   ├── libpaddle_capi_shared.so +│   └── libpaddle_capi_whole.a +└── third_party + ├── gflags + │   ├── include + │   │   └── gflags + │   │   ├── gflags_completions.h + │   │   ├── gflags_declare.h + │   │   ... + │   └── lib + │   └── libgflags.a + ├── glog + │   ├── include + │   │   └── glog + │   │   ├── config.h + │   │   ... + │   └── lib + │   └── libglog.a + ├── openblas + │   ├── include + │   │   ├── cblas.h + │   │   ... + │   └── lib + │   ... + ├── protobuf + │   ├── include + │   │   └── google + │   │   └── protobuf + │   │   ... + │   └── lib + │   └── libprotobuf-lite.a + └── zlib + ├── include + │   ... + └── lib + ... + +``` + +### 链接说明 + +目前提供三种链接方式: + +1. 链接`libpaddle_capi_shared.so` 动态库 + - 使用 PaddlePaddle C-API 开发预测程序链接`libpaddle_capi_shared.so`时,需注意: + 1. 如果编译时指定编译CPU版本,且使用`OpenBLAS`数学库,在使用C-API开发预测程序时,只需要链接`libpaddle_capi_shared.so`这一个库。 + 1. 如果是用编译时指定CPU版本,且使用`MKL`数学库,由于`MKL`库有自己独立的动态库文件,在使用PaddlePaddle C-API开发预测程序时,需要自己链接MKL链接库。 + 1. 如果编译时指定编译GPU版本,CUDA相关库会在预测程序运行时动态装载,需要将CUDA相关的库设置到`LD_LIBRARY_PATH`环境变量中。 + - 这种方式最为简便,链接相对容易,**在无特殊需求情况下,推荐使用此方式**。 + +2. 链接静态库 `libpaddle_capi_whole.a` + - 使用PaddlePaddle C-API 开发预测程序链接`libpaddle_capi_whole.a`时,需注意: + 1. 需要指定`-Wl,--whole-archive`链接选项。 + 1. 需要显式地链接 `gflags`、`glog`、`libz`、`protobuf` 等第三方库,可在`PADDLE_ROOT/third_party`下找到。 + 1. 如果在编译 C-API 时使用OpenBLAS数学库,需要显示地链接`libopenblas.a`。 + 1. 如果在编译 C-API 是使用MKL数学库,需要显示地链接MKL的动态库。 + +3. 链接静态库 `libpaddle_capi_layers.a`和`libpaddle_capi_engine.a` + - 使用PaddlePaddle C-API 开发预测程序链接`libpaddle_capi_whole.a`时,需注意: + 1. 这种链接方式主要用于移动端预测。 + 1. 为了减少生成链接库的大小把`libpaddle_capi_whole.a`拆成以上两个静态链接库。 + 1. 需指定`-Wl,--whole-archive -lpaddle_capi_layers` 和 `-Wl,--no-whole-archive -lpaddle_capi_engine` 进行链接。 + 1. 第三方依赖库需要按照与方式2同样方法显示地进行链接。 diff --git a/doc/howto/usage/capi/images/csr.png b/doc/howto/usage/capi/images/csr.png new file mode 100644 index 0000000000000000000000000000000000000000..3dc10b8de4f6d3f517624956b1694b689405a031 Binary files /dev/null and b/doc/howto/usage/capi/images/csr.png differ diff --git a/doc/howto/usage/capi/images/sequence_data.png b/doc/howto/usage/capi/images/sequence_data.png new file mode 100644 index 0000000000000000000000000000000000000000..6e47a46b8955dfe977e85898fe3c9f33ed28de7e Binary files /dev/null and b/doc/howto/usage/capi/images/sequence_data.png differ diff --git a/doc/howto/usage/capi/images/workflow_of_CAPI.png b/doc/howto/usage/capi/images/workflow_of_CAPI.png new file mode 100644 index 0000000000000000000000000000000000000000..a4399ade048b3fe10d2d9c714bc34333ca068edb Binary files /dev/null and b/doc/howto/usage/capi/images/workflow_of_CAPI.png differ diff --git a/doc/howto/usage/capi/index_cn.rst b/doc/howto/usage/capi/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..fd774fbc742671c5a8009cb742f2c9d06a525199 --- /dev/null +++ b/doc/howto/usage/capi/index_cn.rst @@ -0,0 +1,9 @@ +PaddlePaddle C-API +================== + +.. 
toctree:: + :maxdepth: 1 + + compile_paddle_lib_cn.md + organization_of_the_inputs_cn.md + workflow_of_capi_cn.md diff --git a/doc/howto/usage/capi/organization_of_the_inputs_cn.md b/doc/howto/usage/capi/organization_of_the_inputs_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..563ec5ca21ec5d75800fa201943d65e6d6fe51ea --- /dev/null +++ b/doc/howto/usage/capi/organization_of_the_inputs_cn.md @@ -0,0 +1,285 @@ +## 输入/输出数据组织 + +这篇文档介绍在使用 PaddlePaddle C-API 时如何组织输入数据,以及如何解析神经网络前向计算的输出结果。 + +### 输入/输出数据类型 +在C-API中,按照基本数据类型在PaddlePaddle内部的定义和实现,输入数据可分为: +1. 一维整型数组 +1. 二维浮点型矩阵 + - 稠密矩阵 + - 稀疏矩阵 + +说明: +1. 一维数组**仅支持整型值**; + - 常用于自然语言处理任务,例如:表示词语在词典中的序号; + - 分类任务中类别标签; +1. 逻辑上高于二维的数据(例如含有多个通道的图片,视频等)在程序实现中都会转化为二维矩阵,转化方法在相应的领域都有通用解决方案,需要使用者自己了解并完成转化; +1. 二维矩阵可以表示行向量和列向量,任何时候如果需要浮点型数组(向量),都应使用C-API中的矩阵来表示,而不是C-API中的一维数组。 +1. 不论是一维整型数组还是二维浮点数矩阵,**为它们附加上序列信息将变成序列输入。PaddlePaddle 会通过判数据是否附带有序列信息来判断一个向量/矩阵是否是一个序列**。当非序列输入时,无需关心和处理序列信息。关于什么是“序列信息”,下文会详细进行介绍。 + +### 基本使用概念 + +- 在PaddlePaddle内部,神经网络中一个计算层的输入/输出被组织为一个 `Argument` 结构体,如果神经网络有多个输入或者多个输入,每一个输入/输入都会对应有自己的`Argument`。 +- `Argument` 并不真正“存储”数据,而是将输入/输出信息有机地组织在一起。 +- 在`Argument`内部由`IVector`(对应着上文提到的一维整型数组)和`Matrix`(对应着上文提到的二维浮点型矩阵)来实际存储数据;由 `Sequence Start Positions` (下文详细解释) 来描述输入/输出的序列信息。 + +- **注**: + 1. 这篇文档之后部分将会统一使用`argument`来特指PaddlePaddle中神经网络计算层一个输入/输出数据。 + 1. 使用`paddle_ivector`来特指PaddlePaddle中的一维整型数组。 + 1. 使用`paddle_matrix`来特指PaddlePaddle中的二维浮点型矩阵。 + +### 组织输入数据 +- 一维整型数组 + + 概念上可以将`paddle_ivector`理解为一个一维的整型数组,通常用于表示离散的类别标签,或是在自然语言处理任务中表示词语在字典中的序号。下面的代码片段创建了含有三个元素`1`、`2`、`3`的`paddle_ivector`。 + ```c + int ids[] = {1, 2, 3}; + paddle_ivector ids_array = + paddle_ivector_create(ids, sizeof(ids) / sizeof(int), false, false); + CHECK(paddle_arguments_set_ids(in_args, 0, ids_array)); + ``` + +- **稠密矩阵** + - 一个`m×n`的稠密矩阵是一个由`m`行`n`列元素排列成的矩形阵列,矩阵里的元素是浮点数。对神经网络来说,矩阵的高度`m`是一次预测接受的样本数目,宽度$n$是神经网络定义时,`paddle.layer.data`的`size`。 + - 下面的代码片段创建了一个高度为1,宽度为`layer_size`的稠密矩阵,矩阵中每个元素的值随机生成。 + + ```c + paddle_matrix mat = paddle_matrix_create( + /* height = batch size */ 1, + /* width = dimensionality of the data layer */ layer_size, + /* whether to use GPU */ false); + + paddle_real* array; + // Get the pointer pointing to the start address of the first row of the + // created matrix. + CHECK(paddle_matrix_get_row(mat, 0, &array)); + + // Fill the matrix with a randomly generated test sample. + srand(time(0)); + for (int i = 0; i < layer_size; ++i) { + array[i] = rand() / ((float)RAND_MAX); + } + + // Assign the matrix to the argument. + CHECK(paddle_arguments_set_value(in_args, 0, mat)); + ``` + +- **稀疏矩阵** + + PaddlePaddle C-API 中 稀疏矩阵使用[CSR(Compressed Sparse Row Format)](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_row_(CSR,_CRS_or_Yale_format))格式存储。下图是CSR存储稀疏矩阵的示意图。 +

图1. 稀疏矩阵存储示意图
+ + CSR存储格式通过:(1)非零元素的值(上图中的`values`);(2)行偏移(上图中的`row offsets`):每一行元素在`values`中的起始偏移,`row offsets`中元素个数总是等于行数 + 1;(3)非零元素的列号(上图中的`column indices`)来确定稀疏矩阵的内容。 + + 在PaddlePaddle C-API中,通过调用以下接口创建稀疏矩阵: + + ```c + PD_API paddle_matrix paddle_matrix_create_sparse( + uint64_t height, uint64_t width, uint64_t nnz, bool isBinary, bool useGpu); + ``` + + 1. 创建稀疏矩阵时需要显示地指定矩阵的(1)高度(`height`,在神经网络中等于一次预测处理的样本数)(2)宽度(`width`,`paddle.layer.data`的`size`)以及(3)非零元个数(`nnz`)。 + 1. 当上述接口第4个参数`isBinary`指定为`true`时,**只需要设置行偏移(`row_offset`)和列号(`colum indices`),不需要提供元素值(`values`)**,这时行偏移和列号指定的元素默认其值为1。 + + 下面的代码片段创建了一个CPU上的二值稀疏矩阵: + + ```c + paddle_matrix mat = paddle_matrix_create_sparse(1, layer_size, nnz, true, false); + int colIndices[] = {9, 93, 109}; // layer_size here is greater than 109. + int rowOffset[] = {0, sizeof(colIndices) / sizeof(int)}; + + CHECK(paddle_matrix_sparse_copy_from(mat, + rowOffset, + sizeof(rowOffset) / sizeof(int), + colIndices, + (colIndices) / sizeof(int), + NULL /*values array is NULL.*/, + 0 /*size of the value arrary is 0.*/)); + CHECK(paddle_arguments_set_value(in_args, 0, mat)); + ``` + 下面的代码片段在创建了一个CPU上的带元素值的稀疏矩阵: + ```c + paddle_matrix mat = paddle_matrix_create_sparse(1, layer_size, nnz, false, false); + int colIndices[] = {9, 93, 109}; // layer_size here is greater than 109. + int rowOffset[] = {0, sizeof(colIndices) / sizeof(int)}; + float values[] = {0.5, 0.5, 0.5}; + + CHECK(paddle_matrix_sparse_copy_from(mat, + rowOffset, + sizeof(rowOffset) / sizeof(int), + colIndices, + sizeof(colIndices) / sizeof(int), + values, + sizeof(values) / sizeof(float))); + ``` + 注意事项: + 1. 移动端预测**不支持**稀疏矩阵及相关的接口。 + +### 组织序列信息 + +多个排成一列的元素(可以是整型、浮点数、浮点数向量等)构成一个序列,元素之间的顺序是序列所携带的重要信息。不同序列可能会含有不同数目个元素。在 PaddlePaddle 中,序列输入/输出数据是在上文介绍的**数据输入(一维整型数组,二维浮点数矩阵)基础上,附加上序列信息**。下面详细解释什么是“序列信息”。 + +我们将神经网络一次计算接受的所有输入样本称之为一个`batch`(可以含有一条或多条样本),每一个序列在整个`batch`中的偏移,就是PaddlePaddle中所指的**序列信息**,称之为“sequence start positions”。PaddlePaddle 支持两种序列类型: + +1. 单层序列 + - 序列中的每一个元素是非序列,是进行计算的基本单位,不可再进行拆分。 + - 例如:自然语言中的句子是一个序列,序列中的元素是词语; +1. 双层序列 + - 序列中的每一个元素又是一个序列。 + - 例如:自然语言中的段落是一个双层序列;段落是由句子构成的序列;句子是由词语构成的序列。 + - 双层序列在处理长序列的任务或是构建层级模型时会发挥作用。 + +这篇文档之后部分会统一使用`sequence_start_positions`来特指:PaddlePaddle中神经网络计算层输入/输出所携带的序列信息。 + +对双层序列来讲,不仅要提供每一个外层序列在整个`batch`中的偏移,每一个外层序列又含有若干个内层序列,需要同时提供每一个内层序列在整个`batch`中的偏移。也就是说:**双层序列需要设置分别为外层序列和内层序列分别设置`sequence_start_positions`信息**。 + +**注:** +1. 不论序列中的元素在内存中占用多少实际存储空间,`sequence_start_positions`表示的偏移是以“序列中的一个元素”作为统计的基本单位,而不是相对`batch`起始存储地址以数据的存储大小为单位的偏移。 +1. 非序列输入不携带`sequence_start_positions`,非序列输入无需构造`sequence_start_positions`。 +1. **不论是单层序列还是双层序列的序列信息,都使用`paddle_ivector`(也就是PaddlePaddle中的一维整型数组)来存储。** + +图2 是PaddlePaddle中单层序列和双层序列存储示意图。 +

图2. 序列输入示意图
+ +- 单层序列 + + 图2 (a) 展示了一个含有4个序列的`batch`输入: + 1. 4个序列的长度分别为:5、3、2、4; + 1. 这时的`sequence_start_positions`为:`[0, 5, 8, 10, 14]`; + 1. 本地训练. 不论数据域是`paddle_ivector`类型还是`paddle_matrix`类型,都可以通过调用下面的接口为原有的数据输入附加上序列信息,使之变为一个单层序列输入,代码片段如下: + + ```c + int seq_pos_array[] = {0, 5, 8, 10, 14}; + paddle_ivector seq_pos = paddle_ivector_create( + seq_pos_array, sizeof(seq_pos_array) / sizeof(int), false, false); + // Suppose the network only has one input data layer. + CHECK(paddle_arguments_set_sequence_start_pos(in_args, 0, 0, seq_pos)); + ``` + +- 双层序列 + + 图2 (b) 展示了一个含有4个序列的`batch`输入; + 1. 4个序列的长度分别为:5、3、2、4;这四个序列又分别含有3、2、1、2个子序列; + 1. 这时的需要同时提供: + - 外层序列在`batch`中的起始偏移`:[0, 5, 8, 10, 14]`; + - 内层序列在`batch`中的起始偏移:`[0, 2, 3, 5, 7, 8, 10, 13, 14]`; + 1. 不论数据域是`paddle_ivector`类型还是`paddle_matrix`类型,这时需要调用创建序列信息和为`argument`设置序列信息的接口**两次**,分别为数据输入添加外层序列和内层序列的序列信息,使之变为一个双层序列输入,代码片段如下: + ```c + // set the sequence start positions for the outter sequences. + int outter_seq_pos_array[] = {0, 5, 8, 10, 14}; + paddle_ivector seq_pos = + paddle_ivector_create(outter_seq_pos_array, + sizeof(outter_pos_array) / sizeof(int), + false, + false); + // The third parameter of this API indicates the sequence level. + // 0 for the outter sequence. 1 for the inner sequence. + // If the input is a sequence not the nested sequence, the third parameter is + // fixed to be 0. + CHECK(paddle_arguments_set_sequence_start_pos(in_args, 0, 0, seq_pos)); + + // set the sequence start positions for the outter sequences. + int inner_seq_pos_array[] = {0, 2, 3, 5, 7, 8, 10, 13, 14}; + paddle_ivector seq_pos = paddle_ivector_create( + inner_pos_array, sizeof(inner_pos_array) / sizeof(int), false, false); + // The third parameter of this API indicates the sequence level. + // 0 for the outter sequence. 1 for the inner sequence. + CHECK(paddle_arguments_set_sequence_start_pos(in_args, 0, 1, seq_pos)); + ``` + +注意事项: +1. 当一个`batch`中含有多个序列,**不支持序列长度为`0`的序列(也就是空输入)** 作为输入。不同计算层对空输入的处理策略有可能不同,潜在会引起未定义行为,或者引起行时错误,请在输入时进行合法性检查。 + +### Python 端数据类型说明 + +下表列出了Python端训练接口暴露的数据类型(`paddle.layer.data`函数`type`字段的取值)对应于调用C-API需要创建的数据类型: + + + +
| Python 端数据类型 | C-API 输入数据类型 |
| --- | --- |
| paddle.data_type.integer_value | 整型数组,无需附加序列信息 |
| paddle.data_type.dense_vector | 浮点型稠密矩阵,无需附加序列信息 |
| paddle.data_type.sparse_binary_vector | 浮点型稀疏矩阵,无需提供非零元的值,默认为1,无需附加序列信息 |
| paddle.data_type.sparse_vector | 浮点型稀疏矩阵,需提供非零元的值,无需附加序列信息 |
| paddle.data_type.integer_value_sequence | 整型数组,需附加序列信息 |
| paddle.data_type.dense_vector_sequence | 浮点型稠密矩阵,需附加序列信息 |
| paddle.data_type.sparse_binary_vector_sequence | 浮点型稀疏矩阵,无需提供非零元的值,默认为1,需附加序列信息 |
| paddle.data_type.sparse_vector_sequence | 浮点型稀疏矩阵,需提供非零元的值,需附加序列信息 |
| paddle.data_type.integer_value_sub_sequence | 整型数组,需附加双层序列信息 |
| paddle.data_type.dense_vector_sub_sequence | 浮点型稠密矩阵,需附加双层序列信息 |
| paddle.data_type.sparse_binary_vector_sub_sequence | 浮点型稀疏矩阵,无需提供非零元的值,默认为1,需附加双层序列信息 |
| paddle.data_type.sparse_vector_sub_sequence | 浮点型稀疏矩阵,需提供非零元的值,需附加双层序列信息 |
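下面给出一个简化的示意代码(仅作说明,假设网络只有一个输入,且 `in_args` 已按前文方式创建并完成 resize),展示表中 `paddle.data_type.integer_value_sequence` 对应的 C-API 输入组织方式:先设置一维整型数组,再为其附加单层序列信息:

```c
// 示意:组织一个 paddle.data_type.integer_value_sequence 类型的输入。
// 假设 5 个整型元素构成两条序列,长度分别为 2 和 3。
int ids[] = {1, 2, 3, 4, 5};
paddle_ivector ids_array =
    paddle_ivector_create(ids, sizeof(ids) / sizeof(int), false, false);
CHECK(paddle_arguments_set_ids(in_args, 0, ids_array));

// 附加单层序列信息(sequence_start_positions)。
int seq_pos_array[] = {0, 2, 5};
paddle_ivector seq_pos = paddle_ivector_create(
    seq_pos_array, sizeof(seq_pos_array) / sizeof(int), false, false);
CHECK(paddle_arguments_set_sequence_start_pos(in_args, 0, 0, seq_pos));
```

其余 `*_sequence` / `*_sub_sequence` 类型同理:在对应的整型数组或稠密/稀疏矩阵输入基础上,按上文"组织序列信息"一节调用 `paddle_arguments_set_sequence_start_pos` 附加一层或两层序列信息即可。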
+ + +### 输出数据 + +PaddlePaddle中一个计算层的输出数据组织方式和输入数据组织方式完全相同。一个输出数据同样被组织为一个`argument`,`argument`通过`paddle_matrix`或`paddle_ivector`存数数据,如果输出是一个序列,那么会携带有`sequence_start_positions`信息。调用C-API相关接口,读取需要的结果即可。 + +### 总结 + +- 在PaddlePaddle内部,神经网络中一个计算层的输入/输出被组织为`argument`。 +- `argument`并不真正“存储”数据,而是将输入/输出信息有机地组织在一起。 +- 在`argument`内部由`paddle_ivector`(一维整型数组)和`paddle_matrix`(二维浮点型矩阵)来实际存储数据。 +如果是一个序列输入/输出由 `sequence start positions` 来记录输入/输出的序列信息。 + +于是,在组织神经网络输入时,需要思考完成以下工作: +1. 为每一个输入/输出创建`argument`。 + - C-API 中操作`argument`的接口请查看[argument.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/arguments.h)。 +1. 为每一个`argument`创建`paddle_matrix`或者`paddle_ivector`来存储数据。 + - C-API 中操作`paddle_ivector`的接口请查看 [vector.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/vector.h)。 + - C-API 中操作`paddle_matrix`的接口请查看[matrix.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/matrix.h)。 +1. 如果输入是序列数据,需要创建并填写`sequence_start_positions`信息。 + - 通过调用 [`paddle_arguments_set_sequence_start_pos`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/arguments.h#L137) 来为一个`argument`添加序列信息。 + - 通过调用 [`paddle_arguments_get_sequence_start_pos`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/arguments.h#L150) 来读取一个`argument`添加序列信息。 + - 接口说明请查看 [argument.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/arguments.h) 文件。 diff --git a/doc/howto/usage/capi/workflow_of_capi_cn.md b/doc/howto/usage/capi/workflow_of_capi_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..e0a42fff12cf0f53dee18165e059150861524f74 --- /dev/null +++ b/doc/howto/usage/capi/workflow_of_capi_cn.md @@ -0,0 +1,119 @@ +## C-API 使用流程 + +这篇文档介绍 PaddlePaddle C-API 整体使用流程。 + +### 使用流程 + +使用 C-API 的工作流程如图1所示,分为(1)准备预测模型和(2)预测程序开发两大部分。 + +

图1. C-API使用流程示意图
+ +- 准备预测模型 + 1. 只将神经网络结构进行序列化。 + - 只对神经网络结构进行序列化,加载模型需同时指定:网络结构的序列化结果和模型参数存储目录。 + 1. 将网络结构定义和训练结束存储下来的模型参数文件(多个)合并入一个文件。 + - 神经网络模型结构和训练好的模型将被序列化合并入一个文件。 + - 预测时只需加载一个文件便于发布。 + - **注意**:以上两种方式只需选择其一即可。 +- 调用 C-API 开发预测序 + 1. 初始化PaddlePaddle运行环境。 + 1. 加载预测模型。 + 1. 创建神经网络输入,组织输入数据。 + 1. 进行前向计算,获得计算结果。 + 1. 清理和结束。 + +### 准备预测模型 + +准备预测模型部分,我们以手写数字识别任务为例进行介绍。手写数字识别任务定义了一个含有[两个隐层的简单全连接网络](https://github.com/PaddlePaddle/book/blob/develop/02.recognize_digits/README.cn.md#softmax回归softmax-regression),网络接受一幅图片作为输入,将图片分类到 0 ~ 9 类别标签之一。完整代码可以查看[此目录](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense) 中的相关脚本。 + +调用C-API开发预测程序需要一个训练好的模型,运行[MNIST手写数字识别目录](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense)下的[mnist_v2.py](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/examples/model_inference/dense/mnist_v2.py)脚本,在终端执行`python mnist_v2.py`,会使用 PaddlePaddle 内置的 [MNIST 数据集](http://yann.lecun.com/exdb/mnist/)进行训练。训练好的模型默认保存在当前运行目录下的`models`目录中。 + +下面,我们将训练结束后存储下来的模型转换成预测模型。 + +1. 序列化神经网络模型配置 + + PaddlePaddle 使用 protobuf 来传输网络配置文件中定义的网络结构和相关参数,使用 C-API 进行预测时,需要将网络结构使用 protobuf 进行序列化,写入文件中。 + + 调用[`paddle.utils.dump_v2_config`](https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/utils/dump_v2_config.py)中的`dump_v2_config`函数能够将使用 PaddlePaddle V2 API 定义的神经网络结构 dump 到指定文件中,示例代码如下: + + ```python + from paddle.utils.dump_v2_config import dump_v2_config + from mnist_v2 import network + + predict = network(is_infer=True) + dump_v2_config(predict, "trainer_config.bin", True) + ``` + + 对[手写数字识别](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense)这个示例,[`mnist_v2.py`](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense/mnist_v2.py)脚本集成了序列化神经网络结构的过程,可以直接运行 `python mnist_v2.py --task dump_config` 对神经网络结构进行序列化,结果会写入当前运行目录下的`trainer_config.bin`文件中。 + + 使用这种方式,需要**在运行时将神经网络的多个可学习参数放在同一个目录中**,C-API可以通过分别指定序列化后的网络结构文件和参数目录来加载训练好的模型。 + +2. 合并模型文件(可选) + + 一些情况为了便于发布,希望能够将序列化后的神经网络结构和训练好的模型参数打包进一个文件。对于这样的需求,可以使用`paddle.utils.merge_model`中的`merge_v2_model`接口对神经网络结构和训练好的参数进行序列化,将序列化结果写入一个文件内。 + + 代码示例如下: + + ```python + from paddle.utils.merge_model import merge_v2_modelss + from mnist_v2 import network + + net = network(is_infer=True) + param_file = "models/params_pass_4.tar" + output_file = "output.paddle.model" + merge_v2_model(net, param_file, output_file) + ``` + 对[手写数字识别](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense)这个示例,可直接运行 `python` [merge_v2_model.py](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense/merge_v2_model.py)。序列化结果会写入当前运行目录下的`output.paddle.model`文件中。使用这种方式,运行时C-API可以通过指定`output.paddle.model`文件的路径来加载预测模型。 + +#### 注意事项 +1. 为使用C-API,在调用`dump_v2_config`序列化神经网络结构时,参数`binary`必须指定为`True`。 +1. **预测使用的网络结构往往不同于训练**,通常需要去掉网络中的:(1)类别标签层;(2)损失函数层;(3)`evaluator`等,只留下核心计算层,请注意是否需要修改网络结构。 +1. 预测时,可以获取网络中定义的任意多个(大于等于一个)层前向计算的结果,需要哪些层的计算结果作为输出,就将这些层加入一个Python list中,作为调用`dump_v2_config`的第一个参数。 + +### 编写预测代码 + +预测代码更多详细示例代码请参考[C-API使用示例](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference) 目录下的代码示例。这一节对图1中预测代码编写的5个步骤进行介绍和说明。 + +#### step 1. 初始化PaddlePaddle运行环境 +第一步需调用[`paddle_init`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/main.h#L27) 初始化PaddlePaddle运行环境,该接口接受两个参数:参数的个数和参数列表。 + +#### step2. 
加载模型 + +这里介绍C-API使用中的一个重要概念:Gradient Machine。 + +概念上,在 PaddlePaddle 内部,一个GradientMachine类的对象管理着一组计算层(PaddlePaddle Layers)来完成前向和反向计算,并处理与之相关的所有细节。在调用C-API预测时,只需进行前向计算而无需调用反向计算。这篇文档之后部分会使用`gradient machine`来特指调用PaddlePaddle C-API创建的GradientMachine类的对象。每一个 `gradient machine` 都会管理维护一份训练好的模型,下面是C-API提供的,两种常用的模型加载方式: + +1. 调用[`paddle_gradient_machine_load_parameter_from_disk`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/gradient_machine.h#L61)接口,从磁盘加载预测模型。这时`gradient machine`会独立拥有一份训练好的模型; +1. 调用[`paddle_gradient_machine_create_shared_param`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/gradient_machine.h#L88)接口,与其它`gradient machine`的共享已经加载的预测模型。这种情况多出现在使用多线程预测时,通过多个线程共享同一个模型来减少内存开销。可参考[此示例](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/examples/model_inference/multi_thread/main.c)。 + +- 注意事项 + 1. 使用PaddlePaddle V2 API训练,模型中所有可学习参数会被存为一个压缩文件,需要手动进行解压,将它们放在同一目录中,C-API不会直接加载 V2 API 存储的压缩文件。 + 1. 如果使用`merge model`方式将神经网络结构和训练好的参数序列化到一个文件,请参考此[示例](https://github.com/PaddlePaddle/Mobile/blob/develop/Demo/linux/paddle_image_recognizer.cpp#L59)。 + 1. 通过灵活使用以上两个接口,加载模型可其它多种方式,例如也可在程序运行过程中再加载另外一个模型。 + +#### step 3. 创建神经网络输入,组织输入数据 + +基本使用概念: +- 在PaddlePaddle内部,神经网络中一个计算层的输入输出被组织为一个 `Argument` 结构体,如果神经网络有多个输入或者多个输出,每一个输入/输出都会对应有自己的`Argument`。 +- `Argument` 并不真正“存储”数据,而是将输入/输出数据有机地组织在一起。 +- 在`Argument`内部由:1. `Matrix`(二维矩阵,存储浮点类型输入/输出);2. `IVector`(一维数组,**仅用于存储整型值**,多用于自然语言处理任务)来实际存储数据。 + +C-API支持的所有输入数据类型和他们的组织方式,请参考“输入/输出数据组织”一节。 + +这篇文档的之后部分会使用`argument`来特指PaddlePaddle C-API中神经网络的一个输入/输出,使用`paddle_matrix`**特指**`argument`中用于存储数据的`Matrix`类的对象。 + +在组织神经网络输入,获取输出时,需要思考完成以下工作: +1. 为每一个输入/输出创建`argument`; +1. 为每一个`argument`创建`paddle_matrix`来存储数据; + +与输入不同的是,不需在使用C-API时为输出`argument`的`paddle_matrix`对象分配空间。前向计算之后PaddlePaddle内部已经分配/管理了每个计算层输出的存储空间。 + +#### step 4. 前向计算 + +完成上述准备之后,通过调用 [`paddle_gradient_machine_forward`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/gradient_machine.h#L73) 接口完成神经网络的前向计算。 + +#### step 5. 清理 + +结束预测之后,对使用的中间变量和资源进行清理和释放。 diff --git a/paddle/capi/examples/model_inference/dense/main.c b/paddle/capi/examples/model_inference/dense/main.c index 5eeaf7e31fac7c9ed0b9269e74a7e467bde155ef..376cd46fb09a156d426453986c87dcff6e2f71dd 100644 --- a/paddle/capi/examples/model_inference/dense/main.c +++ b/paddle/capi/examples/model_inference/dense/main.c @@ -3,59 +3,82 @@ #include "../common/common.h" +// Modify this path as needed. #define CONFIG_BIN "./trainer_config.bin" +// Modify this path as needed. +// This demo assumes that merged model is not used, then this path is the +// directory storing all the trained parameters. +// If the model is trained by PaddlePaddle V2 API, the model is saved as +// a compressed file. You need to uncompress the compressed file first. +#define MODEL_PATH "models/pass_4" int main() { - // Initalize Paddle + // Initalize the PaddlePaddle runtime environment. char* argv[] = {"--use_gpu=False"}; CHECK(paddle_init(1, (char**)argv)); - // Reading config binary file. It is generated by `convert_protobin.sh` + // Read the binary configuration file generated by `convert_protobin.sh` long size; void* buf = read_config(CONFIG_BIN, &size); - // Create a gradient machine for inference. + // Create the gradient machine for inference. paddle_gradient_machine machine; CHECK(paddle_gradient_machine_create_for_inference(&machine, buf, (int)size)); - CHECK(paddle_gradient_machine_randomize_param(machine)); - // Loading parameter. Uncomment the following line and change the directory. 
- // CHECK(paddle_gradient_machine_load_parameter_from_disk(machine, - // "./some_where_to_params")); + // Load the trained model. Modify the parameter MODEL_PATH to set the correct + // path of the trained model. + CHECK(paddle_gradient_machine_load_parameter_from_disk(machine, MODEL_PATH)); + + // Inputs and outputs of the network are organized as paddle_arguments object + // in C-API. In the comments below, "argument" specifically means one input of + // the neural network in PaddlePaddle C-API. paddle_arguments in_args = paddle_arguments_create_none(); - // There is only one input of this network. + // There is only one data layer in this demo MNIST network, invoke this + // function to create one argument. CHECK(paddle_arguments_resize(in_args, 1)); - // Create input matrix. - paddle_matrix mat = paddle_matrix_create(/* sample_num */ 1, - /* size */ 784, - /* useGPU */ false); - srand(time(0)); + // Each argument needs one matrix or one ivector (integer vector, for sparse + // index input, usually used in NLP task) to holds the real input data. + // In the comments below, "matrix" specifically means the object needed by + // argument to hold the data. Here we create the matrix for the above created + // agument to store the testing samples. + paddle_matrix mat = + paddle_matrix_create(/* height = batch size */ 1, + /* width = dimensionality of the data layer */ 784, + /* whether to use GPU */ false); paddle_real* array; - - // Get First row. + // Get the pointer pointing to the start address of the first row of the + // created matrix. CHECK(paddle_matrix_get_row(mat, 0, &array)); + // Fill the matrix with a randomly generated test sample. + srand(time(0)); for (int i = 0; i < 784; ++i) { array[i] = rand() / ((float)RAND_MAX); } + // Assign the matrix to the argument. CHECK(paddle_arguments_set_value(in_args, 0, mat)); + // Create the output argument. paddle_arguments out_args = paddle_arguments_create_none(); + + // Invoke the forward computation. CHECK(paddle_gradient_machine_forward(machine, in_args, out_args, - /* isTrain */ false)); - paddle_matrix prob = paddle_matrix_create_none(); + /* is train taks or not */ false)); + // Create the matrix to hold the forward result of the neural network. + paddle_matrix prob = paddle_matrix_create_none(); + // Access the matrix of the output argument, the predicted result is stored in + // which. CHECK(paddle_arguments_get_value(out_args, 0, prob)); uint64_t height; uint64_t width; - CHECK(paddle_matrix_get_shape(prob, &height, &width)); CHECK(paddle_matrix_get_row(prob, 0, &array)); @@ -68,6 +91,7 @@ int main() { } printf("\n"); + // The cleaning up. 
CHECK(paddle_matrix_destroy(prob)); CHECK(paddle_arguments_destroy(out_args)); CHECK(paddle_matrix_destroy(mat)); diff --git a/paddle/capi/examples/model_inference/dense/merge_v2_model.py b/paddle/capi/examples/model_inference/dense/merge_v2_model.py new file mode 100644 index 0000000000000000000000000000000000000000..c030d572cbdb15cb5e90f2685723a81efb230f81 --- /dev/null +++ b/paddle/capi/examples/model_inference/dense/merge_v2_model.py @@ -0,0 +1,8 @@ +from paddle.utils.merge_model import merge_v2_model + +from mnist_v2 import network + +net = network(is_infer=True) +param_file = "models/params_pass_4.tar" +output_file = "output.paddle.model" +merge_v2_model(net, param_file, output_file) diff --git a/paddle/capi/examples/model_inference/dense/mnist_v2.py b/paddle/capi/examples/model_inference/dense/mnist_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..ee28111153ca2cf24b9789452c65a0f4c7b64538 --- /dev/null +++ b/paddle/capi/examples/model_inference/dense/mnist_v2.py @@ -0,0 +1,117 @@ +import os +import sys +import gzip +import logging +import argparse +from PIL import Image +import numpy as np + +import paddle.v2 as paddle +from paddle.utils.dump_v2_config import dump_v2_config + +logger = logging.getLogger("paddle") +logger.setLevel(logging.INFO) + + +def multilayer_perceptron(img, layer_size, lbl_dim): + for idx, size in enumerate(layer_size): + hidden = paddle.layer.fc(input=(img if not idx else hidden), + size=size, + act=paddle.activation.Relu()) + return paddle.layer.fc(input=hidden, + size=lbl_dim, + act=paddle.activation.Softmax()) + + +def network(input_dim=784, lbl_dim=10, is_infer=False): + images = paddle.layer.data( + name='pixel', type=paddle.data_type.dense_vector(input_dim)) + + predict = multilayer_perceptron( + images, layer_size=[128, 64], lbl_dim=lbl_dim) + + if is_infer: + return predict + else: + label = paddle.layer.data( + name='label', type=paddle.data_type.integer_value(lbl_dim)) + return paddle.layer.classification_cost(input=predict, label=label) + + +def main(task="train", use_gpu=False, trainer_count=1, save_dir="models"): + if task == "train": + if not os.path.exists(save_dir): + os.mkdir(save_dir) + + paddle.init(use_gpu=use_gpu, trainer_count=trainer_count) + cost = network() + parameters = paddle.parameters.create(cost) + optimizer = paddle.optimizer.Momentum( + learning_rate=0.1 / 128.0, + momentum=0.9, + regularization=paddle.optimizer.L2Regularization(rate=0.0005 * 128)) + + trainer = paddle.trainer.SGD(cost=cost, + parameters=parameters, + update_equation=optimizer) + + def event_handler(event): + if isinstance(event, paddle.event.EndIteration): + if event.batch_id % 100 == 0: + logger.info("Pass %d, Batch %d, Cost %f, %s" % + (event.pass_id, event.batch_id, event.cost, + event.metrics)) + if isinstance(event, paddle.event.EndPass): + with gzip.open( + os.path.join(save_dir, "params_pass_%d.tar" % + event.pass_id), "w") as f: + trainer.save_parameter_to_tar(f) + + trainer.train( + reader=paddle.batch( + paddle.reader.shuffle( + paddle.dataset.mnist.train(), buf_size=8192), + batch_size=128), + event_handler=event_handler, + num_passes=5) + elif task == "dump_config": + predict = network(is_infer=True) + dump_v2_config(predict, "trainer_config.bin", True) + else: + raise RuntimeError(("Error value for parameter task. 
" + "Available options are: train and dump_config.")) + + +def parse_cmd(): + parser = argparse.ArgumentParser( + description="PaddlePaddle MNIST demo for CAPI.") + parser.add_argument( + "--task", + type=str, + required=False, + help=("A string indicating the taks type. " + "Available options are: \"train\", \"dump_config\"."), + default="train") + parser.add_argument( + "--use_gpu", + type=bool, + help=("A bool flag indicating whether to use GPU device or not."), + default=False) + parser.add_argument( + "--trainer_count", + type=int, + help=("This parameter is only used in training task. It indicates " + "how many computing threads are created in training."), + default=1) + parser.add_argument( + "--save_dir", + type=str, + help=("This parameter is only used in training task. It indicates " + "path of the directory to save the trained models."), + default="models") + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_cmd() + main(args.task, args.use_gpu, args.trainer_count, args.save_dir) diff --git a/paddle/capi/examples/model_inference/sparse_binary/main.c b/paddle/capi/examples/model_inference/sparse_binary/main.c index 8ba67aee560239d3050c7f40198d20df99ec370e..029b94ee63ba282aa48193ffd4f625657ddc3a60 100644 --- a/paddle/capi/examples/model_inference/sparse_binary/main.c +++ b/paddle/capi/examples/model_inference/sparse_binary/main.c @@ -1,5 +1,6 @@ #include #include + #include "../common/common.h" #define CONFIG_BIN "./trainer_config.bin" @@ -9,16 +10,18 @@ int main() { char* argv[] = {"--use_gpu=False"}; CHECK(paddle_init(1, (char**)argv)); - // Reading config binary file. It is generated by `convert_protobin.sh` + // Read the binary configuration file which is generated by + // `convert_protobin.sh` long size; void* buf = read_config(CONFIG_BIN, &size); - // Create a gradient machine for inference. + // Create the gradient machine for inference. paddle_gradient_machine machine; CHECK(paddle_gradient_machine_create_for_inference(&machine, buf, (int)size)); CHECK(paddle_gradient_machine_randomize_param(machine)); - // Loading parameter. Uncomment the following line and change the directory. + // Load the trained parameters. Uncomment the following line and change the + // directory as needed. // CHECK(paddle_gradient_machine_load_parameter_from_disk(machine, // "./some_where_to_params")); paddle_arguments in_args = paddle_arguments_create_none(); @@ -26,7 +29,7 @@ int main() { // There is only one input of this network. CHECK(paddle_arguments_resize(in_args, 1)); - // Create input matrix. + // Create the input matrix. 
paddle_matrix mat = paddle_matrix_create_sparse(1, 784, 3, true, false); srand(time(0)); paddle_real* array; diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index ed5f6310f4e1212844948dc8c2555e527b4d10e8..597ea959f230d88350796cef05b7d6f2a42e594a 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -47,7 +47,7 @@ cc_test(op_proto_maker_test SRCS op_proto_maker_test.cc DEPS op_proto_maker) cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto) cc_library(shape_inference SRCS shape_inference.cc DEPS ddim attribute device_context) cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope glog - shape_inference data_transform) + shape_inference data_transform lod_tensor) cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry init) cc_library(proto_desc SRCS var_desc.cc op_desc.cc block_desc.cc program_desc.cc DEPS shape_inference op_info operator glog) diff --git a/paddle/framework/data_device_transform.cc b/paddle/framework/data_device_transform.cc index b3fd48ae12c368ac7d83c4f3b6e2fb1939932ac0..d38d87927fc7ee0aa32eabff5cf83d7c4ca7b2b0 100644 --- a/paddle/framework/data_device_transform.cc +++ b/paddle/framework/data_device_transform.cc @@ -31,15 +31,14 @@ static const platform::DeviceContext* GetDeviceContext( } } -Tensor* DeviceTransform(const Tensor& in, const platform::Place& dst_place) { +void DeviceTransform(const Tensor& in, const platform::Place& dst_place, + Tensor* out) { VLOG(3) << "DeviceTransform in, src_place " << in.place() << " dst_place: " << dst_place; - Tensor* out = new Tensor(); auto* dev_ctx = GetDeviceContext(in.place(), dst_place); dev_ctx->Wait(); Copy(in, dst_place, *dev_ctx, out); dev_ctx->Wait(); - return out; } } // namespace framework diff --git a/paddle/framework/data_device_transform.h b/paddle/framework/data_device_transform.h index bebf0d1b320183f46ab226dc6493ba09a365fc35..b21ed0be34ad868ae4a404e913e9623db308b530 100644 --- a/paddle/framework/data_device_transform.h +++ b/paddle/framework/data_device_transform.h @@ -21,7 +21,8 @@ limitations under the License. */ namespace paddle { namespace framework { -Tensor* DeviceTransform(const Tensor& in, const platform::Place& dst_place); +void DeviceTransform(const Tensor& in, const platform::Place& dst_place, + Tensor* out); } // namespace framework } // namespace paddle diff --git a/paddle/framework/data_layout.h b/paddle/framework/data_layout.h index 3ab976ecac4dfb0571ebf5dc93f726939da01116..31817251ed09a7a1da7223c1e99b2eb369a3de30 100644 --- a/paddle/framework/data_layout.h +++ b/paddle/framework/data_layout.h @@ -14,7 +14,9 @@ limitations under the License. 
*/ #pragma once -#include +#include +#include + #include "paddle/platform/enforce.h" namespace paddle { @@ -27,12 +29,19 @@ enum class DataLayout { }; inline DataLayout StringToDataLayout(const std::string& str) { - if (str == "NHWC" || str == "nhwc") { + std::string s(str); + for (size_t i = 0; i < s.size(); ++i) { + s[i] = toupper(s[i]); + } + + if (s == "NHWC") { return DataLayout::kNHWC; - } else if (str == "NCHW" || str == "nchw") { + } else if (s == "NCHW") { return DataLayout::kNCHW; + } else if (s == "ANYLAYOUT") { + return DataLayout::kAnyLayout; } else { - PADDLE_THROW("Unknown storage order string: %s", str); + PADDLE_THROW("Unknown storage order string: %s", s); } } @@ -49,7 +58,7 @@ inline std::string DataLayoutToString(const DataLayout& data_layout) { } } -inline std::ostream& operator<<(std::ostream& out, DataLayout l) { +inline std::ostream& operator<<(std::ostream& out, const DataLayout& l) { out << DataLayoutToString(l); return out; } diff --git a/paddle/framework/data_transform.cc b/paddle/framework/data_transform.cc index e56edb95396ef8de44da95ce795161d7cf1debc6..d826f0edace6d5afee5cd83f6e65d6dbaefae874 100644 --- a/paddle/framework/data_transform.cc +++ b/paddle/framework/data_transform.cc @@ -19,16 +19,14 @@ limitations under the License. */ namespace paddle { namespace framework { -Tensor* DataTransform(const OpKernelType& expected_kernel_type, - const OpKernelType& kernel_type_for_var, - const Tensor& input_tensor) { - Tensor* out = nullptr; +void DataTransform(const OpKernelType& expected_kernel_type, + const OpKernelType& kernel_type_for_var, + const Tensor& input_tensor, Tensor* out) { if (!platform::is_same_place(kernel_type_for_var.place_, expected_kernel_type.place_)) { - out = DeviceTransform(input_tensor, expected_kernel_type.place_); + DeviceTransform(input_tensor, expected_kernel_type.place_, out); } PADDLE_ENFORCE_NOT_NULL(out, "out should not be null"); - return out; } void CopyVariableWithTensor(const Variable& in_var, const Tensor& tensor, diff --git a/paddle/framework/data_transform.h b/paddle/framework/data_transform.h index ee95c7e8564d0392c8f25fce161d0f722c04761a..a4b78902379d5eb89f92ec47655ff93b49d0bfab 100644 --- a/paddle/framework/data_transform.h +++ b/paddle/framework/data_transform.h @@ -30,9 +30,9 @@ limitations under the License. */ namespace paddle { namespace framework { -Tensor* DataTransform(const OpKernelType& expected_kernel_type, - const OpKernelType& kernel_type_for_var, - const Tensor& input_tensor); +void DataTransform(const OpKernelType& expected_kernel_type, + const OpKernelType& kernel_type_for_var, + const Tensor& input_tensor, Tensor* out); void CopyVariableWithTensor(const Variable& in_var, const Tensor& tensor, Variable& out_var); diff --git a/paddle/framework/init.cc b/paddle/framework/init.cc index e12bac1d78e3f6bbc46849c06b53e3b93e147cfc..4ef82a541efaa35bcf831d5122570154f2fa2423 100644 --- a/paddle/framework/init.cc +++ b/paddle/framework/init.cc @@ -11,6 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include // for strdup #include #include @@ -60,7 +61,9 @@ void InitDevices() { } void InitGLOG(const std::string &prog_name) { - google::InitGoogleLogging(prog_name.c_str()); + // glog will not hold the ARGV[0] inside. + // Use strdup to alloc a new string. 
+ google::InitGoogleLogging(strdup(prog_name.c_str())); google::InstallFailureSignalHandler(); } diff --git a/paddle/framework/lod_tensor.cc b/paddle/framework/lod_tensor.cc index 7ae94c646537e0d7c4687b949a1b06cd3a7f3404..87a57d095141cc456af2cbabbc227715a02375e9 100644 --- a/paddle/framework/lod_tensor.cc +++ b/paddle/framework/lod_tensor.cc @@ -69,6 +69,12 @@ std::ostream &operator<<(std::ostream &os, const LoDTensor &t) { return os; } +std::string LoDToString(const LoD &lod) { + std::ostringstream stream; + stream << lod; + return stream.str(); +} + LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin, size_t elem_end) { PADDLE_ENFORCE_LT(level, in.size()); diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index 37753f5f4ddea4755ad6211007c367de00aad754..88ea78f2682b2ffc962c9663f6b3c636dedb931d 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -60,6 +60,8 @@ using LoD = std::vector>; std::ostream& operator<<(std::ostream& os, const LoD& lod); std::ostream& operator<<(std::ostream& os, const LoDTensor& t); +std::string LoDToString(const LoD& lod); + LoD SliceInLevel(const LoD& in, size_t level, size_t elem_begin, size_t elem_end); /* diff --git a/paddle/framework/op_kernel_type.h b/paddle/framework/op_kernel_type.h index 053897784c1c4350deadf39e2a009220d38f65f9..312bd5f892ac23c847c87388c9cadf2161028d3e 100644 --- a/paddle/framework/op_kernel_type.h +++ b/paddle/framework/op_kernel_type.h @@ -85,5 +85,10 @@ inline std::string KernelTypeToString(const OpKernelType& kernel_key) { return stream.str(); } +inline bool TransFromNeeded(const OpKernelType& l, const OpKernelType& r) { + return (!platform::places_are_same_class(l.place_, r.place_)) || + (l.data_type_ != r.data_type_) || (l.data_layout_ != r.data_layout_); +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc index 66f07b6757fe1fe613e61ac66057be43ef5aced7..341da8befd45abd1a3fc86581be33319a8791567 100644 --- a/paddle/framework/op_registry_test.cc +++ b/paddle/framework/op_registry_test.cc @@ -368,24 +368,6 @@ TEST(OperatorRegistrar, OpWithMultiKernel) { // TODO(qiao) add priority back // use all available kernels - paddle::framework::UseALL(); op->Run(scope, cuda_place); EXPECT_EQ(op_test_value, -10); - - // remove cuda kernels - paddle::framework::UseCPU(); - op->Run(scope, cpu_place); - - EXPECT_EQ(op_test_value, -9); - - // add cuda kernels - paddle::framework::UseCUDA(); - op->Run(scope, cuda_place); - - EXPECT_EQ(op_test_value, -10); - - // use cudnn kernel - paddle::framework::UseCUDNN(); - op->Run(scope, cuda_place); - EXPECT_EQ(op_test_value, -20); } diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index ef2c55cc3799ba2fac54f3c9370505b63ef22ad3..84c010df7c396fc21904ae3c980f5fad70b2ceac 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -11,6 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include #include #include @@ -21,61 +22,27 @@ limitations under the License. 
*/ #include "paddle/framework/shape_inference.h" #include "paddle/framework/var_type.h" +DEFINE_bool(op_sync, false, + "Default cuda is asynchronous device, set to True will" + "force op run in synchronous mode."); + namespace paddle { namespace framework { -std::vector> kKernelPriority; - -void UseCPU() { - kKernelPriority.clear(); - /*Plain CPU*/ - auto pair0 = std::make_tuple(platform::CPUPlace(), LibraryType::kPlain); - kKernelPriority.insert(kKernelPriority.begin(), pair0); -} - -void UseMKLDNN() { - UseCPU(); -#if PADDLE_WITH_MKLML - { - /*MKLDNN Kernel*/ - auto pair0 = std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN); - kKernelPriority.insert(kKernelPriority.begin(), pair0); - } -#endif -} - -void UseCUDA() { - UseMKLDNN(); -#if PADDLE_WITH_CUDA - /*Plain GPU*/ - auto pair0 = std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain); - kKernelPriority.insert(kKernelPriority.begin(), pair0); -#endif -} - -void UseCUDNN() { - UseCUDA(); -#if PADDLE_WITH_CUDA - if (platform::dynload::HasCUDNN()) { - /*CUDNN Kernel*/ - auto pair0 = std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN); - kKernelPriority.insert(kKernelPriority.begin(), pair0); - } -#endif -} - -void UseALL() { - UseCPU(); - UseMKLDNN(); - UseCUDA(); - UseCUDNN(); -} +std::vector> kKernelPriority = { + std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN), + std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain), + std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN), + std::make_tuple(platform::CPUPlace(), LibraryType::kPlain), +}; static DDim GetDims(const Scope& scope, const std::string& name) { Variable* var = scope.FindVar(name); if (var == nullptr) { return DDim({-1}); - } else if (var->IsType()) { + } + + if (var->IsType()) { return var->Get().dims(); } else if (var->IsType()) { return var->Get().GetCompleteDims(); @@ -84,6 +51,21 @@ static DDim GetDims(const Scope& scope, const std::string& name) { } } +static LoD GetLoD(const Scope& scope, const std::string& name) { + Variable* var = scope.FindVar(name); + auto default_lod = LoD({{}}); + + if (var == nullptr) { + return default_lod; + } + + if (var->IsType()) { + return var->Get().lod(); + } else { + return default_lod; + } +} + std::string OperatorBase::Input(const std::string& name) const { auto& ins = Inputs(name); PADDLE_ENFORCE_LE(ins.size(), 1UL, @@ -125,7 +107,8 @@ std::string OperatorBase::DebugStringEx(const Scope* scope) const { for (size_t i = 0; i < input.second.size(); ++i) { ss << input.second[i]; if (scope) { - ss << "(" << GetDims(*scope, input.second[i]) << ")"; + ss << "[" << GetDims(*scope, input.second[i]) << "]"; + ss << "(" << GetLoD(*scope, input.second[i]) << ")"; } if (i != input.second.size() - 1) { ss << ", "; @@ -144,7 +127,8 @@ std::string OperatorBase::DebugStringEx(const Scope* scope) const { for (size_t i = 0; i < output.second.size(); ++i) { ss << output.second[i]; if (scope) { - ss << "(" << GetDims(*scope, output.second[i]) << ")"; + ss << "[" << GetDims(*scope, output.second[i]) << "]"; + ss << "(" << GetLoD(*scope, output.second[i]) << ")"; } if (i != output.second.size() - 1) { ss << ", "; @@ -247,36 +231,33 @@ static bool VarIsTensor(const Variable* var) { return var->IsType() || var->IsType(); } -static const Tensor* GetTensorFromVar(const Variable* var) { - const Tensor* t = nullptr; +static const Tensor* GetTensorFromVar(Variable* var) { if (var->IsType()) { - t = &(var->Get()); + return var->GetMutable(); } else if (var->IsType()) { - t = &(var->Get().value()); + return 
var->GetMutable()->mutable_value(); } else { PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.", var->Type().name()); } - return t; } static Tensor* GetMutableTensorFromVar(Variable* var) { - Tensor* t = nullptr; if (var->IsType()) { - t = var->GetMutable(); + return var->GetMutable(); } else if (var->IsType()) { - t = var->GetMutable()->mutable_value(); + return var->GetMutable()->mutable_value(); } else { PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.", var->Type().name()); } - return t; } template <> const Tensor* ExecutionContext::Input(const std::string& name) const { auto* var = InputVar(name); - return var == nullptr ? nullptr : GetTensorFromVar(var); + return var == nullptr ? nullptr + : GetTensorFromVar(const_cast(var)); } template <> @@ -319,6 +300,7 @@ bool OpSupportGPU(const std::string& op_type) { auto it = all_kernels.find(op_type); if (it == all_kernels.end()) { // All control operator must support GPU + return true; } for (auto& kern_pair : it->second) { @@ -492,21 +474,17 @@ void OperatorWithKernel::Run(const Scope& scope, } ExecutionContext ctx(*this, scope, *dev_ctx); - auto expected_kernel_key = this->GetExpectedKernelType(ctx); OpKernelMap& kernels = kernels_iter->second; - for (auto& candidate : kKernelPriority) { - auto candidate_key = - OpKernelType(expected_kernel_key.data_type_, std::get<0>(candidate), - expected_kernel_key.data_layout_, std::get<1>(candidate)); + // TODO(dzhwinter) : kernel fallback mechanism will be added when all the + // transform functions are ready. - if ((candidate_key == expected_kernel_key) || - (kernels.count(candidate_key))) { - expected_kernel_key = candidate_key; - break; - } - } + // for (auto& candidate : kKernelPriority) { + // Do selection + // } + + auto expected_kernel_key = this->GetExpectedKernelType(ctx); VLOG(3) << "expected_kernel_key:" << expected_kernel_key; @@ -520,7 +498,7 @@ void OperatorWithKernel::Run(const Scope& scope, if (tensor_in->IsInitialized()) { auto kernel_type_for_var = this->GetKernelTypeForVar( var_name_item.first, *tensor_in, expected_kernel_key); - if (kernel_type_for_var != expected_kernel_key) { + if (TransFromNeeded(kernel_type_for_var, expected_kernel_key)) { auto out_var_names = OutputVars(true); if (std::find(out_var_names.begin(), out_var_names.end(), var_name) != out_var_names.end()) { @@ -529,11 +507,13 @@ void OperatorWithKernel::Run(const Scope& scope, "does not support transform", var_name); } - VLOG(3) << "need to do transform for var " << var_name; + VLOG(3) << "Transform Variable " << var_name << " from " + << kernel_type_for_var << " to " << expected_kernel_key; auto* trans_var = new_scope.Var(var_name); - auto* out = DataTransform(expected_kernel_key, kernel_type_for_var, - *tensor_in); - CopyVariableWithTensor(*var, *out, *trans_var); + std::shared_ptr out(new Tensor); + DataTransform(expected_kernel_key, kernel_type_for_var, *tensor_in, + out.get()); + CopyVariableWithTensor(*var, *(out.get()), *trans_var); } } } @@ -542,8 +522,14 @@ void OperatorWithKernel::Run(const Scope& scope, auto kernel_iter = kernels.find(expected_kernel_key); - kernel_iter->second->Compute(ExecutionContext( - *this, new_scope, *pool.Get(expected_kernel_key.place_))); + auto* new_dev_ctx = pool.Get(expected_kernel_key.place_); + kernel_iter->second->Compute( + ExecutionContext(*this, new_scope, *new_dev_ctx)); + + /*For profiling/benchmark only*/ + if (FLAGS_op_sync) { + new_dev_ctx->Wait(); + } } proto::DataType OperatorWithKernel::IndicateDataType( diff --git 
a/paddle/framework/operator.h b/paddle/framework/operator.h index d5feb598649c97a9517b7c2b1764fd54ff9f8693..c9140f304c89e32a0fa8bd24722cc2e5dbc4e2e1 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -54,33 +54,9 @@ constexpr char kGradVarSuffix[] = "@GRAD"; constexpr char kZeroVarSuffix[] = "@ZERO"; // define some kernel priority +/* Define multiple kernel type fallback order*/ extern std::vector> kKernelPriority; -/** - * @brief Use cpu kernel only - */ -void UseCPU(); - -/** - * @brief Perfer MKLDNN kernel than Plain CPU kernel - */ -void UseMKLDNN(); - -/** - * @brief Perfer CUDA kernel than Plain CPU kernel - */ -void UseCUDA(); - -/** - * @brief Perfer cudnn kernel than Plain CUDA kernel - */ -void UseCUDNN(); - -/** - * @brief Use all available kernels - */ -void UseALL(); - inline std::string GradVarName(const std::string& var_name) { return var_name + kGradVarSuffix; } diff --git a/paddle/framework/tensor_util.h b/paddle/framework/tensor_util.h index f541d2ba693a169d074c070dd794a2dd4e52aabf..091b63bf0f907a5449f08f0e36abb6577fa5e43e 100644 --- a/paddle/framework/tensor_util.h +++ b/paddle/framework/tensor_util.h @@ -116,8 +116,8 @@ inline void Copy(const Tensor& src, const platform::Place& dst_place, * @param[in] src The external tensor. * @param[in] ctx The device context contains device resources. * - * * @note CopyFromVector assumes that the tensor has been resized - * before invoking. + * * @note CopyFromVector will resize dst to an 1D tensor with the same + * size as src. */ template inline void CopyFromVector(const std::vector& src, diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index 5889a50db09534b30f0f57b4e659df440901f3b1..2569535c257c3210c239b69cd464ae59a8f4747c 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -135,9 +135,8 @@ op_library(detection_output_op DEPS softmax) op_library(sequence_softmax_op DEPS softmax) op_library(sum_op DEPS selected_rows_functor) op_library(sgd_op DEPS selected_rows_functor) +op_library(print_op DEPS lod_tensor) op_library(adagrad_op DEPS selected_rows_functor) -op_library(conv_op DEPS vol2col) -op_library(pool_op DEPS pooling) op_library(maxout_op DEPS maxouting) op_library(unpool_op DEPS unpooling) op_library(pool_with_index_op DEPS pooling) @@ -148,12 +147,27 @@ op_library(max_sequence_len_op DEPS lod_rank_table) op_library(sequence_conv_op DEPS context_project) op_library(sequence_pool_op DEPS sequence_pooling) op_library(lstm_op DEPS sequence2batch lstm_compute) -op_library(conv_transpose_op DEPS vol2col) op_library(gru_op DEPS sequence2batch gru_compute) op_library(recurrent_op DEPS executor) op_library(warpctc_op DEPS dynload_warpctc sequence_padding math_function) op_library(cos_sim_op DEPS cos_sim_functor) op_library(parallel_do_op DEPS executor) + +# Regist multiple Kernel to pybind +if (WITH_GPU) +op_library(conv_op SRCS conv_op.cc conv_op.cu.cc conv_cudnn_op.cu.cc DEPS vol2col) +op_library(pool_op SRCS pool_op.cc pool_op.cu.cc pool_cudnn_op.cu.cc DEPS pooling) +op_library(conv_transpose_op SRCS conv_transpose_op.cc conv_transpose_op.cu.cc + conv_transpose_cudnn_op.cu.cc DEPS vol2col) +file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(conv2d, CUDNN);\n") +file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(pool2d, CUDNN);\n") +file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(conv2d_transpose, CUDNN);\n") +else() +op_library(conv_op SRCS conv_op.cc DEPS vol2col) +op_library(pool_op SRCS pool_op.cc DEPS pooling) +op_library(conv_transpose_op 
SRCS conv_transpose_op.cc DEPS vol2col) +endif() + # FIXME(typhoonzero): save/load depends lodtensor serialization functions op_library(save_op DEPS lod_tensor) op_library(load_op DEPS lod_tensor) diff --git a/paddle/operators/assign_value_op.cc b/paddle/operators/assign_value_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..d5671c1183a0f58d2aedb0723bd462684ac5636e --- /dev/null +++ b/paddle/operators/assign_value_op.cc @@ -0,0 +1,73 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/assign_value_op.h" + +namespace paddle { +namespace operators { + +class AssignValueOp : public framework::OperatorWithKernel { + public: + AssignValueOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of AssignValueOp should not be null."); + auto shape = ctx->Attrs().Get>("shape"); + ctx->SetOutputDim("Out", framework::make_ddim(shape)); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + return framework::OpKernelType( + framework::proto::DataType(ctx.Attr("dtype")), ctx.GetPlace()); + } +}; + +class AssignValueOpMaker : public framework::OpProtoAndCheckerMaker { + public: + AssignValueOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddOutput("Out", "(Tensor) Output tensor of assign_value operator."); + AddAttr>("shape", + "(vector) " + "Shape of values."); + AddAttr("dtype", "data type of values") + .InEnum({framework::proto::DataType::INT32, + framework::proto::DataType::FP32}); + AddAttr>("fp32_values", "store the float values") + .SetDefault({}); + AddAttr>("int32_values", "store the int values") + .SetDefault({}); + AddComment(R"DOC( +AssignValue operator + +$$Out = values$$ +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(assign_value, ops::AssignValueOp, ops::AssignValueOpMaker); +REGISTER_OP_CPU_KERNEL(assign_value, ops::AssignValueKernel, + ops::AssignValueKernel); diff --git a/paddle/operators/pool_cudnn_op.h b/paddle/operators/assign_value_op.cu.cc similarity index 68% rename from paddle/operators/pool_cudnn_op.h rename to paddle/operators/assign_value_op.cu.cc index 5adf27f5bccae8542719612320bc6dbe21007634..b17e20150053cea4c6b9ed6a5f222f77f4a4bd36 100644 --- a/paddle/operators/pool_cudnn_op.h +++ b/paddle/operators/assign_value_op.cu.cc @@ -1,19 +1,19 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at +Indicesou may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#pragma once - -#include "paddle/framework/op_registry.h" -#include "paddle/operators/pool_op.h" +#include "paddle/operators/assign_value_op.h" -namespace paddle { -namespace operators {} // namespace operators -} // namespace paddle +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL(assign_value, ops::AssignValueKernel, + ops::AssignValueKernel); diff --git a/paddle/operators/assign_value_op.h b/paddle/operators/assign_value_op.h new file mode 100644 index 0000000000000000000000000000000000000000..db2e43077999fa0f9aaada74026dd701ab2bf464 --- /dev/null +++ b/paddle/operators/assign_value_op.h @@ -0,0 +1,50 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" +#include "paddle/platform/enforce.h" + +namespace paddle { +namespace operators { + +template +class AssignValueKernel : public framework::OpKernel { + public: + virtual void Compute(const framework::ExecutionContext& ctx) const { + auto shape = ctx.Attr>("shape"); + auto* out = ctx.Output("Out"); + int dtype = ctx.Attr("dtype"); + const char* value_name = nullptr; + switch (dtype) { + case framework::proto::DataType::INT32: + value_name = "int32_values"; + break; + case framework::proto::DataType::FP32: + value_name = "fp32_values"; + break; + default: + PADDLE_THROW("Unsupported dtype for assign_value_op: %d", dtype); + break; + } + auto values = ctx.Attr>(value_name); + framework::CopyFromVector(values, ctx.device_context(), out); + out->Resize(framework::make_ddim(shape)); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/conv_cudnn_op.cc b/paddle/operators/conv_cudnn_op.cc deleted file mode 100644 index 84d9ce1973a4cccadcb8f78feaecbcaa9e7af312..0000000000000000000000000000000000000000 --- a/paddle/operators/conv_cudnn_op.cc +++ /dev/null @@ -1,74 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#include "paddle/operators/conv_op.h" - -namespace paddle { -namespace operators { - -class CudnnConv2DOpMaker : public Conv2DOpMaker { - public: - CudnnConv2DOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : Conv2DOpMaker(proto, op_checker) { - AddAttr("workspace_size_MB", - "workspace size for cudnn, in MB, " - "workspace is a section of GPU memory which will be " - "allocated/freed each time the operator runs, larger " - "workspace size can increase performance but also requires " - "better hardware. This size should be chosen carefully.") - .SetDefault(4096); - } -}; - -class CudnnConv3DOpMaker : public Conv3DOpMaker { - public: - CudnnConv3DOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : Conv3DOpMaker(proto, op_checker) { - AddAttr("workspace_size_MB", - "workspace size for cudnn, in MB, " - "workspace is a section of GPU memory which will be " - "allocated/freed each time the operator runs, larger " - "workspace size can increase performance but also requires " - "better hardware. This size should be chosen carefully.") - .SetDefault(4096); - } -}; - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; -REGISTER_OP(conv2d_cudnn, ops::ConvOp, ops::CudnnConv2DOpMaker, - conv2d_cudnn_grad, ops::ConvOpGrad); - -REGISTER_OP(conv3d_cudnn, ops::ConvOp, ops::CudnnConv3DOpMaker, - conv3d_cudnn_grad, ops::ConvOpGrad); - -REGISTER_OP_CPU_KERNEL( - conv2d_cudnn, - ops::GemmConvKernel, - ops::GemmConvKernel); -REGISTER_OP_CPU_KERNEL( - conv2d_cudnn_grad, - ops::GemmConvGradKernel, - ops::GemmConvGradKernel); - -REGISTER_OP_CPU_KERNEL( - conv3d_cudnn, - ops::GemmConvKernel, - ops::GemmConvKernel); -REGISTER_OP_CPU_KERNEL( - conv3d_cudnn_grad, - ops::GemmConvGradKernel, - ops::GemmConvGradKernel); diff --git a/paddle/operators/conv_cudnn_op.cu.cc b/paddle/operators/conv_cudnn_op.cu.cc index 0c5ed3e4e80304c6fd174975166804347feb18b1..3a5409a7e3f29a4c46839d6395760fc7fe8c086e 100644 --- a/paddle/operators/conv_cudnn_op.cu.cc +++ b/paddle/operators/conv_cudnn_op.cu.cc @@ -32,7 +32,7 @@ static constexpr size_t kCONV_CUDNN_WORKSPACE_LIMIT_BYTES = static_cast(1024) * 1024 * 1024; template -class CudnnConvOpKernel : public framework::OpKernel { +class CUDNNConvOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), @@ -147,7 +147,7 @@ class CudnnConvOpKernel : public framework::OpKernel { }; template -class CudnnConvGradOpKernel : public framework::OpKernel { +class CUDNNConvGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), @@ -315,17 +315,16 @@ class CudnnConvGradOpKernel : public framework::OpKernel { } // namespace operators } // namespace paddle -// TODO(dzhwinter) : below register should be removed -REGISTER_OP_CUDA_KERNEL(conv2d_cudnn, - paddle::operators::CudnnConvOpKernel, - paddle::operators::CudnnConvOpKernel); -REGISTER_OP_CUDA_KERNEL(conv2d_cudnn_grad, - paddle::operators::CudnnConvGradOpKernel, - paddle::operators::CudnnConvGradOpKernel); - -REGISTER_OP_CUDA_KERNEL(conv3d_cudnn, - paddle::operators::CudnnConvOpKernel, - paddle::operators::CudnnConvOpKernel); -REGISTER_OP_CUDA_KERNEL(conv3d_cudnn_grad, - paddle::operators::CudnnConvGradOpKernel, - paddle::operators::CudnnConvGradOpKernel); +REGISTER_OP_KERNEL(conv2d, CUDNN, ::paddle::platform::CUDAPlace, + paddle::operators::CUDNNConvOpKernel, + 
paddle::operators::CUDNNConvOpKernel); +REGISTER_OP_KERNEL(conv2d_grad, CUDNN, ::paddle::platform::CUDAPlace, + paddle::operators::CUDNNConvGradOpKernel, + paddle::operators::CUDNNConvGradOpKernel); + +REGISTER_OP_KERNEL(conv3d, CUDNN, ::paddle::platform::CUDAPlace, + paddle::operators::CUDNNConvOpKernel, + paddle::operators::CUDNNConvOpKernel); +REGISTER_OP_KERNEL(conv3d_grad, CUDNN, ::paddle::platform::CUDAPlace, + paddle::operators::CUDNNConvGradOpKernel, + paddle::operators::CUDNNConvGradOpKernel); diff --git a/paddle/operators/conv_op.cc b/paddle/operators/conv_op.cc index 1468e3eb960a2b7c2e7af83ff701338596606922..424eccdb7dc57195d20f75460032a99c76e6adcd 100644 --- a/paddle/operators/conv_op.cc +++ b/paddle/operators/conv_op.cc @@ -67,6 +67,23 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const { ctx->ShareLoD("Input", "Output"); } +framework::OpKernelType ConvOp::GetExpectedKernelType( + const framework::ExecutionContext& ctx) const { + bool use_cudnn = ctx.Attr("use_cudnn"); + framework::LibraryType library_; + if (use_cudnn) { + library_ = framework::LibraryType::kCUDNN; + } else { + library_ = framework::LibraryType::kPlain; + } + + std::string data_format = ctx.Attr("data_format"); + framework::DataLayout layout_ = framework::StringToDataLayout(data_format); + return framework::OpKernelType( + framework::ToDataType(ctx.Input("Input")->type()), ctx.GetPlace(), + layout_, library_); +} + Conv2DOpMaker::Conv2DOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( @@ -108,6 +125,26 @@ Conv2DOpMaker::Conv2DOpMaker(OpProto* proto, OpAttrChecker* op_checker) "dilations(h_dilation, w_dilation) of " "convolution operator.") .SetDefault({1, 1}); + AddAttr( + "use_cudnn", + "(bool, default false) Only used in cudnn kernel, need install cudnn") + .SetDefault(false); + AddAttr( + "data_format", + "(string, default NCHW) Only used in " + "An optional string from: \"NHWC\", \"NCHW\". " + "Defaults to \"NHWC\". Specify the data format of the output data, " + "the input will be transformed automatically. ") + .SetDefault("AnyLayout"); + // TODO(dzhwinter): need to registered layout transform function + AddAttr("workspace_size_MB", + "Only used in cudnn kernel. Need set use_cudnn to true." + "workspace size for cudnn, in MB, " + "workspace is a section of GPU memory which will be " + "allocated/freed each time the operator runs, larger " + "workspace size can increase performance but also requires " + "better hardware. This size should be chosen carefully.") + .SetDefault(4096); AddComment(R"DOC( Convolution Operator. @@ -181,6 +218,25 @@ Conv3DOpMaker::Conv3DOpMaker(OpProto* proto, OpAttrChecker* op_checker) "dilations(d_dilation, h_dilation, w_dilation) of " "convolution operator.") .SetDefault({1, 1, 1}); + AddAttr( + "use_cudnn", + "(bool, default false) Only used in cudnn kernel, need install cudnn") + .SetDefault(false); + AddAttr( + "data_format", + "(string, default NCHW) Only used in " + "An optional string from: \"NHWC\", \"NCHW\". " + "Defaults to \"NHWC\". Specify the data format of the output data, " + "the input will be transformed automatically. ") + .SetDefault("AnyLayout"); + // TODO(dzhwinter): need to registered layout transform function + AddAttr("workspace_size_MB", + "Only used in cudnn kernel. 
workspace size for cudnn, in MB, " + "workspace is a section of GPU memory which will be " + "allocated/freed each time the operator runs, larger " + "workspace size can increase performance but also requires " + "better hardware. This size should be chosen carefully.") + .SetDefault(4096); AddComment(R"DOC( Convolution3D Operator. @@ -224,6 +280,23 @@ void ConvOpGrad::InferShape(framework::InferShapeContext* ctx) const { } } +framework::OpKernelType ConvOpGrad::GetExpectedKernelType( + const framework::ExecutionContext& ctx) const { + bool use_cudnn = ctx.Attr("use_cudnn"); + framework::LibraryType library_; + if (use_cudnn) { + library_ = framework::LibraryType::kCUDNN; + } else { + library_ = framework::LibraryType::kPlain; + } + + std::string data_format = ctx.Attr("data_format"); + framework::DataLayout layout_ = framework::StringToDataLayout(data_format); + return framework::OpKernelType( + framework::ToDataType(ctx.Input("Input")->type()), ctx.GetPlace(), + layout_, library_); +} + } // namespace operators } // namespace paddle diff --git a/paddle/operators/conv_op.h b/paddle/operators/conv_op.h index 83786e2329e7ae3c2908fdfdaeb1f79d19a53f47..5a8933e7915960f9fcbe92ae73c4f37b3b69ecaf 100644 --- a/paddle/operators/conv_op.h +++ b/paddle/operators/conv_op.h @@ -62,12 +62,20 @@ class ConvOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override; + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override; }; class ConvOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override; + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override; }; template diff --git a/paddle/operators/conv_transpose_cudnn_op.cc b/paddle/operators/conv_transpose_cudnn_op.cc deleted file mode 100644 index 2e5333a265f2f59f31c651b8bb080599ec6e31a4..0000000000000000000000000000000000000000 --- a/paddle/operators/conv_transpose_cudnn_op.cc +++ /dev/null @@ -1,78 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/operators/conv_transpose_op.h" - -namespace paddle { -namespace operators { - -class CudnnConv2DTransposeOpMaker : public Conv2DTransposeOpMaker { - public: - CudnnConv2DTransposeOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : Conv2DTransposeOpMaker(proto, op_checker) { - AddAttr("workspace_size_MB", - "workspace size for cudnn, in MB, " - "workspace is a section of GPU memory which will be " - "allocated/freed each time the operator runs, larger " - "workspace size can increase performance but also requires " - "better hardward. 
This size should be carefully setted.") - .SetDefault(4096); - } -}; - -class CudnnConv3DTransposeOpMaker : public Conv3DTransposeOpMaker { - public: - CudnnConv3DTransposeOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : Conv3DTransposeOpMaker(proto, op_checker) { - AddAttr("workspace_size_MB", - "workspace size for cudnn, in MB, " - "workspace is a section of GPU memory which will be " - "allocated/freed each time the operator runs, larger " - "workspace size can increase performance but also requires " - "better hardward. This size should be carefully setted.") - .SetDefault(4096); - } -}; - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; -REGISTER_OP(conv2d_transpose_cudnn, ops::ConvTransposeOp, - ops::CudnnConv2DTransposeOpMaker, conv2d_transpose_cudnn_grad, - ops::ConvTransposeOpGrad); - -REGISTER_OP_CPU_KERNEL( - conv2d_transpose_cudnn, - ops::GemmConvTransposeKernel, - ops::GemmConvTransposeKernel); -REGISTER_OP_CPU_KERNEL( - conv2d_transpose_cudnn_grad, - ops::GemmConvTransposeGradKernel, - ops::GemmConvTransposeGradKernel); - -REGISTER_OP(conv3d_transpose_cudnn, ops::ConvTransposeOp, - ops::CudnnConv3DTransposeOpMaker, conv3d_transpose_cudnn_grad, - ops::ConvTransposeOpGrad); - -REGISTER_OP_CPU_KERNEL( - conv3d_transpose_cudnn, - ops::GemmConvTransposeKernel, - ops::GemmConvTransposeKernel); -REGISTER_OP_CPU_KERNEL( - conv3d_transpose_cudnn_grad, - ops::GemmConvTransposeGradKernel, - ops::GemmConvTransposeGradKernel); diff --git a/paddle/operators/conv_transpose_cudnn_op.cu.cc b/paddle/operators/conv_transpose_cudnn_op.cu.cc index fc37776ba1ed35aa6b2523eb593e9713cfcc54eb..23bc97e13c13e5c1d62a406be5e528ab272fa93a 100644 --- a/paddle/operators/conv_transpose_cudnn_op.cu.cc +++ b/paddle/operators/conv_transpose_cudnn_op.cu.cc @@ -28,10 +28,10 @@ using ScopedFilterDescriptor = platform::ScopedFilterDescriptor; using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor; using DataLayout = platform::DataLayout; -static constexpr size_t kConvCudnnWorkspaceLimitBytes = 1024 * 1024 * 1024; +static constexpr size_t kConvCUDNNWorkspaceLimitBytes = 1024 * 1024 * 1024; template -class CudnnConvTransposeOpKernel : public framework::OpKernel { +class CUDNNConvTransposeOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), @@ -77,7 +77,7 @@ class CudnnConvTransposeOpKernel : public framework::OpKernel { // ------------------- cudnn conv workspace --------------------- void* cudnn_workspace = nullptr; size_t workspace_size_in_bytes; // final workspace to allocate. 
- size_t workspace_size_limit = kConvCudnnWorkspaceLimitBytes; + size_t workspace_size_limit = kConvCUDNNWorkspaceLimitBytes; if (user_workspace_size > 0) { workspace_size_limit = user_workspace_size * 1024 * 1024; } @@ -116,7 +116,7 @@ class CudnnConvTransposeOpKernel : public framework::OpKernel { }; template -class CudnnConvTransposeGradOpKernel : public framework::OpKernel { +class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), @@ -161,7 +161,7 @@ class CudnnConvTransposeGradOpKernel : public framework::OpKernel { cudnnConvolutionBwdFilterAlgo_t filter_algo; size_t bwd_filter_ws_size, fwd_ws_size; size_t workspace_size_in_bytes = 0; - size_t workspace_size_limit = kConvCudnnWorkspaceLimitBytes; + size_t workspace_size_limit = kConvCUDNNWorkspaceLimitBytes; if (user_workspace_size > 0) { workspace_size_limit = user_workspace_size * 1024 * 1024; } @@ -236,16 +236,16 @@ class CudnnConvTransposeGradOpKernel : public framework::OpKernel { namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL(conv2d_transpose_cudnn, - ops::CudnnConvTransposeOpKernel, - ops::CudnnConvTransposeOpKernel); -REGISTER_OP_CUDA_KERNEL(conv2d_transpose_cudnn_grad, - ops::CudnnConvTransposeGradOpKernel, - ops::CudnnConvTransposeGradOpKernel); - -REGISTER_OP_CUDA_KERNEL(conv3d_transpose_cudnn, - ops::CudnnConvTransposeOpKernel, - ops::CudnnConvTransposeOpKernel); -REGISTER_OP_CUDA_KERNEL(conv3d_transpose_cudnn_grad, - ops::CudnnConvTransposeGradOpKernel, - ops::CudnnConvTransposeGradOpKernel); +REGISTER_OP_KERNEL(conv2d_transpose, CUDNN, ::paddle::platform::CUDAPlace, + ops::CUDNNConvTransposeOpKernel, + ops::CUDNNConvTransposeOpKernel); +REGISTER_OP_KERNEL(conv2d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace, + ops::CUDNNConvTransposeGradOpKernel, + ops::CUDNNConvTransposeGradOpKernel); + +REGISTER_OP_KERNEL(conv3d_transpose, CUDNN, ::paddle::platform::CUDAPlace, + ops::CUDNNConvTransposeOpKernel, + ops::CUDNNConvTransposeOpKernel); +REGISTER_OP_KERNEL(conv3d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace, + ops::CUDNNConvTransposeGradOpKernel, + ops::CUDNNConvTransposeGradOpKernel); diff --git a/paddle/operators/conv_transpose_op.cc b/paddle/operators/conv_transpose_op.cc index 74636d138f1e40474a1cc5453609dafe14fcaaab..cf4e8c0a303d6888c3b1f2475a483c4bfc90981b 100644 --- a/paddle/operators/conv_transpose_op.cc +++ b/paddle/operators/conv_transpose_op.cc @@ -58,6 +58,23 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const { ctx->SetOutputDim("Output", framework::make_ddim(output_shape)); } +framework::OpKernelType ConvTransposeOp::GetExpectedKernelType( + const framework::ExecutionContext& ctx) const { + bool use_cudnn = ctx.Attr("use_cudnn"); + framework::LibraryType library_; + if (use_cudnn) { + library_ = framework::LibraryType::kCUDNN; + } else { + library_ = framework::LibraryType::kPlain; + } + + std::string data_format = ctx.Attr("data_format"); + framework::DataLayout layout_ = framework::StringToDataLayout(data_format); + return framework::OpKernelType( + framework::ToDataType(ctx.Input("Input")->type()), ctx.GetPlace(), + layout_, library_); +} + Conv2DTransposeOpMaker::Conv2DTransposeOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { @@ -94,6 +111,25 @@ Conv2DTransposeOpMaker::Conv2DTransposeOpMaker(OpProto* proto, "(vector default:{0, 0}), the paddings(h_pad, w_pad) of 
convolution " "transpose operator.") .SetDefault({0, 0}); + AddAttr( + "use_cudnn", + "(bool, default false) Only used in cudnn kernel, need install cudnn") + .SetDefault(false); + AddAttr( + "data_format", + "(string, default NCHW) Only used in " + "An optional string from: \"NHWC\", \"NCHW\". " + "Defaults to \"NHWC\". Specify the data format of the output data, " + "the input will be transformed automatically. ") + .SetDefault("AnyLayout"); + // TODO(dzhwinter): need to registered layout transform function + AddAttr("workspace_size_MB", + "Used in cudnn kernel only. workspace size for cudnn, in MB, " + "workspace is a section of GPU memory which will be " + "allocated/freed each time the operator runs, larger " + "workspace size can increase performance but also requires " + "better hardward. This size should be carefully setted.") + .SetDefault(4096); AddComment(R"DOC( Convolution2D Transpose Operator. @@ -163,6 +199,25 @@ Conv3DTransposeOpMaker::Conv3DTransposeOpMaker(OpProto* proto, "(vector default:{0, 0, 0}), paddings(d_pad, " "h_pad, w_pad) of convolution transpose operator.") .SetDefault({0, 0, 0}); + AddAttr( + "use_cudnn", + "(bool, default false) Only used in cudnn kernel, need install cudnn") + .SetDefault(false); + AddAttr( + "data_format", + "(string, default NCHW) Only used in " + "An optional string from: \"NHWC\", \"NCHW\". " + "Defaults to \"NHWC\". Specify the data format of the output data, " + "the input will be transformed automatically. ") + .SetDefault("AnyLayout"); + // TODO(dzhwinter): need to registered layout transform function + AddAttr("workspace_size_MB", + "Used in cudnn kernel only. workspace size for cudnn, in MB, " + "workspace is a section of GPU memory which will be " + "allocated/freed each time the operator runs, larger " + "workspace size can increase performance but also requires " + "better hardward. This size should be carefully setted.") + .SetDefault(4096); AddComment(R"DOC( Convolution3D Transpose Operator. 
@@ -205,6 +260,23 @@ void ConvTransposeOpGrad::InferShape(framework::InferShapeContext* ctx) const { } } +framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType( + const framework::ExecutionContext& ctx) const { + bool use_cudnn = ctx.Attr("use_cudnn"); + framework::LibraryType library_; + if (use_cudnn) { + library_ = framework::LibraryType::kCUDNN; + } else { + library_ = framework::LibraryType::kPlain; + } + + std::string data_format = ctx.Attr("data_format"); + framework::DataLayout layout_ = framework::StringToDataLayout(data_format); + return framework::OpKernelType( + framework::ToDataType(ctx.Input("Input")->type()), ctx.GetPlace(), + layout_, library_); +} + } // namespace operators } // namespace paddle diff --git a/paddle/operators/conv_transpose_op.h b/paddle/operators/conv_transpose_op.h index 4c8f8a80672788e8b2919e500d3627adec1ad035..a42ade41b165d1bfa00d2db0e45d40cf5d7b00bc 100644 --- a/paddle/operators/conv_transpose_op.h +++ b/paddle/operators/conv_transpose_op.h @@ -42,12 +42,20 @@ class ConvTransposeOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override; + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override; }; class ConvTransposeOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override; + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override; }; template diff --git a/paddle/operators/elementwise_add_op.cc b/paddle/operators/elementwise_add_op.cc index 70b7c9f2ec11bf8ad56a24324a53792955edc77d..37951fa7587c8a200f4733e5a46575461f5026cd 100644 --- a/paddle/operators/elementwise_add_op.cc +++ b/paddle/operators/elementwise_add_op.cc @@ -21,7 +21,7 @@ class ElementwiseAddOpMaker : public ElementwiseOpMaker { public: ElementwiseAddOpMaker(OpProto* proto, OpAttrChecker* op_checker) : ElementwiseOpMaker(proto, op_checker) { - SetComment("Add", "$Out = X + Y$"); + SetComment("Add", "Out = X + Y"); AddComment(comment_); } }; diff --git a/paddle/operators/elementwise_div_op.cc b/paddle/operators/elementwise_div_op.cc index 1fa960866fa2066a351ef2e65a3c77cf8b6595f7..6ebd58b1b3dd79a465e70a24f7aab56261290bf6 100644 --- a/paddle/operators/elementwise_div_op.cc +++ b/paddle/operators/elementwise_div_op.cc @@ -21,7 +21,7 @@ class ElementwiseDivOpMaker : public ElementwiseOpMaker { public: ElementwiseDivOpMaker(OpProto* proto, OpAttrChecker* op_checker) : ElementwiseOpMaker(proto, op_checker) { - SetComment("Div", "$Out = X / Y$"); + SetComment("Div", "Out = X / Y"); AddComment(comment_); } }; diff --git a/paddle/operators/elementwise_mul_op.cc b/paddle/operators/elementwise_mul_op.cc index a6d11736194cb79bdc247c721acf8bda9c81dbe5..450dd05c796e22794315274b73398e85c8145940 100644 --- a/paddle/operators/elementwise_mul_op.cc +++ b/paddle/operators/elementwise_mul_op.cc @@ -22,7 +22,7 @@ class ElementwiseMulOpMaker : public ElementwiseOpMaker { public: ElementwiseMulOpMaker(OpProto* proto, OpAttrChecker* op_checker) : ElementwiseOpMaker(proto, op_checker) { - SetComment("Mul", "$Out = X \\odot\\ Y$"); + SetComment("Mul", "Out = X \\odot\\ Y"); AddComment(comment_); } }; diff --git a/paddle/operators/elementwise_op.h b/paddle/operators/elementwise_op.h index 
f308ee05e11210540e41cda4b9a896f9f96c4730..a342595b546bfca1a344cf8a549597df6a29adec 100644 --- a/paddle/operators/elementwise_op.h +++ b/paddle/operators/elementwise_op.h @@ -58,7 +58,8 @@ Limited Elementwise {name} Operator. The equation is: -{equation} +.. math:: + {equation} X is a tensor of any dimension and the dimensions of tensor Y must be smaller than or equal to the dimensions of X. @@ -71,15 +72,16 @@ For case 2: Y will be broadcasted to match the shape of X and axis should be the starting dimension index for broadcasting Y onto X. -example: - shape(X) = (2, 3, 4, 5), shape(Y) = (,) - shape(X) = (2, 3, 4, 5), shape(Y) = (5,) - shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5) - shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 - shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 +For example: + .. code-block:: python -Both the input X and Y can carry the LoD (Level of Details) information, -or not. But the output only shares the LoD information with input X. + shape(X) = (2, 3, 4, 5), shape(Y) = (,) + shape(X) = (2, 3, 4, 5), shape(Y) = (5,) + shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5) + shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 + +Both of the inputs X and Y can carry the LoD (Level of Details) information, or neither of them. However, the output only shares the LoD information with input X. )DOC"; AddComment(comment_); diff --git a/paddle/operators/elementwise_sub_op.cc b/paddle/operators/elementwise_sub_op.cc index 2a8d0845b1800277a7d3cd6ff6c5c984e92197ee..d3c51f0a697b7cb07a46871cdba2e84e902fd0f2 100644 --- a/paddle/operators/elementwise_sub_op.cc +++ b/paddle/operators/elementwise_sub_op.cc @@ -21,7 +21,7 @@ class ElementwiseSubOpMaker : public ElementwiseOpMaker { public: ElementwiseSubOpMaker(OpProto* proto, OpAttrChecker* op_checker) : ElementwiseOpMaker(proto, op_checker) { - SetComment("Sub", "$Out = X - Y$"); + SetComment("Sub", "Out = X - Y"); AddComment(comment_); } }; diff --git a/paddle/operators/math/sequence2batch.cc b/paddle/operators/math/sequence2batch.cc index 88977be1f8c030741c3a3a8f07a4feeb1d8bb4d9..e459a42ca251a9fc79f745f48a118ce898a0f77e 100644 --- a/paddle/operators/math/sequence2batch.cc +++ b/paddle/operators/math/sequence2batch.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/math/sequence2batch.h" +#include "paddle/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/operators/pool_cudnn_op.cc b/paddle/operators/pool_cudnn_op.cc deleted file mode 100644 index 77407f5cdf7e4ef7b76c38ef8992517b4bd1c5fe..0000000000000000000000000000000000000000 --- a/paddle/operators/pool_cudnn_op.cc +++ /dev/null @@ -1,39 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
*/ - -#include "paddle/operators/pool_cudnn_op.h" - -namespace ops = paddle::operators; - -REGISTER_OP(pool2d_cudnn, ops::PoolOp, ops::Pool2dOpMaker, pool2d_cudnn_grad, - ops::PoolOpGrad); - -REGISTER_OP_CPU_KERNEL( - pool2d_cudnn, ops::PoolKernel, - ops::PoolKernel); -REGISTER_OP_CPU_KERNEL( - pool2d_cudnn_grad, - ops::PoolGradKernel, - ops::PoolGradKernel) - -REGISTER_OP(pool3d_cudnn, ops::PoolOp, ops::Pool3dOpMaker, pool3d_cudnn_grad, - ops::PoolOpGrad); - -REGISTER_OP_CPU_KERNEL( - pool3d_cudnn, ops::PoolKernel, - ops::PoolKernel); -REGISTER_OP_CPU_KERNEL( - pool3d_cudnn_grad, - ops::PoolGradKernel, - ops::PoolGradKernel) diff --git a/paddle/operators/pool_cudnn_op.cu.cc b/paddle/operators/pool_cudnn_op.cu.cc index 2d0001ba1184c99d9fc642f60c97ba89cec97ccd..446fb0819d98e0eb3d81bb202a67c55a23fc06b6 100644 --- a/paddle/operators/pool_cudnn_op.cu.cc +++ b/paddle/operators/pool_cudnn_op.cu.cc @@ -12,7 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/pool_cudnn_op.h" +#include "paddle/framework/op_registry.h" +#include "paddle/operators/pool_op.h" #include "paddle/platform/cudnn_helper.h" namespace paddle { @@ -25,7 +26,7 @@ using DataLayout = platform::DataLayout; using PoolingMode = platform::PoolingMode; template -class PoolCudnnOpKernel : public framework::OpKernel { +class PoolCUDNNOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), @@ -86,7 +87,7 @@ class PoolCudnnOpKernel : public framework::OpKernel { }; template -class PoolCudnnGradOpKernel : public framework::OpKernel { +class PoolCUDNNGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), @@ -162,12 +163,16 @@ class PoolCudnnGradOpKernel : public framework::OpKernel { namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL(pool2d_cudnn, ops::PoolCudnnOpKernel, - ops::PoolCudnnOpKernel); -REGISTER_OP_CUDA_KERNEL(pool2d_cudnn_grad, ops::PoolCudnnGradOpKernel, - ops::PoolCudnnGradOpKernel); - -REGISTER_OP_CUDA_KERNEL(pool3d_cudnn, ops::PoolCudnnOpKernel, - ops::PoolCudnnOpKernel); -REGISTER_OP_CUDA_KERNEL(pool3d_cudnn_grad, ops::PoolCudnnGradOpKernel, - ops::PoolCudnnGradOpKernel); +REGISTER_OP_KERNEL(pool2d, CUDNN, ::paddle::platform::CUDAPlace, + ops::PoolCUDNNOpKernel, + ops::PoolCUDNNOpKernel); +REGISTER_OP_KERNEL(pool2d_grad, CUDNN, ::paddle::platform::CUDAPlace, + ops::PoolCUDNNGradOpKernel, + ops::PoolCUDNNGradOpKernel); + +REGISTER_OP_KERNEL(pool3d, CUDNN, ::paddle::platform::CUDAPlace, + ops::PoolCUDNNOpKernel, + ops::PoolCUDNNOpKernel); +REGISTER_OP_KERNEL(pool3d_grad, CUDNN, ::paddle::platform::CUDAPlace, + ops::PoolCUDNNGradOpKernel, + ops::PoolCUDNNGradOpKernel); diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc index d3cf5fa638c53dfdfacec153211f447a1e2fa3bf..3e567efd082ed913ce3c19f87c93a2868ebe8864 100644 --- a/paddle/operators/pool_op.cc +++ b/paddle/operators/pool_op.cc @@ -61,6 +61,23 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const { ctx->ShareLoD("X", "Out"); } +framework::OpKernelType PoolOp::GetExpectedKernelType( + const framework::ExecutionContext &ctx) const { + bool use_cudnn = ctx.Attr("use_cudnn"); + framework::LibraryType library_; + if (use_cudnn) { + library_ = 
framework::LibraryType::kCUDNN; + } else { + library_ = framework::LibraryType::kPlain; + } + + std::string data_format = ctx.Attr("data_format"); + framework::DataLayout layout_ = framework::StringToDataLayout(data_format); + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), + layout_, library_); +} + void PoolOpGrad::InferShape(framework::InferShapeContext *ctx) const { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null."); PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), @@ -68,6 +85,23 @@ void PoolOpGrad::InferShape(framework::InferShapeContext *ctx) const { ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } +framework::OpKernelType PoolOpGrad::GetExpectedKernelType( + const framework::ExecutionContext &ctx) const { + bool use_cudnn = ctx.Attr("use_cudnn"); + framework::LibraryType library_; + if (use_cudnn) { + library_ = framework::LibraryType::kCUDNN; + } else { + library_ = framework::LibraryType::kPlain; + } + + std::string data_format = ctx.Attr("data_format"); + framework::DataLayout layout_ = framework::StringToDataLayout(data_format); + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), + layout_, library_); +} + Pool2dOpMaker::Pool2dOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( @@ -101,15 +135,27 @@ Pool2dOpMaker::Pool2dOpMaker(OpProto *proto, OpAttrChecker *op_checker) AddAttr>("strides", "(vector, default {1, 1}), strides(height, " "width) of pooling operator.") - .SetDefault({1, 1}); // TODO(Chengduo): Add checker. (Currently, + .SetDefault({1, 1}); + // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) AddAttr>( "paddings", "(vector, default {0,0}), paddings(height, width) of pooling " "operator." "If global_pooling = true, paddings and ksize will be ignored.") - .SetDefault({0, 0}); // TODO(Chengduo): Add checker. (Currently, - // TypedAttrChecker don't support vector type.) + .SetDefault({0, 0}); + AddAttr( + "use_cudnn", + "(bool, default false) Only used in cudnn kernel, need install cudnn") + .SetDefault(false); + AddAttr( + "data_format", + "(string, default NCHW) Only used in " + "An optional string from: \"NHWC\", \"NCHW\". " + "Defaults to \"NHWC\". Specify the data format of the output data, " + "the input will be transformed automatically. ") + .SetDefault("AnyLayout"); + // TODO(dzhwinter): need to registered layout transform function AddComment(R"DOC( Pool2d Operator. @@ -182,6 +228,19 @@ Pool3dOpMaker::Pool3dOpMaker(OpProto *proto, OpAttrChecker *op_checker) .SetDefault({0, 0, 0}); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) + AddAttr( + "use_cudnn", + "(bool, default false) Only used in cudnn kernel, need install cudnn") + .SetDefault(false); + AddAttr( + "data_format", + "(string, default NCHW) Only used in " + "An optional string from: \"NHWC\", \"NCHW\". " + "Defaults to \"NHWC\". Specify the data format of the output data, " + "the input will be transformed automatically. ") + .SetDefault("AnyLayout"); + // TODO(dzhwinter): need to registered layout transform function + AddComment(R"DOC( Pool3d Operator. 
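The conv, conv_transpose, and pool operators above all add the same `GetExpectedKernelType` pattern: the `use_cudnn` attribute selects between `LibraryType::kCUDNN` and `LibraryType::kPlain`, and the `data_format` attribute is mapped to a layout through `StringToDataLayout`. Below is a minimal, self-contained sketch of that selection logic; the enums, the `KernelKey` struct, and the helper names are simplified stand-ins for illustration, not the framework's actual `OpKernelType` machinery.

```cpp
// Minimal sketch of the kernel-selection pattern introduced above.
// The types and names here are simplified stand-ins, not framework classes.
#include <cctype>
#include <iostream>
#include <stdexcept>
#include <string>

enum class LibraryType { kPlain, kCUDNN };
enum class DataLayout { kNHWC, kNCHW, kAnyLayout };

// Mirrors StringToDataLayout: case-insensitive match, unknown strings rejected.
DataLayout ToLayout(std::string s) {
  for (char& c : s) {
    c = static_cast<char>(std::toupper(static_cast<unsigned char>(c)));
  }
  if (s == "NHWC") return DataLayout::kNHWC;
  if (s == "NCHW") return DataLayout::kNCHW;
  if (s == "ANYLAYOUT") return DataLayout::kAnyLayout;
  throw std::invalid_argument("Unknown storage order string: " + s);
}

struct KernelKey {
  LibraryType library;
  DataLayout layout;
};

// Mirrors GetExpectedKernelType: `use_cudnn` picks the library and
// `data_format` picks the layout (data type and place are omitted here).
KernelKey ExpectedKernel(bool use_cudnn, const std::string& data_format) {
  return {use_cudnn ? LibraryType::kCUDNN : LibraryType::kPlain,
          ToLayout(data_format)};
}

int main() {
  KernelKey key = ExpectedKernel(/*use_cudnn=*/true, "AnyLayout");
  std::cout << (key.library == LibraryType::kCUDNN) << "\n";   // prints 1
  std::cout << (key.layout == DataLayout::kAnyLayout) << "\n";  // prints 1
  return 0;
}
```

Under this scheme, a kernel registered with `REGISTER_OP_KERNEL(pool2d, CUDNN, ...)` is only selected when the operator's `use_cudnn` attribute is true, which appears to be the intent of the CUDNN registrations earlier in this diff.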
diff --git a/paddle/operators/pool_op.h b/paddle/operators/pool_op.h index 3860e295f4b4dbeb2d60cfb304847de39083f1e1..c3d82ecbdeb412f0234fcddc27361d79b58c7122 100644 --- a/paddle/operators/pool_op.h +++ b/paddle/operators/pool_op.h @@ -29,6 +29,10 @@ class PoolOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override; + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override; }; class PoolOpGrad : public framework::OperatorWithKernel { @@ -36,6 +40,10 @@ class PoolOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override; + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override; }; class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker { diff --git a/paddle/operators/print_op.cc b/paddle/operators/print_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..8b233d64c904a8870212af33c5839cfc555b5dc8 --- /dev/null +++ b/paddle/operators/print_op.cc @@ -0,0 +1,283 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#include +#include + +#include "paddle/framework/op_registry.h" +#include "paddle/framework/variable.h" + +namespace paddle { +namespace operators { + +#define CLOG std::cout + +const std::string kForward = "FORWARD"; +const std::string kBackward = "BACKWARD"; +const std::string kBoth = "BOTH"; + +struct Formater { + std::string message; + std::string name; + std::vector dims; + std::type_index dtype{typeid(char)}; + framework::LoD lod; + int summarize; + void* data{nullptr}; + + void operator()(size_t size) { + PrintMessage(); + PrintName(); + PrintDims(); + PrintDtype(); + PrintLod(); + PrintData(size); + } + + private: + void PrintMessage() { CLOG << std::time(nullptr) << "\t" << message; } + void PrintName() { + if (!name.empty()) { + CLOG << "Tensor[" << name << "]" << std::endl; + } + } + void PrintDims() { + if (!dims.empty()) { + CLOG << "\tshape: ["; + for (auto i : dims) { + CLOG << i << ","; + } + CLOG << "]" << std::endl; + } + } + void PrintDtype() { + if (dtype.hash_code() != typeid(char).hash_code()) { + CLOG << "\tdtype: " << dtype.name() << std::endl; + } + } + void PrintLod() { + if (!lod.empty()) { + CLOG << "\tLoD: ["; + for (auto level : lod) { + CLOG << "[ "; + for (auto i : level) { + CLOG << i << ","; + } + CLOG << " ]"; + } + CLOG << "]" << std::endl; + } + } + + void PrintData(size_t size) { + PADDLE_ENFORCE_NOT_NULL(data); + // print float + if (dtype.hash_code() == typeid(float).hash_code()) { + Display(size); + } + if (dtype.hash_code() == typeid(double).hash_code()) { + Display(size); + } + if (dtype.hash_code() == typeid(int).hash_code()) { + Display(size); + } + if (dtype.hash_code() == typeid(int64_t).hash_code()) { + Display(size); + } + } + + template + void Display(size_t size) { + auto* d = (T*)data; + CLOG << "\tdata: "; + if (summarize != -1) { + summarize = std::min(size, (size_t)summarize); + for (int i = 0; i < summarize; i++) { + CLOG << d[i] << ","; + } + } else { + for (size_t i = 0; i < size; i++) { + CLOG << d[i] << ","; + } + } + CLOG << std::endl; + } +}; + +// TODO(ChunweiYan) there should be some other printers for TensorArray +class TensorPrintOp : public framework::OperatorBase { + public: + TensorPrintOp(const std::string& type, + const framework::VariableNameMap& inputs, + const framework::VariableNameMap& outputs, + const framework::AttributeMap& attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + + TensorPrintOp(const TensorPrintOp& o) + : framework::OperatorBase( + static_cast(o)) { + PADDLE_THROW("Not implemented."); + } + + void Run(const framework::Scope& scope, + const platform::Place& place) const override { + const framework::Variable* in_var_ptr = nullptr; + std::string phase = kForward; + std::string printed_var_name = ""; + + auto& inputs = Inputs(); + if (inputs.find("In") != inputs.end() && !Inputs("In").empty()) { + in_var_ptr = scope.FindVar(Input("In")); + printed_var_name = Inputs("In").front(); + } else if (inputs.find("In@GRAD") != inputs.end() && + !Inputs("In@GRAD").empty()) { + in_var_ptr = scope.FindVar(Input("In@GRAD")); + printed_var_name = Inputs("In@GRAD").front(); + phase = kBackward; + } else { + PADDLE_THROW("Unknown phase, should be forward or backward."); + } + + PADDLE_ENFORCE_NOT_NULL(in_var_ptr); + + auto& in_tensor = in_var_ptr->Get(); + auto* out_var_ptr = scope.FindVar(Output("Out")); + auto& out_tensor = *out_var_ptr->GetMutable(); + + // Just copy data from input tensor to output tensor + // output tensor share same memory with input tensor + out_tensor.ShareDataWith(in_tensor); + 
out_tensor.set_lod(in_tensor.lod()); + + std::string print_phase = Attr("print_phase"); + if (print_phase != phase && print_phase != kBoth) { + return; + } + + int first_n = Attr("first_n"); + if (first_n > 0 && ++times_ > first_n) return; + + framework::LoDTensor printed_tensor; + printed_tensor.set_lod(in_tensor.lod()); + printed_tensor.Resize(in_tensor.dims()); + + if (platform::is_cpu_place(in_tensor.place())) { + printed_tensor.ShareDataWith(in_tensor); + } else { + // copy data to cpu to print + platform::CPUPlace place; + framework::Copy(in_tensor, place, &printed_tensor); + } + + Formater formater; + if (Attr("print_tensor_name")) { + formater.name = printed_var_name; + } + if (Attr("print_tensor_type")) { + formater.dtype = printed_tensor.type(); + } + if (Attr("print_tensor_shape")) { + auto& dims = printed_tensor.dims(); + formater.dims.resize(dims.size()); + for (int i = 0; i < dims.size(); ++i) formater.dims[i] = dims[i]; + } + if (Attr("print_tensor_lod")) { + formater.lod = printed_tensor.lod(); + } + formater.summarize = Attr("summarize"); + formater.data = (void*)printed_tensor.data(); + formater(printed_tensor.numel()); + } + + private: + mutable int times_{0}; +}; + +class PrintOpProtoAndCheckMaker : public framework::OpProtoAndCheckerMaker { + public: + PrintOpProtoAndCheckMaker(OpProto* proto, OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("In", "Input tensor to be displayed."); + AddAttr("first_n", "Only log `first_n` number of times."); + AddAttr("message", "A string message to print as a prefix."); + AddAttr("summarize", "Number of elements printed."); + AddAttr("print_tensor_name", "Whether to print the tensor name."); + AddAttr("print_tensor_type", "Whether to print the tensor's dtype."); + AddAttr("print_tensor_shape", "Whether to print the tensor's shape."); + AddAttr("print_tensor_lod", "Whether to print the tensor's lod."); + AddAttr( + "print_phase", + "(string, default 'BOTH') Which phase to display including 'FORWARD' " + "'BACKWARD' and 'BOTH'.") + .SetDefault(kBoth) + .InEnum({kForward, kBackward, kBoth}); + AddOutput("Out", "Output tensor with same data as input tensor."); + AddComment(R"DOC( +Creates a print op that will print when a tensor is accessed. 
+ +Wraps the tensor passed in so that whenever that a tensor is accessed, +the message `message` is printed, along with the current value of the +tensor `t`.)DOC"); + } +}; + +class InferShapeForward : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext* context) const override { + PADDLE_ENFORCE(context->HasInput("In"), "Input(In) should not be null."); + context->ShareLoD("In", /*->*/ "Out"); + context->SetOutputDim("Out", context->GetInputDim("In")); + } +}; + +class InferShapeBackward : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext* context) const override { + PADDLE_ENFORCE(context->HasInput("In@GRAD"), + "Input(In@GRAD) should not be null."); + context->ShareLoD("In@GRAD", /*->*/ "Out"); + context->SetOutputDim("Out", context->GetInputDim("In@GRAD")); + } +}; + +class InferVarType : public framework::VarTypeInference { + public: + void operator()(const framework::OpDesc& op_desc, + framework::BlockDesc* block) const override {} +}; + +class PrintOpProtoAndCheckGradOpMaker + : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + std::unique_ptr Apply() const override { + auto* op_desc_ptr = new framework::OpDesc(); + op_desc_ptr->SetType("print_grad"); + op_desc_ptr->SetInput("In@GRAD", OutputGrad("Out")); + op_desc_ptr->SetOutput("Out", InputGrad("In")); + op_desc_ptr->SetAttrMap(Attrs()); + return std::unique_ptr(op_desc_ptr); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(print, ops::TensorPrintOp, ops::PrintOpProtoAndCheckMaker, + ops::PrintOpProtoAndCheckGradOpMaker, ops::InferShapeForward, + ops::InferVarType); +REGISTER_OPERATOR(print_grad, ops::TensorPrintOp, ops::InferShapeBackward); diff --git a/paddle/operators/reorder_lod_tensor_by_rank_op.cc b/paddle/operators/reorder_lod_tensor_by_rank_op.cc index a055cdf7e8952995e57c28b3520c427caa75a4c1..3c30447949421da516213b47178828453671c693 100644 --- a/paddle/operators/reorder_lod_tensor_by_rank_op.cc +++ b/paddle/operators/reorder_lod_tensor_by_rank_op.cc @@ -26,22 +26,44 @@ class ReorderLoDTensorByRankTableOpProtoMaker ReorderLoDTensorByRankTableOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "(LoDTensor) the input lod tensor need to be reordered."); + AddInput("X", + "(LoDTensor), the input lod tensor to be reordered according to " + "Input(RankTable)."); AddInput("RankTable", - "(LoDRankTable) the rank table that input need follow"); - AddOutput("Out", "(LoDTensor) reordered lod tensor"); - AddComment(R"DOC(ReorderLoDTensorByRankTable + "(LoDRankTable), the rank table according to which Input(X) is " + "reordered."); + AddOutput("Out", "(LoDTensor), the reordered lod tensor."); + AddComment(R"DOC(ReorderLoDTensorByRankTable operator. -Reorder the input X by the rank of `RankTable`. If `RankTable` is ordered by -index [3, 0, 2, 1]. Input X will reorder its sequence, the third sequence of -X will be the first sequence of Output. - -NOTE: The RankTable does not need to be calculated by X. +Input(X) is a batch of sequences. Input(RankTable) stores new orders of the +input sequence batch. The reorder_lod_tensor_by_rank operator reorders the +Input(X) according to the information provided by Input(RankTable). For example: -The X = [Seq0, Seq1, Seq2, Seq3]. The indices of RankTable are [3, 0, 2, 1]. 
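As a quick standalone illustration of the index mapping described in this doc comment (plain Python, not part of the operator), the rank table indices select which input sequence lands at each output position:

```python
# Pure-Python sketch of reorder_lod_tensor_by_rank's reordering rule.
X = ["Seq0", "Seq1", "Seq2", "Seq3"]      # a batch of sequences
rank_table_indices = [3, 0, 2, 1]         # order stored in the LoDRankTable

Out = [X[i] for i in rank_table_indices]  # pick sequences in rank order
assert Out == ["Seq3", "Seq0", "Seq2", "Seq1"]
```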
-The Out = [Seq3, Seq0, Seq2, Seq1] with correct LoD information. +If the indices stored in the Input(RankTable) are [3, 0, 2, 1], the +Input(X) will be reordered that the fourth sequence in Input(X) will become the +first one, and then followed by the original first, third, and the second one. + +This is: +X = [Seq0, Seq1, Seq2, Seq3]. The indices in RankTable are [3, 0, 2, 1]. +Out = [Seq3, Seq0, Seq2, Seq1] with a new LoD information. + +If the LoD information of Input(X) is empty, this means Input(X) is not sequence +data. This is also identical to a batch of sequences where each sequence has a +fixed length 1. In this case, the reorder_lod_tensor_by_rank operator reorders +each slice of Input(X) along the first axis according to Input(RankTable). + +This is: +X = [Slice0, Slice1, Slice2, Slice3] and its LoD information is empty. The +indices in RankTable are [3, 0, 2, 1]. +Out = [Slice3, Slice0, Slice2, Slice1] with no LoD information is appended. + +NOTE: This operator sorts Input(X) according to a given LoDRankTable which does +not need to be calculated according to Input(X). It can be calculated according +to another different sequence, and then this operator sorts Input(X) according +to the given LoDRankTable. + )DOC"); } }; diff --git a/paddle/operators/shrink_rnn_memory_op.cc b/paddle/operators/shrink_rnn_memory_op.cc index 3f5b2a9b84350c7dee5cb461ba6207e20e95c11b..bf870115a4d7b6f4d578df7707826973d4363ba6 100644 --- a/paddle/operators/shrink_rnn_memory_op.cc +++ b/paddle/operators/shrink_rnn_memory_op.cc @@ -45,7 +45,7 @@ class ShrinkRNNMemoryOp : public ArrayOp { rank_items.begin(); auto *out_var = scope.FindVar(Output("Out")); - PADDLE_ENFORCE(out_var != nullptr, "Output Out must be set"); + PADDLE_ENFORCE(out_var != nullptr, "Output(Out) must be set."); auto &out_tensor = *out_var->GetMutable(); size_t height = dst_num_rows; @@ -76,15 +76,17 @@ class ShrinkRNNMemoryOpProtoMaker : public framework::OpProtoAndCheckerMaker { "(LoDTensor) The step index. The RNN step memory 'X' will be " "shrinked to match the size of the input of the index'th step."); AddOutput("Out", "(LoDTensor) The shrinked RNN step memory."); - AddComment( - R"DOC( - In dynamic RNN, we are able to handle sequences of different lengths. - Because of the multiple lengths, the size of each step input can be - different, which may lead to a mismatching between the input of - the current step and the memory generated by the previous one. This - operator shrinks memory according to the size of the next step input, - to make sure that they can match each other. - )DOC"); + AddComment(R"DOC( +This operator is used to shrink output batch of memory defined in dynamic RNN. + +Dynamic RNN is able to handle variable-length sequences, in which, sequences in +a mini-batch are sorted by their lengths first. After that, the longest sequence +becomes the first one in the sorted batch, followed by the second longest, the +third longest, and so on. Dynamic RNN then slices a batch input timestep by +timestep from the sorted input. Once any sequence in the input batch reaches its +end, memory defined in dynamicRNN has to shrink its outputs to adapt to the input +batch size for the next time step. 
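To make the shrinking behaviour concrete, here is a small standalone Python sketch (illustrative only, not part of the operator) that computes how many rows remain in the batch at each time step once the sequences are sorted by length:

```python
# Sequences sorted by length in descending order, e.g. lengths 4, 2, 2, 1.
lengths = [4, 2, 2, 1]

# At time step t only the sequences that still have data stay in the batch,
# so the RNN memory has to shrink to this many rows before the next step.
rows_per_step = [sum(1 for l in lengths if l > t) for t in range(max(lengths))]
print(rows_per_step)  # [4, 3, 1, 1]
```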
+)DOC"); } }; @@ -136,6 +138,7 @@ class ShrinkRNNMemoryGradOp : public ArrayOp { math::set_constant(dev_ctx, &rest_tensor, 0.0f); } } + dx_tensor.set_lod(x_tensor.lod()); } }; diff --git a/paddle/operators/while_op.cc b/paddle/operators/while_op.cc index 7a3400919efe6f3bed40e45a245b556beab6fce4..2fdd25dbbe68659f8a0a9da13a87148ed259127a 100644 --- a/paddle/operators/while_op.cc +++ b/paddle/operators/while_op.cc @@ -121,8 +121,8 @@ class WhileGradOp : public framework::OperatorBase { for (size_t i = 0; i < outside_og_names.size(); ++i) { auto outside_og_name = outside_og_names[i]; auto inside_og_name = inside_og_names[i]; - VLOG(10) << "Linking outside " << outside_og_name << " --> inside " - << inside_og_name; + VLOG(8) << "Linking outside " << outside_og_name << " --> inside " + << inside_og_name; auto &og_outside = detail::Ref(scope.FindVar(outside_og_name), "Cannot find Outside Gradient %s", outside_og_name); @@ -141,11 +141,11 @@ class WhileGradOp : public framework::OperatorBase { auto &outside_array = og_outside.Get(); auto &inside_array = detail::Ref(og_inside.GetMutable()); - VLOG(10) << outside_og_name << " size = " << outside_array.size(); + VLOG(8) << outside_og_name << " size = " << outside_array.size(); inside_array.resize(outside_array.size()); for (size_t j = 0; j < inside_array.size(); ++j) { - VLOG(10) << j << " " << outside_array[j].numel(); + VLOG(8) << j << " " << outside_array[j].numel(); if (outside_array[j].numel() != 0) { inside_array[j].set_lod(outside_array[j].lod()); inside_array[j].ShareDataWith(outside_array[j]); @@ -187,10 +187,14 @@ class WhileGradOp : public framework::OperatorBase { attrs["shape"] = framework::vectorize2int(inside_tensor.dims()); attrs["value"] = 0.0f; + auto var_name = pg_names[param_id]; auto zero_op = framework::OpRegistry::CreateOp( "fill_constant", framework::VariableNameMap{}, - {{"Out", {pg_names[param_id]}}}, attrs); + {{"Out", {var_name}}}, attrs); zero_op->Run(scope, dev_place); + scope.FindVar(var_name) + ->GetMutable() + ->set_lod(inside_tensor.lod()); } } @@ -231,7 +235,7 @@ class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker { auto igs = InputGrad(kX, /*do not drop empty gradient*/ false); for (auto &each_ig : igs) { if (inner_op_outputs.find(each_ig) == inner_op_outputs.end()) { - VLOG(10) << "Ignore " << each_ig; + VLOG(8) << "Ignore " << each_ig; each_ig = framework::kEmptyVarName; } } diff --git a/paddle/platform/dynload/cudnn.cc b/paddle/platform/dynload/cudnn.cc index 76ec82e10840751a654c7d7f57da8d5570d2a9ce..701f6240fef2e9df25472c0caf6177e7f1964cfd 100644 --- a/paddle/platform/dynload/cudnn.cc +++ b/paddle/platform/dynload/cudnn.cc @@ -44,7 +44,7 @@ CUDNN_DNN_ROUTINE_EACH_R7(DEFINE_WRAP); #ifdef PADDLE_USE_DSO bool HasCUDNN() { - std::call_once(cudnn_dso_flag, GetCudnnDsoHandle, &cudnn_dso_handle); + std::call_once(cudnn_dso_flag, GetCUDNNDsoHandle, &cudnn_dso_handle); return cudnn_dso_handle != nullptr; } diff --git a/paddle/platform/dynload/cudnn.h b/paddle/platform/dynload/cudnn.h index 8c937b37d714a06c623f4e204bd572fdd200ea5d..b92634794947f6979255d6241ff3a333f3771bfb 100644 --- a/paddle/platform/dynload/cudnn.h +++ b/paddle/platform/dynload/cudnn.h @@ -36,7 +36,7 @@ extern void EnforceCUDNNLoaded(const char* fn_name); auto operator()(Args... 
args) -> decltype(__name(args...)) { \ using cudnn_func = decltype(__name(args...)) (*)(Args...); \ std::call_once(cudnn_dso_flag, \ - paddle::platform::dynload::GetCudnnDsoHandle, \ + paddle::platform::dynload::GetCUDNNDsoHandle, \ &cudnn_dso_handle); \ EnforceCUDNNLoaded(#__name); \ void* p_##__name = dlsym(cudnn_dso_handle, #__name); \ diff --git a/paddle/platform/dynload/dynamic_loader.cc b/paddle/platform/dynload/dynamic_loader.cc index 7a82d06a0acbfa44386d40df97f6b0e43ed46577..c8c09ae608fa7cc67a54fada9cfe86b40096a9fd 100644 --- a/paddle/platform/dynload/dynamic_loader.cc +++ b/paddle/platform/dynload/dynamic_loader.cc @@ -134,7 +134,7 @@ void GetCublasDsoHandle(void** dso_handle) { #endif } -void GetCudnnDsoHandle(void** dso_handle) { +void GetCUDNNDsoHandle(void** dso_handle) { #if defined(__APPLE__) || defined(__OSX__) GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, "libcudnn.dylib", dso_handle, false); diff --git a/paddle/platform/dynload/dynamic_loader.h b/paddle/platform/dynload/dynamic_loader.h index c0e5452e5ae723ec314ebafde86a6ff63980be00..7b0c8c16d7484480550f3c753fe52d1f04651900 100644 --- a/paddle/platform/dynload/dynamic_loader.h +++ b/paddle/platform/dynload/dynamic_loader.h @@ -32,7 +32,7 @@ void GetCublasDsoHandle(void** dso_handle); * @param **dso_handle dso handler * */ -void GetCudnnDsoHandle(void** dso_handle); +void GetCUDNNDsoHandle(void** dso_handle); /** * @brief load the DSO of CURAND diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 5d170c66e97f56440968ba568167e6845631e1cc..c5d70bc9f91bc92b28a546cc79b08a9fda150050 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -430,13 +430,8 @@ All parameter, weight, gradient are variables in Paddle. m.def("init_glog", framework::InitGLOG); m.def("init_devices", &framework::InitDevices); - m.def("use_cpu", framework::UseCPU); - m.def("use_mkldnn", framework::UseMKLDNN); - m.def("use_cuda", framework::UseCUDA); - m.def("use_cudnn", framework::UseCUDNN); - m.def("use_all", framework::UseALL); - m.def("is_compile_gpu", IsCompileGPU); + m.def("set_feed_variable", framework::SetFeedVariable); m.def("get_fetch_variable", framework::GetFetchVariable); diff --git a/paddle/pybind/tensor_py.h b/paddle/pybind/tensor_py.h index 6b4290972bade585d1a0c2ae919a2e712bdf308c..3b5210e2b91dd697e213a0c847f73c6969cd654b 100644 --- a/paddle/pybind/tensor_py.h +++ b/paddle/pybind/tensor_py.h @@ -14,7 +14,7 @@ limitations under the License. 
*/ #pragma once #include -#include "paddle/framework/tensor.h" +#include "paddle/framework/lod_tensor.h" #include "paddle/memory/memcpy.h" #include "paddle/platform/device_context.h" #include "pybind11/numpy.h" @@ -97,14 +97,27 @@ inline py::buffer_info CastToPyBuffer(framework::Tensor &tensor) { template T TensorGetElement(framework::Tensor &self, size_t offset) { - PADDLE_ENFORCE(platform::is_cpu_place(self.place())); - return self.data()[offset]; + if (platform::is_cpu_place(self.place())) { + return self.data()[offset]; + } else { + std::shared_ptr dst(new framework::Tensor); + framework::Copy(self, platform::CPUPlace(), dst.get()); + return dst->data()[offset]; + } } +// TODO(dzhwinter) : fix the redundent Tensor allocate and free template void TensorSetElement(framework::Tensor &self, size_t offset, T elem) { - PADDLE_ENFORCE(platform::is_cpu_place(self.place())); - self.data()[offset] = elem; + if (platform::is_gpu_place(self.place())) { + std::shared_ptr dst(new framework::Tensor); + framework::Copy(self, platform::CPUPlace(), dst.get()); + dst->data()[offset] = elem; + framework::Copy(*dst.get(), self.place(), &self); + + } else if (platform::is_cpu_place(self.place())) { + self.data()[offset] = elem; + } } template diff --git a/paddle/scripts/submit_local.sh.in b/paddle/scripts/submit_local.sh.in index bb47ad614ed85923ce5d9704760ec6c5b5ae59ee..80fa0c72af65cbdc21ba955389318a233e02657c 100755 --- a/paddle/scripts/submit_local.sh.in +++ b/paddle/scripts/submit_local.sh.in @@ -49,7 +49,18 @@ function cpu_config() { if [ "@WITH_MKL@" == "OFF" ]; then return 0 fi - ht=`lscpu |grep "per core"|awk -F':' '{print $2}'|xargs` + platform="`uname -s`" + ht=0 + if [ $platform == "Linux" ]; then + ht=`lscpu |grep "per core"|awk -F':' '{print $2}'|xargs` + elif [ $platform == "Darwin" ]; then + if [`sysctl -n hw.physicalcpu` -eq `sysctl -n hw.logicalcpu`]; then + # HT is OFF + ht=1 + fi + else + return 0 + fi if [ $ht -eq 1 ]; then # HT is OFF if [ -z "$KMP_AFFINITY" ]; then export KMP_AFFINITY="granularity=fine,compact,0,0" @@ -72,7 +83,15 @@ function threads_config() { # according to trainer_count and total processors # only when MKL enabled # auto set OPENBLAS_NUM_THREADS when do not use MKL - processors=`grep "processor" /proc/cpuinfo|sort -u|wc -l` + platform="`uname -s`" + processors=0 + if [ $platform == "Linux" ]; then + processors=`grep "processor" /proc/cpuinfo|sort -u|wc -l` + elif [ $platform == "Darwin" ]; then + processors=`sysctl -n hw.logicalcpu` + else + return 0 + fi trainers=`grep -Eo 'trainer_count.[0-9]+' <<< "$@" |grep -Eo '[0-9]+'|xargs` if [ -z $trainers ]; then trainers=1 @@ -148,11 +167,7 @@ else: sys.exit(0) EOF -if [ "`uname -s`" == "Linux" ]; then - # only support on linux yet, with mac can use v2 - cpu_config -fi - +cpu_config # echo $KMP_AFFINITY $OMP_DYNAMIC case "$1" in diff --git a/python/paddle/utils/dump_v2_config.py b/python/paddle/utils/dump_v2_config.py new file mode 100644 index 0000000000000000000000000000000000000000..5dc2111e379fd39b40e1e9bcf2e577b57b101a68 --- /dev/null +++ b/python/paddle/utils/dump_v2_config.py @@ -0,0 +1,62 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import collections + +from paddle.trainer_config_helpers.layers import LayerOutput +from paddle.v2.layer import parse_network +from paddle.proto import TrainerConfig_pb2 + +__all__ = ["dump_v2_config"] + + +def dump_v2_config(topology, save_path, binary=False): + """ Dump the network topology to a specified file. + + This function is only used to dump network defined by using PaddlePaddle V2 + APIs. This function will NOT dump configurations related to PaddlePaddle + optimizer. + + :param topology: The output layers (can be more than one layers given in a + Python List or Tuple) of the entire network. Using the + specified layers (if more than one layer is given) as root, + traversing back to the data layer(s), all the layers + connected to the specified output layers will be dumped. + Layers not connceted to the specified will not be dumped. + :type topology: LayerOutput|List|Tuple + :param save_path: The path to save the dumped network topology. + :type save_path: str + :param binary: Whether to dump the serialized network topology or not. + The default value is false. NOTE that, if you call this + function to generate network topology for PaddlePaddle C-API, + a serialized version of network topology is required. When + using PaddlePaddle C-API, this flag MUST be set to True. + :type binary: bool + """ + + if isinstance(topology, LayerOutput): + topology = [topology] + elif isinstance(topology, collections.Sequence): + for out_layer in topology: + assert isinstance(out_layer, LayerOutput), ( + "The type of each element in the parameter topology " + "should be LayerOutput.") + else: + raise RuntimeError("Error input type for parameter topology.") + + model_str = parse_network(topology) + with open(save_path, "w") as fout: + if binary: + fout.write(model_str.SerializeToString()) + else: + fout.write(str(model_str)) diff --git a/python/paddle/utils/merge_model.py b/python/paddle/utils/merge_model.py index 421e953d2775f145800cf7179ec644697a265060..2b100207728a8532e900992f7db4d3910e893dea 100644 --- a/python/paddle/utils/merge_model.py +++ b/python/paddle/utils/merge_model.py @@ -30,7 +30,8 @@ def merge_v2_model(net, param_file, output_file): which ends with .tar.gz. @param net The output layer of the network for inference. - @param param_file Path of the parameters (.tar.gz) which is stored by v2 api. + @param param_file Path of the parameters (.tar.gz) which is stored by + v2 api. @param output_file Path of the merged file which will be generated. 
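For the `dump_v2_config` helper introduced above, a minimal usage sketch follows; the small MNIST-style network and the output path are illustrative assumptions, not part of the patch:

```python
import paddle.v2 as paddle
from paddle.utils.dump_v2_config import dump_v2_config

paddle.init(use_gpu=False, trainer_count=1)

# A tiny V2-API network; layer names and sizes are only for illustration.
image = paddle.layer.data(name="image", type=paddle.data_type.dense_vector(784))
hidden = paddle.layer.fc(input=image, size=128, act=paddle.activation.Relu())
prediction = paddle.layer.fc(input=hidden, size=10, act=paddle.activation.Softmax())

# Serialize the topology; binary=True is required when the file will be
# loaded through the PaddlePaddle C-API.
dump_v2_config(prediction, "inference_topology.pb", binary=True)
```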
Usage: diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py index ccd5998e3592a1f5dc795ee24875c1aed230587e..5afc663822cac62105f0e6191f927bb2c8c4a705 100644 --- a/python/paddle/v2/fluid/__init__.py +++ b/python/paddle/v2/fluid/__init__.py @@ -18,14 +18,29 @@ from param_attr import ParamAttr from data_feeder import DataFeeder from core import LoDTensor, CPUPlace, CUDAPlace from distribute_transpiler import DistributeTranspiler +from distribute_transpiler_simple import SimpleDistributeTranspiler import clip from memory_optimization_transpiler import memory_optimize Tensor = LoDTensor __all__ = framework.__all__ + executor.__all__ + [ - 'io', 'initializer', 'layers', 'nets', 'optimizer', 'backward', - 'regularizer', 'LoDTensor', 'CPUPlace', 'CUDAPlace', 'Tensor', 'ParamAttr' - 'DataFeeder', 'clip', 'DistributeTranspiler', 'memory_optimize' + 'io', + 'initializer', + 'layers', + 'nets', + 'optimizer', + 'backward', + 'regularizer', + 'LoDTensor', + 'CPUPlace', + 'CUDAPlace', + 'Tensor', + 'ParamAttr' + 'DataFeeder', + 'clip', + 'SimpleDistributeTranspiler', + 'DistributeTranspiler', + 'memory_optimize', ] @@ -58,7 +73,7 @@ def __bootstrap__(): read_env_flags = ['use_pinned_memory', 'check_nan_inf'] if core.is_compile_gpu(): - read_env_flags.append('fraction_of_gpu_memory_to_use') + read_env_flags += ['fraction_of_gpu_memory_to_use', 'op_sync'] core.init_gflags([sys.argv[0]] + ["--tryfromenv=" + ",".join(read_env_flags)]) core.init_glog(sys.argv[0]) diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py index cea2d1e09068da20f4d2fdbfbd9a3e3a511ba267..43f6133a6534efb676dacea2e8b8d25846d91247 100644 --- a/python/paddle/v2/fluid/backward.py +++ b/python/paddle/v2/fluid/backward.py @@ -3,7 +3,10 @@ from . import core import collections import copy -__all__ = ['append_backward', 'calc_gradient'] +__all__ = [ + 'append_backward', + 'calc_gradient', +] def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None): diff --git a/python/paddle/v2/fluid/clip.py b/python/paddle/v2/fluid/clip.py index b1fd1c2b65f10010fa959dbb47b3fbab114db2f2..be0c2735f24c40f7c90b29a29fe47880acfedee1 100644 --- a/python/paddle/v2/fluid/clip.py +++ b/python/paddle/v2/fluid/clip.py @@ -3,7 +3,10 @@ import layers from . import core __all__ = [ - 'GradientClipByValue', 'append_gradient_clip_ops', 'error_clip_callback' + 'GradientClipByValue', + 'ErrorClipByValue', + 'append_gradient_clip_ops', + 'error_clip_callback', ] @@ -23,12 +26,12 @@ class ErrorClipByValue(BaseErrorClipAttr): self.min = min def append_clip_op(self, block, grad_name): - block.append_op( - type="clip", - inputs={"X": grad_name}, - outputs={"Out": grad_name}, - attrs={"min": self.min, - "max": self.max}) + clip_op_desc = block.desc.append_op() + clip_op_desc.set_type("clip") + clip_op_desc.set_input("X", [grad_name]) + clip_op_desc.set_output("Out", [grad_name]) + clip_op_desc.set_attr("min", self.min) + clip_op_desc.set_attr("max", self.max) def error_clip_callback(block, context): @@ -39,6 +42,11 @@ def error_clip_callback(block, context): op_desc.output_arg_names()): fwd_var = block.var_recursive(grad_to_var[grad_n]) error_clip = getattr(fwd_var, "error_clip", None) + if not (error_clip is None or isinstance(error_clip, + BaseErrorClipAttr)): + raise TypeError( + "Variable's error_clip should be an instance of BaseErrorClipAttr or None." 
+ ) if error_clip is not None: error_clip.append_clip_op(block, grad_n) diff --git a/python/paddle/v2/fluid/default_scope_funcs.py b/python/paddle/v2/fluid/default_scope_funcs.py index 60c6165b6bd959f7bb3d92afed667f00f73f144f..9aebc07f8e8aac2d6bfbe7a7817b4bd261859415 100644 --- a/python/paddle/v2/fluid/default_scope_funcs.py +++ b/python/paddle/v2/fluid/default_scope_funcs.py @@ -1,16 +1,16 @@ """ Default scope function. -`Paddle` manages Scope as programming language's scope. It just a -thread-local stack of Scope. Top of that stack is current scope, the bottom -of that stack is all scopes' parent. +`Paddle` manages Scope as programming language's scope. It just a +thread-local stack of Scope. Top of that stack is current scope, the bottom +of that stack is all scopes' parent. -Invoking `var/find_var` can `new/find` variable in current scope. -Invoking `enter_local_scope/leave_local_scope` can create or destroy local -scope. +Invoking `var/find_var` can `new/find` variable in current scope. +Invoking `enter_local_scope/leave_local_scope` can create or destroy local +scope. -A `scoped_function` will take a `function` as input. That function will be -invoked in a new local scope. +A `scoped_function` will take a `function` as input. That function will be +invoked in a new local scope. """ import paddle.v2.fluid.core @@ -19,8 +19,12 @@ import threading __tl_scope__ = threading.local() __all__ = [ - 'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'var', - 'find_var', 'scoped_function' + 'get_cur_scope', + 'enter_local_scope', + 'leave_local_scope', + 'var', + 'find_var', + 'scoped_function', ] @@ -71,7 +75,7 @@ def find_var(name): def scoped_function(func): """ invoke `func` in new scope. - + :param func: a callable function that will be run in new scope. 
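Since the module docstring above describes the thread-local scope stack, a short usage sketch may help (hedged; it assumes a working fluid build and uses only the functions exported by this module):

```python
from paddle.v2.fluid.default_scope_funcs import (
    var, find_var, enter_local_scope, leave_local_scope, scoped_function)

# Create, then find, a variable in the current (top-of-stack) scope.
var("w")
assert find_var("w") is not None

# Variables created inside a local scope disappear once the scope is left.
enter_local_scope()
var("tmp")
leave_local_scope()

# scoped_function runs a callable inside a fresh local scope for you.
scoped_function(lambda: var("tmp2"))
```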
:type func: callable """ diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py index 49ece7b725e318d7526d58fe54c97cbe20200a7d..d17f9815cca5e3f4142da1357d2e5da6914a76cf 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -1,51 +1,62 @@ +from __future__ import print_function import framework from framework import Program, default_main_program, Parameter, Variable import optimizer from layer_helper import LayerHelper +from distributed_spliter import * +import math -def hash_name_to_server(params_grads, pserver_endpoints): - """ - :param param_grads: - :return: a map of pserver endpoint -> - params -> [param list] - grads -> [grad list] - """ - - def _hash_param(param_name, total): - return hash(param_name) % total - - param_grad_map = dict() - for param, grad in params_grads: - if param.trainable is True and grad is not None: - server_id = _hash_param(param.name, len(pserver_endpoints)) - server_for_param = pserver_endpoints[server_id] - if not param_grad_map.has_key(server_for_param): - param_grad_map[server_for_param] = {"params": [], "grads": []} - param_grad_map[server_for_param]["params"].append(param) - param_grad_map[server_for_param]["grads"].append(grad) - - return param_grad_map +class VarBlock: + def __init__(self, varname, offset, size): + self.varname = varname + # NOTE: real offset is offset * size + self.offset = offset + self.size = size + def __str__(self): + return "%s:%d:%d" % (self.varname, self.offset, self.size) -def round_robin(params_grads, pserver_endpoints): - assert (len(params_grads) > len(pserver_endpoints)) - param_grad_map = dict() - pserver_idx = 0 - for param, grad in params_grads: - if param.trainable is True: - server_for_param = pserver_endpoints[pserver_idx] - if not param_grad_map.has_key(server_for_param): - param_grad_map[server_for_param] = {"params": [], "grads": []} - - param_grad_map[server_for_param]["params"].append(param) - param_grad_map[server_for_param]["grads"].append(grad) +def split_dense_variable(var_list, + pserver_count, + min_block_size=1024, + max_block_size=1048576): + """ + We may need to split dense tensor to one or several blocks and put + them equally onto parameter server. One block is a sub-tensor + aligned by dim[0] of the tensor. + + We need to have a minimal block size so that the calculations in + the parameter server side can gain better performance. By default + mininum block size is 1024. The max block size is used to prevent + too large block that may causing send error. 
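A worked example of the splitting rule just described (a standalone sketch with a stand-in variable object; real callers pass fluid Variables taken from `params_grads`):

```python
from collections import namedtuple
from paddle.v2.fluid.distribute_transpiler import split_dense_variable

# Stand-in exposing only the attributes split_dense_variable reads.
FakeVar = namedtuple("FakeVar", ["name", "shape"])

# 1000 x 64 = 64000 elements split across 2 pservers:
#   block_size = ceil(64000 / 2) = 32000, already a multiple of dim1 (64),
#   so the variable is described by two blocks of 32000 elements each.
blocks = split_dense_variable([FakeVar("fc_0.w_0@GRAD", (1000, 64))], 2)
print(blocks)  # ['fc_0.w_0@GRAD:0:32000', 'fc_0.w_0@GRAD:1:32000']
```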
+ """ + blocks = [] + for var in var_list: + split_count = pserver_count + var_numel = reduce(lambda x, y: x * y, var.shape) + max_pserver_count = int(math.floor(var_numel / float(min_block_size))) + if max_pserver_count == 0: + max_pserver_count = 1 + if max_pserver_count < pserver_count: + split_count = max_pserver_count + block_size = int(math.ceil(var_numel / float(split_count))) - pserver_idx += 1 - if pserver_idx >= len(pserver_endpoints): - pserver_idx = 0 - return param_grad_map + if len(var.shape) >= 2: + # align by dim1(width) + dim1 = reduce(lambda x, y: x * y, var.shape[1:]) + remains = block_size % dim1 + if remains != 0: + block_size += dim1 - remains + # update split_count after align + split_count = int(math.ceil(var_numel / float(block_size))) + for block_id in xrange(split_count): + curr_block_size = min(block_size, var_numel - ( + (block_id) * block_size)) + block = VarBlock(var.name, block_id, curr_block_size) + blocks.append(str(block)) + return blocks class DistributeTranspiler: @@ -58,7 +69,6 @@ class DistributeTranspiler: split_method=round_robin): """ Transpile the program to a distributed data-parallelism programs. - The main_program will be transform to use a remote parameter server to do parameter optimization. And the optimization graph will be put in to a parameter server program. @@ -66,60 +76,113 @@ class DistributeTranspiler: Use different methods to split trainable varialbles to different parameter servers. - Example to run: - - exe = fluid.Executor(place) - t = fluid.DistributeTranspiler() - t.transpile(optimize_ops, params_grads, pservers="127.0.0.1:6174", trainers=1) - - pserver_endpoint = os.getenv("PSERVER") - if pserver_endpoint: - pserver_prog = t.get_pserver_program(pserver_endpoint, optimize_ops) - exe.run(fluid.default_startup_program()) - exe.run(pserver_prog) - else: - feeder = fluid.DataFeeder(feed_list=[images, label], place=place) - exe.run(fluid.default_startup_program()) - - for pass_id in range(PASS_NUM): - ... - :param optimize_ops: op list of optimization, should be the return value of Optimizer.minimize :type optimize_ops: list :param program: program to optimize, default default_main_program :param pservers: parameter server endpoints like "m1:6174,m2:6174" :type pservers: string - :return: return a list of programs """ + assert (callable(split_method)) if program is None: program = default_main_program() self.program = program self.trainers = trainers self.optimize_ops = optimize_ops - self._optimize_distributed( - optimize_ops, - program, - params_grads, - pservers=pservers, - trainers=trainers, - split_method=split_method) - - def _clone_param(self, block, v): - assert isinstance(v, Parameter) - new_p = Parameter( - block=block, - shape=v.shape, - dtype=v.dtype, - type=v.type, - lod_level=v.lod_level, - stop_gradient=v.stop_gradient, - trainable=v.trainable, - optimize_attr=v.optimize_attr, - regularizer=v.regularizer, - name=v.name) - block.vars[new_p.name] = new_p + # steps to transpile: + # 1. split variable to multiple blocks, align by product(dim[1:]) (width). + # 2. modify trainer program add split_op to each Grad. + # 3. append send_op to trainer. + # 4. append concat_op to trainer to update local weights. + # 5. create new program as parameter server. + # 6. 
create parameter server program by split_method generated endpoint->VarBlock + + pserver_endpoints = pservers.split(",") + + # step1 + param_list = [pg[0] for pg in params_grads] + grad_list = [pg[1] for pg in params_grads] + # TODO: add split selected rows support + grad_blocks = split_dense_variable(grad_list, len(pserver_endpoints)) + param_blocks = split_dense_variable(param_list, len(pserver_endpoints)) + # step2 + grad_var_mapping = self._append_split_op(program, grad_blocks) + + # step3 + send_inputs = [] + send_outputs = [] + for b in grad_blocks: # append by order + varname, block_id, _ = b.split(":") + send_inputs.append(grad_var_mapping[varname][int(block_id)]) + + param_var_mapping = self._create_vars_from_blocklist(program, + param_blocks) + for b in param_blocks: + varname, block_id, _ = b.split(":") + send_outputs.append(param_var_mapping[varname][int(block_id)]) + # let send_op know which endpoint to send which var, eplist is of the same + # order of send_inputs. + eplist = split_method(send_inputs, pserver_endpoints) + # create mapping of endpoint -> splited var to create pserver side program + self.param_grad_ep_mapping = dict() + for i, ep in enumerate(eplist): + param = send_outputs[i] + grad = send_inputs[i] + if not self.param_grad_ep_mapping.has_key(ep): + self.param_grad_ep_mapping[ep] = {"params": [], "grads": []} + self.param_grad_ep_mapping[ep]["params"].append(param) + self.param_grad_ep_mapping[ep]["grads"].append(grad) + + send_op = program.global_block().append_op( + type="send", + inputs={"X": send_inputs}, + outputs={"Out": send_outputs}, + attrs={"endpoints": pserver_endpoints, + "epmap": eplist}) + # step4 + for varname, splited_var in param_var_mapping.iteritems(): + if len(splited_var) <= 1: + continue + orig_param = program.global_block().vars[varname] + concat = program.global_block().append_op( + type="concat", + inputs={"X": splited_var}, + outputs={"Out": [orig_param]}, + attrs={"axis": 0}) + + def _create_vars_from_blocklist(self, program, block_list): + block_map = dict() + var_mapping = dict() + for block_str in block_list: + varname, offset, size = block_str.split(":") + if not block_map.has_key(varname): + block_map[varname] = [] + block_map[varname].append((long(offset), long(size))) + for varname, splited in block_map.iteritems(): + orig_var = program.global_block().vars[varname] + var_mapping[varname] = [] + if len(splited) == 1: + var_mapping[varname] = [orig_var] + continue + orig_shape = orig_var.shape + orig_dim1_flatten = 1 + if len(orig_shape) >= 2: + orig_dim1_flatten = reduce(lambda x, y: x * y, orig_shape[1:]) + + for i, block in enumerate(splited): + size = block[1] + rows = size / orig_dim1_flatten + splited_shape = [rows] + if len(orig_shape) >= 2: + splited_shape.extend(orig_shape[1:]) + var = program.global_block().create_var( + name="%s.block%d" % (varname, i), + psersistable=False, + dtype=orig_var.dtype, + shape=splited_shape) # flattend splited var + var_mapping[varname].append(var) + return var_mapping def _clone_var(self, block, var): assert isinstance(var, Variable) @@ -129,34 +192,27 @@ class DistributeTranspiler: dtype=var.dtype, type=var.type, lod_level=var.lod_level, - persistable=var.persistable) + # HACK: let all param in pserver persistable so child + # program in recv can get them + persistable=True) - def _optimize_distributed(self, optimize_ops, program, params_and_grads, - **kwargs): - if kwargs.has_key("split_method"): - split_method = kwargs["split_method"] - else: - split_method = round_robin - - assert 
(callable(split_method)) - pserver_endpoints = kwargs["pservers"].split(",") - self.param_grad_map = split_method(params_and_grads, pserver_endpoints) - - send_op_ordered_inputs = [] - send_op_ordered_outputs = [] - epmap = [] - for ep, v in self.param_grad_map.iteritems(): - send_op_ordered_inputs.extend(v["grads"]) - send_op_ordered_outputs.extend(v["params"]) - for i in v["grads"]: - epmap.append(ep) - send_op = program.global_block().append_op( - type="send", - inputs={"X": send_op_ordered_inputs - }, # inputs is a list of tensors to be send - outputs={"Out": send_op_ordered_outputs}, - attrs={"endpoints": pserver_endpoints, - "epmap": epmap}) + def _append_split_op(self, program, gradblocks): + var_mapping = self._create_vars_from_blocklist(program, gradblocks) + for varname, splited_vars in var_mapping.iteritems(): + # variable that don't need to split have empty splited_vars + if len(splited_vars) <= 1: + continue + orig_var = program.global_block().vars[varname] + sections = [] + for v in splited_vars: + sections.append(v.shape[0]) + program.global_block().append_op( + type="split", + inputs={"X": orig_var}, + outputs={"Out": splited_vars}, + attrs={"sections": sections} # assume split evenly + ) + return var_mapping def get_trainer_program(self): # remove optimize ops and add a send op to main_program @@ -174,69 +230,267 @@ class DistributeTranspiler: var_list.append(var_each) return var_list - def get_pserver_program(self, endpoint, optimize_ops): - pserver_program = Program() - for v in self.param_grad_map[endpoint]["params"]: - self._clone_param(pserver_program.global_block(), v) + def _get_optimizer_input_shape(self, op_type, varkey, orig_shape, + param_shape): + """ + Returns the shape for optimizer inputs that need to be reshaped when + Param and Grad is splited to multiple servers. + """ + # HACK(typhoonzero): Should use functions of corresponding optimizer in + # optimizer.py to get the shape, do not bind this in the transpiler. + if op_type == "adam": + if varkey in ["Moment1", "Moment2"]: + return param_shape + elif op_type == "adagrad": + if varkey == "Moment": + return param_shape + elif op_type == "adamax": + if varkey in ["Moment", "InfNorm"]: + return param_shape + elif op_type == "momentum": + if varkey == "Velocity": + return param_shape + elif op_type == "": + if varkey == "Moment": + return param_shape + elif op_type == "sgd": + pass + return orig_shape - optimize_sub_program = Program() - grad_var_names = [ - var.name for var in self.param_grad_map[endpoint]["grads"] + def _is_op_on_pserver(self, endpoint, all_ops, idx): + """ + Recursively check if the op need to run on current server. + Assume that ops are in the execution order. + """ + param_names = [ + p.name for p in self.param_grad_ep_mapping[endpoint]["params"] ] - for opt_op in optimize_ops: - for _, var in opt_op.inputs.iteritems(): - # NOTE: append operators to merge gradients from multiple - # trainers. If trainers == 1, this is not needed. 
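Putting the new transpiler pieces together, a hedged end-to-end sketch of the intended trainer/pserver workflow is shown below; the environment variables, endpoints, and the `avg_cost`/`exe` objects are illustrative assumptions rather than part of this patch:

```python
import os
import paddle.v2.fluid as fluid

# ... build the network and an Executor `exe`, obtaining a loss `avg_cost` ...
optimizer = fluid.optimizer.SGD(learning_rate=0.01)
optimize_ops, params_grads = optimizer.minimize(avg_cost)

t = fluid.DistributeTranspiler()
t.transpile(optimize_ops, params_grads,
            pservers="127.0.0.1:6174,127.0.0.1:6175", trainers=2)

if os.getenv("TRAINING_ROLE") == "PSERVER":   # illustrative env vars
    endpoint = os.getenv("PSERVER_ENDPOINT")
    pserver_prog = t.get_pserver_program(endpoint, optimize_ops)
    startup_prog = t.get_startup_program(endpoint, pserver_prog)
    exe.run(startup_prog)
    exe.run(pserver_prog)
else:
    exe.run(fluid.default_startup_program())
    trainer_prog = t.get_trainer_program()
    # ... feed mini-batches and run trainer_prog in a loop ...
```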
- if self.trainers > 1 and var.name in grad_var_names: + op = all_ops[idx] + if op.inputs.has_key("Param"): + if op.inputs["Param"].name in param_names: + return True + else: + for n in param_names: + if n.startswith(op.inputs["Param"].name+".block") and \ + n != op.inputs["Param"].name: + return True + return False + else: + j = idx - 1 + while j >= 0: + prev_op = all_ops[j] + prev_output_names = [o.name for o in prev_op.outputs.values()] + prev_input_names = [o.name for o in prev_op.inputs.values()] + found1 = False + found2 = False + for _, v in op.inputs.iteritems(): + if v.name in prev_output_names: + found1 = self._is_op_on_pserver(endpoint, all_ops, j) + # later ops may produce output for prev op's next batch use. + for _, v in op.outputs.iteritems(): + if v.name in prev_input_names: + found2 = self._is_op_on_pserver(endpoint, all_ops, j) + if found1 or found2: + return True + j -= 1 + return False + + def _append_pserver_ops(self, program, pserver_program, opt_op, endpoint): + new_inputs = dict() + # update param/grad shape first, then other inputs like + # moment can use the updated shape + for key, var in opt_op.inputs.iteritems(): + if key == "Grad": + grad_block = None + for g in self.param_grad_ep_mapping[endpoint]["grads"]: + if g.name.startswith(var.name): + grad_block = g + break + if not grad_block: + # do not append this op if current endpoint + # is not dealing with this grad block + return + merged_var = program.global_block().create_var( + name=grad_block.name, + persistable=grad_block.persistable, + dtype=grad_block.dtype, + shape=grad_block.shape) + # append merging ops if trainers > 1 + if self.trainers > 1: vars2merge = self._create_var_for_trainers( - optimize_sub_program.global_block(), var, self.trainers) - merged_var = optimize_sub_program.global_block().create_var( - name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=var.shape) - optimize_sub_program.global_block().append_op( + program.global_block(), grad_block, self.trainers) + program.global_block().append_op( type="sum", inputs={"X": vars2merge}, outputs={"Out": merged_var}) - optimize_sub_program.global_block().append_op( + program.global_block().append_op( type="scale", inputs={"X": merged_var}, outputs={"Out": merged_var}, attrs={"scale": 1.0 / float(self.trainers)}) - else: - optimize_sub_program.global_block().create_var( - name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=var.shape) + new_inputs[key] = merged_var + elif key == "Param": + # param is already created on global program + param_block = None + for p in self.param_grad_ep_mapping[endpoint]["params"]: + if p.name.startswith(var.name): + param_block = p + break + if not param_block: + return + tmpvar = program.global_block().create_var( + name=param_block.name, + persistable=True, + dtype=param_block.dtype, + shape=param_block.shape) + + new_inputs[key] = tmpvar + for key, var in opt_op.inputs.iteritems(): + if key in ["Param", "Grad"]: + continue + # update accumulator variable shape + param_shape = new_inputs["Param"].shape + new_shape = self._get_optimizer_input_shape(opt_op.type, key, + var.shape, param_shape) + tmpvar = program.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=new_shape) + new_inputs[key] = tmpvar + # create var in pserver program global block. + # TODO(typhoonzero): put blocks in one program to avoid create two + # variables. 
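The pserver-side matching in `_is_op_on_pserver` and `_append_pserver_ops` relies on the `"<name>.block<id>"` convention used by `_create_vars_from_blocklist`; a tiny standalone check of that convention (illustrative only):

```python
# A pserver that holds the split piece "fc_0.w_0.block1" is treated as
# responsible for optimize ops whose "Param" input is the original "fc_0.w_0".
piece_on_pserver = "fc_0.w_0.block1"
op_param_name = "fc_0.w_0"

assert piece_on_pserver.startswith(op_param_name + ".block")
assert piece_on_pserver != op_param_name
```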
+ pserver_program.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=new_shape) + + # change outputs ParamOut variable + opt_op.outputs["ParamOut"] = new_inputs["Param"] + program.global_block().append_op( + type=opt_op.type, + inputs=new_inputs, + outputs=opt_op.outputs, + attrs=opt_op.attrs) + + def _append_pserver_non_opt_ops(self, program, pserver_program, opt_op): + for _, var in opt_op.inputs.iteritems(): + program.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + pserver_program.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + program.global_block().append_op( + type=opt_op.type, + inputs=opt_op.inputs, + outputs=opt_op.outputs, + attrs=opt_op.attrs) + + def get_pserver_program(self, endpoint, optimize_ops): + """ + get pserver side program by endpoint + + NOTE: assume blocks of the same variable is not distributed + on the same pserver, only change param/grad varnames for + trainers to fetch. For each pserver endpoint, server side + program must be a sub-set of the original optimization program. + """ + # step5 + pserver_program = Program() + for v in self.param_grad_ep_mapping[endpoint]["params"]: + self._clone_var(pserver_program.global_block(), v) + # step6 + optimize_sub_program = Program() + for idx, opt_op in enumerate(optimize_ops): + is_op_on_pserver = self._is_op_on_pserver(endpoint, optimize_ops, + idx) + if not is_op_on_pserver: + continue if opt_op.inputs.has_key("Grad"): - if opt_op.inputs["Grad"].name in grad_var_names: - optimize_sub_program.global_block().append_op( - type=opt_op.type, - inputs=opt_op.inputs, - outputs=opt_op.outputs, - attrs=opt_op.attrs) + self._append_pserver_ops(optimize_sub_program, pserver_program, + opt_op, endpoint) else: - optimize_sub_program.global_block().append_op( - type=opt_op.type, - inputs=opt_op.inputs, - outputs=opt_op.outputs, - attrs=opt_op.attrs) + self._append_pserver_non_opt_ops(optimize_sub_program, + pserver_program, opt_op) pserver_program.global_block().append_op( type="recv", - inputs={"RX": - self.param_grad_map[endpoint]["grads"]}, # grads to recv + inputs={"RX": self.param_grad_ep_mapping[endpoint]["grads"] + }, # grads to recv outputs={}, attrs={ "OptimizeProgram": optimize_sub_program.desc, "endpoint": endpoint, - "ParamList": - [p.name for p in self.param_grad_map[endpoint]["params"]], - "GradList": - [p.name for p in self.param_grad_map[endpoint]["grads"]], + "ParamList": [ + p.name + for p in self.param_grad_ep_mapping[endpoint]["params"] + ], + "GradList": [ + p.name + for p in self.param_grad_ep_mapping[endpoint]["grads"] + ], "Trainers": self.trainers }) pserver_program.sync_with_cpp() return pserver_program + + def get_startup_program(self, endpoint, pserver_program): + """ + Get startup program for current parameter server. + Modify operator input variables if there are variables that + was splited to several blocks. + """ + s_prog = Program() + orig_s_prog = framework.default_startup_program() + params = self.param_grad_ep_mapping[endpoint]["params"] + + def _get_splited_name_and_shape(varname): + for idx, splited_param in enumerate(params): + pname = splited_param.name + if pname.startswith(varname) and varname != pname: + return pname, splited_param.shape + return "", [] + + # 1. 
create vars in pserver program to startup program + pserver_vars = pserver_program.global_block().vars + created_var_map = dict() + for _, var in pserver_vars.iteritems(): + tmpvar = s_prog.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + created_var_map[var.name] = tmpvar + + # 2. rename op outputs + for op in orig_s_prog.global_block().ops: + new_outputs = dict() + # do not append startup op if var is not on this pserver + op_on_pserver = False + for key, var in op.outputs.iteritems(): + newname, _ = _get_splited_name_and_shape(var.name) + if newname: + op_on_pserver = True + new_outputs[key] = created_var_map[newname] + elif var.name in pserver_vars: + op_on_pserver = True + new_outputs[key] = pserver_vars[var.name] + + if op_on_pserver: + if op.type in [ + "gaussian_random", "fill_constant", "uniform_random" + ]: + op.attrs["shape"] = new_outputs["Out"].shape + s_prog.global_block().append_op( + type=op.type, + inputs=op.inputs, + outputs=new_outputs, + attrs=op.attrs) + return s_prog diff --git a/python/paddle/v2/fluid/distribute_transpiler_simple.py b/python/paddle/v2/fluid/distribute_transpiler_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..32db3df9aa2a9381d39809e5bcb18a558704c7ed --- /dev/null +++ b/python/paddle/v2/fluid/distribute_transpiler_simple.py @@ -0,0 +1,242 @@ +import framework +from framework import Program, default_main_program, Parameter, Variable +import optimizer +from layer_helper import LayerHelper + + +def hash_name_to_server(params_grads, pserver_endpoints): + """ + :param param_grads: + :return: a map of pserver endpoint -> + params -> [param list] + grads -> [grad list] + """ + + def _hash_param(param_name, total): + return hash(param_name) % total + + param_grad_map = dict() + for param, grad in params_grads: + if param.trainable is True and grad is not None: + server_id = _hash_param(param.name, len(pserver_endpoints)) + server_for_param = pserver_endpoints[server_id] + if not param_grad_map.has_key(server_for_param): + param_grad_map[server_for_param] = {"params": [], "grads": []} + param_grad_map[server_for_param]["params"].append(param) + param_grad_map[server_for_param]["grads"].append(grad) + + return param_grad_map + + +def round_robin(params_grads, pserver_endpoints): + assert (len(params_grads) > len(pserver_endpoints)) + + param_grad_map = dict() + pserver_idx = 0 + for param, grad in params_grads: + if param.trainable is True: + server_for_param = pserver_endpoints[pserver_idx] + if not param_grad_map.has_key(server_for_param): + param_grad_map[server_for_param] = {"params": [], "grads": []} + + param_grad_map[server_for_param]["params"].append(param) + param_grad_map[server_for_param]["grads"].append(grad) + + pserver_idx += 1 + if pserver_idx >= len(pserver_endpoints): + pserver_idx = 0 + return param_grad_map + + +class SimpleDistributeTranspiler: + def transpile(self, + optimize_ops, + params_grads, + program=None, + pservers="127.0.0.1:6174", + trainers=1, + split_method=round_robin): + """ + Transpile the program to a distributed data-parallelism programs. + + The main_program will be transform to use a remote parameter server + to do parameter optimization. And the optimization graph will be put + in to a parameter server program. + + Use different methods to split trainable varialbles to different + parameter servers. 
+ + Example to run: + + exe = fluid.Executor(place) + t = fluid.DistributeTranspiler() + t.transpile(optimize_ops, params_grads, pservers="127.0.0.1:6174", trainers=1) + + pserver_endpoint = os.getenv("PSERVER") + if pserver_endpoint: + pserver_prog = t.get_pserver_program(pserver_endpoint, optimize_ops) + exe.run(fluid.default_startup_program()) + exe.run(pserver_prog) + else: + feeder = fluid.DataFeeder(feed_list=[images, label], place=place) + exe.run(fluid.default_startup_program()) + + for pass_id in range(PASS_NUM): + ... + + :param optimize_ops: op list of optimization, should be the + return value of Optimizer.minimize + :type optimize_ops: list + :param program: program to optimize, default default_main_program + :param pservers: parameter server endpoints like "m1:6174,m2:6174" + :type pservers: string + + :return: return a list of programs + """ + if program is None: + program = default_main_program() + self.program = program + self.trainers = trainers + self.optimize_ops = optimize_ops + self._optimize_distributed( + optimize_ops, + program, + params_grads, + pservers=pservers, + trainers=trainers, + split_method=split_method) + + def _clone_param(self, block, v): + assert isinstance(v, Parameter) + new_p = Parameter( + block=block, + shape=v.shape, + dtype=v.dtype, + type=v.type, + lod_level=v.lod_level, + stop_gradient=v.stop_gradient, + trainable=v.trainable, + optimize_attr=v.optimize_attr, + regularizer=v.regularizer, + name=v.name) + block.vars[new_p.name] = new_p + + def _clone_var(self, block, var): + assert isinstance(var, Variable) + return block.create_var( + name=var.name, + shape=var.shape, + dtype=var.dtype, + type=var.type, + lod_level=var.lod_level, + persistable=var.persistable) + + def _optimize_distributed(self, optimize_ops, program, params_and_grads, + **kwargs): + if kwargs.has_key("split_method"): + split_method = kwargs["split_method"] + else: + split_method = round_robin + + assert (callable(split_method)) + pserver_endpoints = kwargs["pservers"].split(",") + self.param_grad_map = split_method(params_and_grads, pserver_endpoints) + + send_op_ordered_inputs = [] + send_op_ordered_outputs = [] + epmap = [] + for ep, v in self.param_grad_map.iteritems(): + send_op_ordered_inputs.extend(v["grads"]) + send_op_ordered_outputs.extend(v["params"]) + for i in v["grads"]: + epmap.append(ep) + send_op = program.global_block().append_op( + type="send", + inputs={"X": send_op_ordered_inputs + }, # inputs is a list of tensors to be send + outputs={"Out": send_op_ordered_outputs}, + attrs={"endpoints": pserver_endpoints, + "epmap": epmap}) + + def get_trainer_program(self): + # remove optimize ops and add a send op to main_program + self.program.global_block().delete_ops(self.optimize_ops) + return self.program + + def _create_var_for_trainers(self, block, var, trainers): + var_list = [] + for i in xrange(trainers): + var_each = block.create_var( + name="%s.trainer_%d" % (var.name, i), + psersistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + var_list.append(var_each) + return var_list + + def get_pserver_program(self, endpoint, optimize_ops): + pserver_program = Program() + for v in self.param_grad_map[endpoint]["params"]: + self._clone_param(pserver_program.global_block(), v) + + optimize_sub_program = Program() + grad_var_names = [ + var.name for var in self.param_grad_map[endpoint]["grads"] + ] + for opt_op in optimize_ops: + for _, var in opt_op.inputs.iteritems(): + # NOTE: append operators to merge gradients from multiple + # trainers. 
If trainers == 1, this is not needed. + if self.trainers > 1 and var.name in grad_var_names: + vars2merge = self._create_var_for_trainers( + optimize_sub_program.global_block(), var, self.trainers) + merged_var = optimize_sub_program.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + optimize_sub_program.global_block().append_op( + type="sum", + inputs={"X": vars2merge}, + outputs={"Out": merged_var}) + optimize_sub_program.global_block().append_op( + type="scale", + inputs={"X": merged_var}, + outputs={"Out": merged_var}, + attrs={"scale": 1.0 / float(self.trainers)}) + else: + optimize_sub_program.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + + if opt_op.inputs.has_key("Grad"): + if opt_op.inputs["Grad"].name in grad_var_names: + optimize_sub_program.global_block().append_op( + type=opt_op.type, + inputs=opt_op.inputs, + outputs=opt_op.outputs, + attrs=opt_op.attrs) + else: + optimize_sub_program.global_block().append_op( + type=opt_op.type, + inputs=opt_op.inputs, + outputs=opt_op.outputs, + attrs=opt_op.attrs) + pserver_program.global_block().append_op( + type="recv", + inputs={"RX": + self.param_grad_map[endpoint]["grads"]}, # grads to recv + outputs={}, + attrs={ + "OptimizeProgram": optimize_sub_program.desc, + "endpoint": endpoint, + "ParamList": + [p.name for p in self.param_grad_map[endpoint]["params"]], + "GradList": + [p.name for p in self.param_grad_map[endpoint]["grads"]], + "Trainers": self.trainers + }) + pserver_program.sync_with_cpp() + return pserver_program diff --git a/python/paddle/v2/fluid/distributed_spliter.py b/python/paddle/v2/fluid/distributed_spliter.py new file mode 100644 index 0000000000000000000000000000000000000000..eff30f7bb66b5149bb24615593463e8715e78576 --- /dev/null +++ b/python/paddle/v2/fluid/distributed_spliter.py @@ -0,0 +1,35 @@ +def hash_name(varlist, pserver_endpoints): + """ + hash variable names to several endpoints. + + :param varlist: a list of Variables + :return: a map of pserver endpoint -> varname + """ + + def _hash_block(block_str, total): + return hash(block_str) % total + + eplist = [] + for var in varlist: + server_id = _hash_block(var.name(), len(pserver_endpoints)) + server_for_param = pserver_endpoints[server_id] + eplist.append(server_for_param) + return eplist + + +def round_robin(varlist, pserver_endpoints): + """ + distribute variables to several endpoints. + """ + assert (len(varlist) > len(pserver_endpoints)) + + eplist = [] + pserver_idx = 0 + for var in varlist: + server_for_param = pserver_endpoints[pserver_idx] + eplist.append(server_for_param) + + pserver_idx += 1 + if pserver_idx >= len(pserver_endpoints): + pserver_idx = 0 + return eplist diff --git a/python/paddle/v2/fluid/evaluator.py b/python/paddle/v2/fluid/evaluator.py index e186ee96c387acf24471d4e26ce020c4ecac8d19..dc083f37b5f357e835fc1a45c25a420b2c3d9798 100644 --- a/python/paddle/v2/fluid/evaluator.py +++ b/python/paddle/v2/fluid/evaluator.py @@ -4,7 +4,10 @@ import layers from framework import Program, unique_name, Variable, program_guard from layer_helper import LayerHelper -__all__ = ['Accuracy', 'ChunkEvaluator'] +__all__ = [ + 'Accuracy', + 'ChunkEvaluator', +] def _clone_var_(block, var): @@ -21,19 +24,19 @@ def _clone_var_(block, var): class Evaluator(object): """ Base Class for all evaluators - + Args: - name(str): The name of evaluator. such as, "accuracy". Used for generate + name(str): The name of evaluator. 
such as, "accuracy". Used for generate temporary variable name. - main_program(Program, optional): The evaluator should be added to this + main_program(Program, optional): The evaluator should be added to this main_program. Default default_main_program() - startup_program(Program, optional):The parameter should be added to this + startup_program(Program, optional):The parameter should be added to this startup_program. Default default_startup_program() - + Attributes: - states(list): The list of state variables. states will be reset to zero + states(list): The list of state variables. states will be reset to zero when `reset` is invoked. - metrics(list): The list of metrics variables. They will be calculate + metrics(list): The list of metrics variables. They will be calculate every mini-batch """ @@ -66,14 +69,14 @@ class Evaluator(object): def create_state(self, suffix, dtype, shape): """ - Create state variable. - + Create state variable. + NOTE: It is not a public API. - + Args: - suffix(str): the state suffix. - dtype(str|core.DataType): the state data type - shape(tuple|list): the shape of state + suffix(str): the state suffix. + dtype(str|core.DataType): the state data type + shape(tuple|list): the shape of state Returns: State variable @@ -127,8 +130,8 @@ class Accuracy(Evaluator): class ChunkEvaluator(Evaluator): """ - Accumulate counter numbers output by chunk_eval from mini-batches and - compute the precision recall and F1-score using the accumulated counter + Accumulate counter numbers output by chunk_eval from mini-batches and + compute the precision recall and F1-score using the accumulated counter numbers. """ diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index 3ef6b33192d9509b765173de8981bc7ff18486e5..f78f2a331a6ea882df3368c54013b08c1a1debc5 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -7,9 +7,15 @@ import proto.framework_pb2 as framework_pb2 from . import core __all__ = [ - 'Block', 'Variable', 'Program', 'Operator', 'default_startup_program', - 'default_main_program', 'program_guard', 'switch_startup_program', - 'switch_main_program' + 'Block', + 'Variable', + 'Program', + 'Operator', + 'default_startup_program', + 'default_main_program', + 'program_guard', + 'switch_startup_program', + 'switch_main_program', ] EMPTY_VAR_NAME = core.kEmptyVarName() @@ -274,6 +280,9 @@ class Variable(object): uid = core.unique_integer(prefix) # unique during whole process. 
return "_".join([prefix, str(uid)]) + def set_error_clip(self, error_clip): + self.error_clip = error_clip + def get_all_op_protos(): """ diff --git a/python/paddle/v2/fluid/initializer.py b/python/paddle/v2/fluid/initializer.py index c0839caaf2bb5bc43a76a13b5782cc519a4afe63..c3ed1a9089603abe86d815f6826d084d23e01d99 100644 --- a/python/paddle/v2/fluid/initializer.py +++ b/python/paddle/v2/fluid/initializer.py @@ -1,7 +1,12 @@ import framework import numpy as np -__all__ = ['Constant', 'Uniform', 'Normal', 'Xavier'] +__all__ = [ + 'Constant', + 'Uniform', + 'Normal', + 'Xavier', +] class Initializer(object): diff --git a/python/paddle/v2/fluid/io.py b/python/paddle/v2/fluid/io.py index c63567601accd8c072368351f2838857bb61c818..54b6978ebaa02e1a070a666f60cd61b66d3ac1f8 100644 --- a/python/paddle/v2/fluid/io.py +++ b/python/paddle/v2/fluid/io.py @@ -4,13 +4,29 @@ import cPickle as pickle from paddle.v2.fluid.framework import Program, Parameter, default_main_program, Variable __all__ = [ - 'save_vars', 'save_params', 'save_persistables', 'load_vars', 'load_params', - 'load_persistables', "save_inference_model", "load_inference_model", - "get_inference_program" + 'save_vars', + 'save_params', + 'save_persistables', + 'load_vars', + 'load_params', + 'load_persistables', + 'save_inference_model', + 'load_inference_model', + 'get_inference_program', ] def is_parameter(var): + """Check whether the variable is a Parameter. + + This function checks whether the input variable is a Parameter. + + Args: + var : The input variable. + + Returns: + boolean result whether the variable is a Parameter. + """ return isinstance(var, Parameter) diff --git a/python/paddle/v2/fluid/layers/control_flow.py b/python/paddle/v2/fluid/layers/control_flow.py index f134e56cda63e9e8f62288b8f5235e2c785adc8e..d6365aed91c39e2f3088f0899cad4f5bfde0d1bd 100644 --- a/python/paddle/v2/fluid/layers/control_flow.py +++ b/python/paddle/v2/fluid/layers/control_flow.py @@ -12,7 +12,7 @@ __all__ = [ 'array_to_lod_tensor', 'increment', 'array_write', 'create_array', 'less_than', 'array_read', 'shrink_memory', 'array_length', 'IfElse', 'DynamicRNN', 'ConditionalBlock', 'StaticRNN', 'reorder_lod_tensor_by_rank', - 'ParallelDo' + 'ParallelDo', 'Print' ] @@ -110,6 +110,67 @@ def merge_lod_tensor(in_true, in_false, x, mask, level=0): return out +def Print(input, + first_n=-1, + message=None, + summarize=-1, + print_tensor_name=True, + print_tensor_type=True, + print_tensor_shape=True, + print_tensor_lod=True, + print_phase='both'): + ''' + **Print operator** + + This creates a print op that will print when a tensor is accessed. + + Wraps the tensor passed in so that whenever that a tensor is accessed, + the message `message` is printed, along with the current value of the + tensor `t`. + + Args: + input (Variable): A Tensor to print. + summarize (int): Print this number of elements in the tensor, will print + all if left is negative. + message (str): A string message to print as a prefix. + first_n (int): Only log `first_n` number of times. + print_tensor_name (bool): Print the tensor name. + print_tensor_type (bool): Print the tensor type. + print_tensor_shape (bool): Print the tensor shape. + print_tensor_lod (bool): Print the tensor lod. + print_phase (bool): Which phase to displace, including 'forward', + 'backward' and 'both'. If set to 'backward' or 'both', will + print the gradients of input tensor. + + Returns: + Variable: Output tensor, same data with input tensor. + + Examples: + .. code-block:: python + + value = some_layer(...) 
+ Print(value, summarize=10, + message="The content of some_layer: ") + ''' + helper = LayerHelper('print', **locals()) + out = helper.create_tmp_variable(dtype=helper.input_dtype()) + helper.append_op( + type='print', + inputs={'In': input}, + attrs={ + 'first_n': first_n, + 'summarize': summarize, + 'message': message or "", + 'print_tensor_name': print_tensor_name, + 'print_tensor_type': print_tensor_type, + 'print_tensor_shape': print_tensor_shape, + 'print_tensor_lod': print_tensor_lod, + 'print_phase': print_phase.upper() + }, + outputs={'Out': out}) + return out + + class BlockGuard(object): """ BlockGuard class. @@ -687,11 +748,10 @@ def topk(input, k): def lod_tensor_to_array(x, table): - """This function performs the operation that converts an LOD_Tensor to - an array. + """ Convert a LOD_TENSOR to an LOD_TENSOR_ARRAY. Args: - x (Variable|list): The tensor that needs to be converted to an array. + x (Variable|list): The LOD tensor to be converted to a LOD tensor array. table (ParamAttr|list): The variable that stores the level of lod which is ordered by sequence length in descending order. @@ -721,11 +781,10 @@ def lod_tensor_to_array(x, table): def array_to_lod_tensor(x, table): - """This function performs the operations that converts an array to - an LOD_Tensor. + """Convert a LoD_Tensor_Aarry to an LoDTensor. Args: - x (Variable|list): The array that needs to be converted to a tensor. + x (Variable|list): The lod tensor array to be converted to a tensor. table (ParamAttr|list): The variable that stores the level of lod which is ordered by sequence length in descending order. @@ -753,7 +812,8 @@ def array_to_lod_tensor(x, table): def increment(x, value=1.0, in_place=True): - """This function performs an operation that increments each value in the + """ + This function performs an operation that increments each value in the input :math:`x` by an amount: :math:`value` as mentioned in the input parameter. This operation is performed in-place by default. @@ -786,17 +846,24 @@ def increment(x, value=1.0, in_place=True): def array_write(x, i, array=None): - """This function performs the operation to write the data out as an - LOD_TENSOR_ARRAY. + """ + This function writes the given input variable to the specified position + indicating by the arrary index to an output LOD_TENSOR_ARRAY. If the + output LOD_TENSOR_ARRAY is not given(None), a new one will be created and + returned. Args: x (Variable|list): The input tensor from which the data will be read. - i (Variable|list): The subscript index in tensor array, that points the - place from which data will be read. - array (Variable|list): The data can be read into this variable if - this is assigned. + i (Variable|list): The index of the output LOD_TENSOR_ARRAY, pointing to + the position to which the input tensor will be + written. + array (Variable|list): The output LOD_TENSOR_ARRAY to which the input + tensor will be written. If this parameter is + NONE, a new LOD_TENSOR_ARRAY will be created and + returned. + Returns: - Variable: The tensor type variable that has the data written to it. + Variable: The output LOD_TENSOR_ARRAY where the input tensor is written. Examples: .. 
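A slightly fuller sketch of using the new `Print` layer end to end; it assumes `Print` is re-exported through `fluid.layers` like the other control-flow layers, and fetches the wrapped output so the print op stays in the executed program:

```python
import numpy as np
import paddle.v2.fluid as fluid

x = fluid.layers.data(name='x', shape=[4], dtype='float32')
y = fluid.layers.fc(input=x, size=2)
# Wrap y so its value is printed whenever it is evaluated.
y = fluid.layers.Print(y, summarize=10, message="fc output: ")

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
exe.run(fluid.default_main_program(),
        feed={'x': np.random.random((8, 4)).astype('float32')},
        fetch_list=[y])
```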
code-block::python @@ -1159,7 +1226,8 @@ class DynamicRNN(object): self.lod_rank_table = None self.max_seq_len = None self.step_idx = None - self.zero_idx = fill_constant(shape=[1], value=0, dtype='int64') + self.zero_idx = fill_constant( + shape=[1], value=0, dtype='int64', force_cpu=True) self.mem_dict = dict() self.output_array = [] self.outputs = [] @@ -1173,7 +1241,7 @@ class DynamicRNN(object): self._assert_in_rnn_block_("step_input") if not isinstance(x, Variable): raise TypeError( - "step_input() can only take a Variable as its input") + "step_input() can only take a Variable as its input.") parent_block = self._parent_block_() if self.lod_rank_table is None: self.lod_rank_table = parent_block.create_var( @@ -1234,7 +1302,8 @@ class DynamicRNN(object): def block(self): if self.status != DynamicRNN.BEFORE_RNN: raise ValueError("rnn.block() can only be invoke once") - self.step_idx = fill_constant(shape=[1], dtype='int64', value=0) + self.step_idx = fill_constant( + shape=[1], dtype='int64', value=0, force_cpu=True) self.step_idx.stop_gradient = False self.status = DynamicRNN.IN_RNN with self.while_op.block(): @@ -1254,8 +1323,8 @@ class DynamicRNN(object): def __call__(self, *args, **kwargs): if self.status != DynamicRNN.AFTER_RNN: - raise ValueError( - "Dynamic RNN outputs can only be retrieved after rnn block") + raise ValueError(("Output of the dynamic RNN can only be visited " + "outside the rnn block.")) if len(self.outputs) == 1: return self.outputs[0] else: diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index 48a6bee5588949f708e6c588152be9e174f3ad69..99a40ce45a2ff5c89fdfb2f0c170dbc34ee696bc 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -9,12 +9,33 @@ from ..param_attr import ParamAttr from tensor import concat __all__ = [ - 'fc', 'embedding', 'dynamic_lstm', 'gru_unit', 'linear_chain_crf', - 'crf_decoding', 'cos_sim', 'cross_entropy', 'square_error_cost', 'accuracy', - 'chunk_eval', 'sequence_conv', 'conv2d', 'sequence_pool', 'pool2d', - 'batch_norm', 'beam_search_decode', 'conv2d_transpose', 'sequence_expand', - 'lstm_unit', 'reduce_sum', 'reduce_mean', 'reduce_max', 'reduce_min', - 'sequence_first_step', 'sequence_last_step', 'dropout' + 'fc', + 'embedding', + 'dynamic_lstm', + 'gru_unit', + 'linear_chain_crf', + 'crf_decoding', + 'cos_sim', + 'cross_entropy', + 'square_error_cost', + 'accuracy', + 'chunk_eval', + 'sequence_conv', + 'conv2d', + 'sequence_pool', + 'pool2d', + 'batch_norm', + 'beam_search_decode', + 'conv2d_transpose', + 'sequence_expand', + 'lstm_unit', + 'reduce_sum', + 'reduce_mean', + 'reduce_max', + 'reduce_min', + 'sequence_first_step', + 'sequence_last_step', + 'dropout', ] @@ -248,13 +269,13 @@ def gru_unit(input, h_t & = dot((1-u_t), m_t) + dot(u_t, h_{t-1}) The inputs of gru unit includes :math:`z_t`, :math:`h_{t-1}`. In terms - of the equation above, the :math:`z_t` is split into 3 parts - - :math:`xu_t`, :math:`xr_t` and :math:`xm_t`. This means that in order to - implement a full GRU unit operator for an input, a fully + of the equation above, the :math:`z_t` is split into 3 parts - + :math:`xu_t`, :math:`xr_t` and :math:`xm_t`. This means that in order to + implement a full GRU unit operator for an input, a fully connected layer has to be applied, such that :math:`z_t = W_{fc}x_t`. - The terms :math:`u_t` and :math:`r_t` represent the update and reset gates - of the GRU cell. Unlike LSTM, GRU has one lesser gate. 
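The `force_cpu=True` constants above only pin DynamicRNN's internal step counters to host memory; user-facing code is unchanged. A minimal usage sketch, with the `memory`/`update_memory`/`output` names taken from this version's DynamicRNN API and an illustrative vocabulary and hidden size:

```python
import paddle.v2.fluid as fluid

sentence = fluid.layers.data(
    name='word', shape=[1], dtype='int64', lod_level=1)
emb = fluid.layers.embedding(input=sentence, size=[10000, 32])

drnn = fluid.layers.DynamicRNN()
with drnn.block():
    word = drnn.step_input(emb)            # one time step per call
    prev = drnn.memory(shape=[200], value=0.0)
    hidden = fluid.layers.fc(input=[word, prev], size=200, act='tanh')
    drnn.update_memory(prev, hidden)
    drnn.output(hidden)

# Outputs may only be visited outside the rnn block, as the new error says.
last = fluid.layers.sequence_last_step(input=drnn())
```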
However, there is + The terms :math:`u_t` and :math:`r_t` represent the update and reset gates + of the GRU cell. Unlike LSTM, GRU has one lesser gate. However, there is an intermediate candidate hidden output, which is denoted by :math:`m_t`. This layer has three outputs :math:`h_t`, :math:`dot(r_t, h_{t-1})` and concatenation of :math:`u_t`, :math:`r_t` and :math:`m_t`. @@ -276,7 +297,7 @@ def gru_unit(input, .. code-block:: python # assuming we have x_t_data and prev_hidden of size=10 - x_t = fluid.layers.fc(input=x_t_data, size=30) + x_t = fluid.layers.fc(input=x_t_data, size=30) hidden_val, r_h_val, gate_val = fluid.layers.gru_unit(input=x_t, hidden = prev_hidden) @@ -754,7 +775,7 @@ def conv2d(input, pre_bias = helper.create_tmp_variable(dtype) helper.append_op( - type='conv2d_cudnn', + type='conv2d', inputs={ 'Input': input, 'Filter': filter_param, @@ -983,7 +1004,7 @@ def batch_norm(input, default_initializer=Constant(1.0)) bias = helper.create_parameter( - attr=helper.param_attr, shape=param_shape, dtype=dtype, is_bias=True) + attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True) mean = helper.create_global_variable( dtype=input.dtype, diff --git a/python/paddle/v2/fluid/layers/ops.py b/python/paddle/v2/fluid/layers/ops.py index d3a5b70785947148d6e208b4d8dafec8bb52ff85..51a85dbbd3357fabc62fb5b43269fdf79da21bfb 100644 --- a/python/paddle/v2/fluid/layers/ops.py +++ b/python/paddle/v2/fluid/layers/ops.py @@ -1,7 +1,34 @@ from ..registry import register_layer __activations__ = [ - 'abs', 'tanh', 'sigmoid', 'relu', 'sqrt', 'ceil', 'floor', 'log', 'round' + 'sigmoid', + 'logsigmoid', + 'exp', + 'relu', + 'tanh', + 'tanh_shrink', + 'softshrink', + 'sqrt', + 'abs', + 'ceil', + 'floor', + 'round', + 'reciprocal', + 'log', + 'square', + 'softplus', + 'softsign', + 'brelu', + 'leaky_relu', + 'soft_relu', + 'elu', + 'relu6', + 'pow', + 'stanh', + 'hard_shrink', + 'thresholded_relu', + 'hard_sigmoid', + 'swish', ] __all__ = [ diff --git a/python/paddle/v2/fluid/layers/tensor.py b/python/paddle/v2/fluid/layers/tensor.py index 5f12ecfc14f7521948acdf27f1d6249e8052abc5..2217c56b62af958709bb180bb360c938d1f0b312 100644 --- a/python/paddle/v2/fluid/layers/tensor.py +++ b/python/paddle/v2/fluid/layers/tensor.py @@ -1,9 +1,21 @@ from ..layer_helper import LayerHelper from ..param_attr import ParamAttr +from ..framework import convert_np_dtype_to_dtype_ +from ..framework import Variable +from ..core import DataType +import numpy __all__ = [ - 'create_tensor', 'create_parameter', 'cast', 'concat', 'sums', 'assign', - 'fill_constant_batch_size_like', 'fill_constant', 'ones', 'zeros' + 'create_tensor', + 'create_parameter', + 'cast', + 'concat', + 'sums', + 'assign', + 'fill_constant_batch_size_like', + 'fill_constant', + 'ones', + 'zeros', ] @@ -121,7 +133,7 @@ def assign(input, output): This function copies the *input* Variable to the *output* Variable. 
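Each name added to `__activations__` is turned into a layer function by `register_layer`, so the new activations are called like any other layer. A short sketch, assuming the generated wrappers accept the input tensor positionally and that their operator attributes all have defaults:

```python
import paddle.v2.fluid as fluid

x = fluid.layers.data(name='x', shape=[32], dtype='float32')
# Each registered activation is an ordinary layer with default attributes.
y = fluid.layers.leaky_relu(x)
y = fluid.layers.softsign(y)
y = fluid.layers.hard_sigmoid(fluid.layers.relu6(y))
```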
Args: - input(Variable): The source variable + input(Variable|numpy.ndarray): The source variable output(Variable): The destination variable Returns: @@ -134,37 +146,64 @@ def assign(input, output): fluid.layers.assign(hidden, out) """ helper = LayerHelper('assign', **locals()) - helper.append_op( - type='scale', - inputs={'X': [input]}, - outputs={'Out': [output]}, - attrs={'scale': 1.0}) + if isinstance(input, Variable): + helper.append_op( + type='scale', + inputs={'X': [input]}, + outputs={'Out': [output]}, + attrs={'scale': 1.0}) + elif isinstance(input, numpy.ndarray): + dtype = convert_np_dtype_to_dtype_(input.dtype) + if dtype == DataType.FP32: + value_name = "fp32_values" + values = [float(v) for v in input.flat] + elif dtype == DataType.INT32: + value_name = "int32_values" + values = [int(v) for v in input.flat] + else: + raise ValueError("Unsupported dtype %s", input.dtype) + if input.size > 1024 * 1024: + raise ValueError("The size of input is too big. Please consider " + "saving it to file and 'load_op' to load it") + + helper.append_op( + type='assign_value', + outputs={'Out': [output]}, + attrs={ + 'dtype': dtype, + 'shape': list(input.shape), + value_name: values + }) + else: + raise ValueError("Wrong type for assign input: %s" % type(input)) + return output -def fill_constant(shape, dtype, value, out=None): +def fill_constant(shape, dtype, value, force_cpu=False, out=None): """ **fill_constant** - This function creates a tensor of specified *shape* and - *dtype*, and initializes this with a constant supplied in *value*. + This function creates a tensor with specified `shape` and `dtype`, and + initializes it with a constant specifed by `value`. - It also sets *stop_gradient* to True. + The attribute `stop_gradient` of the created tensor is set to True. Args: - shape(tuple|list|None): Shape of output tensor - dtype(np.dtype|core.DataType|str): Data type of output tensor - value(float): Constant value to initialize the output tensor - out(Variable): Output Variable to initialize + shape(tuple|list|None): Shape of the output tensor. + dtype(np.dtype|core.DataType|str): Data type of the output tensor. + value(float): The constant value used to initialize the output tensor. + out(Variable): The output tensor. Returns: - Variable: The tensor variable storing the output + Variable: The tensor variable storing the output. Examples: .. code-block:: python data = fluid.layers.fill_constant(shape=[1], value=0, dtype='int64') """ + helper = LayerHelper("fill_constant", **locals()) if out is None: out = helper.create_tmp_variable(dtype=dtype) @@ -172,9 +211,12 @@ def fill_constant(shape, dtype, value, out=None): type='fill_constant', inputs={}, outputs={'Out': [out]}, - attrs={'shape': shape, - 'dtype': out.dtype, - 'value': float(value)}) + attrs={ + 'shape': shape, + 'dtype': out.dtype, + 'value': float(value), + 'force_cpu': force_cpu + }) out.stop_gradient = True return out diff --git a/python/paddle/v2/fluid/memory_optimization_transpiler.py b/python/paddle/v2/fluid/memory_optimization_transpiler.py index 6800d7ddbb141a8bc2be10abe68ab86771c71156..293b116957ff9a7c02417bc268b4c0b4b2fc0a15 100644 --- a/python/paddle/v2/fluid/memory_optimization_transpiler.py +++ b/python/paddle/v2/fluid/memory_optimization_transpiler.py @@ -121,8 +121,10 @@ class ControlFlowGraph(object): # and dtype_to_size[cache_dtype] if x_dtype == cache_dtype: print( - "Hit Cache !!!! cache pool index is %d, var name is %s, cached var name is %s, var shape is %s " - % + ("Hit Cache !!!! 
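Together the two changes above let a program be seeded with host-side constants: `assign` accepts a small float32/int32 numpy array (via the new `assign_value` op), and `fill_constant` can force its output onto the CPU. A minimal sketch:

```python
import numpy
import paddle.v2.fluid as fluid
import paddle.v2.fluid.layers as layers

# assign now also accepts a small numpy array of float32 or int32 values.
val = numpy.arange(10, dtype='float32').reshape(2, 5)
t = layers.create_tensor(dtype='float32')
layers.assign(input=val, output=t)

# force_cpu keeps the constant in host memory, e.g. for loop counters.
counter = layers.fill_constant(
    shape=[1], dtype='int64', value=0, force_cpu=True)

exe = fluid.Executor(fluid.CPUPlace())
t_np, counter_np = exe.run(
    fluid.default_main_program(), feed={}, fetch_list=[t, counter])
print(t_np, counter_np)
```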
cache pool index " + "is %d, var name is %s, " + "cached var name is %s, " + "var shape is %s ") % (index, x, cache_var, str(cache_shape))) self.pool.pop(index) _rename_arg_( diff --git a/python/paddle/v2/fluid/nets.py b/python/paddle/v2/fluid/nets.py index 54886a8f2cc63474fe82290c0a12771b4cbdba72..47b550bf4d851a6c19fa88cc5fff2a7a0afc9bda 100644 --- a/python/paddle/v2/fluid/nets.py +++ b/python/paddle/v2/fluid/nets.py @@ -1,6 +1,9 @@ import layers -__all__ = ["simple_img_conv_pool", "sequence_conv_pool"] +__all__ = [ + "simple_img_conv_pool", + "sequence_conv_pool", +] def simple_img_conv_pool(input, diff --git a/python/paddle/v2/fluid/registry.py b/python/paddle/v2/fluid/registry.py index 7aa82906114b355277185211134bb791e5dc43f9..94b16bca8c95e7d76377b1cd6e60532069fb452f 100644 --- a/python/paddle/v2/fluid/registry.py +++ b/python/paddle/v2/fluid/registry.py @@ -8,7 +8,11 @@ import proto.framework_pb2 as framework_pb2 from framework import OpProtoHolder, Variable, Program, Operator from paddle.v2.fluid.layer_helper import LayerHelper, unique_name -__all__ = ['deprecated', 'register_layer', 'autodoc'] +__all__ = [ + 'deprecated', + 'register_layer', + 'autodoc', +] def _convert_(name): @@ -80,11 +84,10 @@ def _generate_doc_string_(op_proto): def register_layer(op_type): - """ - Register an Python layer for an Operator + """Register the Python layer for an Operator. Args: - op_type: The name of the operator to be created + op_type: The name of the operator to be created. This function takes in the operator type (sigmoid, mean , average etc) and creates the operator functionality. @@ -98,16 +101,16 @@ def register_layer(op_type): if len(not_intermediate_outputs) != 1: raise ValueError("Only one non intermediate output operator can be", - "automatically generated") + "automatically generated.") if not_intermediate_outputs[0].duplicable: raise ValueError( - "Only non duplicable op can be automatically generated") + "Only non duplicable op can be automatically generated.") for output in intermediate_outputs: if output.duplicable: raise ValueError("The op can be automatically generated only when ", - "all intermediate ops are not duplicable") + "all intermediate ops are not duplicable.") o_name = not_intermediate_outputs[0].name intermediate_output_names = [output.name for output in intermediate_outputs] diff --git a/python/paddle/v2/fluid/regularizer.py b/python/paddle/v2/fluid/regularizer.py index d1955b00479676448d99603a31249aa7ac6a0d3f..117c45c49f14ab53db5a3a7b8360ba173cc87bf1 100644 --- a/python/paddle/v2/fluid/regularizer.py +++ b/python/paddle/v2/fluid/regularizer.py @@ -1,6 +1,10 @@ import framework -__all__ = ['append_regularization_ops', 'L1Decay', 'L2Decay'] +__all__ = [ + 'append_regularization_ops', + 'L1Decay', + 'L2Decay', +] def append_regularization_ops(parameters_and_grads, regularization=None): diff --git a/python/paddle/v2/fluid/tests/CMakeLists.txt b/python/paddle/v2/fluid/tests/CMakeLists.txt index e795627bfe9e8ad0c196349a332e62e975f20aa3..9a0240cbf65c7a79e29babc2abcb157ada684c5e 100644 --- a/python/paddle/v2/fluid/tests/CMakeLists.txt +++ b/python/paddle/v2/fluid/tests/CMakeLists.txt @@ -5,3 +5,4 @@ foreach(src ${TEST_OPS}) endforeach() add_subdirectory(book) +add_subdirectory(book_distribute) diff --git a/python/paddle/v2/fluid/tests/book_distribute/CMakeLists.txt b/python/paddle/v2/fluid/tests/book_distribute/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..4d7664469e481344cf9eea84688f068b4fb99dee --- /dev/null +++ 
b/python/paddle/v2/fluid/tests/book_distribute/CMakeLists.txt @@ -0,0 +1,5 @@ +file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") +string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") +foreach(src ${TEST_OPS}) + py_test(${src} SRCS ${src}.py) +endforeach() diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py new file mode 100644 index 0000000000000000000000000000000000000000..bb339c440bd0d229d2ae348cf5a7745b16d156d5 --- /dev/null +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py @@ -0,0 +1,62 @@ +import numpy as np +import paddle.v2 as paddle +import paddle.v2.fluid as fluid +import os + +x = fluid.layers.data(name='x', shape=[13], dtype='float32') + +y_predict = fluid.layers.fc(input=x, size=1, act=None) + +y = fluid.layers.data(name='y', shape=[1], dtype='float32') + +cost = fluid.layers.square_error_cost(input=y_predict, label=y) +avg_cost = fluid.layers.mean(x=cost) + +sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) +optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) + +BATCH_SIZE = 20 + +train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.uci_housing.train(), buf_size=500), + batch_size=BATCH_SIZE) + +place = fluid.CPUPlace() +feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) +exe = fluid.Executor(place) + +t = fluid.DistributeTranspiler() +# all parameter server endpoints list for spliting parameters +pserver_endpoints = os.getenv("PSERVERS") +# server endpoint for current node +current_endpoint = os.getenv("SERVER_ENDPOINT") +# run as trainer or parameter server +training_role = os.getenv("TRAINING_ROLE", + "TRAINER") # get the training role: trainer/pserver +t.transpile(optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) + +if training_role == "PSERVER": + if not current_endpoint: + print("need env SERVER_ENDPOINT") + exit(1) + pserver_prog = t.get_pserver_program(current_endpoint, optimize_ops) + exe.run(fluid.default_startup_program()) + exe.run(pserver_prog) +else: + trainer_prog = t.get_trainer_program() + + exe.run(fluid.default_startup_program()) + + PASS_NUM = 100 + for pass_id in range(PASS_NUM): + fluid.io.save_persistables(exe, "./fit_a_line.model/") + fluid.io.load_persistables(exe, "./fit_a_line.model/") + for data in train_reader(): + avg_loss_value, = exe.run(trainer_prog, + feed=feeder.feed(data), + fetch_list=[avg_cost]) + + if avg_loss_value[0] < 10.0: + exit(0) +exit(1) diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py new file mode 100644 index 0000000000000000000000000000000000000000..5fa5e0e5f34e6904e0e66d3ab4149cdfcffeb244 --- /dev/null +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py @@ -0,0 +1,225 @@ +import math + +import numpy as np +import paddle.v2 as paddle +import paddle.v2.dataset.conll05 as conll05 +import paddle.v2.fluid as fluid +import time +import os + +word_dict, verb_dict, label_dict = conll05.get_dict() +word_dict_len = len(word_dict) +label_dict_len = len(label_dict) +pred_len = len(verb_dict) + +mark_dict_len = 2 +word_dim = 32 +mark_dim = 5 +hidden_dim = 512 +depth = 8 +mix_hidden_lr = 1e-3 + +IS_SPARSE = True +PASS_NUM = 10 +BATCH_SIZE = 20 + +embedding_name = 'emb' + + +def load_parameter(file_name, h, w): + with open(file_name, 'rb') as f: + f.read(16) # skip header. 
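The distributed scripts read their role and endpoints from the `PSERVERS`, `SERVER_ENDPOINT` and `TRAINING_ROLE` environment variables. A hypothetical single-machine launcher for `notest_dist_fit_a_line.py` (the endpoint and the bare `python` interpreter name are illustrative assumptions):

```python
import os
import subprocess

script = "notest_dist_fit_a_line.py"
endpoint = "127.0.0.1:6174"                    # illustrative endpoint

pserver_env = dict(os.environ,
                   PSERVERS=endpoint,
                   SERVER_ENDPOINT=endpoint,
                   TRAINING_ROLE="PSERVER")
trainer_env = dict(os.environ,
                   PSERVERS=endpoint,
                   TRAINING_ROLE="TRAINER")

pserver = subprocess.Popen(["python", script], env=pserver_env)
trainers = [subprocess.Popen(["python", script], env=trainer_env)
            for _ in range(2)]                 # matches trainers=2 above
for t in trainers:
    t.wait()
pserver.terminate()                            # the pserver loop never exits
```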
+ return np.fromfile(f, dtype=np.float32).reshape(h, w) + + +def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, + **ignored): + # 8 features + predicate_embedding = fluid.layers.embedding( + input=predicate, + size=[pred_len, word_dim], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr='vemb') + + mark_embedding = fluid.layers.embedding( + input=mark, + size=[mark_dict_len, mark_dim], + dtype='float32', + is_sparse=IS_SPARSE) + + word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] + emb_layers = [ + fluid.layers.embedding( + size=[word_dict_len, word_dim], + input=x, + param_attr=fluid.ParamAttr( + name=embedding_name, trainable=False)) for x in word_input + ] + emb_layers.append(predicate_embedding) + emb_layers.append(mark_embedding) + + hidden_0_layers = [ + fluid.layers.fc(input=emb, size=hidden_dim) for emb in emb_layers + ] + + hidden_0 = fluid.layers.sums(input=hidden_0_layers) + + lstm_0 = fluid.layers.dynamic_lstm( + input=hidden_0, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid') + + # stack L-LSTM and R-LSTM with direct edges + input_tmp = [hidden_0, lstm_0] + + for i in range(1, depth): + mix_hidden = fluid.layers.sums(input=[ + fluid.layers.fc(input=input_tmp[0], size=hidden_dim), + fluid.layers.fc(input=input_tmp[1], size=hidden_dim) + ]) + + lstm = fluid.layers.dynamic_lstm( + input=mix_hidden, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid', + is_reverse=((i % 2) == 1)) + + input_tmp = [mix_hidden, lstm] + + feature_out = fluid.layers.sums(input=[ + fluid.layers.fc(input=input_tmp[0], size=label_dict_len), + fluid.layers.fc(input=input_tmp[1], size=label_dict_len) + ]) + + return feature_out + + +def to_lodtensor(data, place): + seq_lens = [len(seq) for seq in data] + cur_len = 0 + lod = [cur_len] + for l in seq_lens: + cur_len += l + lod.append(cur_len) + flattened_data = np.concatenate(data, axis=0).astype("int64") + flattened_data = flattened_data.reshape([len(flattened_data), 1]) + res = fluid.LoDTensor() + res.set(flattened_data, place) + res.set_lod([lod]) + return res + + +def main(): + # define network topology + word = fluid.layers.data( + name='word_data', shape=[1], dtype='int64', lod_level=1) + predicate = fluid.layers.data( + name='verb_data', shape=[1], dtype='int64', lod_level=1) + ctx_n2 = fluid.layers.data( + name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1) + ctx_n1 = fluid.layers.data( + name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1) + ctx_0 = fluid.layers.data( + name='ctx_0_data', shape=[1], dtype='int64', lod_level=1) + ctx_p1 = fluid.layers.data( + name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1) + ctx_p2 = fluid.layers.data( + name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1) + mark = fluid.layers.data( + name='mark_data', shape=[1], dtype='int64', lod_level=1) + feature_out = db_lstm(**locals()) + target = fluid.layers.data( + name='target', shape=[1], dtype='int64', lod_level=1) + crf_cost = fluid.layers.linear_chain_crf( + input=feature_out, + label=target, + param_attr=fluid.ParamAttr( + name='crfw', learning_rate=mix_hidden_lr)) + avg_cost = fluid.layers.mean(x=crf_cost) + + # TODO(qiao) + # check other optimizers and check why out will be NAN + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.0001) + optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) + + # TODO(qiao) + # add dependency track and move this config before optimizer + crf_decode 
= fluid.layers.crf_decoding( + input=feature_out, param_attr=fluid.ParamAttr(name='crfw')) + + chunk_evaluator = fluid.evaluator.ChunkEvaluator( + input=crf_decode, + label=target, + chunk_scheme="IOB", + num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0))) + + train_data = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.conll05.test(), buf_size=8192), + batch_size=BATCH_SIZE) + place = fluid.CPUPlace() + feeder = fluid.DataFeeder( + feed_list=[ + word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, mark, target + ], + place=place) + exe = fluid.Executor(place) + + t = fluid.DistributeTranspiler() + pserver_endpoints = os.getenv("PSERVERS") + # server endpoint for current node + current_endpoint = os.getenv("SERVER_ENDPOINT") + # run as trainer or parameter server + training_role = os.getenv( + "TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver + t.transpile( + optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) + + if training_role == "PSERVER": + if not current_endpoint: + print("need env SERVER_ENDPOINT") + exit(1) + pserver_prog = t.get_pserver_program(current_endpoint, optimize_ops) + exe.run(fluid.default_startup_program()) + exe.run(pserver_prog) + elif training_role == "TRAINER": + trainer_prog = t.get_trainer_program() + start_time = time.time() + batch_id = 0 + exe.run(fluid.default_startup_program()) + embedding_param = fluid.global_scope().find_var( + embedding_name).get_tensor() + embedding_param.set( + load_parameter(conll05.get_embedding(), word_dict_len, word_dim), + place) + for pass_id in xrange(PASS_NUM): + chunk_evaluator.reset(exe) + for data in train_data(): + cost, precision, recall, f1_score = exe.run( + trainer_prog, + feed=feeder.feed(data), + fetch_list=[avg_cost] + chunk_evaluator.metrics) + pass_precision, pass_recall, pass_f1_score = chunk_evaluator.eval( + exe) + + if batch_id % 10 == 0: + print("avg_cost:" + str(cost) + " precision:" + str( + precision) + " recall:" + str(recall) + " f1_score:" + + str(f1_score) + " pass_precision:" + str( + pass_precision) + " pass_recall:" + str( + pass_recall) + " pass_f1_score:" + str( + pass_f1_score)) + if batch_id != 0: + print("second per batch: " + str((time.time( + ) - start_time) / batch_id)) + + batch_id = batch_id + 1 + + +if __name__ == '__main__': + main() diff --git a/python/paddle/v2/fluid/tests/book_distribute/test_dist_word2vec.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py similarity index 100% rename from python/paddle/v2/fluid/tests/book_distribute/test_dist_word2vec.py rename to python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py new file mode 100644 index 0000000000000000000000000000000000000000..db419e23abcd06ca39011b1bef078b0cafb5100e --- /dev/null +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py @@ -0,0 +1,110 @@ +from __future__ import print_function +import os +import numpy as np +import paddle.v2 as paddle +import paddle.v2.fluid as fluid + + +def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, + hid_dim=32): + emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim]) + conv_3 = fluid.nets.sequence_conv_pool( + input=emb, + num_filters=hid_dim, + filter_size=3, + act="tanh", + pool_type="sqrt") + conv_4 = fluid.nets.sequence_conv_pool( + input=emb, 
+ num_filters=hid_dim, + filter_size=4, + act="tanh", + pool_type="sqrt") + prediction = fluid.layers.fc(input=[conv_3, conv_4], + size=class_dim, + act="softmax") + cost = fluid.layers.cross_entropy(input=prediction, label=label) + avg_cost = fluid.layers.mean(x=cost) + adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002) + optimize_ops, params_grads = adam_optimizer.minimize(avg_cost) + accuracy = fluid.evaluator.Accuracy(input=prediction, label=label) + return avg_cost, accuracy, accuracy.metrics[0], optimize_ops, params_grads + + +def to_lodtensor(data, place): + seq_lens = [len(seq) for seq in data] + cur_len = 0 + lod = [cur_len] + for l in seq_lens: + cur_len += l + lod.append(cur_len) + flattened_data = np.concatenate(data, axis=0).astype("int64") + flattened_data = flattened_data.reshape([len(flattened_data), 1]) + res = fluid.LoDTensor() + res.set(flattened_data, place) + res.set_lod([lod]) + return res + + +def main(): + BATCH_SIZE = 100 + PASS_NUM = 5 + + word_dict = paddle.dataset.imdb.word_dict() + dict_dim = len(word_dict) + class_dim = 2 + + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1) + label = fluid.layers.data(name="label", shape=[1], dtype="int64") + cost, accuracy, acc_out, optimize_ops, params_grads = convolution_net( + data, label, input_dim=dict_dim, class_dim=class_dim) + + train_data = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.imdb.train(word_dict), buf_size=1000), + batch_size=BATCH_SIZE) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + + t = fluid.DistributeTranspiler() + + # all parameter server endpoints list for spliting parameters + pserver_endpoints = os.getenv("PSERVERS") + # server endpoint for current node + current_endpoint = os.getenv("SERVER_ENDPOINT") + # run as trainer or parameter server + training_role = os.getenv( + "TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver + t.transpile( + optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) + + exe.run(fluid.default_startup_program()) + + if training_role == "PSERVER": + if not current_endpoint: + print("need env SERVER_ENDPOINT") + exit(1) + pserver_prog = t.get_pserver_program(current_endpoint, optimize_ops) + exe.run(pserver_prog) + elif training_role == "TRAINER": + trainer_prog = t.get_trainer_program() + feeder = fluid.DataFeeder(feed_list=[data, label], place=place) + + for pass_id in xrange(PASS_NUM): + accuracy.reset(exe) + for data in train_data(): + cost_val, acc_val = exe.run(trainer_prog, + feed=feeder.feed(data), + fetch_list=[cost, acc_out]) + pass_acc = accuracy.eval(exe) + print("cost=" + str(cost_val) + " acc=" + str(acc_val) + + " pass_acc=" + str(pass_acc)) + if cost_val < 1.0 and pass_acc > 0.8: + exit(0) + else: + print("environment var TRAINER_ROLE should be TRAINER os PSERVER") + + +if __name__ == '__main__': + main() diff --git a/python/paddle/v2/fluid/tests/book_distribute/test_split_var.py b/python/paddle/v2/fluid/tests/book_distribute/test_split_var.py new file mode 100644 index 0000000000000000000000000000000000000000..cfb48a59154527160f622c12ae429bac31483631 --- /dev/null +++ b/python/paddle/v2/fluid/tests/book_distribute/test_split_var.py @@ -0,0 +1,39 @@ +import math +import unittest +from paddle.v2.fluid.distribute_transpiler import split_dense_variable +import paddle.v2.fluid as fluid +import paddle.v2.fluid.core as core +import random + + +class TestSplitVar(unittest.TestCase): + def test_check_output(self): + # split below shapes to 10 servers + shapes = [[3, 5], 
[1024], [28, 784], [8, 1020], [800, 10]] + expected_sizes = [ + [15], [1024], + [2352, 2352, 2352, 2352, 2352, 2352, 2352, 2352, 2352, 784], + [2040, 2040, 2040, 2040], + [1150, 1150, 1150, 1150, 1150, 1150, 1100] + ] + var_list = [] + program = fluid.Program() + for shape in shapes: + var = program.global_block().create_var( + name=str(random.randint(10000, 99999)), + persistable=True, + # dtype=core.VarDesc.VarType.LOD_TENSOR, + shape=shape) + var_list.append(var) + blocks = split_dense_variable(var_list, 10) + all_sizes = [] + for s in expected_sizes: + for s2 in s: + all_sizes.append(s2) + for i, block_str in enumerate(blocks): + varname, block_id, size = block_str.split(":") + self.assertEqual(int(size), all_sizes[i]) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/fluid/tests/op_test.py b/python/paddle/v2/fluid/tests/op_test.py index b77d2b1268f27c5ec3c34839aaad9b75f0132c2e..276cf2c5f2daa711f61158107f7d6539e676ef20 100644 --- a/python/paddle/v2/fluid/tests/op_test.py +++ b/python/paddle/v2/fluid/tests/op_test.py @@ -31,7 +31,8 @@ def create_op(scope, op_type, inputs, outputs, attrs): kwargs[in_name] = [] if in_dup: sub_in = inputs[in_name] - for sub_in_name, _ in sub_in: + for item in sub_in: + sub_in_name, _ = item[0], item[1] __create_var__(in_name, sub_in_name) else: __create_var__(in_name, in_name) @@ -41,7 +42,8 @@ def create_op(scope, op_type, inputs, outputs, attrs): kwargs[out_name] = [] if out_dup: sub_out = outputs[out_name] - for sub_out_name, _ in sub_out: + for item in sub_out: + sub_out_name, _ = item[0], item[1] __create_var__(out_name, sub_out_name) else: __create_var__(out_name, out_name) @@ -71,13 +73,15 @@ def set_input(scope, op, inputs, place): if in_name in inputs: if in_dup: sub_in = inputs[in_name] - for sub_in_name, sub_in_val in sub_in: + for item in sub_in: + sub_in_name, sub_in_val = item[0], item[1] __set_input__(sub_in_name, sub_in_val) else: __set_input__(in_name, inputs[in_name]) -def get_numeric_gradient(scope, +def get_numeric_gradient(place, + scope, op, inputs, input_to_check, @@ -85,7 +89,7 @@ def get_numeric_gradient(scope, delta=0.005, in_place=False): # FIXME: change this method by compile time concepts - set_input(scope, op, inputs, core.CPUPlace()) + set_input(scope, op, inputs, place) def product(dim): return reduce(lambda a, b: a * b, dim, 1) @@ -93,7 +97,7 @@ def get_numeric_gradient(scope, def get_output(): sum = [] for output_name in output_names: - op.run(scope, core.CPUPlace()) + op.run(scope, place) sum.append( np.array(scope.find_var(output_name).get_tensor()).mean()) return np.array(sum).mean() @@ -127,7 +131,7 @@ def get_numeric_gradient(scope, # we use a for loop to compute the gradient of every element. for i in xrange(tensor_size): if in_place: - set_input(scope, op, inputs, core.CPUPlace()) + set_input(scope, op, inputs, place) # get one input element throw it's index i. 
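`get_numeric_gradient` now takes the place explicitly, but the finite-difference scheme itself is unchanged: perturb one element at a time by `delta` and take a central difference of the mean output. A standalone numpy sketch of that scheme:

```python
import numpy as np

def numeric_gradient(f, x, delta=0.005):
    """Central-difference gradient of a scalar function f at x, element by
    element, mirroring what get_numeric_gradient does per tensor entry."""
    grad = np.zeros_like(x)
    flat_x, flat_g = x.reshape(-1), grad.reshape(-1)
    for i in range(flat_x.size):
        origin = flat_x[i]
        flat_x[i] = origin + delta
        y_pos = f(x)
        flat_x[i] = origin - delta
        y_neg = f(x)
        flat_x[i] = origin
        flat_g[i] = (y_pos - y_neg) / (delta * 2)
    return grad

# e.g. the gradient of mean(x ** 2) is 2 * x / x.size
x = np.random.rand(3, 4).astype('float32')
g = numeric_gradient(lambda t: float((t ** 2).mean()), x)
assert np.allclose(g, 2 * x / x.size, atol=1e-2)
```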
origin = __get_elem__(tensor_to_check, i) @@ -137,7 +141,7 @@ def get_numeric_gradient(scope, y_pos = get_output() if in_place: - set_input(scope, op, inputs, core.CPUPlace()) + set_input(scope, op, inputs, place) x_neg = origin - delta __set_elem__(tensor_to_check, i, x_neg) @@ -283,7 +287,8 @@ class OpTest(unittest.TestCase): if not isinstance(sub_out, list): raise AssertionError("sub_out type %s is not list", type(sub_out)) - for sub_out_name, expect in sub_out: + for item in sub_out: + sub_out_name, expect = item[0], item[1] idx = find_actual(sub_out_name, fetch_list) actual = outs[idx] actual_t = np.array(actual) @@ -347,6 +352,24 @@ class OpTest(unittest.TestCase): in_place=False, max_relative_error=0.005, user_defined_grads=None): + places = [core.CPUPlace()] + if core.is_compile_gpu() and core.op_support_gpu(self.op_type): + places.append(core.CUDAPlace(0)) + for place in places: + self.check_grad_with_place(place, inputs_to_check, output_names, + no_grad_set, numeric_grad_delta, + in_place, max_relative_error, + user_defined_grads) + + def check_grad_with_place(self, + place, + inputs_to_check, + output_names, + no_grad_set=None, + numeric_grad_delta=0.005, + in_place=False, + max_relative_error=0.005, + user_defined_grads=None): self.scope = core.Scope() op_inputs = self.inputs if hasattr(self, "inputs") else dict() op_outputs = self.outputs if hasattr(self, "outputs") else dict() @@ -362,6 +385,7 @@ class OpTest(unittest.TestCase): numeric_grads = user_defined_grads or [ get_numeric_gradient( + place, self.scope, self.op, self.inputs, @@ -370,22 +394,12 @@ class OpTest(unittest.TestCase): delta=numeric_grad_delta, in_place=in_place) for input_to_check in inputs_to_check ] - cpu_place = core.CPUPlace() - cpu_analytic_grads = self._get_gradient(inputs_to_check, cpu_place, - output_names, no_grad_set) - - self.__assert_is_close(numeric_grads, cpu_analytic_grads, - inputs_to_check, max_relative_error, - "Gradient Check On %s" % str(cpu_place)) - - if core.is_compile_gpu() and self.op.support_gpu(): - gpu_place = core.CUDAPlace(0) - gpu_analytic_grads = self._get_gradient(inputs_to_check, gpu_place, - output_names, no_grad_set) - - self.__assert_is_close(numeric_grads, gpu_analytic_grads, - inputs_to_check, max_relative_error, - "Gradient Check On %s" % str(gpu_place)) + analytic_grads = self._get_gradient(inputs_to_check, place, + output_names, no_grad_set) + + self.__assert_is_close(numeric_grads, analytic_grads, inputs_to_check, + max_relative_error, + "Gradient Check On %s" % str(place)) @staticmethod def _create_var_descs_(block, var_dict): diff --git a/python/paddle/v2/fluid/tests/test_assign_value_op.py b/python/paddle/v2/fluid/tests/test_assign_value_op.py new file mode 100644 index 0000000000000000000000000000000000000000..51b99d091825ab3edc2175202ae5d8a364a54378 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_assign_value_op.py @@ -0,0 +1,40 @@ +import paddle.v2.fluid as fluid +import paddle.v2.fluid.layers as layers +import op_test +import numpy +import unittest +import paddle.v2.fluid.framework as framework + + +class TestAssignValueOp(op_test.OpTest): + def setUp(self): + self.op_type = "assign_value" + x = numpy.random.random(size=(2, 5)).astype(numpy.float32) + self.inputs = {} + self.outputs = {'Out': x} + self.attrs = { + 'shape': x.shape, + 'dtype': framework.convert_np_dtype_to_dtype_(x.dtype), + 'fp32_values': [float(v) for v in x.flat] + } + + def test_forward(self): + self.check_output() + + def test_assign(self): + val = ( + -100 + 200 * 
numpy.random.random(size=(2, 5))).astype(numpy.int32) + x = layers.create_tensor(dtype="float32") + layers.assign(input=val, output=x) + exe = fluid.Executor(fluid.CPUPlace()) + fetched_x = exe.run(fluid.default_main_program(), + feed={}, + fetch_list=[x])[0] + self.assertTrue( + numpy.array_equal(fetched_x, val), + "fetch_x=%s val=%s" % (fetched_x, val)) + self.assertEqual(fetched_x.dtype, val.dtype) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_clip.py b/python/paddle/v2/fluid/tests/test_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..a71823f7e82d2f3007de3d17afa1770c688792d4 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_clip.py @@ -0,0 +1,67 @@ +from __future__ import print_function +import numpy as np +import paddle.v2 as paddle +import paddle.v2.fluid as fluid + +BATCH_SIZE = 128 +CLIP_MAX = 2e-6 +CLIP_MIN = -1e-6 + +prog = fluid.framework.Program() + +with fluid.program_guard(main_program=prog): + image = fluid.layers.data(name='x', shape=[784], dtype='float32') + + hidden1 = fluid.layers.fc(input=image, size=128, act='relu') + hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu') + predict = fluid.layers.fc(input=hidden2, size=10, act='softmax') + + label = fluid.layers.data(name='y', shape=[1], dtype='int64') + + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(x=cost) + +prog_clip = prog.clone() +prog_clip.block(0).var(hidden1.name).set_error_clip( + fluid.clip.ErrorClipByValue( + max=CLIP_MAX, min=CLIP_MIN)) + +avg_cost_clip = prog_clip.block(0).var(avg_cost.name) +fluid.backward.append_backward(loss=avg_cost) +fluid.backward.append_backward( + loss=avg_cost_clip, callback=fluid.clip.error_clip_callback) + +hidden1_grad = prog.block(0).var(hidden1.name + "@GRAD") +hidden1_grad_clip = prog_clip.block(0).var(hidden1.name + "@GRAD") + +hidden2_grad = prog.block(0).var(hidden2.name + "@GRAD") +hidden2_grad_clip = prog_clip.block(0).var(hidden2.name + "@GRAD") + +train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.mnist.train(), buf_size=8192), + batch_size=BATCH_SIZE) + +place = fluid.CPUPlace() +exe = fluid.Executor(place) +feeder = fluid.DataFeeder(feed_list=[image, label], place=place) +exe.run(fluid.default_startup_program()) + +count = 0 +for data in train_reader(): + count += 1 + if count > 5: + break + out1, out2 = exe.run(prog, + feed=feeder.feed(data), + fetch_list=[hidden1_grad, hidden2_grad]) + out1_clip, out2_clip = exe.run( + prog_clip, + feed=feeder.feed(data), + fetch_list=[hidden1_grad_clip, hidden2_grad_clip]) + if not ((out1.clip( + min=CLIP_MIN, max=CLIP_MAX) == out1_clip).all() and + (out2 == out2_clip).all()): + exit(1) + +exit(0) diff --git a/python/paddle/v2/fluid/tests/test_conv2d_op.py b/python/paddle/v2/fluid/tests/test_conv2d_op.py index 958300e655e012b91598360105ca2734c3bd2c37..e9a19d1774f843b94d3817d516880752fafd5628 100644 --- a/python/paddle/v2/fluid/tests/test_conv2d_op.py +++ b/python/paddle/v2/fluid/tests/test_conv2d_op.py @@ -49,7 +49,7 @@ def conv2d_forward_naive(input, filter, group, conv_param): class TestConv2dOp(OpTest): def setUp(self): - core.use_cuda() + self.use_cudnn = False self.init_op_type() self.init_group() self.init_dilation() @@ -70,30 +70,59 @@ class TestConv2dOp(OpTest): 'strides': self.stride, 'paddings': self.pad, 'groups': self.groups, - 'dilations': self.dilations + 'dilations': self.dilations, + 'use_cudnn': self.use_cudnn } self.outputs = {'Output': output} def 
test_check_output(self): - self.check_output() + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_output_with_place(place, atol=1e-5) + else: + self.check_output() def test_check_grad(self): - self.check_grad( - set(['Input', 'Filter']), 'Output', max_relative_error=0.02) + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_grad_with_place( + place, + set(['Input', 'Filter']), + 'Output', + max_relative_error=0.02) + else: + self.check_grad( + set(['Input', 'Filter']), 'Output', max_relative_error=0.02) def test_check_grad_no_filter(self): - self.check_grad( - ['Input'], - 'Output', - max_relative_error=0.02, - no_grad_set=set(['Filter'])) + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_grad_with_place( + place, ['Input'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Filter'])) + else: + self.check_grad( + ['Input'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Filter'])) def test_check_grad_no_input(self): - self.check_grad( - ['Filter'], - 'Output', - max_relative_error=0.02, - no_grad_set=set(['Input'])) + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_grad_with_place( + place, ['Filter'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Input'])) + else: + self.check_grad( + ['Filter'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Input'])) def init_test_case(self): self.pad = [0, 0] @@ -167,39 +196,39 @@ class TestWithDilation(TestConv2dOp): self.groups = 3 -#----------------Conv2dCudnn---------------- -class TestCudnn(TestConv2dOp): +#----------------Conv2dCUDNN---------------- +class TestCUDNN(TestConv2dOp): def init_op_type(self): - core.use_cudnn() - self.op_type = "conv2d_cudnn" + self.use_cudnn = True + self.op_type = "conv2d" -class TestCudnnWithPad(TestWithPad): +class TestCUDNNWithPad(TestWithPad): def init_op_type(self): - core.use_cudnn() - self.op_type = "conv2d_cudnn" + self.use_cudnn = True + self.op_type = "conv2d" -class TestCudnnWithStride(TestWithStride): +class TestCUDNNWithStride(TestWithStride): def init_op_type(self): - core.use_cudnn() - self.op_type = "conv2d_cudnn" + self.use_cudnn = True + self.op_type = "conv2d" -class TestCudnnWithGroup(TestWithGroup): +class TestCUDNNWithGroup(TestWithGroup): def init_op_type(self): - core.use_cudnn() - self.op_type = "conv2d_cudnn" + self.use_cudnn = True + self.op_type = "conv2d" -class TestCudnnWith1x1(TestWith1x1): +class TestCUDNNWith1x1(TestWith1x1): def init_op_type(self): - core.use_cudnn() - self.op_type = "conv2d_cudnn" + self.use_cudnn = True + self.op_type = "conv2d" # cudnn v5 does not support dilation conv. 
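The tests above all follow the same shape: the operator type stays `conv2d`, cuDNN is requested through the `use_cudnn` attribute, and the check runs on a CUDA place only when cuDNN was asked for and the build has GPU support. A condensed sketch of that pattern (the attribute values are illustrative):

```python
import paddle.v2.fluid.core as core

def pick_place(use_cudnn):
    """Run on a CUDA place only when cuDNN was requested and the build
    actually has GPU support; otherwise fall back to the CPU place."""
    if use_cudnn and core.is_compile_gpu():
        return core.CUDAPlace(0)
    return core.CPUPlace()

attrs = {
    'strides': [1, 1],
    'paddings': [0, 0],
    'dilations': [1, 1],
    'use_cudnn': True,            # replaces the old "conv2d_cudnn" op type
    'data_format': 'AnyLayout',
}
place = pick_place(attrs['use_cudnn'])
```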
-# class TestCudnnWithDilation(TestWithDilation): +# class TestCUDNNWithDilation(TestWithDilation): # def init_op_type(self): # self.op_type = "conv_cudnn" diff --git a/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py b/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py index d59537b924d57d40f7d740d99eb814c95f528e5f..4aec32fc6e7540e3e3c788bbdc20abed147cbc93 100644 --- a/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py +++ b/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py @@ -1,5 +1,7 @@ import unittest import numpy as np + +import paddle.v2.fluid.core as core from op_test import OpTest @@ -37,6 +39,7 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs): class TestConv2dTransposeOp(OpTest): def setUp(self): # init as conv transpose + self.use_cudnn = False self.init_op_type() self.init_test_case() @@ -47,7 +50,9 @@ class TestConv2dTransposeOp(OpTest): self.attrs = { 'strides': self.stride, 'paddings': self.pad, - 'dilations': self.dilations + 'dilations': self.dilations, + 'use_cudnn': self.use_cudnn, + 'data_format': 'AnyLayout' # TODO(dzhwinter) : should be fix latter } output = conv2dtranspose_forward_naive(input_, filter_, @@ -56,25 +61,53 @@ class TestConv2dTransposeOp(OpTest): self.outputs = {'Output': output} def test_check_output(self): - self.check_output() + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_output_with_place(place, atol=1e-5) + else: + self.check_output() def test_check_grad_no_input(self): - self.check_grad( - ['Filter'], - 'Output', - max_relative_error=0.02, - no_grad_set=set(['Input'])) + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_grad_with_place( + place, ['Filter'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Input'])) + else: + self.check_grad( + ['Filter'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Input'])) def test_check_grad_no_filter(self): - self.check_grad( - ['Input'], - 'Output', - max_relative_error=0.02, - no_grad_set=set(['Filter'])) + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_grad_with_place( + place, ['Input'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Filter'])) + else: + self.check_grad( + ['Input'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Filter'])) def test_check_grad(self): - self.check_grad( - set(['Input', 'Filter']), 'Output', max_relative_error=0.02) + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_grad_with_place( + place, + set(['Input', 'Filter']), + 'Output', + max_relative_error=0.02) + else: + self.check_grad( + set(['Input', 'Filter']), 'Output', max_relative_error=0.02) def init_test_case(self): self.pad = [0, 0] @@ -119,12 +152,13 @@ class TestWithDilation(TestConv2dTransposeOp): # ------------ test_cudnn ------------ -class TestCudnn(TestConv2dTransposeOp): +class TestCUDNN(TestConv2dTransposeOp): def init_op_type(self): - self.op_type = "conv2d_transpose_cudnn" + self.use_cudnn = True + self.op_type = "conv2d_transpose" -class TestCudnnWithPad(TestWithPad): +class TestCUDNNWithPad(TestWithPad): def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -134,10 +168,11 @@ class TestCudnnWithPad(TestWithPad): self.filter_size = [f_c, 6, 3, 3] def init_op_type(self): - self.op_type = "conv2d_transpose_cudnn" + self.use_cudnn = True + self.op_type = "conv2d_transpose" -class TestCudnnWithStride(TestWithStride): +class TestCUDNNWithStride(TestWithStride): def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -147,11 +182,12 @@ class 
TestCudnnWithStride(TestWithStride): self.filter_size = [f_c, 6, 3, 3] def init_op_type(self): - self.op_type = "conv2d_transpose_cudnn" + self.use_cudnn = True + self.op_type = "conv2d_transpose" # #cudnn v5 does not support dilation conv. -# class TestCudnnWithDilation(TestWithDilation): +# class TestCUDNNWithDilation(TestWithDilation): # def init_test_case(self): # self.pad = [1, 1] # self.stride = [2, 2] @@ -161,7 +197,7 @@ class TestCudnnWithStride(TestWithStride): # self.filter_size = [f_c, 6, 3, 3] # # def init_op_type(self): -# self.op_type = "conv2d_transpose_cudnn" +# self.op_type = "conv2d_transpose" if __name__ == '__main__': unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_conv3d_op.py b/python/paddle/v2/fluid/tests/test_conv3d_op.py index 8593dff20b5c283d5862206dfb0c0d2501039d07..df911e1a2f04501936fc332c7b4b829af248116e 100644 --- a/python/paddle/v2/fluid/tests/test_conv3d_op.py +++ b/python/paddle/v2/fluid/tests/test_conv3d_op.py @@ -1,5 +1,7 @@ import unittest import numpy as np + +import paddle.v2.fluid.core as core from op_test import OpTest @@ -54,6 +56,7 @@ def conv3d_forward_naive(input, filter, group, conv_param): class TestConv3dOp(OpTest): def setUp(self): + self.use_cudnn = False self.init_group() self.init_op_type() self.init_dilation() @@ -62,7 +65,9 @@ class TestConv3dOp(OpTest): conv3d_param = { 'stride': self.stride, 'pad': self.pad, - 'dilations': self.dilations + 'dilations': self.dilations, + 'use_cudnn': self.use_cudnn, + 'data_format': 'AnyLayout' # TODO(dzhwinter) : should be fix latter } input = np.random.random(self.input_size).astype("float32") filter = np.random.random(self.filter_size).astype("float32") @@ -79,25 +84,53 @@ class TestConv3dOp(OpTest): self.outputs = {'Output': output} def test_check_output(self): - self.check_output() + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_output_with_place(place, atol=1e-5) + else: + self.check_output() def test_check_grad(self): - self.check_grad( - set(['Input', 'Filter']), 'Output', max_relative_error=0.03) + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_grad_with_place( + place, + set(['Input', 'Filter']), + 'Output', + max_relative_error=0.03) + else: + self.check_grad( + set(['Input', 'Filter']), 'Output', max_relative_error=0.03) def test_check_grad_no_filter(self): - self.check_grad( - ['Input'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Filter'])) + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_grad_with_place( + place, ['Input'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Filter'])) + else: + self.check_grad( + ['Input'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Filter'])) def test_check_grad_no_input(self): - self.check_grad( - ['Filter'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Input'])) + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_grad_with_place( + place, ['Filter'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Input'])) + else: + self.check_grad( + ['Filter'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Input'])) def init_test_case(self): self.pad = [0, 0, 0] @@ -169,31 +202,35 @@ class TestWithDilation(TestConv3dOp): self.groups = 3 -class TestCudnn(TestConv3dOp): +class TestCUDNN(TestConv3dOp): def init_op_type(self): - self.op_type = "conv3d_cudnn" + self.use_cudnn = True + self.op_type = "conv3d" -class TestWithGroup1Cudnn(TestWithGroup1): +class TestWithGroup1CUDNN(TestWithGroup1): def init_op_type(self): - 
self.op_type = "conv3d_cudnn" + self.use_cudnn = True + self.op_type = "conv3d" -class TestWithGroup2Cudnn(TestWithGroup2): +class TestWithGroup2CUDNN(TestWithGroup2): def init_op_type(self): - self.op_type = "conv3d_cudnn" + self.use_cudnn = True + self.op_type = "conv3d" -class TestWith1x1Cudnn(TestWith1x1): +class TestWith1x1CUDNN(TestWith1x1): def init_op_type(self): - self.op_type = "conv3d_cudnn" + self.use_cudnn = True + self.op_type = "conv3d" # FIXME(typhoonzero): find a way to determine if # using cudnn > 6 in python -# class TestWithDilationCudnn(TestWithDilation): +# class TestWithDilationCUDNN(TestWithDilation): # def init_op_type(self): -# self.op_type = "conv3d_cudnn" +# self.op_type = "conv3d" if __name__ == '__main__': unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py b/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py index a353f9b4d40233de46237005138f21430f4d865a..a42a9c4f33ffd5a8ee267fa910ef763301453a03 100644 --- a/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py +++ b/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py @@ -1,5 +1,7 @@ import unittest import numpy as np + +import paddle.v2.fluid.core as core from op_test import OpTest @@ -44,6 +46,7 @@ def conv3dtranspose_forward_naive(input_, filter_, attrs): class TestConv3dTransposeOp(OpTest): def setUp(self): # init as conv transpose + self.use_cudnn = False self.init_op_type() self.init_test_case() @@ -54,7 +57,9 @@ class TestConv3dTransposeOp(OpTest): self.attrs = { 'strides': self.stride, 'paddings': self.pad, - 'dilations': self.dilations + 'dilations': self.dilations, + 'use_cudnn': self.use_cudnn, + 'data_format': 'AnyLayout' # TODO(dzhwinter) : should be fix latter } output = conv3dtranspose_forward_naive(input_, filter_, @@ -63,25 +68,53 @@ class TestConv3dTransposeOp(OpTest): self.outputs = {'Output': output} def test_check_output(self): - self.check_output() + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_output_with_place(place, atol=1e-5) + else: + self.check_output() def test_check_grad(self): - self.check_grad( - set(['Input', 'Filter']), 'Output', max_relative_error=0.02) + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_grad_with_place( + place, + set(['Input', 'Filter']), + 'Output', + max_relative_error=0.03) + else: + self.check_grad( + set(['Input', 'Filter']), 'Output', max_relative_error=0.03) def test_check_grad_no_filter(self): - self.check_grad( - ['Input'], - 'Output', - max_relative_error=0.02, - no_grad_set=set(['Filter'])) + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_grad_with_place( + place, ['Input'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Filter'])) + else: + self.check_grad( + ['Input'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Filter'])) def test_check_grad_no_input(self): - self.check_grad( - ['Filter'], - 'Output', - max_relative_error=0.02, - no_grad_set=set(['Input'])) + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_grad_with_place( + place, ['Filter'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Input'])) + else: + self.check_grad( + ['Filter'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Input'])) def init_test_case(self): self.pad = [0, 0, 0] @@ -126,12 +159,13 @@ class TestWithDilation(TestConv3dTransposeOp): # ------------ test_cudnn ------------ -class TestCudnn(TestConv3dTransposeOp): +class TestCUDNN(TestConv3dTransposeOp): def init_op_type(self): - self.op_type = "conv3d_transpose_cudnn" + 
self.use_cudnn = True + self.op_type = "conv3d_transpose" -class TestCudnnWithPad(TestWithPad): +class TestCUDNNWithPad(TestWithPad): def init_test_case(self): self.pad = [1, 1, 1] self.stride = [1, 1, 1] @@ -141,10 +175,11 @@ class TestCudnnWithPad(TestWithPad): self.filter_size = [f_c, 6, 3, 3, 3] def init_op_type(self): - self.op_type = "conv3d_transpose_cudnn" + self.use_cudnn = True + self.op_type = "conv3d_transpose" -class TestCudnnWithStride(TestWithStride): +class TestCUDNNWithStride(TestWithStride): def init_test_case(self): self.pad = [1, 1, 1] self.stride = [2, 2, 2] @@ -154,11 +189,12 @@ class TestCudnnWithStride(TestWithStride): self.filter_size = [f_c, 6, 3, 3, 3] def init_op_type(self): - self.op_type = "conv3d_transpose_cudnn" + self.use_cudnn = True + self.op_type = "conv3d_transpose" # #cudnn v5 does not support dilation conv. -# class TestCudnnWithDilation(TestWithDilation): +# class TestCUDNNWithDilation(TestWithDilation): # def init_test_case(self): # self.pad = [1, 1, 1] # self.stride = [2, 2, 2] @@ -168,7 +204,7 @@ class TestCudnnWithStride(TestWithStride): # self.filter_size = [f_c, 6, 3, 3, 3] # # def init_op_type(self): -# self.op_type = "conv3d_transpose_cudnn" +# self.op_type = "conv3d_transpose" if __name__ == '__main__': unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_parallel_op.py b/python/paddle/v2/fluid/tests/test_parallel_op.py index 59ed041e7fa1dd68c0f8d610f2575886442d1b4d..6c4c39ad59c0ec490c7c3b469e9fa219b28735ba 100644 --- a/python/paddle/v2/fluid/tests/test_parallel_op.py +++ b/python/paddle/v2/fluid/tests/test_parallel_op.py @@ -1,45 +1,160 @@ import unittest -import paddle.v2.fluid.layers as layers import paddle.v2.fluid as fluid -from paddle.v2.fluid.framework import Program -from paddle.v2.fluid.executor import Executor -from paddle.v2.fluid.backward import append_backward -import numpy as np -import paddle.v2.fluid.core as core - - -class ParallelOpTest(unittest.TestCase): - def setUp(self): - x = layers.data( - shape=[-1, 30, 40], - dtype='float32', - name='x', - append_batch_size=False, - stop_gradient=False) - - places = layers.get_places(device_count=4) - pd = layers.ParallelDo(places=places) - - with pd.do(): - data = pd.read_input(x) - hidden = layers.fc(input=data, size=7) - pd.write_output(hidden) - data = pd() - loss = layers.mean(x=data) - sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) - sgd_optimizer.minimize(loss) - - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) - exe.run(fluid.default_main_program(), - feed={ - x.name: np.random.uniform(0.1, 0.6, - (20, 30, 40)).astype("float32") - }) - - def test_forward(self): - pass +import numpy +import sys +# TODO(dzhwinter): get places op check need to be enhanced. +sys.exit(0) + + +class BaseParallelForTest(unittest.TestCase): + def run_test(self, callback, feed, fetch): + """ + Run the unittest for parallel.for + Args: + callback(callable): A callable function returns a generator. There + are two yields in the generator function. The first yield + returns the data layers, and the second yield returns the loss. + The modified data variables will be sent back during the first + yield. + + feed(dict): The executor feeding dictionary. + fetch(list|basestr): The fetch name lists. + + Returns: + None + + Raises: + AssertionError when the computation of cpu, parallel.for in cpu, + gpu, parallel.for in gpu are different. 
+ + """ + cpu = fluid.CPUPlace() + result_cpu = self._run_test_impl_( + callback=callback, + feed=feed, + fetch=fetch, + place=cpu, + use_parallel=False) + result_cpu_parallel = self._run_test_impl_( + callback=callback, + feed=feed, + fetch=fetch, + place=cpu, + use_parallel=True) + if fluid.core.is_compile_gpu(): + gpu = fluid.CUDAPlace(0) + result_gpu = self._run_test_impl_( + callback=callback, + feed=feed, + fetch=fetch, + place=gpu, + use_parallel=False) + result_gpu_parallel = self._run_test_impl_( + callback=callback, + feed=feed, + fetch=fetch, + place=gpu, + use_parallel=True) + self._assert_same_(fetch, result_cpu, result_cpu_parallel, + result_gpu, result_gpu_parallel) + else: + self._assert_same_(fetch, result_cpu, result_cpu_parallel) + + def _run_test_impl_(self, callback, feed, fetch, place, use_parallel=False): + """ + Run a single test, returns the fetch values + Args: + place(Place): the computation place. + use_parallel(bool): Whether use parallel.for or not. + + Returns: + Fetched numpy arrays. + + """ + if isinstance(fetch, basestring): + fetch = [fetch] + main = fluid.Program() + startup = fluid.Program() + # Fix seed + main.random_seed = 10 + startup.random_seed = 10 + + with fluid.program_guard(main, startup): + generator = callback() + # Automatically insert parallel do if use_parallel = True + if use_parallel: + places = fluid.layers.get_places() + pd = fluid.layers.ParallelDo(places) + data = next(generator) + + if isinstance(data, fluid.Variable): + data = [data] + + with pd.do(): + ins = map(pd.read_input, data) + if len(ins) == 1: + ins = ins[0] + loss = generator.send(ins) # patch input + pd.write_output(loss) + + loss = pd() + else: + data = next(generator) + loss = generator.send(data) + self.assertIsNotNone(loss) + avg_loss = fluid.layers.mean(x=loss) + fluid.backward.append_backward(loss=avg_loss) + + exe = fluid.Executor(place) + exe.run(startup) + return exe.run(main, feed=feed, fetch_list=fetch) + + def _assert_same_(self, fetch, *args): + """ + Assert the return values of `run_test` are same. + Args: + fetch: Fetch list. Used for print error message + *args: The fetch result lists of each situations. 
+ + Returns: + None + + Raises: + AssertionError + + """ + + def _impl_(a, b, fetch_id, item_id): + item_str = ['CPU', 'ParallelCPU', 'GPU', 'ParallelGPU'] + flag = numpy.allclose(a, b, rtol=0.1) + self.assertTrue(flag, "The {0} are different in {1}".format( + fetch[fetch_id], item_str[item_id])) + + for i, items in enumerate(zip(*args)): + self.assertGreater(len(items), 0) + for j in range(1, len(items)): + _impl_(items[0], items[j], fetch_id=i, item_id=j) + + +class ParallelOpTest(BaseParallelForTest): + def test_simple_fc(self): + def __network__(): + x = fluid.layers.data(shape=[784], dtype='float32', name='img') + # FIXME: This is a bug of parallel.do + x.stop_gradient = False + x = yield x + hidden = fluid.layers.fc(input=x, size=200, param_attr='fc1.w') + loss = fluid.layers.mean(x=hidden) + yield loss + + self.run_test( + callback=__network__, + feed={ + 'img': + numpy.random.random(size=(128 * 3, 784)).astype('float32') + }, + fetch='fc1.w@GRAD') if __name__ == '__main__': diff --git a/python/paddle/v2/fluid/tests/test_pool2d_op.py b/python/paddle/v2/fluid/tests/test_pool2d_op.py index 5dff6270f455395ce6ca8ae2428236f630467095..71accc3f65bb2d65ad7e7c83eb15242f0e1c8aa4 100644 --- a/python/paddle/v2/fluid/tests/test_pool2d_op.py +++ b/python/paddle/v2/fluid/tests/test_pool2d_op.py @@ -1,5 +1,7 @@ import unittest import numpy as np + +import paddle.v2.fluid.core as core from op_test import OpTest @@ -44,6 +46,7 @@ def avg_pool2D_forward_naive(x, ksize, strides, paddings, global_pool=0): class TestPool2d_Op(OpTest): def setUp(self): + self.use_cudnn = False self.init_test_case() self.init_global_pool() self.init_op_type() @@ -62,15 +65,25 @@ class TestPool2d_Op(OpTest): 'ksize': self.ksize, 'pooling_type': self.pool_type, 'global_pooling': self.global_pool, + 'use_cudnn': self.use_cudnn, + 'data_format': 'AnyLayout' # TODO(dzhwinter) : should be fix latter } self.outputs = {'Out': output.astype('float32')} def test_check_output(self): - self.check_output() + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_output_with_place(place, atol=1e-5) + else: + self.check_output() def test_check_grad(self): - if self.pool_type != "max": + if self.use_cudnn and self.pool_type != "max": + place = core.CUDAPlace(0) + self.check_grad_with_place( + place, set(['X']), 'Out', max_relative_error=0.07) + elif self.pool_type != "max": self.check_grad(set(['X']), 'Out', max_relative_error=0.07) def init_test_case(self): @@ -153,35 +166,41 @@ class TestCase5(TestCase2): self.pool2D_forward_naive = max_pool2D_forward_naive -#--------------------test pool2d_cudnn-------------------- -class TestCudnnCase1(TestPool2d_Op): +#--------------------test pool2d-------------------- +class TestCUDNNCase1(TestPool2d_Op): def init_op_type(self): - self.op_type = "pool2d_cudnn" + self.use_cudnn = True + self.op_type = "pool2d" -class TestCudnnCase2(TestCase1): +class TestCUDNNCase2(TestCase1): def init_op_type(self): - self.op_type = "pool2d_cudnn" + self.use_cudnn = True + self.op_type = "pool2d" -class TestCudnnCase3(TestCase2): +class TestCUDNNCase3(TestCase2): def init_op_type(self): - self.op_type = "pool2d_cudnn" + self.use_cudnn = True + self.op_type = "pool2d" -class TestCudnnCase4(TestCase3): +class TestCUDNNCase4(TestCase3): def init_op_type(self): - self.op_type = "pool2d_cudnn" + self.use_cudnn = True + self.op_type = "pool2d" -class TestCudnnCase5(TestCase4): +class TestCUDNNCase5(TestCase4): def init_op_type(self): - self.op_type = "pool2d_cudnn" + self.use_cudnn = True + self.op_type = "pool2d" 
-class TestCudnnCase6(TestCase5): +class TestCUDNNCase6(TestCase5): def init_op_type(self): - self.op_type = "pool2d_cudnn" + self.use_cudnn = True + self.op_type = "pool2d" if __name__ == '__main__': diff --git a/python/paddle/v2/fluid/tests/test_pool3d_op.py b/python/paddle/v2/fluid/tests/test_pool3d_op.py index 2ba86665a7d207e61159c02643fa40daca3be080..8f410862aff5af633968d4c3c919563c874cc200 100644 --- a/python/paddle/v2/fluid/tests/test_pool3d_op.py +++ b/python/paddle/v2/fluid/tests/test_pool3d_op.py @@ -1,5 +1,7 @@ import unittest import numpy as np + +import paddle.v2.fluid.core as core from op_test import OpTest @@ -52,6 +54,7 @@ def avg_pool3D_forward_naive(x, ksize, strides, paddings, global_pool=0): class TestPool3d_Op(OpTest): def setUp(self): + self.use_cudnn = False self.init_test_case() self.init_global_pool() self.init_op_type() @@ -71,15 +74,25 @@ class TestPool3d_Op(OpTest): 'ksize': self.ksize, 'pooling_type': self.pool_type, 'global_pooling': self.global_pool, + 'use_cudnn': self.use_cudnn, + 'data_format': 'AnyLayout' # TODO(dzhwinter) : should be fix latter } self.outputs = {'Out': output.astype('float32')} def test_check_output(self): - self.check_output() + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_output_with_place(place, atol=1e-5) + else: + self.check_output() def test_check_grad(self): - if self.pool_type != "max": + if self.use_cudnn and self.pool_type != "max": + place = core.CUDAPlace(0) + self.check_grad_with_place( + place, set(['X']), 'Out', max_relative_error=0.07) + elif self.pool_type != "max": self.check_grad(set(['X']), 'Out', max_relative_error=0.07) def init_test_case(self): @@ -163,35 +176,41 @@ class TestCase5(TestCase2): self.pool3D_forward_naive = max_pool3D_forward_naive -#--------------------test pool3d_cudnn-------------------- -class TestCudnnCase1(TestPool3d_Op): +#--------------------test pool3d-------------------- +class TestCUDNNCase1(TestPool3d_Op): def init_op_type(self): - self.op_type = "pool3d_cudnn" + self.use_cudnn = True + self.op_type = "pool3d" -class TestCudnnCase2(TestCase1): +class TestCUDNNCase2(TestCase1): def init_op_type(self): - self.op_type = "pool3d_cudnn" + self.use_cudnn = True + self.op_type = "pool3d" -class TestCudnnCase3(TestCase2): +class TestCUDNNCase3(TestCase2): def init_op_type(self): - self.op_type = "pool3d_cudnn" + self.use_cudnn = True + self.op_type = "pool3d" -class TestCudnnCase4(TestCase3): +class TestCUDNNCase4(TestCase3): def init_op_type(self): - self.op_type = "pool3d_cudnn" + self.use_cudnn = True + self.op_type = "pool3d" -class TestCudnnCase5(TestCase4): +class TestCUDNNCase5(TestCase4): def init_op_type(self): - self.op_type = "pool3d_cudnn" + self.use_cudnn = True + self.op_type = "pool3d" -class TestCudnnCase6(TestCase5): +class TestCUDNNCase6(TestCase5): def init_op_type(self): - self.op_type = "pool3d_cudnn" + self.use_cudnn = True + self.op_type = "pool3d" if __name__ == '__main__': diff --git a/python/paddle/v2/fluid/tests/test_print_op.py b/python/paddle/v2/fluid/tests/test_print_op.py new file mode 100644 index 0000000000000000000000000000000000000000..1550d0af5edd087951a3c9ec8da862a0f5d5e1b1 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_print_op.py @@ -0,0 +1,55 @@ +import unittest +import paddle.v2.fluid.core as core +from paddle.v2.fluid.executor import Executor +import paddle.v2.fluid.layers as layers +from paddle.v2.fluid.backward import append_backward +from paddle.v2.fluid.framework import switch_main_program +from paddle.v2.fluid.framework import 
Program +import numpy as np + + +class TestPrintOpCPU(unittest.TestCase): + def setUp(self): + self.place = core.CPUPlace() + self.x_tensor = core.LoDTensor() + tensor_np = np.random.random(size=(2, 3)).astype('float32') + self.x_tensor.set(tensor_np, self.place) + self.x_tensor.set_lod([[0, 1, 1]]) + + def build_network(self, only_forward, **kargs): + x = layers.data('x', shape=[3], dtype='float32', lod_level=1) + x.stop_gradient = False + printed = layers.Print(input=x, **kargs) + if only_forward: return printed + loss = layers.mean(x=printed) + append_backward(loss=loss) + return loss + + def test_forward(self): + switch_main_program(Program()) + printed = self.build_network(True, print_phase='forward') + exe = Executor(self.place) + outs = exe.run(feed={'x': self.x_tensor}, + fetch_list=[printed], + return_numpy=False) + + def test_backward(self): + switch_main_program(Program()) + loss = self.build_network(False, print_phase='backward') + exe = Executor(self.place) + outs = exe.run(feed={'x': self.x_tensor}, + fetch_list=[loss], + return_numpy=False) + + +class TestPrintOpGPU(TestPrintOpCPU): + def setUp(self): + self.place = core.CUDAPlace(0) + self.x_tensor = core.LoDTensor() + tensor_np = np.random.random(size=(2, 3)).astype('float32') + self.x_tensor.set(tensor_np, self.place) + self.x_tensor.set_lod([[0, 1, 1]]) + + +if __name__ == '__main__': + unittest.main() diff --git a/tools/manylinux1/Dockerfile.android b/tools/manylinux1/Dockerfile.android new file mode 100644 index 0000000000000000000000000000000000000000..b6cae228a0c45ab70ba8ecc80ae4df7e0fa5bdbc --- /dev/null +++ b/tools/manylinux1/Dockerfile.android @@ -0,0 +1,55 @@ +FROM ubuntu:16.04 +MAINTAINER PaddlePaddle Authors + +ARG UBUNTU_MIRROR +RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ubuntu.com/ubuntu#${UBUNTU_MIRROR}#g' /etc/apt/sources.list; fi' + +# ENV variables +ARG ANDROID_ABI +ARG ANDROID_API + +ENV ANDROID_ABI=${ANDROID_ABI:-"armeabi-v7a"} +ENV ANDROID_API=${ANDROID_API:-21} + +ENV HOME=/root \ + ANDROID_NDK_HOME=/opt/android-ndk-linux \ + ANDROID_TOOLCHAINS_DIR=/opt/toolchains + +RUN apt-get update && \ + apt-get install -y \ + git python-dev python-pip python-numpy \ + wget curl tar unzip gcc g++ locales clang-format-3.8 swig cmake && \ + apt-get clean -y + +# Install Go and glide +RUN wget -qO- go.tgz https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | \ + tar -xz -C /usr/local && \ + mkdir /root/gopath && \ + mkdir /root/gopath/bin && \ + mkdir /root/gopath/src +ENV GOROOT=/usr/local/go GOPATH=/root/gopath +# should not be in the same line with GOROOT definition, otherwise docker build could not find GOROOT. 
+ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin + +# git credential to skip password typing +RUN git config --global credential.helper store + +# Fix locales to en_US.UTF-8 +RUN localedef -i en_US -f UTF-8 en_US.UTF-8 + +RUN pip install --upgrade pip && \ + pip install -U 'protobuf==3.1.0' && \ + pip install -U wheel sphinx && \ + pip install pre-commit + +# Android NDK +RUN mkdir -p ${ANDROID_TOOLCHAINS_DIR} && \ + mkdir -p /opt/android-ndk-tmp && \ + cd /opt/android-ndk-tmp && \ + wget -q https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip && \ + unzip -q android-ndk-r14b-linux-x86_64.zip && \ + mv android-ndk-r14b ${ANDROID_NDK_HOME} && \ + rm -rf /opt/android-ndk-tmp + +CMD ["bash", "/paddle/paddle/scripts/docker/build_android.sh"] + diff --git a/tools/manylinux1/Dockerfile.x64 b/tools/manylinux1/Dockerfile.x64 new file mode 100644 index 0000000000000000000000000000000000000000..2c6ba650a5d7996bef212e88a16f2a159ca377e7 --- /dev/null +++ b/tools/manylinux1/Dockerfile.x64 @@ -0,0 +1,54 @@ +# NOTE The manylinux1 policy mandates CentOS-5. We replace it with CentOS-6 in +# order to satisfy the build of capnproto library (a nupic.core dependency), +# which requires some headers and symbols not present on CentOS-5 (e.g., +# signalfd.h, pipe2, O_NONBLOCK, SOCK_NONBLOCK, etc.). See +# https://github.com/sandstorm-io/capnproto/issues/350. +FROM nvidia/cuda: +MAINTAINER Numenta, based on the ManyLinux project + +ENV LC_ALL en_US.UTF-8 +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US.UTF-8 +ENV PATH /opt/rh/devtoolset-2/root/usr/bin:$PATH +ENV LD_LIBRARY_PATH /opt/rh/devtoolset-2/root/usr/lib64:/opt/rh/devtoolset-2/root/usr/lib:/usr/local/lib64:/usr/local/lib:${LD_LIBRARY_PATH} +ENV PKG_CONFIG_PATH=/usr/local/lib/pkgconfig + +COPY build_scripts /build_scripts +RUN bash build_scripts/build.sh && rm -r build_scripts + +ENV SSL_CERT_FILE=/opt/_internal/certs.pem + +# for paddle +RUN wget --no-check-certificate -qO- https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | \ + tar -xz -C /usr/local && \ + mkdir /root/gopath && \ + mkdir /root/gopath/bin && \ + mkdir /root/gopath/src + + +ENV GOROOT=/usr/local/go GOPATH=/root/gopath +ENV PATH=${GOROOT}/bin:${GOPATH}/bin:${PATH} + +# protobuf 3.1.0 +RUN cd /opt && wget -q --no-check-certificate https://github.com/google/protobuf/releases/download/v3.1.0/protobuf-cpp-3.1.0.tar.gz && \ + tar xzf protobuf-cpp-3.1.0.tar.gz && \ + cd protobuf-3.1.0 && ./configure && make -j4 && make install && cd .. 
&& rm -f protobuf-cpp-3.1.0.tar.gz + + +RUN yum install -y sqlite-devel zlib-devel openssl-devel boost boost-devel pcre-devel vim tk-devel tkinter libtool + +RUN wget -O /root/requirements.txt https://raw.githubusercontent.com/PaddlePaddle/Paddle/develop/python/requirements.txt + +RUN LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs4/lib:${LD_LIBRARY_PATH} /opt/python/cp27-cp27mu/bin/pip install -r /root/requirements.txt && \ + LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs2/lib:${LD_LIBRARY_PATH} /opt/python/cp27-cp27m/bin/pip install -r /root/requirements.txt && \ + go get github.com/Masterminds/glide && \ + rm -rf /root/requirements.txt + +RUN LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs4/lib:${LD_LIBRARY_PATH} /opt/python/cp27-cp27mu/bin/pip install pre-commit 'ipython==5.3.0' opencv-python && \ + LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs2/lib:${LD_LIBRARY_PATH} /opt/python/cp27-cp27m/bin/pip install pre-commit 'ipython==5.3.0' opencv-python + +RUN wget -O /opt/swig-2.0.12.tar.gz https://sourceforge.net/projects/swig/files/swig/swig-2.0.12/swig-2.0.12.tar.gz/download && \ + cd /opt && tar xzf swig-2.0.12.tar.gz && cd /opt/swig-2.0.12 && ./configure && make && make install && cd /opt && rm swig-2.0.12.tar.gz + +RUN mkdir -p /src && cd /src && git clone https://github.com/NVIDIA/nccl.git nccl && cd nccl &&\ + make -j `nproc` install && cd .. && rm -rf nccl diff --git a/tools/manylinux1/README.md b/tools/manylinux1/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cb0a9ac22cda6fb6f585ab8fd95179573c760f28 --- /dev/null +++ b/tools/manylinux1/README.md @@ -0,0 +1,30 @@ +# buildtools + +We release PaddlePaddle and PaddlePaddle Fluid as shared libraries, +which we hope to publish as wheel packages on PyPI, so we need +to make sure that the build follows the +[manylinux1](https://www.python.org/dev/peps/pep-0513/) standard. + +The manylinux standard suggests building Python modules on an old +system, because a module inevitably depends on some shared +libraries, and binaries built with newer compilers and C libraries +cannot run on systems that ship only older versions of them. +The suggested build environment is as old as CentOS 5. +However, PaddlePaddle relies on CUDA, and the earliest version of +[CentOS that works with CUDA is CentOS 6](https://hub.docker.com/r/nvidia/cuda/). +So, here we provide a Docker image based on CentOS 6 and CUDA for +building PaddlePaddle, making the release "as manylinux as +possible", i.e. compatible with "sufficiently many Linux" systems, per [this +discussion](https://mail.python.org/pipermail/wheel-builders/2016-July/000175.html). + +The build output of our Docker image includes multiple wheel files -- +some contain the CPU-only binary, others support CUDA; some are +compatible with the cp27m Python ABI, others with cp27mu. + +To build these wheels, please run the following commands: + +```bash +git clone https://github.com/paddlepaddle/paddle +cd paddle/tools/manylinux1 +REPO=[yourrepo] ./build_all.sh +``` diff --git a/tools/manylinux1/build_all.sh b/tools/manylinux1/build_all.sh new file mode 100755 index 0000000000000000000000000000000000000000..097bedb5265d00f8aa362bb0272af633c97192ba --- /dev/null +++ b/tools/manylinux1/build_all.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -xe + +REPO="${REPO:-typhoon1986}" + +# NOTE: the base image versions and NVCC_GENCODE values substituted below are fixed for each target CUDA/cuDNN release.
+sed 's//7.5-cudnn5-devel-centos6/g' Dockerfile.x64 | \ +sed 's//NVCC_GENCODE="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_52,code=sm_52"/g'> Dockerfile.tmp +docker build -t ${REPO}/paddle_manylinux_devel:cuda7.5_cudnn5 -f Dockerfile.tmp . +docker push ${REPO}/paddle_manylinux_devel:cuda7.5_cudnn5 + +sed 's//8.0-cudnn5-devel-centos6/g' Dockerfile.x64 | \ +sed 's//NVCC_GENCODE="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_52,code=sm_52 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_60,code=compute_60 -gencode=arch=compute_61,code=sm_61 -gencode=arch=compute_62,code=sm_62"/g'> Dockerfile.tmp +docker build -t ${REPO}/paddle_manylinux_devel:cuda8.0_cudnn5 -f Dockerfile.tmp . +docker push ${REPO}/paddle_manylinux_devel:cuda8.0_cudnn5 + +sed 's//8.0-cudnn7-devel-centos6/g' Dockerfile.x64 | \ +sed 's//NVCC_GENCODE="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_52,code=sm_52 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_60,code=compute_60 -gencode=arch=compute_61,code=sm_61 -gencode=arch=compute_62,code=sm_62"/g'> Dockerfile.tmp + +docker build -t ${REPO}/paddle_manylinux_devel:cuda8.0_cudnn7 -f Dockerfile.tmp . +docker push ${REPO}/paddle_manylinux_devel:cuda8.0_cudnn7 + +sed 's//9.0-cudnn7-devel-centos6/g' Dockerfile.x64 | \ +sed 's//NVCC_GENCODE="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_52,code=sm_52 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_60,code=compute_60 -gencode=arch=compute_61,code=sm_61 -gencode=arch=compute_62,code=sm_62 -gencode=arch=compute_70,code=sm_70"/g'> Dockerfile.tmp +docker build -t ${REPO}/paddle_manylinux_devel:cuda9.0_cudnn7 -f Dockerfile.tmp . +docker push ${REPO}/paddle_manylinux_devel:cuda9.0_cudnn7 diff --git a/tools/manylinux1/build_scripts/build.sh b/tools/manylinux1/build_scripts/build.sh new file mode 100644 index 0000000000000000000000000000000000000000..93591fa9ddad8a78df344e1e912a5f1c7e93dfa4 --- /dev/null +++ b/tools/manylinux1/build_scripts/build.sh @@ -0,0 +1,152 @@ +#!/bin/bash +# Top-level build script called from Dockerfile + +# Stop at any error, show all commands +set -ex + +# Python versions to be installed in /opt/$VERSION_NO +# NOTE Only need python 2.7.11 for nupic.core/nupic.bindings at this time, so +# remove others to expedite build and reduce docker image size. The original +# manylinux docker image project builds many python versions. 
+# NOTE We added back 3.5.1, since auditwheel requires python 3.3+ +CPYTHON_VERSIONS="2.7.11 3.5.1" + +# openssl version to build, with expected sha256 hash of .tar.gz +# archive +OPENSSL_ROOT=openssl-1.0.2l +OPENSSL_HASH=ce07195b659e75f4e1db43552860070061f156a98bb37b672b101ba6e3ddf30c +EPEL_RPM_HASH=e5ed9ecf22d0c4279e92075a64c757ad2b38049bcf5c16c4f2b75d5f6860dc0d +DEVTOOLS_HASH=a8ebeb4bed624700f727179e6ef771dafe47651131a00a78b342251415646acc +PATCHELF_HASH=d9afdff4baeacfbc64861454f368b7f2c15c44d245293f7587bbf726bfe722fb +CURL_ROOT=curl-7.49.1 +CURL_HASH=eb63cec4bef692eab9db459033f409533e6d10e20942f4b060b32819e81885f1 +AUTOCONF_ROOT=autoconf-2.69 +AUTOCONF_HASH=954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969 + +# Dependencies for compiling Python that we want to remove from +# the final image after compiling Python +PYTHON_COMPILE_DEPS="zlib-devel bzip2-devel ncurses-devel sqlite-devel readline-devel tk-devel gdbm-devel db4-devel libpcap-devel xz-devel" + +# Libraries that are allowed as part of the manylinux1 profile +MANYLINUX1_DEPS="glibc-devel libstdc++-devel glib2-devel libX11-devel libXext-devel libXrender-devel mesa-libGL-devel libICE-devel libSM-devel ncurses-devel" + +# Get build utilities +MY_DIR=$(dirname "${BASH_SOURCE[0]}") +source $MY_DIR/build_utils.sh + +# EPEL support +yum -y install wget curl +curl -sLO https://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm +check_sha256sum epel-release-6-8.noarch.rpm $EPEL_RPM_HASH + +# Dev toolset (for LLVM and other projects requiring C++11 support) +curl -sLO http://people.centos.org/tru/devtools-2/devtools-2.repo +check_sha256sum devtools-2.repo $DEVTOOLS_HASH +mv devtools-2.repo /etc/yum.repos.d/devtools-2.repo +rpm -Uvh --replacepkgs epel-release-6*.rpm +rm -f epel-release-6*.rpm + +# Development tools and libraries +yum -y install bzip2 make git patch unzip bison yasm diffutils \ + automake which file \ + kernel-devel-`uname -r` \ + devtoolset-2-binutils devtoolset-2-gcc \ + devtoolset-2-gcc-c++ devtoolset-2-gcc-gfortran \ + ${PYTHON_COMPILE_DEPS} + +# Install more recent version of cmake +# curl -O https://cmake.org/files/v3.8/cmake-3.8.1-Linux-x86_64.sh +# /bin/sh cmake-3.8.1-Linux-x86_64.sh --prefix=/usr/local --skip-license +# rm cmake-3.8.1-Linux-x86_64.sh + +wget -q https://cmake.org/files/v3.5/cmake-3.5.2.tar.gz && tar xzf cmake-3.5.2.tar.gz && \ +cd cmake-3.5.2 && ./bootstrap && \ +make -j4 && make install && cd .. && rm cmake-3.5.2.tar.gz + + +# Install newest autoconf +build_autoconf $AUTOCONF_ROOT $AUTOCONF_HASH +autoconf --version + +# Compile the latest Python releases. +# (In order to have a proper SSL module, Python is compiled +# against a recent openssl [see env vars above], which is linked +# statically. We delete openssl afterwards.) +build_openssl $OPENSSL_ROOT $OPENSSL_HASH +mkdir -p /opt/python +build_cpythons $CPYTHON_VERSIONS + +PY35_BIN=/opt/python/cp35-cp35m/bin +# NOTE Since our custom manylinux image builds pythons with shared +# libpython, we need to add libpython's dir to LD_LIBRARY_PATH before running +# python. 
+ORIGINAL_LD_LIBRARY_PATH="${LD_LIBRARY_PATH}" +LD_LIBRARY_PATH="${ORIGINAL_LD_LIBRARY_PATH}:$(dirname ${PY35_BIN})/lib" + +# Our openssl doesn't know how to find the system CA trust store +# (https://github.com/pypa/manylinux/issues/53) +# And it's not clear how up-to-date that is anyway +# So let's just use the same one pip and everyone uses +LD_LIBRARY_PATH="${ORIGINAL_LD_LIBRARY_PATH}:$(dirname ${PY35_BIN})/lib" $PY35_BIN/pip install certifi +ln -s $($PY35_BIN/python -c 'import certifi; print(certifi.where())') \ + /opt/_internal/certs.pem +# If you modify this line you also have to modify the versions in the +# Dockerfiles: +export SSL_CERT_FILE=/opt/_internal/certs.pem + +# Install newest curl +build_curl $CURL_ROOT $CURL_HASH +rm -rf /usr/local/include/curl /usr/local/lib/libcurl* /usr/local/lib/pkgconfig/libcurl.pc +hash -r +curl --version +curl-config --features + +# Now we can delete our built SSL +rm -rf /usr/local/ssl + +# Install patchelf (latest with unreleased bug fixes) +curl -sLO https://nipy.bic.berkeley.edu/manylinux/patchelf-0.9njs2.tar.gz +check_sha256sum patchelf-0.9njs2.tar.gz $PATCHELF_HASH +tar -xzf patchelf-0.9njs2.tar.gz +(cd patchelf-0.9njs2 && ./configure && make && make install) +rm -rf patchelf-0.9njs2.tar.gz patchelf-0.9njs2 + +# Install latest pypi release of auditwheel +LD_LIBRARY_PATH="${ORIGINAL_LD_LIBRARY_PATH}:$(dirname ${PY35_BIN})/lib" $PY35_BIN/pip install auditwheel +ln -s $PY35_BIN/auditwheel /usr/local/bin/auditwheel + +# Clean up development headers and other unnecessary stuff for +# final image +yum -y erase wireless-tools gtk2 libX11 hicolor-icon-theme \ + avahi freetype bitstream-vera-fonts \ + ${PYTHON_COMPILE_DEPS} > /dev/null 2>&1 +yum -y install ${MANYLINUX1_DEPS} +yum -y clean all > /dev/null 2>&1 +yum list installed +# we don't need libpython*.a, and they're many megabytes +find /opt/_internal -name '*.a' -print0 | xargs -0 rm -f +# Strip what we can -- and ignore errors, because this just attempts to strip +# *everything*, including non-ELF files: +find /opt/_internal -type f -print0 \ + | xargs -0 -n1 strip --strip-unneeded 2>/dev/null || true +# We do not need the Python test suites, or indeed the precompiled .pyc and +# .pyo files. 
Partially cribbed from: +# https://github.com/docker-library/python/blob/master/3.4/slim/Dockerfile +find /opt/_internal \ + \( -type d -a -name test -o -name tests \) \ + -o \( -type f -a -name '*.pyc' -o -name '*.pyo' \) \ + -print0 | xargs -0 rm -f + +for PYTHON in /opt/python/*/bin/python; do + # Add matching directory of libpython shared library to library lookup path + LD_LIBRARY_PATH="${ORIGINAL_LD_LIBRARY_PATH}:$(dirname $(dirname ${PYTHON}))/lib" + + # Smoke test to make sure that our Pythons work, and do indeed detect as + # being manylinux compatible: + LD_LIBRARY_PATH="${ORIGINAL_LD_LIBRARY_PATH}:$(dirname $(dirname ${PYTHON}))/lib" $PYTHON $MY_DIR/manylinux1-check.py + # Make sure that SSL cert checking works + LD_LIBRARY_PATH="${ORIGINAL_LD_LIBRARY_PATH}:$(dirname $(dirname ${PYTHON}))/lib" $PYTHON $MY_DIR/ssl-check.py +done + +# Restore LD_LIBRARY_PATH +LD_LIBRARY_PATH="${ORIGINAL_LD_LIBRARY_PATH}" diff --git a/tools/manylinux1/build_scripts/build_utils.sh b/tools/manylinux1/build_scripts/build_utils.sh new file mode 100755 index 0000000000000000000000000000000000000000..10422ae3bd00f4e0dd059af0384f8cc17e4b7855 --- /dev/null +++ b/tools/manylinux1/build_scripts/build_utils.sh @@ -0,0 +1,173 @@ +#!/bin/bash +# Helper utilities for build + +PYTHON_DOWNLOAD_URL=https://www.python.org/ftp/python +# XXX: the official https server at www.openssl.org cannot be reached +# with the old versions of openssl and curl in Centos 5.11 hence the fallback +# to the ftp mirror: +# OPENSSL_DOWNLOAD_URL=ftp://ftp.openssl.org/source +OPENSSL_DOWNLOAD_URL=https://www.openssl.org/source +# Ditto the curl sources +CURL_DOWNLOAD_URL=http://curl.askapache.com/download + +GET_PIP_URL=https://bootstrap.pypa.io/get-pip.py + +AUTOCONF_DOWNLOAD_URL=http://ftp.gnu.org/gnu/autoconf + + +function check_var { + if [ -z "$1" ]; then + echo "required variable not defined" + exit 1 + fi +} + + +function lex_pyver { + # Echoes Python version string padded with zeros + # Thus: + # 3.2.1 -> 003002001 + # 3 -> 003000000 + echo $1 | awk -F "." '{printf "%03d%03d%03d", $1, $2, $3}' +} + + +function do_cpython_build { + local py_ver=$1 + check_var $py_ver + local ucs_setting=$2 + check_var $ucs_setting + tar -xzf Python-$py_ver.tgz + pushd Python-$py_ver + if [ "$ucs_setting" = "none" ]; then + unicode_flags="" + dir_suffix="" + else + local unicode_flags="--enable-unicode=$ucs_setting" + local dir_suffix="-$ucs_setting" + fi + local prefix="/opt/_internal/cpython-${py_ver}${dir_suffix}" + mkdir -p ${prefix}/lib + # -Wformat added for https://bugs.python.org/issue17547 on Python 2.6 + + # NOTE --enable-shared for generating libpython shared library needed for + # linking of some of the nupic.core test executables. + CFLAGS="-Wformat" ./configure --prefix=${prefix} --enable-shared $unicode_flags > /dev/null + make -j2 > /dev/null + make install > /dev/null + popd + echo "ZZZ looking for libpython" + find / -name 'libpython*.so*' + rm -rf Python-$py_ver + # Some python's install as bin/python3. Make them available as + # bin/python. 
+ if [ -e ${prefix}/bin/python3 ]; then + ln -s python3 ${prefix}/bin/python + fi + # NOTE Make libpython shared library visible to python calls below + LD_LIBRARY_PATH="${prefix}/lib" ${prefix}/bin/python get-pip.py + LD_LIBRARY_PATH="${prefix}/lib" ${prefix}/bin/pip install wheel + local abi_tag=$(LD_LIBRARY_PATH="${prefix}/lib" ${prefix}/bin/python ${MY_DIR}/python-tag-abi-tag.py) + ln -s ${prefix} /opt/python/${abi_tag} +} + + +function build_cpython { + local py_ver=$1 + check_var $py_ver + check_var $PYTHON_DOWNLOAD_URL + wget -q $PYTHON_DOWNLOAD_URL/$py_ver/Python-$py_ver.tgz + if [ $(lex_pyver $py_ver) -lt $(lex_pyver 3.3) ]; then + # NOTE We only need wide unicode for nupic.bindings wheel + do_cpython_build $py_ver ucs2 + do_cpython_build $py_ver ucs4 + else + do_cpython_build $py_ver none + fi + rm -f Python-$py_ver.tgz +} + + +function build_cpythons { + check_var $GET_PIP_URL + curl -sLO $GET_PIP_URL + for py_ver in $@; do + build_cpython $py_ver + done + rm get-pip.py +} + + +function do_openssl_build { + ./config no-ssl2 no-shared -fPIC --prefix=/usr/local/ssl > /dev/null + make > /dev/null + make install > /dev/null +} + + +function check_sha256sum { + local fname=$1 + check_var ${fname} + local sha256=$2 + check_var ${sha256} + + echo "${sha256} ${fname}" > ${fname}.sha256 + sha256sum -c ${fname}.sha256 + rm ${fname}.sha256 +} + + +function build_openssl { + local openssl_fname=$1 + check_var ${openssl_fname} + local openssl_sha256=$2 + check_var ${openssl_sha256} + check_var ${OPENSSL_DOWNLOAD_URL} + curl -sLO ${OPENSSL_DOWNLOAD_URL}/${openssl_fname}.tar.gz + check_sha256sum ${openssl_fname}.tar.gz ${openssl_sha256} + tar -xzf ${openssl_fname}.tar.gz + (cd ${openssl_fname} && do_openssl_build) + rm -rf ${openssl_fname} ${openssl_fname}.tar.gz +} + + +function do_curl_build { + LIBS=-ldl ./configure --with-ssl --disable-shared > /dev/null + make > /dev/null + make install > /dev/null +} + + +function build_curl { + local curl_fname=$1 + check_var ${curl_fname} + local curl_sha256=$2 + check_var ${curl_sha256} + check_var ${CURL_DOWNLOAD_URL} + curl -sLO ${CURL_DOWNLOAD_URL}/${curl_fname}.tar.bz2 + check_sha256sum ${curl_fname}.tar.bz2 ${curl_sha256} + tar -jxf ${curl_fname}.tar.bz2 + (cd ${curl_fname} && do_curl_build) + rm -rf ${curl_fname} ${curl_fname}.tar.bz2 +} + + +function do_standard_install { + ./configure > /dev/null + make > /dev/null + make install > /dev/null +} + + +function build_autoconf { + local autoconf_fname=$1 + check_var ${autoconf_fname} + local autoconf_sha256=$2 + check_var ${autoconf_sha256} + check_var ${AUTOCONF_DOWNLOAD_URL} + curl -sLO ${AUTOCONF_DOWNLOAD_URL}/${autoconf_fname}.tar.gz + check_sha256sum ${autoconf_fname}.tar.gz ${autoconf_sha256} + tar -zxf ${autoconf_fname}.tar.gz + (cd ${autoconf_fname} && do_standard_install) + rm -rf ${autoconf_fname} ${autoconf_fname}.tar.gz +} diff --git a/tools/manylinux1/build_scripts/manylinux1-check.py b/tools/manylinux1/build_scripts/manylinux1-check.py new file mode 100644 index 0000000000000000000000000000000000000000..47fd3d673be662d2229480ee650dc3799301c31e --- /dev/null +++ b/tools/manylinux1/build_scripts/manylinux1-check.py @@ -0,0 +1,56 @@ +# Logic copied from PEP 513 + + +def is_manylinux1_compatible(): + # Only Linux, and only x86-64 / i686 + from distutils.util import get_platform + if get_platform() not in ["linux-x86_64", "linux-i686"]: + return False + + # Check for presence of _manylinux module + try: + import _manylinux + return bool(_manylinux.manylinux1_compatible) + except 
(ImportError, AttributeError): + # Fall through to heuristic check below + pass + + # Check glibc version. CentOS 5 uses glibc 2.5. + return have_compatible_glibc(2, 5) + + +def have_compatible_glibc(major, minimum_minor): + import ctypes + + process_namespace = ctypes.CDLL(None) + try: + gnu_get_libc_version = process_namespace.gnu_get_libc_version + except AttributeError: + # Symbol doesn't exist -> therefore, we are not linked to + # glibc. + return False + + # Call gnu_get_libc_version, which returns a string like "2.5". + gnu_get_libc_version.restype = ctypes.c_char_p + version_str = gnu_get_libc_version() + # py2 / py3 compatibility: + if not isinstance(version_str, str): + version_str = version_str.decode("ascii") + + # Parse string and check against requested version. + version = [int(piece) for piece in version_str.split(".")] + assert len(version) == 2 + if major != version[0]: + return False + if minimum_minor > version[1]: + return False + return True + + +import sys +if is_manylinux1_compatible(): + print("%s is manylinux1 compatible" % (sys.executable, )) + sys.exit(0) +else: + print("%s is NOT manylinux1 compatible" % (sys.executable, )) + sys.exit(1) diff --git a/tools/manylinux1/build_scripts/python-tag-abi-tag.py b/tools/manylinux1/build_scripts/python-tag-abi-tag.py new file mode 100644 index 0000000000000000000000000000000000000000..301fbf07a47fef03c91d9dd5f49c2894a5971319 --- /dev/null +++ b/tools/manylinux1/build_scripts/python-tag-abi-tag.py @@ -0,0 +1,7 @@ +# Utility script to print the python tag + the abi tag for a Python +# See PEP 425 for exactly what these are, but an example would be: +# cp27-cp27mu + +from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag + +print("{0}{1}-{2}".format(get_abbr_impl(), get_impl_ver(), get_abi_tag())) diff --git a/tools/manylinux1/build_scripts/ssl-check.py b/tools/manylinux1/build_scripts/ssl-check.py new file mode 100644 index 0000000000000000000000000000000000000000..a85d91978c510cccd366c174c317e6a3bdb589bd --- /dev/null +++ b/tools/manylinux1/build_scripts/ssl-check.py @@ -0,0 +1,32 @@ +# cf. https://github.com/pypa/manylinux/issues/53 + +GOOD_SSL = "https://google.com" +BAD_SSL = "https://self-signed.badssl.com" + +import sys + +print("Testing SSL certificate checking for Python:", sys.version) + +# PEP 476: certificate verification is on by default since 2.7.9 and 3.4.3 +if sys.version_info[:3] < (2, 7, 9) or (3, 0) <= sys.version_info[:3] < (3, 4, 3): + print("This version never checks SSL certs; skipping tests") + sys.exit(0) + +if sys.version_info[0] >= 3: + from urllib.request import urlopen + EXC = OSError +else: + from urllib import urlopen + EXC = IOError + +print("Connecting to %s should work" % (GOOD_SSL, )) +urlopen(GOOD_SSL) +print("...it did, yay.") + +print("Connecting to %s should fail" % (BAD_SSL, )) +try: + urlopen(BAD_SSL) + # If we get here then we failed: + print("...it DIDN'T!!!!!11!!1one!") + sys.exit(1) +except EXC: + print("...it did, yay.")
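As a usage note (an editorial sketch, not part of the patch above): the loop at the end of `build.sh` runs `manylinux1-check.py` and `ssl-check.py` against every interpreter built under `/opt/python`, with `LD_LIBRARY_PATH` pointed at the matching shared `libpython` directory. The snippet below mirrors that flow in plain Python; `SCRIPT_DIR` is a placeholder, since the real scripts live in the temporary `build_scripts` directory that `Dockerfile.x64` copies in and removes after the build.

```python
# Minimal sketch of the smoke-check loop in build.sh (illustrative only).
import glob
import os
import subprocess

SCRIPT_DIR = "/build_scripts"  # placeholder location of the check scripts

for python_bin in sorted(glob.glob("/opt/python/*/bin/python")):
    # Each /opt/python/<tag> entry links to an /opt/_internal/cpython-* prefix;
    # the shared libpython needed by the interpreter lives in its lib/ directory.
    prefix = os.path.dirname(os.path.dirname(python_bin))
    env = dict(os.environ)
    env["LD_LIBRARY_PATH"] = os.path.join(prefix, "lib")
    for check in ("manylinux1-check.py", "ssl-check.py"):
        subprocess.check_call([python_bin, os.path.join(SCRIPT_DIR, check)], env=env)
```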