diff --git a/.copyright.hook b/.copyright.hook index 09afff2072df3384a429d01d06188218ae6e85d1..86b16ebdc46047c7cb3d7731a71cbf9647a1f2fe 100644 --- a/.copyright.hook +++ b/.copyright.hook @@ -9,7 +9,7 @@ import subprocess import platform COPYRIGHT = ''' - Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/cmake/external/mkldnn.cmake b/cmake/external/mkldnn.cmake index 5759e5c489724332793bf103b7aacf7ffb068611..83c1cf1457dfc2d98039b7a03e8a569a0352991a 100644 --- a/cmake/external/mkldnn.cmake +++ b/cmake/external/mkldnn.cmake @@ -53,7 +53,7 @@ ExternalProject_Add( ${EXTERNAL_PROJECT_LOG_ARGS} DEPENDS ${MKLDNN_DEPENDS} GIT_REPOSITORY "https://github.com/01org/mkl-dnn.git" - GIT_TAG "v0.11" + GIT_TAG "v0.14" PREFIX ${MKLDNN_SOURCES_DIR} UPDATE_COMMAND "" CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${MKLDNN_INSTALL_DIR} diff --git a/contrib/inference/README.md b/contrib/inference/README.md new file mode 100644 index 0000000000000000000000000000000000000000..20969fac6c8f894ffb4a02b48f795e2a0dcbd096 --- /dev/null +++ b/contrib/inference/README.md @@ -0,0 +1,27 @@ +# Embed Paddle Inference in Your Application + +Paddle inference offers APIs in the `C` and `C++` languages. + +A model trained by Paddle can easily be deployed by following the steps below: + +1. Optimize the native model; +2. Write some code for deployment. + + +Let's explain the steps in detail. + +## Optimize the native Fluid Model + +The native model obtained from the training phase needs to be optimized for inference. + +- Clean out noise such as the cost operators, which are not needed for inference; +- Prune unnecessary computation branches that have nothing to do with the output; +- Remove extraneous variables; +- Reuse memory in the native Fluid executor; +- Translate the model storage format to a third-party engine's format, so that the inference API can use that engine for acceleration; + +We have an official tool for this optimization; run `paddle_inference_optimize --help` for more information. + +## Write some code + +Read `paddle_inference_api.h` for more information. diff --git a/contrib/inference/paddle_inference_api.h b/contrib/inference/paddle_inference_api.h new file mode 100644 index 0000000000000000000000000000000000000000..dbaa7c95b97e954537707566e5b7458e6afd14c8 --- /dev/null +++ b/contrib/inference/paddle_inference_api.h @@ -0,0 +1,69 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include <string> + +#include <vector> + +namespace paddle { + +class Predictor { +public: + struct Attr; + Predictor() = default; + + // Build the network before inference. + bool Init(const Attr& attr); + + // Predict a record. + // Arguments: + //   inputs: the name of the input variables. + //   outputs: the name of the output variables. + //   input_shapes: the shape of the input variables.
+  //   output_shapes: the shape of the output variables. +  //   input_data: the data of the input variables. +  //   output_data: the data of the output variables. +  bool Run(const std::vector<std::string>& inputs, +           const std::vector<std::string>& outputs, +           const std::vector<std::vector<int>>& input_shapes, +           const std::vector<std::vector<int>>& output_shapes, +           const std::vector<std::vector<float>>& input_data, +           std::vector<std::vector<float>>* output_data); + + // Clone a predictor that shares the model weights. + Predictor* Clone(); + + // Destroy the Predictor. + ~Predictor(); + + struct Attr { + enum class EngineKind; + + std::string model_dir; // path to the model directory. + bool enable_engine{false}; // Enable executing (part of) the model on + // third-party engines. + EngineKind engine_kind{Attr::EngineKind::kNone}; + + enum class EngineKind { + kNone = -1, // Use the native Fluid facility. + kAnakin, // Use Anakin for inference. + kTensorRT, // Use TensorRT for inference. + kAutoMixedAnakin, // Automatically mix Fluid with Anakin. + kAutoMixedTensorRT, // Automatically mix Fluid with TensorRT. + }; + }; +}; + +} // namespace paddle diff --git a/doc/fluid/design/motivation/api.md b/doc/fluid/design/motivation/api.md index e6a4638d9100d9b07c3ee6b92b530a17eae1c162..bc222564e3ec28e306ca0572b6a23104f6e9cbc5 100644 --- a/doc/fluid/design/motivation/api.md +++ b/doc/fluid/design/motivation/api.md @@ -77,8 +77,7 @@ print "The sematic-vector of testA: ", paddle.infer(fA, parameters, testA) ### Example 2. Sharing Parameters between "Models" -We use [GAN](https://github.com/PaddlePaddle/book/tree/develop/gan) in -this example. In the following example program, `d0` and `d1` +We use GAN in this example. In the following example program, `d0` and `d1` correspond to the two networks in the following figure: diff --git a/doc/fluid/design/multi_devices/operator_kernel_type.md b/doc/fluid/design/multi_devices/operator_kernel_type.md index 8c1bc8f76a337006497e5ab5e5a710f9f49261b8..5e391bd62b4f4e123a9a6f35b7adf5726f205635 100644 --- a/doc/fluid/design/multi_devices/operator_kernel_type.md +++ b/doc/fluid/design/multi_devices/operator_kernel_type.md @@ -75,7 +75,7 @@ Different layout leads to different implementation of the operator kernel. There - The inference of Layout is at run-time, not at compile-time. -- Every operator has to implement different kernels for different layouts. Let's take MKLDNN as an example. If we want to implement an MKLDNN convolution operator, we have to implement all the kernels for different layouts, which are listed [here](http://01org.github.io/mkl-dnn/structmkldnn_1_1memory.html). And we will have a special macro to register kernels for MKLDNN operators. +- Every operator has to implement different kernels for different layouts. Let's take MKLDNN as an example. If we want to implement an MKLDNN convolution operator, we have to implement all the kernels for different layouts, which are listed [here](http://intel.github.io/mkl-dnn/structmkldnn_1_1memory.html). And we will have a special macro to register kernels for MKLDNN operators. `Layout` is also defined as an enum variable: diff --git a/doc/fluid/howto/cluster/nccl2_rdma_training.md b/doc/fluid/howto/cluster/nccl2_rdma_training.md new file mode 100644 index 0000000000000000000000000000000000000000..cecd5c3a7a7339e3be6772543a534728ec132105 --- /dev/null +++ b/doc/fluid/howto/cluster/nccl2_rdma_training.md @@ -0,0 +1,110 @@ +# Distributed Training with NCCL2 and RDMA + +When doing distributed multi-GPU training, network bandwidth often becomes the +bottleneck.
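A minimal usage sketch for the `Predictor` interface declared in `contrib/inference/paddle_inference_api.h` above. It is only an illustration, not part of the PR: the include path, model directory, variable names and shapes are made up, and the element types follow the argument comments (names as strings, shapes as ints, data as floats).

```cpp
#include <string>
#include <vector>

#include "contrib/inference/paddle_inference_api.h"  // illustrative include path

int main() {
  paddle::Predictor predictor;
  paddle::Predictor::Attr attr;
  attr.model_dir = "./my_fluid_model";  // hypothetical optimized model directory
  if (!predictor.Init(attr)) return 1;

  // One input named "x" with shape [1, 784]; one output named "y" (made-up names).
  std::vector<std::vector<int>> input_shapes = {{1, 784}};
  std::vector<std::vector<int>> output_shapes = {{1, 10}};
  std::vector<std::vector<float>> input_data = {std::vector<float>(784, 0.f)};
  std::vector<std::vector<float>> output_data;

  bool ok = predictor.Run({"x"}, {"y"}, input_shapes, output_shapes,
                          input_data, &output_data);
  return ok ? 0 : 1;
}
```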
We introduce a way to use NCCL2 to run such training jobs and +achieve the best performance. + +## Prepare Hardware with RDMA and Multiple GPUs + +I'm using two Linux servers, each of which has 8 GPUs and +one 100Gb RDMA card installed. +The base environment is: + +* OS: CentOS 7.4 +* RDMA device: "Mellanox Technologies MT27700 Family [ConnectX-4]" +* Kernel version: `4.4.88-1.el7.elrepo.x86_64` +* Docker version: `1.12.6` +* Docker storage driver: `overlay2` +* IP addresses: 192.168.16.30,192.168.16.34 + +In general, the steps include: + +1. Install GPU drivers +1. Install RDMA drivers +1. Install "InfiniBand Support" +1. Use docker to run tests and make sure GPUs and RDMA can work inside + the container. + +I'll omit the section "Install GPU drivers" because it can easily be found +somewhere else. + +### Install RDMA drivers + +In my case, I've got two machines with the device +"Mellanox Technologies MT27700 Family [ConnectX-4]" installed. The OS was +"CentOS 7.4" and I updated the kernel to version 4.4 so that docker can +work with the latest overlay2 filesystem. + +***NOTE: before you start, make sure you have a way to get a console +of the server other than ssh, because we may need to re-configure the +network device.*** + +1. Go to http://www.mellanox.com/page/products_dyn?product_family=26, + download the `MLNX_OFED` software at the bottom of the page, and upload it + onto the server. +1. Run `./mlnxofedinstall --add-kernel-support` in the software package. +1. Run `/etc/init.d/openibd restart` to make everything work; note that + this operation may cause the network to go down if you are using this + RDMA device as the default network device and use ssh to log in to the server. +1. Re-configure the network interface, for example: + `ifconfig eth2 192.168.16.30/20 up`, then add routes if needed: + `ip route add default via 192.168.16.1 dev eth2`. +1. Do the same thing on the other node. +1. Use `ping` to test whether the two nodes have ICMP connectivity. +1. Use either `udaddy` or `ib_write_bw` to test that the network connection is + ready and has the desired bandwidth. + +### Prepare Docker Image to Run RDMA Programs + +1. Build a docker image using a CUDA base image like `nvidia/cuda:8.0-cudnn5-devel-ubuntu16.04` and install the paddlepaddle whl + package in it. +1. Start a docker container and mount GPU driver libs into it (you can + skip this step if you are using nvidia-docker). +1. Mount RDMA drivers and libs into the docker image (see the section below), + also `udaddy` and `ib_write_bw` if needed. +1. Mount GPU devices and RDMA devices into the container using `--device`, + or just use privileged mode `--privileged`. +1. Start the container using host network mode: `--net=host` + +### RDMA Library Files Needed + +Usually, `MLNX_OFED` installs the latest supported libs under +`/usr/lib64/mlnx_ofed/valgrind`. Other libs needed to run RDMA programs +are listed below. These libs must be mounted into the docker container. + +* Libs under `/usr/lib64/mlnx_ofed/valgrind` + * libibcm.so + * libibverbs.so + * libmlx4.so + * libmlx5.so + * libmlx5-rdmav2.so + * librdmacm.so +* Other libs: + * libnl-3.so.200 + * libnl-route-3.so.200 + * libnuma.so.1 + +## Start to Run the Training Job + +Set the following NCCL environment variables to turn NCCL switches on and off: + + +| Env Name | Description | +| --- | --- | +| NCCL_SOCKET_IFNAME | The RDMA device, e.g. eth2 | +| NCCL_P2P_DISABLE | Set to 1 to disable P2P transfer between GPUs | +| NCCL_IB_DISABLE | Set to 1 to disable using RDMA | +| NCCL_IB_CUDA_SUPPORT | Set to 1 to enable GPU Direct if supported | +| NCCL_DEBUG | Set debug level: VERSION, WARN, INFO | + +My two servers are `192.168.16.30` and `192.168.16.34`. On node 1, run: + +```bash +PADDLE_TRAINER_ID=0 PADDLE_PORT=48372 PADDLE_WORKERS=192.168.16.30,192.168.16.34 POD_IP=192.168.16.30 stdbuf -oL python vgg16.py +``` + +On node 2, run: + +```bash +PADDLE_TRAINER_ID=1 PADDLE_PORT=48372 PADDLE_WORKERS=192.168.16.30,192.168.16.34 POD_IP=192.168.16.34 stdbuf -oL python vgg16.py +``` diff --git a/doc/v2/design/mkl/mkldnn.md b/doc/v2/design/mkl/mkldnn.md index 1bd2e7bc34ee79eb753b3520d97e5e7beca89b0b..bd5bcf6f67168c21cebb046a629b948d1661e75c 100644 --- a/doc/v2/design/mkl/mkldnn.md +++ b/doc/v2/design/mkl/mkldnn.md @@ -5,7 +5,7 @@ fully leverage the strengths of the Intel platform and effectively improve PaddlePaddle's performance on Intel architectures.
-
+
Figure 1. PaddlePaddle on IA
@@ -42,16 +42,43 @@ Figure 1. PaddlePaddle on IA The relationship among MKL, MKLML and MKL-DNN is shown in the following table:
-| Name | Open Source | License | Descriptions |
-| :---------- | :--------------- | :---------- | :------------ |
-| MKL | No | Proprietary | Accelerate math processing routines |
-| MKLML | No | Proprietary | Small package of MKL, especially for Machine Learning |
-| MKL-DNN | Yes | Apache 2.0 | Accelerate primitives processing routines especially for Deep Neural Networks |
+<table>
+<thead>
+<tr><th>Name</th><th>Open Source</th><th>License</th><th>Descriptions</th></tr>
+</thead>
+<tbody>
+<tr><td>MKL</td><td>No</td><td>Proprietary</td><td>Accelerate math processing routines</td></tr>
+<tr><td>MKLML</td><td>No</td><td>Proprietary</td><td>Small package of MKL, especially for Machine Learning</td></tr>
+<tr><td>MKL-DNN</td><td>Yes</td><td>Apache 2.0</td><td>Accelerate primitives processing routines especially for Deep Neural Networks</td></tr>
+</tbody>
+</table>
MKLML can be used together with MKL-DNN to achieve the best performance.
-
+
Figure 2. PaddlePaddle with MKL Engines
@@ -103,7 +130,7 @@ MKL-DNN currently only ships a dynamic library, `libmkldnn.so`. So we define an `MKLDNNMatrix` to manage the different MKL-DNN data formats and the conversions between them.
-
+
Figure 3. MKLDNNMatrix
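To make the role of `MKLDNNMatrix` concrete, the following is a small conceptual sketch only, not the actual PaddlePaddle class and with no real MKL-DNN calls: a matrix wrapper remembers which MKL-DNN memory format its buffer currently uses and reorders the data on demand.

```cpp
#include <utility>
#include <vector>

// Conceptual sketch: illustrative names, no real MKL-DNN API usage.
enum class MemoryFormat { kNCHW, kNChw8c };

class FormatAwareMatrix {
 public:
  FormatAwareMatrix(std::vector<float> data, MemoryFormat fmt)
      : data_(std::move(data)), format_(fmt) {}

  MemoryFormat format() const { return format_; }

  // Convert the buffer to the format a primitive expects. A real
  // implementation would run an MKL-DNN reorder primitive here; this
  // sketch only tracks the format tag.
  void ReorderTo(MemoryFormat target) {
    if (format_ == target) return;  // already in the expected format
    // ... reorder data_ from format_ to target ...
    format_ = target;
  }

 private:
  std::vector<float> data_;
  MemoryFormat format_;
};
```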
@@ -113,7 +140,7 @@ Figure 3. MKLDNNMatrix Subclasses only need to use the predefined interfaces and implement the concrete functionality.
-
+
Figure 4. MKLDNNLayer
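As a conceptual sketch of the `MKLDNNLayer` idea (again, illustrative code rather than the actual PaddlePaddle class): the base class owns the format conversions on both sides, so a concrete layer only fills in the MKL-DNN specific computation.

```cpp
// Conceptual sketch: a base class converts data between the Paddle layout
// and the MKL-DNN layout, so a subclass only implements the MKL-DNN hook.
class MkldnnLayerBase {
 public:
  virtual ~MkldnnLayerBase() = default;

  void Forward() {
    ConvertInputToMkldnnFormat();   // handled once in the base class
    RunMkldnnForward();             // implemented by each subclass
    ConvertOutputToPaddleFormat();  // handled once in the base class
  }

 protected:
  virtual void RunMkldnnForward() = 0;

 private:
  void ConvertInputToMkldnnFormat() { /* reorder inputs if needed */ }
  void ConvertOutputToPaddleFormat() { /* reorder outputs if needed */ }
};
```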
@@ -150,7 +177,7 @@ Figure 4. MKLDNNLayer So overall, each subclass does not need to care about output branches when it is implemented.
-
+
Figure 5. Merge Gradients
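A minimal sketch of the gradient-merging idea behind Figure 5 (illustrative code, not the PaddlePaddle implementation): when one layer's output feeds several layers, the gradients coming back from those branches are accumulated into a single buffer before that layer's backward pass runs.

```cpp
#include <vector>

// Sum the gradients returned by every branch into one merged buffer.
void MergeGradients(const std::vector<std::vector<float>>& branch_grads,
                    std::vector<float>* merged) {
  if (branch_grads.empty()) return;
  merged->assign(branch_grads[0].begin(), branch_grads[0].end());
  for (size_t b = 1; b < branch_grads.size(); ++b) {
    for (size_t i = 0; i < merged->size(); ++i) {
      (*merged)[i] += branch_grads[b][i];
    }
  }
}
```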
diff --git a/doc/v2/images/FullyConnected.jpg b/doc/v2/images/FullyConnected.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b2241f401434e527f95ee4e0e541a3f2ff78fd1e Binary files /dev/null and b/doc/v2/images/FullyConnected.jpg differ diff --git a/doc/v2/images/add_security_group.png b/doc/v2/images/add_security_group.png new file mode 100644 index 0000000000000000000000000000000000000000..bd34f46c9b0ada7027fd53e553e7d033255d25fc Binary files /dev/null and b/doc/v2/images/add_security_group.png differ diff --git a/doc/v2/images/bi_lstm.jpg b/doc/v2/images/bi_lstm.jpg new file mode 100644 index 0000000000000000000000000000000000000000..adec1606d64d6e35ffe7e62abfa9a09309b05c84 Binary files /dev/null and b/doc/v2/images/bi_lstm.jpg differ diff --git a/doc/v2/images/checkpointing.png b/doc/v2/images/checkpointing.png new file mode 100644 index 0000000000000000000000000000000000000000..c221e8474f90f37e31416cbb19c9452207a0d14c Binary files /dev/null and b/doc/v2/images/checkpointing.png differ diff --git a/doc/v2/images/create_efs.png b/doc/v2/images/create_efs.png new file mode 100644 index 0000000000000000000000000000000000000000..e5f1526033d1daf401700989af1d25919bcb7675 Binary files /dev/null and b/doc/v2/images/create_efs.png differ diff --git a/doc/v2/images/csr.png b/doc/v2/images/csr.png new file mode 100644 index 0000000000000000000000000000000000000000..3dc10b8de4f6d3f517624956b1694b689405a031 Binary files /dev/null and b/doc/v2/images/csr.png differ diff --git a/doc/v2/images/data_dispatch.png b/doc/v2/images/data_dispatch.png new file mode 100644 index 0000000000000000000000000000000000000000..5bdcc24d6a6d193cb014f8c38b362451fded5e54 Binary files /dev/null and b/doc/v2/images/data_dispatch.png differ diff --git a/doc/v2/images/dataset.graffle b/doc/v2/images/dataset.graffle new file mode 100644 index 0000000000000000000000000000000000000000..c10a423ed16a23229a9ee33d11bfc82bb59646c8 Binary files /dev/null and b/doc/v2/images/dataset.graffle differ diff --git a/doc/v2/images/dataset.png b/doc/v2/images/dataset.png new file mode 100644 index 0000000000000000000000000000000000000000..2fb7f1cce3b6dd21489392557826e95a9f207c34 Binary files /dev/null and b/doc/v2/images/dataset.png differ diff --git a/doc/v2/images/doc_en.png b/doc/v2/images/doc_en.png new file mode 100644 index 0000000000000000000000000000000000000000..ed6b9178fba91a3bdf45ae797a9924f84146fbc8 Binary files /dev/null and b/doc/v2/images/doc_en.png differ diff --git a/doc/v2/images/efs_mount.png b/doc/v2/images/efs_mount.png new file mode 100644 index 0000000000000000000000000000000000000000..0f9e3cab98445707e5e9baa18ddabe15cdf04576 Binary files /dev/null and b/doc/v2/images/efs_mount.png differ diff --git a/doc/v2/images/encoder-decoder-attention-model.png b/doc/v2/images/encoder-decoder-attention-model.png new file mode 100644 index 0000000000000000000000000000000000000000..79f911d4ba12ac0c0d1a936c9df639c302786914 Binary files /dev/null and b/doc/v2/images/encoder-decoder-attention-model.png differ diff --git a/doc/v2/images/engine.png b/doc/v2/images/engine.png new file mode 100644 index 0000000000000000000000000000000000000000..1f5f65c2cc765a514a3ba9e7b7f468e1dc4b0c3b Binary files /dev/null and b/doc/v2/images/engine.png differ diff --git a/doc/v2/images/file_storage.graffle b/doc/v2/images/file_storage.graffle new file mode 100644 index 0000000000000000000000000000000000000000..50a17e70fa255495337c529a3bf12a5c0024a5be Binary files /dev/null and b/doc/v2/images/file_storage.graffle differ diff --git 
a/doc/v2/images/file_storage.png b/doc/v2/images/file_storage.png new file mode 100644 index 0000000000000000000000000000000000000000..fccb4e3e7e738224c7f1584326bd5f351ce799aa Binary files /dev/null and b/doc/v2/images/file_storage.png differ diff --git a/doc/v2/images/glossary_rnn.dot b/doc/v2/images/glossary_rnn.dot new file mode 100644 index 0000000000000000000000000000000000000000..2cd0fb1820c44b0e8e0b869f9d39fcad27efa758 --- /dev/null +++ b/doc/v2/images/glossary_rnn.dot @@ -0,0 +1,42 @@ +digraph G{ + subgraph cluster_timestep0 { + label="recurrent timestep i-1" + bgcolor=lightgray + node [style=filled,color=white] + fc0_0 [label="fc 0"] + fc0_1 [label="fc 1"] + fc0_2 [label="fc 2"] + + fc0_0 -> fc0_1 + fc0_1 -> fc0_2 + } + + subgraph cluster_timestep1 { + label="recurrent timestep i" + node [style=filled]; + fc1_0 [label="fc 0"] + fc1_1 [label="fc 1"] + fc1_2 [label="fc 2"] + color=blue + + fc1_0 -> fc1_1 + fc1_1 -> fc1_2 + } + + subgraph cluster_timestep2 { + label="recurrent timestep i+1" + bgcolor=lightgray + node [style=filled,color=white] + fc2_0 [label="fc 0"] + fc2_1 [label="fc 1"] + fc2_2 [label="fc 2"] + + fc2_0 -> fc2_1 + fc2_1 -> fc2_2 + } + + + fc0_1 -> fc1_1 [style="dotted" constraint=false] + fc1_1 -> fc2_1 [style="dotted" constraint=false] + +} \ No newline at end of file diff --git a/doc/v2/images/glossary_rnn_with_memory.dot b/doc/v2/images/glossary_rnn_with_memory.dot new file mode 100644 index 0000000000000000000000000000000000000000..0f101ec2d8f15aec76c57f328046b6b55cf0c7eb --- /dev/null +++ b/doc/v2/images/glossary_rnn_with_memory.dot @@ -0,0 +1,48 @@ +digraph G{ + subgraph cluster_timestep0 { + label="recurrent timestep i-1" + bgcolor=lightgray + node [style=filled,color=white] + fc0_0 [label="fc 0"] + fc0_1 [label="fc 1"] + fc0_2 [label="fc 2"] + m0 [label="memory"] + fc0_0 -> fc0_1 + fc0_1 -> fc0_2 + fc0_1 -> m0 + m0 -> fc0_1 + } + + subgraph cluster_timestep1 { + label="recurrent timestep i" + node [style=filled]; + fc1_0 [label="fc 0"] + fc1_1 [label="fc 1"] + fc1_2 [label="fc 2"] + m1 [label="memory"] + color=blue + fc1_0 -> fc1_1 + fc1_1 -> fc1_2 + fc1_1 -> m1 + m1 -> fc1_1 + } + + subgraph cluster_timestep2 { + label="recurrent timestep i+1" + bgcolor=lightgray + node [style=filled,color=white] + fc2_0 [label="fc 0"] + fc2_1 [label="fc 1"] + fc2_2 [label="fc 2"] + m2 [label="memory"] + fc2_0 -> fc2_1 + fc2_1 -> fc2_2 + fc2_1 -> m2 + m2 -> fc2_1 + } + + + m0 -> m1 [style="dotted" constraint=false] + m1 -> m2 [style="dotted" constraint=false] + +} \ No newline at end of file diff --git a/doc/v2/images/gradients.png b/doc/v2/images/gradients.png new file mode 100644 index 0000000000000000000000000000000000000000..f031bcf8e4cec14e63075b8b9d2c7bbd9f1b1a3c Binary files /dev/null and b/doc/v2/images/gradients.png differ diff --git a/doc/v2/images/init_lock.graffle b/doc/v2/images/init_lock.graffle new file mode 100644 index 0000000000000000000000000000000000000000..fa9149f21b1311eed48ef72ec55e556559d0fc94 Binary files /dev/null and b/doc/v2/images/init_lock.graffle differ diff --git a/doc/v2/images/init_lock.png b/doc/v2/images/init_lock.png new file mode 100644 index 0000000000000000000000000000000000000000..92404ee6d6c0f9a7727952bae3c869ba338ecd7f Binary files /dev/null and b/doc/v2/images/init_lock.png differ diff --git a/doc/v2/images/k8s-paddle-arch.png b/doc/v2/images/k8s-paddle-arch.png new file mode 100644 index 0000000000000000000000000000000000000000..b3800c4fe81302d35e49f7dbacb9221c4dfa5cde Binary files /dev/null and 
b/doc/v2/images/k8s-paddle-arch.png differ diff --git a/doc/v2/images/layers.png b/doc/v2/images/layers.png new file mode 100644 index 0000000000000000000000000000000000000000..306f79b7a844610915eb8944128f57d2b7a3065a Binary files /dev/null and b/doc/v2/images/layers.png differ diff --git a/doc/v2/images/managed_policy.png b/doc/v2/images/managed_policy.png new file mode 100644 index 0000000000000000000000000000000000000000..c7ecda555b81d7750e9292a9ab72d2f517f76a2a Binary files /dev/null and b/doc/v2/images/managed_policy.png differ diff --git a/doc/v2/images/matrix.png b/doc/v2/images/matrix.png new file mode 100644 index 0000000000000000000000000000000000000000..c33ce9cf0335e47cc8c1253304d0fe179186e6f2 Binary files /dev/null and b/doc/v2/images/matrix.png differ diff --git a/doc/v2/images/nvvp1.png b/doc/v2/images/nvvp1.png new file mode 100644 index 0000000000000000000000000000000000000000..1af23ac3c52929b2b0645d2f9fa4d4c6db1f6e77 Binary files /dev/null and b/doc/v2/images/nvvp1.png differ diff --git a/doc/v2/images/nvvp2.png b/doc/v2/images/nvvp2.png new file mode 100644 index 0000000000000000000000000000000000000000..177c9db708da6863d1075f3e615f5962dbe18b29 Binary files /dev/null and b/doc/v2/images/nvvp2.png differ diff --git a/doc/v2/images/nvvp3.png b/doc/v2/images/nvvp3.png new file mode 100644 index 0000000000000000000000000000000000000000..d8f393667d6569b6f1e61ffccac43fae5888b6db Binary files /dev/null and b/doc/v2/images/nvvp3.png differ diff --git a/doc/v2/images/nvvp4.png b/doc/v2/images/nvvp4.png new file mode 100644 index 0000000000000000000000000000000000000000..51f2f3e183295de6cf8ddaf2b3b8a0862aa35f01 Binary files /dev/null and b/doc/v2/images/nvvp4.png differ diff --git a/doc/v2/images/overview.png b/doc/v2/images/overview.png new file mode 100644 index 0000000000000000000000000000000000000000..8fb7bbb9dd654bf363d701d0c8cd4a557043d188 Binary files /dev/null and b/doc/v2/images/overview.png differ diff --git a/doc/v2/images/paddle-cloud-in-data-center.png b/doc/v2/images/paddle-cloud-in-data-center.png new file mode 100644 index 0000000000000000000000000000000000000000..da5d1a77562480ad1d886f5f21dbd84001d3d508 Binary files /dev/null and b/doc/v2/images/paddle-cloud-in-data-center.png differ diff --git a/doc/v2/images/paddle-etcd.graffle b/doc/v2/images/paddle-etcd.graffle new file mode 100644 index 0000000000000000000000000000000000000000..f973dc9b9dbf72e9bc31e2d32822916cd281f8d9 Binary files /dev/null and b/doc/v2/images/paddle-etcd.graffle differ diff --git a/doc/v2/images/paddle-etcd.png b/doc/v2/images/paddle-etcd.png new file mode 100644 index 0000000000000000000000000000000000000000..57981ceb4b94f0f7d6dfa63f3d28c0402bf9cc31 Binary files /dev/null and b/doc/v2/images/paddle-etcd.png differ diff --git a/doc/v2/images/paddle-model-sharding.graffle b/doc/v2/images/paddle-model-sharding.graffle new file mode 100644 index 0000000000000000000000000000000000000000..fba30f0ca2b47f0d202a432821d95e55aac37ec8 Binary files /dev/null and b/doc/v2/images/paddle-model-sharding.graffle differ diff --git a/doc/v2/images/paddle-model-sharding.png b/doc/v2/images/paddle-model-sharding.png new file mode 100644 index 0000000000000000000000000000000000000000..8c3f6724ef46c6527e63a4cd8cb0b50fe0167124 Binary files /dev/null and b/doc/v2/images/paddle-model-sharding.png differ diff --git a/doc/v2/images/paddle-ps-0.png b/doc/v2/images/paddle-ps-0.png new file mode 100644 index 0000000000000000000000000000000000000000..47ef32806f182cab003da77f1556823b3f6d1721 Binary files /dev/null and 
b/doc/v2/images/paddle-ps-0.png differ diff --git a/doc/v2/images/paddle-ps-1.png b/doc/v2/images/paddle-ps-1.png new file mode 100644 index 0000000000000000000000000000000000000000..f3125db73096c52bac6e7c60e1675552857c0774 Binary files /dev/null and b/doc/v2/images/paddle-ps-1.png differ diff --git a/doc/v2/images/paddle-ps.graffle b/doc/v2/images/paddle-ps.graffle new file mode 100644 index 0000000000000000000000000000000000000000..0e536ffdd91cd696008b4c01bad3cb53edebdc16 Binary files /dev/null and b/doc/v2/images/paddle-ps.graffle differ diff --git a/doc/v2/images/paddle-task-queues.graffle b/doc/v2/images/paddle-task-queues.graffle new file mode 100644 index 0000000000000000000000000000000000000000..4263ed8bfd2ef0e55058828bf23f2fac3595e5fd Binary files /dev/null and b/doc/v2/images/paddle-task-queues.graffle differ diff --git a/doc/v2/images/paddle-task-queues.png b/doc/v2/images/paddle-task-queues.png new file mode 100644 index 0000000000000000000000000000000000000000..5f980266795776752cebd0c346b85c4a75a47780 Binary files /dev/null and b/doc/v2/images/paddle-task-queues.png differ diff --git a/doc/v2/images/paddle-task-states.graffle b/doc/v2/images/paddle-task-states.graffle new file mode 100644 index 0000000000000000000000000000000000000000..cf1a0b9246d9386a949d2dbb8c32fe84f72eea83 Binary files /dev/null and b/doc/v2/images/paddle-task-states.graffle differ diff --git a/doc/v2/images/paddle-task-states.png b/doc/v2/images/paddle-task-states.png new file mode 100644 index 0000000000000000000000000000000000000000..4ae43cb66c071aee9eb90d875e2373b29af9c3e0 Binary files /dev/null and b/doc/v2/images/paddle-task-states.png differ diff --git a/doc/v2/images/ps_cn.png b/doc/v2/images/ps_cn.png new file mode 100644 index 0000000000000000000000000000000000000000..f9525739cc8bc6506adde642aafa0a85ae3ebebc Binary files /dev/null and b/doc/v2/images/ps_cn.png differ diff --git a/doc/v2/images/ps_en.png b/doc/v2/images/ps_en.png new file mode 100644 index 0000000000000000000000000000000000000000..6537d3d56589ca9f19a77a50a970e4b5275e6ce0 Binary files /dev/null and b/doc/v2/images/ps_en.png differ diff --git a/doc/v2/images/pserver_and_trainer.png b/doc/v2/images/pserver_and_trainer.png new file mode 100644 index 0000000000000000000000000000000000000000..f41fe48920590333ad332bb51eb18e03dc251541 Binary files /dev/null and b/doc/v2/images/pserver_and_trainer.png differ diff --git a/doc/v2/images/pserver_init.graffle b/doc/v2/images/pserver_init.graffle new file mode 100644 index 0000000000000000000000000000000000000000..5f3f1f52be8aa7f9049a8fcd6b7c93c8560c1676 Binary files /dev/null and b/doc/v2/images/pserver_init.graffle differ diff --git a/doc/v2/images/pserver_init.png b/doc/v2/images/pserver_init.png new file mode 100644 index 0000000000000000000000000000000000000000..dfe491ff98dd7db1c336093c80964a260df2cd90 Binary files /dev/null and b/doc/v2/images/pserver_init.png differ diff --git a/doc/v2/images/route53_create_recordset.png b/doc/v2/images/route53_create_recordset.png new file mode 100644 index 0000000000000000000000000000000000000000..34e476c7beac30fcdde13fccc4cc8d08b4be3d35 Binary files /dev/null and b/doc/v2/images/route53_create_recordset.png differ diff --git a/doc/v2/images/route53_create_zone.png b/doc/v2/images/route53_create_zone.png new file mode 100644 index 0000000000000000000000000000000000000000..25b7ddb831c5cba97f4b2edddd27da3234d621af Binary files /dev/null and b/doc/v2/images/route53_create_zone.png differ diff --git a/doc/v2/images/sequence_data.png 
b/doc/v2/images/sequence_data.png new file mode 100644 index 0000000000000000000000000000000000000000..6e47a46b8955dfe977e85898fe3c9f33ed28de7e Binary files /dev/null and b/doc/v2/images/sequence_data.png differ diff --git a/doc/v2/images/simple_full_hierarchical_recurrent.dot b/doc/v2/images/simple_full_hierarchical_recurrent.dot new file mode 100644 index 0000000000000000000000000000000000000000..ff278a0323bb2c3ef07bf6f016a3a8df05783581 --- /dev/null +++ b/doc/v2/images/simple_full_hierarchical_recurrent.dot @@ -0,0 +1,30 @@ +digraph G { + rankdir=LR; + + subgraph cluster_t0 { + a [label="4"] + b [label="5"] + c [label="2"] + } + + subgraph cluster_t1 { + d [label="0"] + e [label="9"] + } + + subgraph cluster_t2 { + f [label="8"] + g [label="1"] + h [label="4"] + } + + a -> b; + b -> c; + c -> d [constraint=false]; + + d -> e; + e -> f [constraint=false]; + + f -> g; + g -> h; +} \ No newline at end of file diff --git a/doc/v2/images/simple_full_recurrent.dot b/doc/v2/images/simple_full_recurrent.dot new file mode 100644 index 0000000000000000000000000000000000000000..cee281fbac993afbd0cc3416570f95965cdf0a59 --- /dev/null +++ b/doc/v2/images/simple_full_recurrent.dot @@ -0,0 +1,19 @@ +digraph G { + rankdir=LR; + a [label="4"] + b [label="5"] + c [label="2"] + d [label="0"] + e [label="9"] + f [label="8"] + g [label="1"] + h [label="4"] + + a -> b; + b -> c; + c -> d; + d -> e; + e -> f; + f -> g; + g -> h; +} \ No newline at end of file diff --git a/doc/v2/images/submit-job.graffle b/doc/v2/images/submit-job.graffle new file mode 100644 index 0000000000000000000000000000000000000000..677cdfb6d9a32168bf71729eb841fa1ca0dd31d6 Binary files /dev/null and b/doc/v2/images/submit-job.graffle differ diff --git a/doc/v2/images/submit-job.png b/doc/v2/images/submit-job.png new file mode 100644 index 0000000000000000000000000000000000000000..3046a460a7ba708079e88a560debaa215a694680 Binary files /dev/null and b/doc/v2/images/submit-job.png differ diff --git a/doc/v2/images/trainer.graffle b/doc/v2/images/trainer.graffle new file mode 100644 index 0000000000000000000000000000000000000000..43415ed8cf61a5acfa34f8e56b9577f338dbf254 Binary files /dev/null and b/doc/v2/images/trainer.graffle differ diff --git a/doc/v2/images/trainer.png b/doc/v2/images/trainer.png new file mode 100644 index 0000000000000000000000000000000000000000..6537d3d56589ca9f19a77a50a970e4b5275e6ce0 Binary files /dev/null and b/doc/v2/images/trainer.png differ diff --git a/doc/v2/images/trainer_cn.png b/doc/v2/images/trainer_cn.png new file mode 100644 index 0000000000000000000000000000000000000000..f9525739cc8bc6506adde642aafa0a85ae3ebebc Binary files /dev/null and b/doc/v2/images/trainer_cn.png differ diff --git a/doc/v2/images/worker_security_group.png b/doc/v2/images/worker_security_group.png new file mode 100644 index 0000000000000000000000000000000000000000..57eb0265a34ad4223b69600d2a3dd355482e0bf5 Binary files /dev/null and b/doc/v2/images/worker_security_group.png differ diff --git a/doc/v2/images/workflow_of_CAPI.png b/doc/v2/images/workflow_of_CAPI.png new file mode 100644 index 0000000000000000000000000000000000000000..a4399ade048b3fe10d2d9c714bc34333ca068edb Binary files /dev/null and b/doc/v2/images/workflow_of_CAPI.png differ diff --git a/paddle/fluid/framework/block_desc.cc b/paddle/fluid/framework/block_desc.cc index 1b6f656a006489485a55b5c13b5e2de93c3da0ed..fd409ed4c0f7a504686765909e9c71692aab8824 100644 --- a/paddle/fluid/framework/block_desc.cc +++ b/paddle/fluid/framework/block_desc.cc @@ -134,6 +134,11 @@ OpDesc 
*BlockDesc::PrependOp() { return ops_.front().get(); } +void BlockDesc::PrependAllocatedOp(std::unique_ptr &&op_desc) { + need_update_ = true; + ops_.emplace_front(std::move(op_desc)); +} + OpDesc *BlockDesc::InsertOp(size_t index) { need_update_ = true; auto it = ops_.begin() + index; diff --git a/paddle/fluid/framework/block_desc.h b/paddle/fluid/framework/block_desc.h index eef19c4f09c60b9df18f154c85c421f5bff9413f..600601669c5d56a3ffc2fb9c804ffad5fde58f0b 100644 --- a/paddle/fluid/framework/block_desc.h +++ b/paddle/fluid/framework/block_desc.h @@ -88,6 +88,8 @@ class BlockDesc { OpDesc *PrependOp(); + void PrependAllocatedOp(std::unique_ptr &&op_desc); + OpDesc *InsertOp(size_t index); /* diff --git a/paddle/fluid/framework/details/broadcast_op_handle.cc b/paddle/fluid/framework/details/broadcast_op_handle.cc index 2afa47c81bead6fb104f49886713bf75dc1b4dc0..d5ca061944f33939cea59a5275e691b1966194fa 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle.cc +++ b/paddle/fluid/framework/details/broadcast_op_handle.cc @@ -38,9 +38,7 @@ void BroadcastOpHandle::RunImpl() { out_var_handles.size(), places_.size(), "The number of output should equal to the number of places."); - // Wait input done, this Wait is asynchronous operation platform::Place - // &in_place; - WaitInputVarGenerated(*in_var_handle); + WaitInputVarGenerated(); std::vector var_scopes; for (auto *s : local_scopes_) { @@ -50,29 +48,9 @@ void BroadcastOpHandle::RunImpl() { auto *in_var = var_scopes.at(in_var_handle->scope_idx_)->FindVar(in_var_handle->name_); PADDLE_ENFORCE_NOT_NULL(in_var); - Tensor &in_tensor = VariableVisitor::GetMutableTensor(in_var); - // NOTE: The tensors' Place of input and output must be all on GPU or all on - // CPU. - for (auto *out_var_handle : out_var_handles) { - if (out_var_handle->IsTheSameVar(*in_var_handle)) { - continue; - } - auto t_out_p = out_var_handle->place_; - auto *out_var = var_scopes.at(out_var_handle->scope_idx_) - ->FindVar(out_var_handle->name_); - PADDLE_ENFORCE_NOT_NULL(out_var); - if (platform::is_gpu_place(in_tensor.place())) { - PADDLE_ENFORCE(platform::is_gpu_place(t_out_p), - "Places of input and output must be all on GPU."); - } else { - t_out_p = platform::CPUPlace(); - } - VariableVisitor::ShareDimsAndLoD(*in_var, out_var); - VariableVisitor::GetMutableTensor(out_var).mutable_data(t_out_p, - in_tensor.type()); - } + InitOutputValue(*in_var_handle, out_var_handles); if (platform::is_cpu_place(in_tensor.place())) { for (auto *out_var_handle : out_var_handles) { @@ -147,11 +125,37 @@ void BroadcastOpHandle::RunImpl() { } } -void BroadcastOpHandle::WaitInputVarGenerated(const VarHandle &in_var) { - if (in_var.generated_op_) { - for (auto &pair : dev_ctxes_) { - in_var.generated_op_->Wait(pair.second); +void BroadcastOpHandle::InitOutputValue( + const VarHandle &in_var_handle, + const std::vector &out_var_handles) const { + std::vector var_scopes; + for (auto *s : local_scopes_) { + var_scopes.emplace_back(s->FindVar(kLocalExecScopeName)->Get()); + } + auto *in_var = + var_scopes.at(in_var_handle.scope_idx_)->FindVar(in_var_handle.name_); + + Tensor &in_tensor = VariableVisitor::GetMutableTensor(in_var); + + // NOTE: The tensors' Place of input and output must be all on GPU or all on + // CPU. 
+ for (auto *out_var_handle : out_var_handles) { + if (out_var_handle->IsTheSameVar(in_var_handle)) { + continue; } + auto t_out_p = out_var_handle->place_; + auto *out_var = var_scopes.at(out_var_handle->scope_idx_) + ->FindVar(out_var_handle->name_); + PADDLE_ENFORCE_NOT_NULL(out_var); + if (is_gpu_place(in_tensor.place())) { + PADDLE_ENFORCE(platform::is_gpu_place(t_out_p), + "Places of input and output must be all on GPU."); + } else { + t_out_p = platform::CPUPlace(); + } + VariableVisitor::ShareDimsAndLoD(*in_var, out_var); + VariableVisitor::GetMutableTensor(out_var).mutable_data(t_out_p, + in_tensor.type()); } } diff --git a/paddle/fluid/framework/details/broadcast_op_handle.h b/paddle/fluid/framework/details/broadcast_op_handle.h index 984a95008c0393eff01c2d419cc98949aed14980..629aa00cb817c4b1446e7b750ca62a7c6b1db670 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle.h +++ b/paddle/fluid/framework/details/broadcast_op_handle.h @@ -57,7 +57,6 @@ struct BroadcastOpHandle : public OpHandleBase { protected: void RunImpl() override; - void WaitInputVarGenerated(const VarHandle &in_var); private: const std::vector &local_scopes_; @@ -65,6 +64,9 @@ struct BroadcastOpHandle : public OpHandleBase { #ifdef PADDLE_WITH_CUDA const platform::NCCLContextMap *nccl_ctxs_; #endif + + void InitOutputValue(const VarHandle &in_var_handle, + const std::vector &out_var_handles) const; }; } // namespace details } // namespace framework diff --git a/paddle/fluid/framework/details/computation_op_handle.cc b/paddle/fluid/framework/details/computation_op_handle.cc index 7ff0efe09387b7e5d7cfe0dfe5e129ca9914d90b..df05bb06333d6b964f2f5434c3d43214e5d2cb7a 100644 --- a/paddle/fluid/framework/details/computation_op_handle.cc +++ b/paddle/fluid/framework/details/computation_op_handle.cc @@ -26,20 +26,20 @@ ComputationOpHandle::ComputationOpHandle(const OpDesc &op_desc, Scope *scope, place_(place) {} void ComputationOpHandle::RunImpl() { - auto *cur_ctx = dev_ctxes_[place_]; - for (auto *in : inputs_) { - bool need_wait = in->generated_op_ && - in->generated_op_->DeviceContext(place_) != cur_ctx; - if (need_wait) { - in->generated_op_->Wait(cur_ctx); - } - } + WaitInputVarGenerated(place_); this->RunAndRecordEvent([this] { op_->Run(*scope_->FindVar(kLocalExecScopeName)->Get(), place_); }); } +bool ComputationOpHandle::NeedWait(VarHandleBase *in_var) { + bool need_wait = + in_var && in_var->generated_op_ && + in_var->generated_op_->DeviceContext(place_) != dev_ctxes_[place_]; + return need_wait; +} + std::string ComputationOpHandle::Name() const { return op_->Type(); } } // namespace details } // namespace framework diff --git a/paddle/fluid/framework/details/computation_op_handle.h b/paddle/fluid/framework/details/computation_op_handle.h index c363b973d9abbae6bea76c2458fbe82a37a342ca..36e6f1bf59a7646e1dff6c4844f2a36a5caf363a 100644 --- a/paddle/fluid/framework/details/computation_op_handle.h +++ b/paddle/fluid/framework/details/computation_op_handle.h @@ -36,6 +36,8 @@ struct ComputationOpHandle : public OpHandleBase { protected: void RunImpl() override; + virtual bool NeedWait(VarHandleBase *in_var); + private: std::unique_ptr op_; Scope *scope_; diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc index a3cae8c64cdff8594c8971b0458c443f54375f11..b1c9dd0d15223f7d1bf6ea44144589f1de927e3e 100644 --- a/paddle/fluid/framework/details/fetch_op_handle.cc +++ b/paddle/fluid/framework/details/fetch_op_handle.cc @@ -31,7 +31,7 @@ 
FetchOpHandle::~FetchOpHandle() { } } -void FetchOpHandle::Wait(platform::DeviceContext *waited_dev) { +void FetchOpHandle::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) { PADDLE_THROW("Nobody should wait FetchOp. Unexpceted Error"); } @@ -45,14 +45,8 @@ void FetchOpHandle::WaitAndMergeCPUTensors() const { } void FetchOpHandle::RunImpl() { - auto cpu_ctx = - platform::DeviceContextPool::Instance().Get(platform::CPUPlace()); - for (auto *input : inputs_) { - auto *var = static_cast(input); - if (var->generated_op_) { - var->generated_op_->Wait(cpu_ctx); - } - } + WaitInputVarGenerated(platform::CPUPlace()); + tensors_.resize(inputs_.size()); auto *var_handle = static_cast(inputs_[0]); auto &var_name = var_handle->name_; @@ -79,6 +73,15 @@ void FetchOpHandle::RunImpl() { this->WaitAndMergeCPUTensors(); } +void FetchOpHandle::WaitInputVarGenerated(const platform::Place &place) { + auto cpu_ctx = platform::DeviceContextPool::Instance().Get(place); + for (auto *input : inputs_) { + if (input->generated_op_) { + input->generated_op_->RecordWaitEventOnCtx(cpu_ctx); + } + } +} + std::string FetchOpHandle::Name() const { return "Fetch"; } } // namespace details diff --git a/paddle/fluid/framework/details/fetch_op_handle.h b/paddle/fluid/framework/details/fetch_op_handle.h index b49f3df338dc11310a4a0c27c8aaae3602373fcc..e696a7a9ce562e7f1b7fe6633623cb940810fbe1 100644 --- a/paddle/fluid/framework/details/fetch_op_handle.h +++ b/paddle/fluid/framework/details/fetch_op_handle.h @@ -33,7 +33,7 @@ struct FetchOpHandle : public OpHandleBase { ~FetchOpHandle(); - void Wait(platform::DeviceContext *waited_dev) override; + void RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) override; void WaitAndMergeCPUTensors() const; @@ -42,6 +42,8 @@ struct FetchOpHandle : public OpHandleBase { protected: void RunImpl() override; + virtual void WaitInputVarGenerated(const platform::Place &place); + private: FeedFetchList *data_; size_t offset_; diff --git a/paddle/fluid/framework/details/gather_op_handle.cc b/paddle/fluid/framework/details/gather_op_handle.cc index 3dfc972a44c62bd2adfc1331f29ffb1cca537652..2be02304566cf5dbe348fa01fc4171990eafd158 100644 --- a/paddle/fluid/framework/details/gather_op_handle.cc +++ b/paddle/fluid/framework/details/gather_op_handle.cc @@ -55,7 +55,7 @@ void GatherOpHandle::RunImpl() { "Currently, gather_op only can gather SelectedRows."); // Wait input done, this Wait is asynchronous operation - WaitInputVarGenerated(in_var_handles); + WaitInputVarGenerated(); auto &pre_in_value = pre_in_var->Get(); std::vector out_rows; @@ -111,17 +111,6 @@ void GatherOpHandle::RunImpl() { }); } -void GatherOpHandle::WaitInputVarGenerated( - const std::vector &in_var_handles) { - for (auto *in : in_var_handles) { - if (in->generated_op_) { - for (auto pair : dev_ctxes_) { - in->generated_op_->Wait(pair.second); - } - } - } -} - std::string GatherOpHandle::Name() const { return "gather"; } } // namespace details } // namespace framework diff --git a/paddle/fluid/framework/details/gather_op_handle.h b/paddle/fluid/framework/details/gather_op_handle.h index c394dd7a14b07cb956aa1aedfc0df4fa25744dd7..d11ef8556aa8840949ca8dc7aa176413f70b9f22 100644 --- a/paddle/fluid/framework/details/gather_op_handle.h +++ b/paddle/fluid/framework/details/gather_op_handle.h @@ -39,7 +39,6 @@ struct GatherOpHandle : public OpHandleBase { protected: void RunImpl() override; - void WaitInputVarGenerated(const std::vector &in_var_handles); private: const std::vector &local_scopes_; diff --git 
a/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc b/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc index 16aa5d067ab7a222af8fbb6ca8ec18222ecd799b..95aa599cd3e403e9cc66b2b5ad35d0d214d1ab5b 100644 --- a/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc +++ b/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc @@ -34,12 +34,7 @@ void NCCLAllReduceOpHandle::RunImpl() { return; // No need to all reduce when GPU count = 1; } else { // Wait input done - for (auto *in : inputs_) { - auto &p = static_cast(in)->place_; - if (in->generated_op_) { - in->generated_op_->Wait(dev_ctxes_[p]); - } - } + WaitInputVarGenerated(); auto &var_name = static_cast(this->inputs_[0])->name_; int dtype = -1; diff --git a/paddle/fluid/framework/details/op_handle_base.cc b/paddle/fluid/framework/details/op_handle_base.cc index 534d77860f87be08c8834efd373d90eb199ed6a2..6b064650b4f09737836bda4a43fa421720077929 100644 --- a/paddle/fluid/framework/details/op_handle_base.cc +++ b/paddle/fluid/framework/details/op_handle_base.cc @@ -56,15 +56,15 @@ void OpHandleBase::Run(bool use_event) { RunImpl(); } -void OpHandleBase::Wait(platform::DeviceContext *waited_dev) { +void OpHandleBase::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) { #ifdef PADDLE_WITH_CUDA - if (platform::is_cpu_place(waited_dev->GetPlace()) || events_.empty()) { + if (platform::is_cpu_place(waited_ctx->GetPlace()) || events_.empty()) { for (auto &dev_ctx : dev_ctxes_) { dev_ctx.second->Wait(); } } else { auto stream = - static_cast(waited_dev)->stream(); + static_cast(waited_ctx)->stream(); for (auto &ev : events_) { PADDLE_ENFORCE(cudaStreamWaitEvent(stream, ev.second, 0)); } @@ -86,6 +86,28 @@ void OpHandleBase::AddOutput(VarHandleBase *out) { out->generated_op_ = this; } +void OpHandleBase::WaitInputVarGenerated() { + for (auto in_var : inputs_) { + if (NeedWait(in_var)) { + for (auto &pair : dev_ctxes_) { + in_var->generated_op_->RecordWaitEventOnCtx(pair.second); + } + } + } +} + +void OpHandleBase::WaitInputVarGenerated(const platform::Place &place) { + for (auto *in : inputs_) { + if (NeedWait(in)) { + in->generated_op_->RecordWaitEventOnCtx(dev_ctxes_[place]); + } + } +} + +bool OpHandleBase::NeedWait(VarHandleBase *in_var) { + return in_var && in_var->generated_op_; +} + void OpHandleBase::RunAndRecordEvent(const std::function &callback) { #ifdef PADDLE_WITH_CUDA if (!events_.empty()) { // Use event diff --git a/paddle/fluid/framework/details/op_handle_base.h b/paddle/fluid/framework/details/op_handle_base.h index 00f213f3ed294adcce7c540e3ff346de8e2be7fb..fe1735d05dde5f09d5c72c68e5002d16f0083eb5 100644 --- a/paddle/fluid/framework/details/op_handle_base.h +++ b/paddle/fluid/framework/details/op_handle_base.h @@ -38,12 +38,24 @@ class OpHandleBase { void Run(bool use_event); - virtual void Wait(platform::DeviceContext *waited_dev); + virtual void RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx); void AddInput(VarHandleBase *in); void AddOutput(VarHandleBase *out); + // This method adds the wait events of all the input on all the device + // context. + // NODE: This Wait is asynchronous operation. + virtual void WaitInputVarGenerated(); + + // This method adds the wait events of all the input on the specified device + // context. + // NODE: This Wait is asynchronous operation. 
+ virtual void WaitInputVarGenerated(const platform::Place &place); + + virtual bool NeedWait(VarHandleBase *in_var); + // If the Op involves data transfer of multiple devices that // will likely block other computations. virtual bool IsMultiDeviceTransfer() { return false; } diff --git a/paddle/fluid/framework/details/reduce_op_handle.cc b/paddle/fluid/framework/details/reduce_op_handle.cc index 1bb04c1dfca107f4b7ce4c599e9aa132de3e5985..7160e346dad0615e2fd32b70c096880af0359e1a 100644 --- a/paddle/fluid/framework/details/reduce_op_handle.cc +++ b/paddle/fluid/framework/details/reduce_op_handle.cc @@ -51,7 +51,7 @@ void ReduceOpHandle::RunImpl() { PADDLE_ENFORCE_NOT_NULL(pre_in_var); // Wait input done, this Wait is asynchronous operation - WaitInputVarGenerated(in_var_handles); + WaitInputVarGenerated(); // NOTE: The Places of all input tensor must be all on CPU or all on GPU. std::vector in_places; // used to get dev_ctx @@ -80,19 +80,21 @@ void ReduceOpHandle::RunImpl() { } if (pre_in_var->IsType()) { - std::vector in_selected_rows = - GetInputValues(in_var_handles, var_scopes); - - GatherSelectedRows(in_selected_rows, in_places, dev_ctxes_, t_out_p, - out_var->GetMutable()); + this->RunAndRecordEvent([&] { + std::vector in_selected_rows = + GetInputValues(in_var_handles, var_scopes); + GatherSelectedRows(in_selected_rows, in_places, dev_ctxes_, t_out_p, + out_var->GetMutable()); + }); } else { std::vector lod_tensors = GetInputValues(in_var_handles, var_scopes); - if (paddle::platform::is_cpu_place(lod_tensors[0]->place())) { - ReduceLoDTensor func(lod_tensors, - out_var->GetMutable()); - VisitDataType(ToDataType(lod_tensors[0]->type()), func); + this->RunAndRecordEvent([&] { + ReduceLoDTensor func(lod_tensors, + out_var->GetMutable()); + VisitDataType(ToDataType(lod_tensors[0]->type()), func); + }); } else if (paddle::platform::is_gpu_place(lod_tensors[0]->place())) { #ifdef PADDLE_WITH_CUDA auto pre_in = pre_in_var->Get(); @@ -157,17 +159,6 @@ std::vector ReduceOpHandle::GetInputValues( return in_selected_rows; } -void ReduceOpHandle::WaitInputVarGenerated( - const std::vector &in_var_handles) { - for (auto *in : in_var_handles) { - if (in->generated_op_) { - for (auto pair : dev_ctxes_) { - in->generated_op_->Wait(pair.second); - } - } - } -} - std::string ReduceOpHandle::Name() const { return "reduce"; } } // namespace details } // namespace framework diff --git a/paddle/fluid/framework/details/reduce_op_handle.h b/paddle/fluid/framework/details/reduce_op_handle.h index 59731d348d17755fbd8bf3b6fa29b32bdefaf71e..c652a2f4eb0f9b73cb19ebbd9d0809210b280ad3 100644 --- a/paddle/fluid/framework/details/reduce_op_handle.h +++ b/paddle/fluid/framework/details/reduce_op_handle.h @@ -60,8 +60,6 @@ struct ReduceOpHandle : public OpHandleBase { protected: void RunImpl() override; - void WaitInputVarGenerated(const std::vector &in_var_handles); - template std::vector GetInputValues( const std::vector &in_var_handles, diff --git a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc index 1cd3113030086104e7fc5c4ba3364a5ff027632b..d9c387e79dc71288e7330597fed57171d447f31b 100644 --- a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc +++ b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc @@ -29,6 +29,7 @@ ScaleLossGradOpHandle::ScaleLossGradOpHandle(size_t num_dev, Scope *scope, ScaleLossGradOpHandle::~ScaleLossGradOpHandle() {} void ScaleLossGradOpHandle::RunImpl() { + // Doesn't wait any event std::string 
var_name = static_cast(this->outputs_[0])->name_; auto &local_scope = *scope_->FindVar(kLocalExecScopeName)->Get(); diff --git a/paddle/fluid/framework/details/send_op_handle.cc b/paddle/fluid/framework/details/send_op_handle.cc index bd97c5260dbba935e422793e0aa6aac8b6875627..7109659dd7001f91e7674ac7bebbe3a59794cfc0 100644 --- a/paddle/fluid/framework/details/send_op_handle.cc +++ b/paddle/fluid/framework/details/send_op_handle.cc @@ -26,6 +26,7 @@ SendOpHandle::SendOpHandle(const framework::OpDesc &op_desc, place_(place) {} void SendOpHandle::RunImpl() { + // TODO(wuyi): need further analysis whether wait VarDummyHandle. // Wait input done for (auto *in : inputs_) { auto &p = static_cast(in)->place_; @@ -33,7 +34,7 @@ void SendOpHandle::RunImpl() { continue; } if (in->generated_op_) { - in->generated_op_->Wait(dev_ctxes_[p]); + in->generated_op_->RecordWaitEventOnCtx(dev_ctxes_[p]); } } auto &tmp_scope = local_scope_->FindVar(kLocalExecScopeName)->Get(); diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index 5e6ed5cb7cdc534332d402380458f39aecd841b8..e90523ebe8dc720d10034e3af9b0e51bb7a2fde9 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -14,8 +14,6 @@ #include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h" -#include "paddle/fluid/framework/details/fetch_op_handle.h" - namespace paddle { namespace framework { namespace details { @@ -45,73 +43,33 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( // Should revisit it if overlapping is available. std::unordered_set delayed_ops; - auto InsertPendingVar = [&pending_vars, &ready_vars](VarHandleBase &var) { - pending_vars.insert(&var); - if (var.generated_op_ == nullptr) { - ready_vars.Push(&var); - } - }; - - auto InsertPendingOp = [&pending_ops](OpHandleBase &op_instance) { - pending_ops.insert({&op_instance, op_instance.Inputs().size()}); - }; - // Transform SSAGraph to pending_ops & pending_vars for (auto &var_map : graph_->vars_) { for (auto &name_pair : var_map) { for (auto &version_pair : name_pair.second) { - InsertPendingVar(*version_pair); + InsertPendingVar(&pending_vars, &ready_vars, version_pair.get()); } } } for (auto &var : graph_->dep_vars_) { - InsertPendingVar(*var); + InsertPendingVar(&pending_vars, &ready_vars, var.get()); } for (auto &op : graph_->ops_) { if (op->Inputs().empty()) { // Special case, Op has no input. ready_ops.insert(op.get()); } else { - InsertPendingOp(*op); + InsertPendingOp(&pending_ops, op.get()); } } // Step 2. 
Insert FetchOps std::vector> fetch_ops; - FeedFetchList fetch_data(fetch_tensors.size()); - - std::unordered_map> fetched_vars; - - for (auto &fetch_var_name : fetch_tensors) { - for (auto &var_map : graph_->vars_) { - auto it = var_map.find(fetch_var_name); - if (it != var_map.end()) { - fetched_vars[fetch_var_name].push_back(it->second.rbegin()->get()); - } - } - } - std::unordered_set> fetch_dependencies; - for (size_t i = 0; i < fetch_tensors.size(); ++i) { - auto &var_name = fetch_tensors[i]; - auto &vars = fetched_vars.at(var_name); - auto *op = new FetchOpHandle(&fetch_data, i, &local_scopes_); - fetch_ops.emplace_back(op); - - for (auto &p : places_) { - op->SetDeviceContext(p, fetch_ctxs_.Get(p)); - } - - for (auto *var : vars) { - op->AddInput(var); - } + FeedFetchList fetch_data(fetch_tensors.size()); - auto *fetch_dummy = new DummyVarHandle(); - op->AddOutput(fetch_dummy); - fetch_dependencies.emplace(fetch_dummy); - InsertPendingVar(*fetch_dummy); - InsertPendingOp(*op); - } + InsertFetchOps(fetch_tensors, &fetch_ops, &fetch_dependencies, &pending_ops, + &pending_vars, &ready_vars, &fetch_data); auto run_all_ops = [&](std::unordered_set &set) { for (auto *op : set) { @@ -174,6 +132,60 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( return fetch_data; } +void ThreadedSSAGraphExecutor::InsertFetchOps( + const std::vector &fetch_tensors, + std::vector> *fetch_ops, + std::unordered_set> *fetch_dependencies, + std::unordered_map *pending_ops, + std::unordered_set *pending_vars, + BlockingQueue *ready_vars, FeedFetchList *fetch_data) { + std::unordered_map> fetched_vars; + + for (auto &fetch_var_name : fetch_tensors) { + for (auto &var_map : graph_->vars_) { + auto it = var_map.find(fetch_var_name); + if (it != var_map.end()) { + fetched_vars[fetch_var_name].push_back(it->second.rbegin()->get()); + } + } + } + + for (size_t i = 0; i < fetch_tensors.size(); ++i) { + auto &var_name = fetch_tensors[i]; + auto &vars = fetched_vars.at(var_name); + auto *op = new FetchOpHandle(fetch_data, i, &local_scopes_); + fetch_ops->emplace_back(op); + + for (auto &p : places_) { + op->SetDeviceContext(p, fetch_ctxs_.Get(p)); + } + + for (auto *var : vars) { + op->AddInput(var); + } + + auto *fetch_dummy = new DummyVarHandle(); + op->AddOutput(fetch_dummy); + fetch_dependencies->emplace(fetch_dummy); + this->InsertPendingVar(pending_vars, ready_vars, fetch_dummy); + this->InsertPendingOp(pending_ops, op); + } +} + +void ThreadedSSAGraphExecutor::InsertPendingOp( + std::unordered_map *pending_ops, + OpHandleBase *op_instance) const { + pending_ops->insert({op_instance, op_instance->Inputs().size()}); +} + +void ThreadedSSAGraphExecutor::InsertPendingVar( + std::unordered_set *pending_vars, + BlockingQueue *ready_vars, VarHandleBase *var) const { + pending_vars->insert(var); + if (var->generated_op_ == nullptr) { + ready_vars->Push(var); + } +} void ThreadedSSAGraphExecutor::RunOp( BlockingQueue *ready_var_q, details::OpHandleBase *op) { auto op_run = [ready_var_q, op, this] { diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h index d089b79d91327e38408439a8019ec5189ff6d189..f18a88526b3238220fc56fd07299643d32c8b58b 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h @@ -23,6 +23,7 @@ #include #include "ThreadPool.h" // ThreadPool in thrird party #include "paddle/fluid/framework/blocking_queue.h" +#include 
"paddle/fluid/framework/details/fetch_op_handle.h" #include "paddle/fluid/framework/details/ssa_graph_executor.h" namespace paddle { @@ -58,6 +59,21 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor { std::unique_ptr exception_; std::atomic running_ops_; bool allow_op_delay_; + + void InsertPendingOp(std::unordered_map *pending_ops, + OpHandleBase *op_instance) const; + + void InsertPendingVar(std::unordered_set *pending_vars, + BlockingQueue *ready_vars, + VarHandleBase *var) const; + + void InsertFetchOps( + const std::vector &fetch_tensors, + std::vector> *fetch_ops, + std::unordered_set> *fetch_dependencies, + std::unordered_map *pending_ops, + std::unordered_set *pending_vars, + BlockingQueue *ready_vars, FeedFetchList *fetch_data); }; } // namespace details diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt index 50f635a41a99b2ae292d13afde5637a3bf4e6f8c..b98aeed8a0aaabfd39560fad3c074a6668b4f024 100644 --- a/paddle/fluid/inference/CMakeLists.txt +++ b/paddle/fluid/inference/CMakeLists.txt @@ -20,7 +20,9 @@ if(NOT APPLE) endif() if(WITH_TESTING) + # both tests/book and analysis depends the models that generated by python/paddle/fluid/tests/book add_subdirectory(tests/book) + add_subdirectory(analysis) endif() if (TENSORRT_FOUND) diff --git a/paddle/fluid/inference/analysis/CMakeLists.txt b/paddle/fluid/inference/analysis/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..de7becae4d25d48111fea8d2123bc85aef70230a --- /dev/null +++ b/paddle/fluid/inference/analysis/CMakeLists.txt @@ -0,0 +1 @@ +cc_library(dot SRCS dot.cc) diff --git a/paddle/fluid/inference/analysis/dot.cc b/paddle/fluid/inference/analysis/dot.cc new file mode 100644 index 0000000000000000000000000000000000000000..d5471ffcb594a6915e9e65c0fee5adc5f5bdf40c --- /dev/null +++ b/paddle/fluid/inference/analysis/dot.cc @@ -0,0 +1,23 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/analysis/dot.h" + +namespace paddle { +namespace inference { +namespace analysis { +size_t Dot::counter = 0; +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/dot.h b/paddle/fluid/inference/analysis/dot.h new file mode 100644 index 0000000000000000000000000000000000000000..3359987874f2d74d7e4646baa38790431c4b28fd --- /dev/null +++ b/paddle/fluid/inference/analysis/dot.h @@ -0,0 +1,154 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* + * This file implements some helper classes and methods for DOT programming + * support. It will give a visualization of the graph and that helps to debug + * the logics of each Pass. + */ +#pragma once + +#include +#include +#include +#include + +namespace paddle { +namespace inference { +namespace analysis { + +/* + * A Dot template that helps to build a DOT graph definition. + */ +class Dot { + public: + static size_t counter; + + struct Attr { + std::string key; + std::string value; + + Attr(const std::string& key, const std::string& value) + : key(key), value(value) {} + + std::string repr() const { + std::stringstream ss; + ss << key << "=" << '"' << value << '"'; + return ss.str(); + } + }; + + struct Node { + std::string name; + std::vector attrs; + + Node(const std::string& name, const std::vector& attrs) + : name(name), + attrs(attrs), + id_("node_" + std::to_string(Dot::counter++)) {} + + std::string id() const { return id_; } + + std::string repr() const { + std::stringstream ss; + CHECK(!name.empty()); + ss << id_; + for (size_t i = 0; i < attrs.size(); i++) { + if (i == 0) { + ss << "[label=" << '"' << name << '"' << " "; + } + ss << attrs[i].repr(); + ss << ((i < attrs.size() - 1) ? " " : "]"); + } + return ss.str(); + } + + private: + std::string id_; + }; + + struct Edge { + std::string source; + std::string target; + std::vector attrs; + + Edge(const std::string& source, const std::string& target, + const std::vector& attrs) + : source(source), target(target), attrs(attrs) {} + + std::string repr() const { + std::stringstream ss; + CHECK(!source.empty()); + CHECK(!target.empty()); + ss << source << "->" << target; + for (size_t i = 0; i < attrs.size(); i++) { + if (i == 0) { + ss << "["; + } + ss << attrs[i].repr(); + ss << ((i < attrs.size() - 1) ? " " : "]"); + } + return ss.str(); + } + }; + + Dot() = default; + + explicit Dot(const std::vector& attrs) : attrs_(attrs) {} + + void AddNode(const std::string& name, const std::vector& attrs) { + CHECK(!nodes_.count(name)) << "duplicate Node '" << name << "'"; + nodes_.emplace(name, Node{name, attrs}); + } + + void AddEdge(const std::string& source, const std::string& target, + const std::vector& attrs) { + CHECK(!source.empty()); + CHECK(!target.empty()); + auto sid = nodes_.at(source).id(); + auto tid = nodes_.at(target).id(); + edges_.emplace_back(sid, tid, attrs); + } + + // Compile to DOT language codes. 
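+  // (Editor's illustration, not part of the original patch.) Assuming two
+  // nodes "A" and "B" were added, each with a single "shape" attribute, plus
+  // one edge between them, Build() would emit DOT text roughly like:
+  //
+  //   digraph G {
+  //     node_0[label="A" shape="box"]
+  //     node_1[label="B" shape="ellipse"]
+  //     node_0->node_1
+  //   } // end G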
+ std::string Build() const { + std::stringstream ss; + const std::string indent = " "; + ss << "digraph G {" << '\n'; + + // Add graph attrs + for (const auto& attr : attrs_) { + ss << indent << attr.repr() << '\n'; + } + // add nodes + for (auto& item : nodes_) { + ss << indent << item.second.repr() << '\n'; + } + // add edges + for (auto& edge : edges_) { + ss << indent << edge.repr() << '\n'; + } + ss << "} // end G"; + return ss.str(); + } + + private: + std::unordered_map nodes_; + std::vector edges_; + std::vector attrs_; +}; + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/engine.h b/paddle/fluid/inference/engine.h index 6b0ac92fa908427a89a6a5fa74dacc3b24abd1c3..de0375551e16ec53b90414c7446234fda98bf706 100644 --- a/paddle/fluid/inference/engine.h +++ b/paddle/fluid/inference/engine.h @@ -19,6 +19,9 @@ limitations under the License. */ namespace paddle { namespace inference { +struct Buffer; +enum class DeviceType { UNK = -1, CPU, GPU }; + /* * EngineBase is the base class of all inference engines. An inference engine * takes a paddle program as input, and outputs the result in fluid Tensor @@ -45,8 +48,20 @@ class EngineBase { // Execute the engine, that will run the inference network. virtual void Execute(int batch_size) = 0; + // Return the IO buffer that allocated in engine. One can read/write directly + // on the buffer. If the buffer's buffer is nullptr, one can also allocate + // memory and maintain it outside the engine. + virtual Buffer& buffer(const std::string& name) = 0; + virtual ~EngineBase() {} }; // class EngineBase +struct Buffer { + void* buffer{nullptr}; // buffer should be allocated only once. + int max_size; // buffer allocated space. + int size; // data size. + DeviceType device{DeviceType::UNK}; // tells which device this buffer is on. 
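+  // (Editor's note, illustrative only.) A caller can either let the engine
+  // allocate `buffer` itself, or point it at externally managed memory and
+  // keep the bookkeeping fields consistent, e.g. roughly:
+  //   Buffer& buf = engine->buffer("input0");  // `engine` and "input0" are hypothetical
+  //   buf.buffer = external_ptr;
+  //   buf.size = buf.max_size = n_bytes;
+  //   buf.device = DeviceType::GPU;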
+}; + } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/CMakeLists.txt b/paddle/fluid/inference/tensorrt/CMakeLists.txt index 288789d6e484100820c937e6081701f1e9245706..677b3e04af8e7f5662a15fb32e3b03f45d262733 100644 --- a/paddle/fluid/inference/tensorrt/CMakeLists.txt +++ b/paddle/fluid/inference/tensorrt/CMakeLists.txt @@ -1,4 +1,5 @@ +nv_library(tensorrt_engine SRCS engine.cc DEPS framework_proto) nv_test(test_tensorrt SRCS test_tensorrt.cc DEPS dynload_cuda device_context dynamic_loader) -nv_test(test_tensorrt_engine SRCS test_engine.cc engine.cc DEPS dynload_cuda) -set(ENGINE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/engine.cc) +nv_test(test_tensorrt_engine SRCS test_engine.cc DEPS dynload_cuda tensorrt_engine) + add_subdirectory(convert) diff --git a/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt b/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt index 3c5909c0be1c690d5148ecfb32b1b6c2dd6f3211..5178c54c08400125d190078dac6c52d021f8488b 100644 --- a/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt +++ b/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt @@ -1,4 +1,4 @@ nv_test(test_op_converter SRCS test_op_converter.cc mul_op.cc conv2d_op.cc DEPS ${FLUID_CORE_MODULES}) -nv_test(test_trt_activation_op SRCS test_activation_op.cc ${ENGINE_FILE} activation_op.cc - DEPS ${FLUID_CORE_MODULES} activation_op) +nv_test(test_trt_activation_op SRCS test_activation_op.cc activation_op.cc + DEPS ${FLUID_CORE_MODULES} activation_op tensorrt_engine) nv_test(test_io_converter SRCS test_io_converter.cc io_converter.cc DEPS dynload_cuda dynamic_loader lod_tensor) diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc index df123a59079acc5f549e733b412ab302aa397a92..1c296e33a610493b889359c43629003fd76b893c 100644 --- a/paddle/fluid/inference/tensorrt/engine.cc +++ b/paddle/fluid/inference/tensorrt/engine.cc @@ -30,16 +30,24 @@ void TensorRTEngine::Build(const DescType& paddle_model) { } void TensorRTEngine::Execute(int batch_size) { - infer_context_->enqueue(batch_size, buffers_.data(), *stream_, nullptr); + std::vector buffers; + for (auto& buf : buffers_) { + PADDLE_ENFORCE_NOT_NULL(buf.buffer, "buffer should be allocated"); + PADDLE_ENFORCE_GT(buf.max_size, 0); + PADDLE_ENFORCE(buf.device == DeviceType::GPU); + buffers.push_back(buf.buffer); + } + infer_context_->enqueue(batch_size, buffers.data(), *stream_, nullptr); cudaStreamSynchronize(*stream_); } TensorRTEngine::~TensorRTEngine() { // clean buffer - for (auto& buffer : buffers_) { - if (buffer != nullptr) { - PADDLE_ENFORCE_EQ(0, cudaFree(buffer)); - buffer = nullptr; + for (auto& buf : buffers_) { + if (buf.buffer != nullptr) { + PADDLE_ENFORCE_EQ(0, cudaFree(buf.buffer)); + buf.buffer = nullptr; + buf.max_size = 0; } } } @@ -59,7 +67,7 @@ void TensorRTEngine::FreezeNetwork() { infer_context_.reset(infer_engine_->createExecutionContext()); // allocate GPU buffers. - buffers_.resize(buffer_sizes_.size(), nullptr); + buffers_.resize(buffer_sizes_.size()); for (auto& item : buffer_sizes_) { if (item.second == 0) { auto slot_offset = infer_engine_->getBindingIndex(item.first.c_str()); @@ -67,7 +75,11 @@ void TensorRTEngine::FreezeNetwork() { infer_engine_->getBindingDataType(slot_offset))] * AccumDims(infer_engine_->getBindingDimensions(slot_offset)); } - PADDLE_ENFORCE_EQ(0, cudaMalloc(&buffer(item.first), item.second)); + auto& buf = buffer(item.first); + CHECK(buf.buffer == nullptr); // buffer should be allocated only once. 
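+      // (Editor's note.) item.second holds the byte size of this binding; when
+      // it was still zero it is filled in above from the binding's data type
+      // and dimensions. The cudaMalloc below reserves that many bytes on the
+      // GPU, and the size/device are recorded in the Buffer right after.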
+ PADDLE_ENFORCE_EQ(0, cudaMalloc(&buf.buffer, item.second)); + buf.size = buf.max_size = item.second; + buf.device = DeviceType::GPU; } } @@ -113,7 +125,7 @@ void TensorRTEngine::DeclareOutput(const std::string& name) { } void* TensorRTEngine::GetOutputInGPU(const std::string& name) { - return buffer(name); + return buffer(name).buffer; } void TensorRTEngine::GetOutputInCPU(const std::string& name, void* dst, @@ -123,11 +135,13 @@ void TensorRTEngine::GetOutputInCPU(const std::string& name, void* dst, PADDLE_ENFORCE(it != buffer_sizes_.end()); PADDLE_ENFORCE_GT(it->second, 0); PADDLE_ENFORCE_GE(max_size, it->second); - PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(dst, buffer(name), it->second, + auto& buf = buffer(name); + PADDLE_ENFORCE_NOT_NULL(buf.buffer, "buffer should be allocated before"); + PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(dst, buf.buffer, it->second, cudaMemcpyDeviceToHost, *stream_)); } -void*& TensorRTEngine::buffer(const std::string& name) { +Buffer& TensorRTEngine::buffer(const std::string& name) { PADDLE_ENFORCE(infer_engine_ != nullptr, "call FreezeNetwork first."); auto it = buffer_sizes_.find(name); PADDLE_ENFORCE(it != buffer_sizes_.end()); @@ -137,10 +151,12 @@ void*& TensorRTEngine::buffer(const std::string& name) { void TensorRTEngine::SetInputFromCPU(const std::string& name, void* data, size_t size) { - void* buf = buffer(name); - cudaMemcpyAsync(buf, data, size, cudaMemcpyHostToDevice, *stream_); - PADDLE_ENFORCE_EQ( - 0, cudaMemcpyAsync(buf, data, size, cudaMemcpyHostToDevice, *stream_)); + auto& buf = buffer(name); + PADDLE_ENFORCE_NOT_NULL(buf.buffer); + PADDLE_ENFORCE_LE(size, buf.max_size, "buffer is too small"); + PADDLE_ENFORCE(buf.device == DeviceType::GPU); + PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(buf.buffer, data, size, + cudaMemcpyHostToDevice, *stream_)); } void TensorRTEngine::SetITensor(const std::string& name, diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h index ec919b943d3281dd675b15e2f14adb7b3487f46f..b8298c6059e8644327194a1fcf7a7438cc9a7286 100644 --- a/paddle/fluid/inference/tensorrt/engine.h +++ b/paddle/fluid/inference/tensorrt/engine.h @@ -87,7 +87,9 @@ class TensorRTEngine : public EngineBase { // these memory directly for acceleration, for example, output the converted // data directly to the buffer to save data copy overhead. // NOTE this should be used after calling `FreezeNetwork`. - void*& buffer(const std::string& name); + Buffer& buffer(const std::string& name) override; + + cudaStream_t* stream() { return stream_; } // Fill an input from CPU memory with name and size. void SetInputFromCPU(const std::string& name, void* data, size_t size); @@ -116,7 +118,7 @@ class TensorRTEngine : public EngineBase { cudaStream_t* stream_; nvinfer1::ILogger& logger_; - std::vector buffers_; + std::vector buffers_; // max data size for the buffers. std::unordered_map buffer_sizes_; std::unordered_map diff --git a/paddle/fluid/inference/tensorrt/test_engine.cc b/paddle/fluid/inference/tensorrt/test_engine.cc index a08b78f930d30d674247a713fadd3e42e5ada350..e635f0f87d577a1f1ac74687ee60f762be525418 100644 --- a/paddle/fluid/inference/tensorrt/test_engine.cc +++ b/paddle/fluid/inference/tensorrt/test_engine.cc @@ -77,6 +77,37 @@ TEST_F(TensorRTEngineTest, add_layer) { ASSERT_EQ(y_cpu, x_v * 2 + 3); } +TEST_F(TensorRTEngineTest, add_layer_multi_dim) { + // Weight in CPU memory. 
+ // It seems tensorrt FC use col-major: [[1.0, 3.3], [1.1, 4.4]] + // instead of row-major, which is [[1.0, 1.1], [3.3, 4.4]] + float raw_weight[4] = {1.0, 1.1, 3.3, 4.4}; + float raw_bias[2] = {1.3, 2.4}; + + TensorRTEngine::Weight weight(nvinfer1::DataType::kFLOAT, raw_weight, 4); + TensorRTEngine::Weight bias(nvinfer1::DataType::kFLOAT, raw_bias, 2); + auto* x = engine_->DeclareInput("x", nvinfer1::DataType::kFLOAT, + nvinfer1::DimsCHW{1, 2, 1}); + auto* fc_layer = TRT_ENGINE_ADD_LAYER(engine_, FullyConnected, *x, 2, + weight.get(), bias.get()); + PADDLE_ENFORCE(fc_layer != nullptr); + + engine_->DeclareOutput(fc_layer, 0, "y"); + engine_->FreezeNetwork(); + ASSERT_EQ(engine_->engine()->getNbBindings(), 2); + + float x_v[2] = {1.0, 2.0}; + engine_->SetInputFromCPU("x", reinterpret_cast(&x_v), + 2 * sizeof(float)); + engine_->Execute(1); + + LOG(INFO) << "to get output"; + float y_cpu[2] = {-1., -1.}; + engine_->GetOutputInCPU("y", &y_cpu[0], sizeof(float) * 2); + ASSERT_EQ(y_cpu[0], 4.5); + ASSERT_EQ(y_cpu[1], 14.5); +} + } // namespace tensorrt } // namespace inference } // namespace paddle diff --git a/paddle/fluid/operators/detail/sendrecvop_utils.cc b/paddle/fluid/operators/detail/sendrecvop_utils.cc index d68cf467f7b0c6157dc1f69571e5d0c0b3c70348..1a8a1af20fa446dbd537944409ef0ca1e3e9116f 100644 --- a/paddle/fluid/operators/detail/sendrecvop_utils.cc +++ b/paddle/fluid/operators/detail/sendrecvop_utils.cc @@ -29,129 +29,127 @@ namespace paddle { namespace operators { namespace detail { +using VarMsg = sendrecv::VariableMessage; + +void GetTensorPayload(framework::Variable* var, + const platform::DeviceContext& ctx, VarMsg* request, + void** payload, size_t* payload_size) { + auto tensor = var->Get(); + // FIXME(wuyi): data types in send_recv.proto is copied from + // framework.proto + request->set_data_type( + static_cast(framework::ToDataType(tensor.type()))); + for (auto& dim : framework::vectorize(tensor.dims())) { + request->add_dims(dim); + } + const framework::LoD lod = tensor.lod(); + if (lod.size() > 0) { + request->set_lod_level(lod.size()); + for (auto& each : lod) { + VarMsg::LodData* lod_inner = request->add_lod(); + for (auto& d : each) { + lod_inner->add_lod_data(d); + } + } + } + if (platform::is_gpu_place(ctx.GetPlace())) { +#ifdef PADDLE_WITH_CUDA + PADDLE_ENFORCE(platform::is_gpu_place(tensor.place())); + platform::CPUPlace cpu; + auto& gpu_dev_ctx = static_cast(ctx); + auto copy_size = tensor.numel() * framework::SizeOfType(tensor.type()); + *payload = memory::Alloc(cpu, copy_size); + + memory::Copy(cpu, *payload, boost::get(tensor.place()), + reinterpret_cast(tensor.data()), copy_size, + gpu_dev_ctx.stream()); + ctx.Wait(); +#endif + } else { + *payload = tensor.data(); + } + *payload_size = tensor.numel() * framework::SizeOfType(tensor.type()); +} + +void GetSelectedRowsPayload(framework::Variable* var, + const platform::DeviceContext& ctx, VarMsg* request, + void** payload, size_t* payload_size) { + auto* slr = var->GetMutable(); + request->set_data_type( + static_cast(framework::ToDataType(slr->value().type()))); + request->set_lod_level(0); + request->set_slr_height(slr->height()); + + for (auto& dim : framework::vectorize(slr->value().dims())) { + request->add_dims(dim); + } + + auto* tensor = slr->mutable_value(); + if (platform::is_gpu_place(ctx.GetPlace())) { +#ifdef PADDLE_WITH_CUDA + platform::CPUPlace cpu; + auto& gpu_dev_ctx = static_cast(ctx); + auto copy_size = tensor->numel() * framework::SizeOfType(tensor->type()); + *payload = memory::Alloc(cpu, 
copy_size); + memory::Copy(cpu, *payload, + boost::get(tensor->place()), + reinterpret_cast(tensor->data()), copy_size, + gpu_dev_ctx.stream()); + ctx.Wait(); +#endif + } else { + *payload = slr->mutable_value()->data(); + } + *payload_size = tensor->numel() * framework::SizeOfType(tensor->type()); +} + void SerializeToByteBuffer(const std::string& name, framework::Variable* var, const platform::DeviceContext& ctx, ::grpc::ByteBuffer* msg, const std::string& out_name) { - using VarMsg = sendrecv::VariableMessage; - // When using GPU, need to free the copied CPU buffer - // when the ByteBuffer destroies - // TODO(typhoonzero): add unref here, if we have dependent - // parallelism execution, need to know when to free the tensor. + // Default DestroyCallback does nothing, When using GPU + // the CPU buffer need to be freed. DestroyCallback destroy_callback = [](void* backing) {}; - - auto buffer = std::unique_ptr(new char[1024]); - void* buf = buffer.get(); - + VarMsg request; void* payload = nullptr; size_t payload_size; - ProtoEncodeHelper e(static_cast(buf), 1024); + + request.set_varname(name); // Note: normally the profiler is enabled in 1 trainer, hence only // 1 trainer returns true for ShouldSendProfileState(). It tells PS // servers the trainer's profiling state so that PS can follow the // trainer. - if (platform::ShouldSendProfileState()) { - e.WriteBool(VarMsg::kProfileFieldNumber, platform::IsProfileEnabled()); + request.set_profile(platform::IsProfileEnabled()); + if (!out_name.empty()) { + request.set_out_varname(out_name); } - e.WriteString(VarMsg::kVarnameFieldNumber, name); if (var->IsType()) { - e.WriteUint64(VarMsg::kTypeFieldNumber, 0); + request.set_type(::sendrecv::LOD_TENSOR); + GetTensorPayload(var, ctx, &request, &payload, &payload_size); } else if (var->IsType()) { - e.WriteUint64(VarMsg::kTypeFieldNumber, 1); + request.set_type(::sendrecv::SELECTED_ROWS); + GetSelectedRowsPayload(var, ctx, &request, &payload, &payload_size); + } else { + PADDLE_THROW("Serialize does not support type: %s", + typeid(var->Type()).name()); } - if (!out_name.empty()) { - e.WriteString(VarMsg::kOutVarnameFieldNumber, out_name); + if (platform::is_gpu_place(ctx.GetPlace())) { + // GPU data is copied to CPU buffer when sending, + // free the buffer when possible. 
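+    // (Editor's note, inferred from the helpers above.) "the buffer" refers to
+    // the CPU staging memory obtained via memory::Alloc(cpu, ...) in
+    // GetTensorPayload/GetSelectedRowsPayload; the callback below is expected
+    // to release it once gRPC no longer needs the payload slice.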
+ destroy_callback = [](void* backing) { + platform::CPUPlace cpu; + memory::Free(cpu, backing); + }; } - switch (framework::ToVarType(var->Type())) { - case framework::proto::VarType_Type_LOD_TENSOR: { - auto tensor = var->Get(); - e.WriteUint64(VarMsg::kDataTypeFieldNumber, - framework::ToDataType(tensor.type())); - for (auto& dim : framework::vectorize(tensor.dims())) { - e.WriteUint64(VarMsg::kDimsFieldNumber, dim); - } - auto lod = tensor.lod(); // std::vector> - if (lod.size() > 0) { - e.WriteUint64(VarMsg::kLodLevelFieldNumber, lod.size()); - - for (auto& each : lod) { - e.WriteVarlengthBeginning(VarMsg::kLodFieldNumber, - 2 + // tag + varintlength of submessage - 1 + // kLodDataFieldNumber - each.size()); - // auto copied from GPU - for (auto& d : each) { - e.WriteUint64(VarMsg::LodData::kLodDataFieldNumber, d); - } - } - } - if (platform::is_gpu_place(ctx.GetPlace())) { -#ifdef PADDLE_WITH_CUDA - PADDLE_ENFORCE(platform::is_gpu_place(tensor.place())); - platform::CPUPlace cpu; - auto& gpu_dev_ctx = - static_cast(ctx); - auto copy_size = tensor.numel() * framework::SizeOfType(tensor.type()); - payload = memory::Alloc(cpu, copy_size); - - memory::Copy(cpu, payload, - boost::get(tensor.place()), - reinterpret_cast(tensor.data()), - copy_size, gpu_dev_ctx.stream()); - ctx.Wait(); - destroy_callback = [](void* backing) { - platform::CPUPlace cpu; - memory::Free(cpu, backing); - }; -#endif - } else { - payload = tensor.data(); - } - payload_size = tensor.numel() * framework::SizeOfType(tensor.type()); - e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber, payload_size); - } break; - case framework::proto::VarType_Type_SELECTED_ROWS: { - // TODO(typhoonzero): selectedrows implement should not use unique_ptr - auto* slr = var->GetMutable(); - e.WriteUint64(VarMsg::kDataTypeFieldNumber, - framework::ToDataType(slr->value().type())); - for (auto& dim : framework::vectorize(slr->value().dims())) { - e.WriteUint64(VarMsg::kDimsFieldNumber, dim); - } - e.WriteUint64(VarMsg::kLodLevelFieldNumber, 0); - e.WriteUint64(VarMsg::kSlrHeightFieldNumber, slr->height()); - auto* tensor = slr->mutable_value(); - if (platform::is_gpu_place(ctx.GetPlace())) { -#ifdef PADDLE_WITH_CUDA - platform::CPUPlace cpu; - auto& gpu_dev_ctx = - static_cast(ctx); - auto copy_size = - tensor->numel() * framework::SizeOfType(tensor->type()); - payload = memory::Alloc(cpu, copy_size); - memory::Copy(cpu, payload, - boost::get(tensor->place()), - reinterpret_cast(tensor->data()), - copy_size, gpu_dev_ctx.stream()); - ctx.Wait(); - destroy_callback = [](void* backing) { - platform::CPUPlace cpu; - memory::Free(cpu, backing); - }; -#endif - } else { - payload = slr->mutable_value()->data(); - } - payload_size = tensor->numel() * framework::SizeOfType(tensor->type()); - e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber, payload_size); - } break; - default: - PADDLE_THROW("Serialize does not support type: %s", - typeid(var->Type()).name()); - break; - } + std::string header; + request.AppendToString(&header); + auto buffer = std::unique_ptr(new char[1024]); + void* buf = buffer.get(); + ProtoEncodeHelper e(static_cast(buf), 1024); + e.WriteRawBytes(std::string(header.data(), header.size())); + e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber, payload_size); // steal reference of tensor data ::grpc::Slice slices[4]; // metadata, tensor, rows meta, rows int num_slices = 2; // only SelectedRows have rows buffer @@ -162,12 +160,9 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, 
static_cast(payload)), ::grpc::Slice::STEAL_REF); - if (framework::ToVarType(var->Type()) == - framework::proto::VarType_Type_SELECTED_ROWS) { + if (var->IsType()) { auto* slr = var->GetMutable(); - ProtoEncodeHelper e2(static_cast(buf), 128); - // NOTE: rows is of type int64_t size_t rows_memory_size = slr->rows().size() * framework::SizeOfType(typeid(int64_t)); e2.WriteVarlengthBeginning(VarMsg::kRowsFieldNumber, rows_memory_size); @@ -178,10 +173,7 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, grpc_slice_new_with_user_data( const_cast( reinterpret_cast(slr->rows().data())), - rows_memory_size, - [](void* backing) { - // TODO(typhoonzero): add unref here, same as above. - }, + rows_memory_size, [](void* backing) {}, const_cast( reinterpret_cast(slr->rows().data()))), ::grpc::Slice::STEAL_REF); diff --git a/paddle/fluid/operators/detail/serde_test.cc b/paddle/fluid/operators/detail/serde_test.cc index e9eaaf1cbcd07ed1c8d6fb0b025bc1f1500718fd..15892295e6901fe649788c9e34604008fc8cbdfa 100644 --- a/paddle/fluid/operators/detail/serde_test.cc +++ b/paddle/fluid/operators/detail/serde_test.cc @@ -117,11 +117,11 @@ void RunTestLodTensor(platform::Place place, int from_type = 0) { // serialize var to ByteBuffer framework::Variable var; auto* tensor = var.GetMutable(); - tensor->Resize(framework::make_ddim({4, 8, 4, 2})); + tensor->Resize(framework::make_ddim({512, 8, 4, 2})); framework::LoD lod; lod.push_back(framework::Vector({1, 3, 8})); tensor->set_lod(lod); - int tensor_numel = 4 * 8 * 4 * 2; + int tensor_numel = 512 * 8 * 4 * 2; platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& ctx = *pool.Get(place); tensor->mutable_data(place); @@ -142,7 +142,7 @@ void RunTestLodTensor(platform::Place place, int from_type = 0) { EXPECT_TRUE(varmsg.ParseFromString(tmp)); EXPECT_EQ(varmsg.varname(), "myvar"); EXPECT_EQ(varmsg.type(), 0); - EXPECT_EQ(varmsg.dims()[0], 4); + EXPECT_EQ(varmsg.dims()[0], 512); EXPECT_EQ(varmsg.dims()[1], 8); EXPECT_EQ(varmsg.dims()[2], 4); EXPECT_EQ(varmsg.dims()[3], 2); diff --git a/paddle/fluid/operators/detail/variable_response.cc b/paddle/fluid/operators/detail/variable_response.cc index f4a374d56d28a30201f0d482e97e1a40e7a8bf41..99602a05d023f30c2eed8df25e7534fdc9ef2ced 100644 --- a/paddle/fluid/operators/detail/variable_response.cc +++ b/paddle/fluid/operators/detail/variable_response.cc @@ -210,15 +210,15 @@ bool ParseLodData(::google::protobuf::io::CodedInputStream* input, } if (wt == WIRETYPE_LENGTH_DELIMITED) { - int length = 0; - if (!input->ReadVarintSizeAsInt(&length)) { + int num_bytes = 0; + if (!input->ReadVarintSizeAsInt(&num_bytes)) { return tag; } - - for (int i = 0; i < length; i++) { + int start_pos = input->CurrentPosition(); + while (input->CurrentPosition() - start_pos < num_bytes) { uint64_t v; if (!input->ReadVarint64(&v)) { - return false; + return tag; } lod->push_back(v); } @@ -275,8 +275,8 @@ int VariableResponse::Parse(Source* source) { break; } case sendrecv::VariableMessage::kTypeFieldNumber: { - uint64_t v; - if ((wt != WIRETYPE_VARINT) || !input.ReadVarint64(&v)) { + uint32_t v; + if ((wt != WIRETYPE_VARINT) || !input.ReadVarint32(&v)) { return tag; } @@ -284,8 +284,8 @@ int VariableResponse::Parse(Source* source) { break; } case sendrecv::VariableMessage::kDataTypeFieldNumber: { - uint64_t v = 0; - if ((wt != WIRETYPE_VARINT) || !input.ReadVarint64(&v)) { + uint32_t v = 0; + if ((wt != WIRETYPE_VARINT) || !input.ReadVarint32(&v)) { return tag; } @@ -305,11 +305,12 @@ int 
VariableResponse::Parse(Source* source) { // packed if (wt == WIRETYPE_LENGTH_DELIMITED) { - int length = 0; - if (!input.ReadVarintSizeAsInt(&length)) { + int num_bytes = 0; + if (!input.ReadVarintSizeAsInt(&num_bytes)) { return tag; } - for (int i = 0; i < length; i++) { + int start_pos = input.CurrentPosition(); + while (input.CurrentPosition() - start_pos < num_bytes) { uint64_t v; if (!input.ReadVarint64(&v)) { return tag; @@ -318,7 +319,6 @@ int VariableResponse::Parse(Source* source) { } break; } - return tag; } case sendrecv::VariableMessage::kLodLevelFieldNumber: { @@ -372,9 +372,9 @@ int VariableResponse::Parse(Source* source) { meta_.varname() != "", "meta info should be got first!"); - int length = 0; + int num_bytes = 0; if (wt != WIRETYPE_LENGTH_DELIMITED || - !ReadVarintSizeAsInt(&input, &length)) { + !ReadVarintSizeAsInt(&input, &num_bytes)) { return tag; } @@ -382,14 +382,14 @@ int VariableResponse::Parse(Source* source) { if (meta_.type() == sendrecv::LOD_TENSOR) { PADDLE_ENFORCE(meta_.lod_size() >= 0, "lod info should be got first!"); - if (!CopyLodTensorData(&input, *dev_ctx_, dims, length)) { + if (!CopyLodTensorData(&input, *dev_ctx_, dims, num_bytes)) { return tag; } break; } if (meta_.type() == sendrecv::SELECTED_ROWS) { - if (!CopySelectRowsTensorData(&input, *dev_ctx_, dims, length)) { + if (!CopySelectRowsTensorData(&input, *dev_ctx_, dims, num_bytes)) { return tag; } break; @@ -403,13 +403,13 @@ int VariableResponse::Parse(Source* source) { meta_.varname() != "", "meta info should be got first!"); - int length = 0; + int num_bytes = 0; if (wt != WIRETYPE_LENGTH_DELIMITED || - !ReadVarintSizeAsInt(&input, &length)) { + !ReadVarintSizeAsInt(&input, &num_bytes)) { return tag; } - if (!CopySelectRowsData(&input, *dev_ctx_, length)) { + if (!CopySelectRowsData(&input, *dev_ctx_, num_bytes)) { return tag; } break; diff --git a/paddle/fluid/operators/load_op.cc b/paddle/fluid/operators/load_op.cc index 6ffe0bec5e38432676ecadfa1abbbe70a1425bb1..abf7becb2e7fc134e3a52ec4c118847c14a20b9c 100644 --- a/paddle/fluid/operators/load_op.cc +++ b/paddle/fluid/operators/load_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include +#include "paddle/fluid/framework/data_type_transform.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/profiler.h" @@ -47,17 +48,25 @@ class LoadOp : public framework::OperatorBase { DeserializeFromStream(fin, tensor, *dev_ctx); - if (platform::is_gpu_place(place)) { - // copy CPU to GPU - framework::LoDTensor cpu_tensor; - cpu_tensor.ShareDataWith(*tensor); - cpu_tensor.set_lod(tensor->lod()); - - // reset tensor + auto load_as_fp16 = Attr("load_as_fp16"); + auto in_dtype = framework::ToDataType(tensor->type()); + auto out_dtype = load_as_fp16 ? 
framework::proto::VarType::FP16 : in_dtype; + + if (in_dtype != out_dtype) { + // convert to float16 tensor + auto in_kernel_type = framework::OpKernelType(in_dtype, place); + auto out_kernel_type = framework::OpKernelType(out_dtype, place); + framework::LoDTensor fp16_tensor; + // copy LoD info to the new tensor + fp16_tensor.set_lod(tensor->lod()); + framework::TransDataType(in_kernel_type, out_kernel_type, *tensor, + &fp16_tensor); + + // reset output tensor out_var->Clear(); tensor = out_var->GetMutable(); - tensor->set_lod(cpu_tensor.lod()); - TensorCopy(cpu_tensor, place, *dev_ctx, tensor); + tensor->set_lod(fp16_tensor.lod()); + tensor->ShareDataWith(fp16_tensor); } } }; @@ -67,6 +76,13 @@ class LoadOpProtoMaker : public framework::OpProtoAndCheckerMaker { LoadOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddOutput("Out", "(Tensor) The tensor need to be loaded"); + AddAttr( + "load_as_fp16", + "(boolean, default false)" + "If true, the tensor will be first loaded and then " + "converted to float16 data type. Otherwise, the tensor will be " + "directly loaded without data type conversion.") + .SetDefault(false); AddAttr("file_path", "(string) " "Variable will be loaded from \"file_path\".") diff --git a/paddle/fluid/operators/math/blas.cc b/paddle/fluid/operators/math/blas.cc index 3eeb77546b97a0337b46216d837a4f4cff12c89f..6a143b3c056455595fdedc131b0c5f4ee756e1e0 100644 --- a/paddle/fluid/operators/math/blas.cc +++ b/paddle/fluid/operators/math/blas.cc @@ -13,10 +13,40 @@ // limitations under the License. #include "paddle/fluid/operators/math/blas.h" + +#include namespace paddle { namespace operators { namespace math { -// Do nothing. Blas is a header only library. +MatDescriptor CreateMatrixDescriptor(const framework::DDim &tensor_dim, + int num_flatten_cols, bool trans) { + PADDLE_ENFORCE_GT(tensor_dim.size(), 1); + MatDescriptor retv; + if (num_flatten_cols > 1) { + auto flatten_dim = framework::flatten_to_2d(tensor_dim, num_flatten_cols); + retv.height_ = flatten_dim[0]; + retv.width_ = flatten_dim[1]; + } else { + if (tensor_dim.size() == 2) { + retv.height_ = tensor_dim[0]; + retv.width_ = tensor_dim[1]; + } else { + auto dim_vec = framework::vectorize(tensor_dim); + retv.batch_size_ = 1; + for (size_t i = 0; i < dim_vec.size() - 2; ++i) { + retv.batch_size_ *= dim_vec[i]; + } + retv.height_ = dim_vec[dim_vec.size() - 2]; + retv.width_ = dim_vec[dim_vec.size() - 1]; + retv.stride_ = retv.height_ * retv.width_; + } + } + if (trans) { + std::swap(retv.width_, retv.height_); + } + retv.trans_ = trans; + return retv; +} } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/math/blas.h b/paddle/fluid/operators/math/blas.h index 5cd2f855d1135e6dd8343efdaa9855d2526a3520..dabde43850db770d286b13cacd32bee181328d5c 100644 --- a/paddle/fluid/operators/math/blas.h +++ b/paddle/fluid/operators/math/blas.h @@ -46,6 +46,50 @@ namespace paddle { namespace operators { namespace math { +/** + * Matrix Descriptor of a memory buffer. + * + * It is used for Blas::MatMul. MatMul operator can be batched. + * if Mat A is [BatchSize, H, W], Mat B is [BatchSize, H, W]. It will be a + * `batch_size` times of GEMM. The batched GEMM could be faster base on the + * implementation of the blas library. The batch size could be zero. If any + * matrix of `matmul` has a batch size, the will be a batched GEMM, too. 
e.g., + * Mat A is [BatchSize, H1, W2], and Mat B [H2, W2], The result matrix wil be + * [BatchSize, H1, W2] + * + * The boolean flag, `trans`, describe the memory is the transpose of matrix or + * not. If the trans is true, the last two dims of matrix are transposed. The + * memory layout of the matrix is [Width, Height] or [BatchSize, Width, Height]. + * + * The MatDescriptor is not only the dimension or shape of a matrix, it also + * contains the layout, stride of matrix. It is clearer to have a structure than + * reuse `DDim`. + */ +struct MatDescriptor { + int64_t height_; + int64_t width_; + int64_t stride_{0}; + int64_t batch_size_{0}; + bool trans_; +}; + +/** + * Create Matrix Descriptor from a tensor dim, num_flatten_cols, and transpose + * flag + * + * @param tensor_dim: The dimension of the tensor. The rank of this dimension + * must larger than 1. + * + * @param num_flatten_cols: Reshape a tensor to a matrix. The matrix's first + * dimension(column length) will be the product of tensor's first `num_col_dims` + * dimensions. If num_flatten_cols is zero, the first N-2 dimension will be the + * batch_size of descriptor. + * + * @param trans: True if the matrix is transposed. + */ +extern MatDescriptor CreateMatrixDescriptor(const framework::DDim& tensor_dim, + int num_flatten_cols, bool trans); + template class Blas { public: @@ -90,6 +134,11 @@ class Blas { int K, T alpha, const T* A, const T* B, T beta, T* C, int batchCount, int64_t strideA, int64_t strideB) const; + template + void MatMul(const framework::Tensor& mat_a, const MatDescriptor& dim_a, + const framework::Tensor& mat_b, const MatDescriptor& dim_b, + T alpha, framework::Tensor* mat_out, T beta) const; + private: const DeviceContext& context_; }; diff --git a/paddle/fluid/operators/math/blas_impl.h b/paddle/fluid/operators/math/blas_impl.h index 7360cc0a90da499c372c6fb3f8d40a26f9093dd8..577cbe3beb806ffcb2f1a7d7a469402be9b69224 100644 --- a/paddle/fluid/operators/math/blas_impl.h +++ b/paddle/fluid/operators/math/blas_impl.h @@ -180,6 +180,31 @@ void Blas::BatchedGEMM( #endif } +template +template +void Blas::MatMul(const framework::Tensor &mat_a, + const MatDescriptor &dim_a, + const framework::Tensor &mat_b, + const MatDescriptor &dim_b, T alpha, + framework::Tensor *mat_out, T beta) const { + PADDLE_ENFORCE_EQ(dim_a.width_, dim_b.height_); + CBLAS_TRANSPOSE transA = !dim_a.trans_ ? CblasNoTrans : CblasTrans; + CBLAS_TRANSPOSE transB = !dim_b.trans_ ? CblasNoTrans : CblasTrans; + if (dim_a.batch_size_ == 0 && dim_b.batch_size_ == 0) { + this->template GEMM(transA, transB, dim_a.height_, dim_b.width_, + dim_a.width_, alpha, mat_a.data(), + mat_b.data(), beta, mat_out->data()); + } else { + PADDLE_ENFORCE(dim_a.batch_size_ == dim_b.batch_size_ || + dim_a.batch_size_ == 0 || dim_b.batch_size_ == 0); + this->template BatchedGEMM( + transA, transB, dim_a.height_, dim_b.width_, dim_a.width_, alpha, + mat_a.data(), mat_b.data(), beta, mat_out->data(), + dim_a.batch_size_ == 0 ? dim_b.batch_size_ : dim_a.batch_size_, + dim_a.stride_, dim_b.stride_); + } +} + } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/math/matmul.h b/paddle/fluid/operators/math/matmul.h deleted file mode 100644 index 87fd38a324e007bcc939c31b6ae8e5d38c3e658c..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/math/matmul.h +++ /dev/null @@ -1,149 +0,0 @@ -/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -#include -#include -#include "paddle/fluid/operators/math/blas.h" - -namespace paddle { -namespace operators { -namespace math { - -// Implements the logic of numpy matmul: -// https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html -// -// but allowing also for a, b to be transposed -// -// Both a & b can be 1- to 3-dimensional. Higher rank tensors are not supported -// yet. -template -class MatMulFunctor { - public: - void operator()(const DeviceContext& context, const framework::Tensor& a, - bool trans_a, const framework::Tensor& b, bool trans_b, - T alpha, framework::Tensor* out, T beta) { - auto dim_a = a.dims(); - auto dim_b = b.dims(); - - PADDLE_ENFORCE(a.place() == b.place() && b.place() == out->place(), - "Tensors must all be in the same place."); - PADDLE_ENFORCE_GE(dim_a.size(), 1, - "Input tensor a must be at least 1-dimensional."); - PADDLE_ENFORCE_GE(dim_b.size(), 1, - "Input tensor b must be at least 1-dimensional."); - - std::vector out_dim; - int64_t batch_count = 1; - if (dim_a.size() > 3) { - PADDLE_ENFORCE(dim_b.size() == dim_a.size(), - "The dimensions of X and Y must be the same, and both of " - "them should be %d-dimensional.", - dim_b.size()); - // The first rank-2 dimensions are accumulated on the batch_count, and the - // last two dimensions are used for matrix multiplication. - for (int j = 0; j < dim_a.size() - 2; ++j) { - PADDLE_ENFORCE_EQ(dim_b[j], dim_a[j], - "The %d-th dimension of X and Y must be the same.", - j); - out_dim.push_back(dim_a[j]); - batch_count *= dim_a[j]; - } - } - - int M = 0, N = 0, kA = 0, kB = 0, batchCountA = 0, batchCountB = 0, - strideA = 0, strideB = 0; - - switch (dim_a.size()) { - case 1: - // similar to np.matmul: - // prepend dimension 1 (no transpose) or append dimension 1 (transpose) - M = trans_a ? dim_a[0] : 1; - kA = trans_a ? 1 : dim_a[0]; - break; - case 2: - M = trans_a ? dim_a[1] : dim_a[0]; - kA = trans_a ? dim_a[0] : dim_a[1]; - break; - case 3: - batchCountA = dim_a[0]; - M = trans_a ? dim_a[2] : dim_a[1]; - kA = trans_a ? dim_a[1] : dim_a[2]; - strideA = M * kA; - break; - default: - batchCountA = batch_count; - size_t mat_s = dim_a.size() - 2; - M = trans_a ? dim_a[mat_s + 1] : dim_a[mat_s]; - kA = trans_a ? dim_a[mat_s] : dim_a[mat_s + 1]; - strideA = M * kA; - } - - switch (dim_b.size()) { - case 1: - // similar to np.matmul: - // append dimension 1 (no transpose) or prepend dimension 1 (transpose) - kB = trans_b ? 1 : dim_b[0]; - N = trans_b ? dim_b[0] : 1; - break; - case 2: - kB = trans_b ? dim_b[1] : dim_b[0]; - N = trans_b ? dim_b[0] : dim_b[1]; - break; - case 3: - batchCountB = dim_b[0]; - kB = trans_b ? dim_b[2] : dim_b[1]; - N = trans_b ? dim_b[1] : dim_b[2]; - strideB = kB * N; - break; - default: - batchCountB = batch_count; - size_t mat_s = dim_b.size() - 2; - kB = trans_b ? dim_b[mat_s + 1] : dim_b[mat_s]; - N = trans_b ? 
dim_b[mat_s] : dim_b[mat_s + 1]; - strideB = kB * N; - } - - PADDLE_ENFORCE_EQ( - kA, kB, - "First matrix's width must be equal with second matrix's height."); - if (batchCountA && batchCountB) { - PADDLE_ENFORCE_EQ( - batchCountA, batchCountB, - "When input tensors a and b are both batched, they must have the " - "same batch dimension."); - } - int batchCount = std::max(batchCountA, batchCountB); - - CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans; - CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans; - - auto blas = GetBlas(context); - - if (!batchCount) { - // regular matrix multiplication - blas.GEMM(transA, transB, M, N, kA, alpha, a.data(), b.data(), beta, - out->data()); - } else { - // batched matrix multiplication - blas.BatchedGEMM(transA, transB, M, N, kA, alpha, a.data(), - b.data(), beta, out->data(), batchCount, strideA, - strideB); - } - } -}; - -} // namespace math -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/matmul_op.cc b/paddle/fluid/operators/matmul_op.cc index e5d33fbc36438f97ff5b604e4efdbfbfa91fcee4..da21b8ad7d4e353e1dbe98fde1fbac1b0d37fd5d 100644 --- a/paddle/fluid/operators/matmul_op.cc +++ b/paddle/fluid/operators/matmul_op.cc @@ -12,14 +12,257 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/matmul_op.h" #include +#include #include +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/operators/math/blas.h" namespace paddle { namespace operators { +/** + * Get row matrix shape from a vector shape. If the rank of x_dim > 1, the + * original x_dim is returned. + */ +static framework::DDim RowMatrixFromVector(const framework::DDim& x_dim) { + if (x_dim.size() > 1) { + return x_dim; + } + return framework::make_ddim({1, x_dim[0]}); +} + +/** + * Get column matrix shape from a vector shape. If the ran of y_dim > 1, the + * original y_dim is returned. + */ +static framework::DDim ColumnMatrixFromVector(const framework::DDim& y_dim) { + if (y_dim.size() > 1) { + return y_dim; + } + return framework::make_ddim({y_dim[0], 1}); +} + +template +class MatMulKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto& x = + detail::Ref(context.Input("X"), "Cannot find X"); + auto& y = + detail::Ref(context.Input("Y"), "Cannot find Y"); + auto* out = context.Output("Out"); + out->mutable_data(context.GetPlace()); + + auto blas = math::GetBlas(context); + auto mat_dim_a = math::CreateMatrixDescriptor( + RowMatrixFromVector(x.dims()), 0, context.Attr("transpose_X")); + auto mat_dim_b = math::CreateMatrixDescriptor( + ColumnMatrixFromVector(y.dims()), 0, context.Attr("transpose_Y")); + blas.MatMul(x, mat_dim_a, y, mat_dim_b, T(1), out, T(0)); + } +}; + +// Reshape a rank-3 tensor from P x M x N to (P * M) x N. +// Identity op if the tensor is not of rank 3. +static framework::Tensor FoldInitDims(const framework::Tensor& input) { + auto output = input; + auto in_dims = input.dims(); + if (in_dims.size() == 3) { + output.Resize({in_dims[0] * in_dims[1], in_dims[2]}); + } + return output; +} + +// Reshape a rank-3 tensor from P x M x N to M x (P * N). +// (Warning: This requires transposing data and writes into new memory.) +// Identity op if the tensor is not of rank 3. 
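+// For example (editor's illustration): a 2 x 3 x 4 input is transposed with
+// axis order {1, 0, 2} to 3 x 2 x 4 and then viewed as 3 x 8, i.e. the batch
+// dimension P is folded into the last dimension, in contrast to FoldInitDims
+// above, which produces (P * M) x N without copying data.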
+template +static framework::Tensor FoldHeadAndLastDims(const DeviceContext& context, + const framework::Tensor& input) { + auto in_dims = input.dims(); + if (in_dims.size() != 3) { + return input; + } + framework::Tensor output; + output.Resize({in_dims[1], in_dims[0], in_dims[2]}); + output.mutable_data(context.GetPlace()); + std::vector axis = {1, 0, 2}; + math::Transpose trans; + trans(context, input, &output, axis); + output.Resize({in_dims[1], in_dims[0] * in_dims[2]}); + + return output; +} + +/** + * Reshape a tensor to 3-D or 2-D tensor by matrix descriptor. + * + * The shape would be [BatchSize, H, W] or [H, W]. + * If transposed, `H,W` will be swapped. + */ +static void ReshapeTensorIntoMatrixSequence( + framework::Tensor* x, const math::MatDescriptor& descriptor) { + int64_t h, w; + h = descriptor.height_; + w = descriptor.width_; + if (descriptor.trans_) { + std::swap(w, h); + } + if (descriptor.batch_size_) { + x->Resize({descriptor.batch_size_, h, w}); + } else { + x->Resize({h, w}); + } +} + +/** + * Reshape the x,y,out tensor to 3-D or 2-D tensor by matrix descriptor + * Out = matmul(x, y) + * + * This method will first calculate X,Y matrix sequence, and then calculate + * the out shape. + * + * Assume X = [BatchSize, H1, W1], Y = [BatchSize, H2, W2] + * The out = [BatchSize, H1, W2] + * + * If there is no batch size in `X` and `Y`, the out will be [H1, W2] + * If any of `X` and `Y` has batch size BatchSize, the out will have the + * BatchSize. + */ +static void ReshapeXYOutIntoMatrixSequence(framework::Tensor* x, + framework::Tensor* y, + framework::Tensor* out, bool trans_x, + bool trans_y) { + auto x_dim = RowMatrixFromVector(x->dims()); + auto y_dim = ColumnMatrixFromVector(y->dims()); + auto mat_dim_x = math::CreateMatrixDescriptor(x_dim, 0, trans_x); + auto mat_dim_y = math::CreateMatrixDescriptor(y_dim, 0, trans_y); + if (mat_dim_x.batch_size_ == 0 && mat_dim_y.batch_size_ == 0) { + out->Resize({mat_dim_x.height_, mat_dim_y.width_}); + } else { + out->Resize({std::max(mat_dim_x.batch_size_, mat_dim_y.batch_size_), + mat_dim_x.height_, mat_dim_y.width_}); + } + + ReshapeTensorIntoMatrixSequence(x, mat_dim_x); + ReshapeTensorIntoMatrixSequence(y, mat_dim_y); +} + +// Using dimensional constraints on matrix multiplication, it is +// straight-forward to check the following table for when X and Y +// are both matrices. +// +// transpose_X | False | True | False | True +// transpose_Y | False | False | True | True +// -----------+----------+----------+----------+----------- +// dX = | dOut Y^T | Y dOut^T | dOut Y | Y^T dOut^T +// dY = | X^T dOut | X dOut | dOut^T X | dOut^T X^T +// +// When X is a vector of size K, we treat it instead as a matrix of shape +// (1, K). Similarly, when Y is a vector of size K, we treat it instead as +// a matrix of shape (K, 1). +// +// When X and Y are both 3-dimensional tensors, then the first dimension +// the batch dimension can be ignored and the exact same formulas apply +// as for two matrices. +// +// Finally, when, e.g., X is a 3-dimensional tensor but Y is a matrix, we end +// up with formulas like +// +// dY_{ij} = \sum_{p, m} X_{pmi} dOut_{pmj} +// +// To handle this sort of scenario, we reshape X : P x M x K, dOut: P x M x N +// to X: (P * M) x K, dOut: (P * M) x N. 
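+// (Editor's illustration.) Concretely, with X: 2 x 3 x 4, Y: 4 x 5 and thus
+// dOut: 2 x 3 x 5, dY = X^T dOut becomes one GEMM on the folded matrices,
+// (6 x 4)^T * (6 x 5) = 4 x 5; CalcInputGrad below arranges this through
+// FoldInitDims (and FoldHeadAndLastDims for the transposed variants).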
+template +class MatMulGradKernel : public framework::OpKernel { + public: + void MatMul(const framework::ExecutionContext& context, + const framework::Tensor& a, bool trans_a, + const framework::Tensor& b, bool trans_b, + framework::Tensor* out) const { + out->mutable_data(context.GetPlace()); + auto blas = math::GetBlas(context); + auto mat_dim_a = math::CreateMatrixDescriptor(a.dims(), 0, trans_a); + auto mat_dim_b = math::CreateMatrixDescriptor(b.dims(), 0, trans_b); + blas.MatMul(a, mat_dim_a, b, mat_dim_b, T(1), out, T(0)); + } + + void CalcInputGrad(const framework::ExecutionContext& context, + const framework::Tensor& a, bool trans_a, + bool is_fold_init_dims_a, const framework::Tensor& b, + bool trans_b, bool is_fold_init_dims_b, + framework::Tensor* out) const { + if (out == nullptr) return; + bool need_combine = (a.dims().size() == 3 || b.dims().size() == 3) && + out->dims().size() == 2; + if (!need_combine) { + MatMul(context, a, trans_a, b, trans_b, out); + } else { + auto& ctx = context.template device_context(); + MatMul(context, is_fold_init_dims_a + ? FoldInitDims(a) + : FoldHeadAndLastDims(ctx, a), + trans_a, is_fold_init_dims_b + ? FoldInitDims(b) + : FoldHeadAndLastDims(ctx, b), + trans_b, out); + } + } + + void Compute(const framework::ExecutionContext& context) const override { + auto x = *context.Input("X"); + auto y = *context.Input("Y"); + auto dout = + *context.Input(framework::GradVarName("Out")); + auto* dx = context.Output(framework::GradVarName("X")); + auto* dy = context.Output(framework::GradVarName("Y")); + bool transpose_x = context.Attr("transpose_X"); + bool transpose_y = context.Attr("transpose_Y"); + + ReshapeXYOutIntoMatrixSequence(&x, &y, &dout, transpose_x, transpose_y); + framework::DDim dx_dims; + if (dx) { + dx_dims = dx->dims(); + if (dx_dims != x.dims()) { + dx->Resize(x.dims()); + } + } + + framework::DDim dy_dims; + if (dy) { + dy_dims = dy->dims(); + if (dy_dims != y.dims()) { + dy->Resize(y.dims()); + } + } -using framework::Tensor; + if (transpose_x && transpose_y) { + CalcInputGrad(context, y, true, true, dout, true, false, dx); + CalcInputGrad(context, dout, true, true, x, true, false, dy); + } else if (transpose_x) { + CalcInputGrad(context, y, false, false, dout, true, false, dx); + CalcInputGrad(context, x, false, false, dout, false, true, dy); + } else if (transpose_y) { + CalcInputGrad(context, dout, false, false, y, false, true, dx); + CalcInputGrad(context, dout, true, true, x, false, true, dy); + } else { + CalcInputGrad(context, dout, false, false, y, true, false, dx); + CalcInputGrad(context, x, true, true, dout, false, true, dy); + } + + if (dx) { + if (dx_dims != x.dims()) { + dx->Resize(dx_dims); + } + } + if (dy) { + if (dy_dims != y.dims()) { + dy->Resize(dy_dims); + } + } + } +}; class MatMulOp : public framework::OperatorWithKernel { public: @@ -36,121 +279,41 @@ class MatMulOp : public framework::OperatorWithKernel { auto dim_x = context->GetInputDim("X"); auto dim_y = context->GetInputDim("Y"); - bool transpose_x = context->Attrs().Get("transpose_X"); - bool transpose_y = context->Attrs().Get("transpose_Y"); - - PADDLE_ENFORCE_GE(dim_x.size(), 1, - "Input tensor X must be at least 1-dimensional."); - PADDLE_ENFORCE_GE(dim_y.size(), 1, - "Input tensor Y must be at least 1-dimensional."); - - std::vector out_dim; - int64_t batch_count = 1; - if (dim_x.size() > 3) { - PADDLE_ENFORCE_EQ( - dim_y.size(), dim_x.size(), - "The dimensions of X and Y must be the same, and both of " - "them should be %d-dimensional.", - 
dim_x.size()); - - // The first rank-2 dimensions are accumulated on the batch_count, and the - // last two dimensions are used for matrix multiplication. - for (int j = 0; j < dim_x.size() - 2; ++j) { - PADDLE_ENFORCE_EQ(dim_y[j], dim_x[j], - "The %d-th dimension of X and Y must be the same.", - j); - out_dim.push_back(dim_x[j]); - batch_count *= dim_x[j]; - } - } - int M = 0, N = 0, KX = 0, KY = 0, batchCountX = 0, batchCountY = 0; - bool remove_initial_dim = false, remove_final_dim = false; - - switch (dim_x.size()) { - case 1: - if (transpose_x) { - M = dim_x[0]; - KX = 1; - } else { - M = 1; - KX = dim_x[0]; - remove_initial_dim = true; - } - break; - case 2: - M = transpose_x ? dim_x[1] : dim_x[0]; - KX = transpose_x ? dim_x[0] : dim_x[1]; - break; - case 3: - batchCountX = dim_x[0]; - M = transpose_x ? dim_x[2] : dim_x[1]; - KX = transpose_x ? dim_x[1] : dim_x[2]; - break; - default: - batchCountX = batch_count; - size_t mat_s = dim_x.size() - 2; - M = transpose_x ? dim_x[mat_s + 1] : dim_x[mat_s]; - KX = transpose_x ? dim_x[mat_s] : dim_x[mat_s + 1]; - break; - } + auto mat_dim_x = + math::CreateMatrixDescriptor(RowMatrixFromVector(dim_x), 0, + context->Attrs().Get("transpose_X")); + auto mat_dim_y = + math::CreateMatrixDescriptor(ColumnMatrixFromVector(dim_y), 0, + context->Attrs().Get("transpose_Y")); - switch (dim_y.size()) { - case 1: - if (transpose_y) { - N = dim_y[0]; - KY = 1; - } else { - N = 1; - KY = dim_y[0]; - remove_final_dim = true; - } - break; - case 2: - KY = transpose_y ? dim_y[1] : dim_y[0]; - N = transpose_y ? dim_y[0] : dim_y[1]; - break; - case 3: - batchCountY = dim_y[0]; - KY = transpose_y ? dim_y[2] : dim_y[1]; - N = transpose_y ? dim_y[1] : dim_y[2]; - break; - default: - batchCountY = batch_count; - size_t mat_s = dim_y.size() - 2; - KY = transpose_y ? dim_y[mat_s + 1] : dim_y[mat_s]; - N = transpose_y ? 
dim_y[mat_s] : dim_y[mat_s + 1]; + PADDLE_ENFORCE_EQ(mat_dim_x.width_, mat_dim_y.height_); + PADDLE_ENFORCE(mat_dim_x.batch_size_ == mat_dim_y.batch_size_ || + mat_dim_x.batch_size_ == 0 || mat_dim_y.batch_size_ == 0); + std::vector dim_out; + if (mat_dim_x.batch_size_ != 0) { + dim_out = framework::vectorize(dim_x); + dim_out[dim_out.size() - 2] = mat_dim_x.height_; + dim_out[dim_out.size() - 1] = mat_dim_y.width_; + } else if (mat_dim_y.batch_size_ != 0) { + dim_out = framework::vectorize(dim_y); + dim_out[dim_out.size() - 2] = mat_dim_x.height_; + dim_out[dim_out.size() - 1] = mat_dim_y.width_; + } else { + dim_out = {mat_dim_x.height_, mat_dim_y.width_}; } - PADDLE_ENFORCE_EQ( - KX, KY, - "First matrix's width must be equal with second matrix's height."); - if (batchCountX && batchCountY) { - PADDLE_ENFORCE_EQ( - batchCountX, batchCountY, - "When Input(X) and Input(Y) are both three dimensional, they " - "must have the same batch dimension."); + if (dim_x.size() == 1 && dim_out[dim_out.size() - 2] == 1) { + std::swap(dim_out[dim_out.size() - 2], dim_out[dim_out.size() - 1]); + dim_out.resize(dim_out.size() - 1); } - int batchCount = std::max(batchCountX, batchCountY); - std::vector dim_out; - if (batchCount) { - if (dim_x.size() > 3) { - dim_out.insert(dim_out.begin(), out_dim.begin(), out_dim.end()); - } else { - dim_out.push_back(batchCount); - } + if (dim_y.size() == 1 && dim_out[dim_out.size() - 1] == 1) { + dim_out.resize(dim_out.size() - 1); } - if (!remove_initial_dim) { - dim_out.push_back(M); - } - if (!remove_final_dim) { - dim_out.push_back(N); - } - if (dim_out.size() == 0) { - // We don't support 0-dimensional Tensors (scalars), so instead - // treat the output as a Tensor of shape (1, ) in this case. - dim_out.push_back(1); + + if (dim_out.empty()) { + dim_out = {1}; } context->SetOutputDim("Out", framework::make_ddim(dim_out)); context->ShareLoD("X", /*->*/ "Out"); @@ -233,15 +396,40 @@ class MatMulOpGrad : public framework::OperatorWithKernel { } }; +class MatMulOpGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto* retv = new framework::OpDesc(); + retv->SetType("matmul_grad"); + retv->SetInput("X", Input("X")); + retv->SetInput("Y", Input("Y")); + retv->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + retv->SetOutput(framework::GradVarName("X"), InputGrad("X")); + retv->SetOutput(framework::GradVarName("Y"), InputGrad("Y")); + retv->SetAttrMap(Attrs()); + return std::unique_ptr(retv); + } +}; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OPERATOR(matmul, ops::MatMulOp, ops::MatMulOpMaker, - paddle::framework::DefaultGradOpDescMaker); + ops::MatMulOpGradMaker); REGISTER_OPERATOR(matmul_grad, ops::MatMulOpGrad); REGISTER_OP_CPU_KERNEL( matmul, ops::MatMulKernel); REGISTER_OP_CPU_KERNEL( matmul_grad, ops::MatMulGradKernel); + +#ifdef PADDLE_WITH_CUDA +REGISTER_OP_CUDA_KERNEL( + matmul, ops::MatMulKernel); +REGISTER_OP_CUDA_KERNEL( + matmul_grad, + ops::MatMulGradKernel); +#endif diff --git a/paddle/fluid/operators/matmul_op.cu.cc b/paddle/fluid/operators/matmul_op.cu.cc deleted file mode 100644 index e021bbe645399e410cde5c3ff7035d4d68c71744..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/matmul_op.cu.cc +++ /dev/null @@ -1,22 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/operators/matmul_op.h" - -namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL( - matmul, ops::MatMulKernel); -REGISTER_OP_CUDA_KERNEL( - matmul_grad, - ops::MatMulGradKernel); diff --git a/paddle/fluid/operators/matmul_op.h b/paddle/fluid/operators/matmul_op.h deleted file mode 100644 index f2e9cfdcdbf93326ae193776a7d5f6a324373603..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/matmul_op.h +++ /dev/null @@ -1,244 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -#include -#include -#include -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/operators/math/matmul.h" - -namespace paddle { -namespace operators { -namespace matmul_detail { - -using Tensor = framework::Tensor; -using DDim = framework::DDim; -using framework::make_ddim; -using framework::vectorize; - -template -class MatMulKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - const Tensor& x = *context.Input("X"); - const Tensor& y = *context.Input("Y"); - Tensor* out = context.Output("Out"); - out->mutable_data(context.GetPlace()); - bool transpose_x = context.Attr("transpose_X"); - bool transpose_y = context.Attr("transpose_Y"); - - math::MatMulFunctor()( - context.template device_context(), x, transpose_x, y, - transpose_y, T(1), out, T(0)); - } -}; - -template -inline Tensor Reshape(const Tensor& input, const DDim& dims) { - Tensor output; - output.ShareDataWith(input); - output.Resize(dims); - return output; -} - -// Reshape a rank-3 tensor from P x M x N to (P * M) x N. -// Identity op if the tensor is not of rank 3. -template -Tensor CombineBatchAndM(const Tensor& input) { - Tensor output; - output.ShareDataWith(input); - auto in_dims = input.dims(); - if (in_dims.size() == 3) { - std::vector out_dims = {in_dims[0] * in_dims[1], in_dims[2]}; - output.Resize(make_ddim(out_dims)); - } - return output; -} - -// Reshape a rank-3 tensor from P x M x N to M x (P * N). -// (Warning: This requires transposing data and writes into new memory.) -// Identity op if the tensor is not of rank 3. 
-template -Tensor CombineBatchAndN(const DeviceContext& context, const Tensor& input) { - Tensor output; - auto in_dims = input.dims(); - if (in_dims.size() == 3) { - output.Resize({in_dims[1], in_dims[0], in_dims[2]}); - output.mutable_data(context.GetPlace()); - std::vector axis = {1, 0, 2}; - math::Transpose trans; - trans(context, input, &output, axis); - std::vector out_dims = {in_dims[1], in_dims[0] * in_dims[2]}; - output.Resize({in_dims[1], in_dims[0] * in_dims[2]}); - } else { - output.ShareDataWith(input); - } - return output; -} - -// Using dimensional constraints on matrix multiplication, it is -// straight-forward to check the following table for when X and Y -// are both matrices. -// -// transpose_X | False | True | False | True -// transpose_Y | False | False | True | True -// -----------+----------+----------+----------+----------- -// dX = | dOut Y^T | Y dOut^T | dOut Y | Y^T dOut^T -// dY = | X^T dOut | X dOut | dOut^T X | dOut^T X^T -// -// When X is a vector of size K, we treat it instead as a matrix of shape -// (1, K). Similarly, when Y is a vector of size K, we treat it instead as -// a matrix of shape (K, 1). -// -// When X and Y are both 3-dimensional tensors, then the first dimension -// the batch dimension can be ignored and the exact same formulas apply -// as for two matrices. -// -// Finally, when, e.g., X is a 3-dimensional tensor but Y is a matrix, we end -// up with formulas like -// -// dY_{ij} = \sum_{p, m} X_{pmi} dOut_{pmj} -// -// To handle this sort of scenario, we reshape X : P x M x K, dOut: P x M x N -// to X: (P * M) x K, dOut: (P * M) x N. -template -class MatMulGradKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - const Tensor& x = *context.Input("X"); - const Tensor& y = *context.Input("Y"); - const Tensor& dout = *context.Input(framework::GradVarName("Out")); - Tensor* dx = context.Output(framework::GradVarName("X")); - Tensor* dy = context.Output(framework::GradVarName("Y")); - bool transpose_x = context.Attr("transpose_X"); - bool transpose_y = context.Attr("transpose_Y"); - - std::vector x_dims = vectorize(x.dims()); - std::vector y_dims = vectorize(y.dims()); - - // If X is a vector, reshape it to a matrix. - if (x_dims.size() == 1) { - x_dims.insert(x_dims.begin(), 1); - } - - // If Y is a vector, reshape it to a matrix. - if (y_dims.size() == 1) { - y_dims.push_back(1); - } - - int batch_count = 0; - // The first rank-2 dimensions are accumulated on the batch_count, and the - // last two dimensions are used for matrix multiplication. - if (x_dims.size() > 3) { - batch_count = accumulate(x_dims.begin(), x_dims.end() - 2, 1, - std::multiplies()); - } - // Fix the dOut dimensions. - int M = 0, N = 0, batchCountX = 0, batchCountY = 0; - - switch (x_dims.size()) { - case 2: - M = transpose_x ? x_dims[1] : x_dims[0]; - break; - case 3: - batchCountX = x_dims[0]; - M = transpose_x ? x_dims[2] : x_dims[1]; - break; - default: - batchCountX = batch_count; - size_t mat_s = x_dims.size() - 2; - M = transpose_x ? x_dims[mat_s + 1] : x_dims[mat_s]; - } - - switch (y_dims.size()) { - case 2: - N = transpose_y ? y_dims[0] : y_dims[1]; - break; - case 3: - batchCountY = y_dims[0]; - N = transpose_y ? y_dims[1] : y_dims[2]; - break; - default: - batchCountY = batch_count; - size_t mat_s = y_dims.size() - 2; - N = transpose_y ? 
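The gradient table and the identity `dY_{ij} = \sum_{p,m} X_{pmi} dOut_{pmj}` quoted in this deleted header can be checked numerically; a small NumPy verification of the no-transpose case with a batched `X` and a plain matrix `Y`, independent of any Paddle code:

```python
import numpy as np

P, M, K, N = 2, 3, 4, 5
X = np.random.rand(P, M, K)
Y = np.random.rand(K, N)
dOut = np.random.rand(P, M, N)           # upstream gradient of Out = X . Y

# dY_{ij} = sum_{p,m} X_{pmi} * dOut_{pmj}
dY_direct = np.einsum('pmi,pmj->ij', X, dOut)

# The same result after collapsing batch and M: X -> (P*M) x K, dOut -> (P*M) x N
dY_flat = np.matmul(X.reshape(P * M, K).T, dOut.reshape(P * M, N))
print(np.allclose(dY_direct, dY_flat))   # True

# dX = dOut Y^T from the table, applied batch-wise
dX = np.matmul(dOut, Y.T)
print(dX.shape)                          # (2, 3, 4)
```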
y_dims[mat_s] : y_dims[mat_s + 1]; - } - if (batchCountX && batchCountY) { - PADDLE_ENFORCE_EQ( - batchCountX, batchCountY, - "When Input(X) and Input(Y) are both three dimensional, they " - "must have the same batch dimension."); - } - int batchCount = std::max(batchCountX, batchCountY); - std::vector dout_dims = {M, N}; - if (batchCount) { - if (x_dims.size() > 3) { - dout_dims.insert(dout_dims.begin(), x_dims.begin(), x_dims.end() - 2); - } else { - dout_dims.insert(dout_dims.begin(), batchCount); - } - } - Tensor X = Reshape(x, make_ddim(x_dims)); - Tensor Y = Reshape(y, make_ddim(y_dims)); - Tensor dOut = Reshape(dout, make_ddim(dout_dims)); - - auto& dev_ctx = context.template device_context(); - if (dx) { - dx->mutable_data(context.GetPlace()); - const Tensor& dOut_for_dX = - (x_dims.size() == 2 && y_dims.size() == 3) - ? CombineBatchAndN(dev_ctx, dOut) - : dOut; - if (x_dims.size() == 2 && y_dims.size() == 3) { - Y = transpose_y ? CombineBatchAndM(Y) - : CombineBatchAndN(dev_ctx, Y); - } - if (transpose_x) { - math::MatMulFunctor()( - dev_ctx, Y, transpose_y, dOut_for_dX, transpose_x, T(1), dx, T(0)); - } else { - math::MatMulFunctor()( - dev_ctx, dOut_for_dX, transpose_x, Y, !transpose_y, T(1), dx, T(0)); - } - } - - if (dy) { - dy->mutable_data(context.GetPlace()); - const Tensor& dOut_for_dY = (y_dims.size() == 2 && x_dims.size() == 3) - ? CombineBatchAndM(dOut) - : dOut; - if (y_dims.size() == 2 && x_dims.size() == 3) { - X = transpose_x ? CombineBatchAndN(dev_ctx, X) - : CombineBatchAndM(X); - dOut = CombineBatchAndM(dOut); - } - if (transpose_y) { - math::MatMulFunctor()( - dev_ctx, dOut_for_dY, transpose_y, X, transpose_x, T(1), dy, T(0)); - } else { - math::MatMulFunctor()( - dev_ctx, X, !transpose_x, dOut_for_dY, transpose_y, T(1), dy, T(0)); - } - } - } -}; -} // namespace matmul_detail - -using matmul_detail::MatMulKernel; -using matmul_detail::MatMulGradKernel; - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/save_combine_op.cc b/paddle/fluid/operators/save_combine_op.cc index 94703393bfa53124d16e34ae4373773eece5f11f..36e7522948eddd18e19707d8a96ec7d4e637c28f 100644 --- a/paddle/fluid/operators/save_combine_op.cc +++ b/paddle/fluid/operators/save_combine_op.cc @@ -18,6 +18,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/data_type_transform.h" #include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" @@ -69,6 +70,7 @@ class SaveCombineOp : public framework::OperatorBase { const platform::Place &place) const override { auto filename = Attr("file_path"); auto overwrite = Attr("overwrite"); + auto save_as_fp16 = Attr("save_as_fp16"); bool is_present = FileExists(filename); if (is_present && !overwrite) { @@ -100,8 +102,24 @@ class SaveCombineOp : public framework::OperatorBase { inp_var_names[i]); auto &tensor = var->Get(); - // Serialize tensor - framework::SerializeToStream(fout, tensor, dev_ctx); + // Serialize tensors one by one + + // Check types to see if a fp16 transformation is required + auto in_dtype = framework::ToDataType(tensor.type()); + auto out_dtype = + save_as_fp16 ? 
framework::proto::VarType::FP16 : in_dtype; + + if (in_dtype != out_dtype) { + auto in_kernel_type = framework::OpKernelType(in_dtype, place); + auto out_kernel_type = framework::OpKernelType(out_dtype, place); + framework::LoDTensor out; + // copy LoD info to the new tensor + out.set_lod(tensor.lod()); + framework::TransDataType(in_kernel_type, out_kernel_type, tensor, &out); + framework::SerializeToStream(fout, out, dev_ctx); + } else { + framework::SerializeToStream(fout, tensor, dev_ctx); + } } fout.close(); } @@ -125,6 +143,12 @@ to a file on disk. "(boolean, default true)" "Overwrite the output file if it exists.") .SetDefault(true); + AddAttr("save_as_fp16", + "(boolean, default false)" + "If true, the tensor will be converted to float16 data " + "type and then saved. Otherwise, the tensor will be " + "directly saved without data type conversion.") + .SetDefault(false); AddAttr( "file_path", "(string)" diff --git a/paddle/fluid/operators/save_load_combine_op_test.cc b/paddle/fluid/operators/save_load_combine_op_test.cc index 2773c32a0a10269e28c24e12527711e3c5b8f869..47618c51d98eb9f58988f82c0aee0083565d81a6 100644 --- a/paddle/fluid/operators/save_load_combine_op_test.cc +++ b/paddle/fluid/operators/save_load_combine_op_test.cc @@ -17,15 +17,17 @@ limitations under the License. */ #include #include "gtest/gtest.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/float16.h" USE_NO_KERNEL_OP(save_combine); USE_NO_KERNEL_OP(load_combine); -int* CreateForSaveCombineOp(int x, int y, const std::vector& lod_info, - std::string var_name, - const paddle::platform::CPUPlace& place, - paddle::framework::Scope* scope, - paddle::framework::LoD* expect_lod) { +template +T* CreateForSaveCombineOp(int x, int y, const std::vector& lod_info, + std::string var_name, + const paddle::platform::CPUPlace& place, + paddle::framework::Scope* scope, + paddle::framework::LoD* expect_lod) { auto var = scope->Var(var_name); auto tensor = var->GetMutable(); tensor->Resize({x, y}); @@ -34,9 +36,10 @@ int* CreateForSaveCombineOp(int x, int y, const std::vector& lod_info, (*expect_lod)[0].push_back(lod_info[i]); } tensor->set_lod(*expect_lod); - int* expect = tensor->mutable_data(place); + T* expect = tensor->mutable_data(place); for (int64_t i = 0; i < tensor->numel(); ++i) { - expect[i] = static_cast(i); + expect[i] = static_cast( + static_cast(i)); // For FP16, we intend to do float(float16(i)) } return expect; } @@ -48,18 +51,20 @@ paddle::framework::LoDTensor* GeneratePlaceholderBeforeLoad( return target; } -int* GetValuesAfterLoadCombineOp(paddle::framework::LoDTensor* target, - const paddle::framework::Scope& scope, - paddle::framework::LoD* actual_lod) { - int* actual = target->data(); +template +T* GetValuesAfterLoadCombineOp(paddle::framework::LoDTensor* target, + const paddle::framework::Scope& scope, + paddle::framework::LoD* actual_lod) { + T* actual = target->data(); *actual_lod = target->lod(); return actual; } -void CheckValues(int* expect, int* actual, paddle::framework::LoD expect_lod, - paddle::framework::LoD actual_lod, const int& numel) { - for (int64_t i = 0; i < numel; ++i) { - EXPECT_EQ(expect[i], actual[i]); +template +void CheckValues(T* expect, U* actual, const paddle::framework::LoD& expect_lod, + const paddle::framework::LoD& actual_lod, const int& numel) { + for (int i = 0; i < numel; ++i) { + EXPECT_EQ(expect[i], static_cast(actual[i])); } EXPECT_EQ(expect_lod.size(), actual_lod.size()); for (size_t i = 0; i < expect_lod.size(); ++i) { @@ -78,26 +83,26 
@@ TEST(SaveLoadCombineOp, CPU) { std::vector lod1 = {0, 1, 2, 3, 10}; int numel1 = 100; paddle::framework::LoD expect_lod1; - int* expect1 = CreateForSaveCombineOp(10, 10, lod1, "test_var1", place, - &scope, &expect_lod1); + int* expect1 = CreateForSaveCombineOp(10, 10, lod1, "test_var1", + place, &scope, &expect_lod1); std::vector lod2 = {0, 2, 5, 10}; int numel2 = 200; paddle::framework::LoD expect_lod2; - int* expect2 = CreateForSaveCombineOp(10, 20, lod2, "test_var2", place, - &scope, &expect_lod2); + int* expect2 = CreateForSaveCombineOp(10, 20, lod2, "test_var2", + place, &scope, &expect_lod2); std::vector lod3 = {0, 2, 3, 20}; int numel3 = 4000; paddle::framework::LoD expect_lod3; - int* expect3 = CreateForSaveCombineOp(20, 200, lod3, "test_var3", place, - &scope, &expect_lod3); + int* expect3 = CreateForSaveCombineOp(20, 200, lod3, "test_var3", + place, &scope, &expect_lod3); std::vector lod4 = {0, 1, 20}; int numel4 = 1000; paddle::framework::LoD expect_lod4; - int* expect4 = CreateForSaveCombineOp(20, 50, lod4, "test_var4", place, - &scope, &expect_lod4); + int* expect4 = CreateForSaveCombineOp(20, 50, lod4, "test_var4", + place, &scope, &expect_lod4); // Set attributes std::string filename = "check_tensor.ls"; @@ -123,15 +128,92 @@ TEST(SaveLoadCombineOp, CPU) { load_combine_op->Run(scope, place); paddle::framework::LoD actual_lod1, actual_lod2, actual_lod3, actual_lod4; - int* actual1 = GetValuesAfterLoadCombineOp(target1, scope, &actual_lod1); - int* actual2 = GetValuesAfterLoadCombineOp(target2, scope, &actual_lod2); - int* actual3 = GetValuesAfterLoadCombineOp(target3, scope, &actual_lod3); - int* actual4 = GetValuesAfterLoadCombineOp(target4, scope, &actual_lod4); - - CheckValues(expect1, actual1, expect_lod1, actual_lod1, numel1); - CheckValues(expect2, actual2, expect_lod2, actual_lod2, numel2); - CheckValues(expect3, actual3, expect_lod3, actual_lod3, numel3); - CheckValues(expect4, actual4, expect_lod4, actual_lod4, numel4); + int* actual1 = GetValuesAfterLoadCombineOp(target1, scope, &actual_lod1); + int* actual2 = GetValuesAfterLoadCombineOp(target2, scope, &actual_lod2); + int* actual3 = GetValuesAfterLoadCombineOp(target3, scope, &actual_lod3); + int* actual4 = GetValuesAfterLoadCombineOp(target4, scope, &actual_lod4); + + CheckValues(expect1, actual1, expect_lod1, actual_lod1, numel1); + CheckValues(expect2, actual2, expect_lod2, actual_lod2, numel2); + CheckValues(expect3, actual3, expect_lod3, actual_lod3, numel3); + CheckValues(expect4, actual4, expect_lod4, actual_lod4, numel4); +} + +// FP16 version of SaveLoadCombineOp Test +TEST(SaveLoadCombineFP16Op, CPU) { + paddle::framework::Scope scope; + paddle::platform::CPUPlace place; + + std::vector lod1 = {0, 1, 2, 3, 10}; + int numel1 = 100; + paddle::framework::LoD expect_lod1; + float* expect1 = CreateForSaveCombineOp( + 10, 10, lod1, "test_var1", place, &scope, &expect_lod1); + + std::vector lod2 = {0, 2, 5, 10}; + int numel2 = 200; + paddle::framework::LoD expect_lod2; + float* expect2 = CreateForSaveCombineOp( + 10, 20, lod2, "test_var2", place, &scope, &expect_lod2); + + std::vector lod3 = {0, 20}; + int numel3 = 4000; + paddle::framework::LoD expect_lod3; + float* expect3 = CreateForSaveCombineOp( + 20, 200, lod3, "test_var3", place, &scope, &expect_lod3); + + std::vector lod4 = {0, 1, 20}; + int numel4 = 1000; + paddle::framework::LoD expect_lod4; + float* expect4 = CreateForSaveCombineOp( + 20, 50, lod4, "test_var4", place, &scope, &expect_lod4); + + // Set attributes + std::string filename = 
"check_tensor_fp16.ls"; + paddle::framework::AttributeMap attrs; + attrs.insert({"file_path", std::string(filename)}); + attrs.insert({"save_as_fp16", true}); + + // Run the save_combine_op + auto save_combine_op = paddle::framework::OpRegistry::CreateOp( + "save_combine", + {{"X", {"test_var1", "test_var2", "test_var3", "test_var4"}}}, {}, attrs); + save_combine_op->Run(scope, place); + + // Set up output vars + auto target1 = GeneratePlaceholderBeforeLoad("out_var1", &scope); + auto target2 = GeneratePlaceholderBeforeLoad("out_var2", &scope); + auto target3 = GeneratePlaceholderBeforeLoad("out_var3", &scope); + auto target4 = GeneratePlaceholderBeforeLoad("out_var4", &scope); + + // Run the load_combine_op + auto load_combine_op = paddle::framework::OpRegistry::CreateOp( + "load_combine", {}, + {{"Out", {"out_var1", "out_var2", "out_var3", "out_var4"}}}, attrs); + load_combine_op->Run(scope, place); + + paddle::framework::LoD actual_lod1, actual_lod2, actual_lod3, actual_lod4; + paddle::platform::float16* actual1 = + GetValuesAfterLoadCombineOp(target1, scope, + &actual_lod1); + paddle::platform::float16* actual2 = + GetValuesAfterLoadCombineOp(target2, scope, + &actual_lod2); + paddle::platform::float16* actual3 = + GetValuesAfterLoadCombineOp(target3, scope, + &actual_lod3); + paddle::platform::float16* actual4 = + GetValuesAfterLoadCombineOp(target4, scope, + &actual_lod4); + + CheckValues(expect1, actual1, expect_lod1, + actual_lod1, numel1); + CheckValues(expect2, actual2, expect_lod2, + actual_lod2, numel2); + CheckValues(expect3, actual3, expect_lod3, + actual_lod3, numel3); + CheckValues(expect4, actual4, expect_lod4, + actual_lod4, numel4); } // Test with original SaveLoadTest @@ -141,7 +223,7 @@ TEST(SaveLoadTestWithCombineOp, CPU) { auto var = scope.Var("test_var"); auto tensor = var->GetMutable(); - tensor->Resize({3, 10}); + tensor->Resize({3, 4000}); paddle::framework::LoD expect_lod; expect_lod.resize(1); expect_lod[0].push_back(0); diff --git a/paddle/fluid/operators/save_load_op_test.cc b/paddle/fluid/operators/save_load_op_test.cc index 74385ee47543e3f4887081c2225212996d3df3f1..c4fcc61af4b75e6dc7d5c31e20c5fff358637af5 100644 --- a/paddle/fluid/operators/save_load_op_test.cc +++ b/paddle/fluid/operators/save_load_op_test.cc @@ -63,14 +63,21 @@ TEST(SaveLoadOp, CPU) { } } -TEST(SaveLoadFP16Op, CPU) { +TEST(SaveFP16Op, CPU) { paddle::framework::Scope scope; paddle::platform::CPUPlace place; auto var = scope.Var("test_var"); auto tensor = var->GetMutable(); tensor->Resize({3, 10}); + paddle::framework::LoD expect_lod; + expect_lod.resize(1); + expect_lod[0].push_back(0); + expect_lod[0].push_back(1); + expect_lod[0].push_back(2); + expect_lod[0].push_back(3); + tensor->set_lod(expect_lod); float* expect = tensor->mutable_data(place); for (int64_t i = 0; i < tensor->numel(); ++i) { expect[i] = static_cast(paddle::platform::float16(i)); @@ -93,4 +100,60 @@ TEST(SaveLoadFP16Op, CPU) { for (int64_t i = 0; i < tensor->numel(); ++i) { EXPECT_EQ(expect[i], static_cast(actual[i])); } + auto& actual_lod = target->lod(); + EXPECT_EQ(expect_lod.size(), actual_lod.size()); + for (size_t i = 0; i < expect_lod.size(); ++i) { + for (size_t j = 0; j < expect_lod[i].size(); ++j) { + EXPECT_EQ(expect_lod[i][j], actual_lod[i][j]); + } + } +} + +TEST(LoadFP16Op, CPU) { + paddle::framework::Scope scope; + paddle::platform::CPUPlace place; + + auto var = scope.Var("test_var"); + auto tensor = var->GetMutable(); + tensor->Resize({3, 10}); + + paddle::framework::LoD expect_lod; + 
expect_lod.resize(1); + expect_lod[0].push_back(0); + expect_lod[0].push_back(1); + expect_lod[0].push_back(2); + expect_lod[0].push_back(3); + + tensor->set_lod(expect_lod); + float* expect = tensor->mutable_data(place); + for (int64_t i = 0; i < tensor->numel(); ++i) { + expect[i] = static_cast(paddle::platform::float16(i)); + } + + paddle::framework::AttributeMap attrs; + attrs.insert({"file_path", std::string("tensor.save")}); + attrs.insert({"load_as_fp16", true}); + + auto save_op = paddle::framework::OpRegistry::CreateOp( + "save", {{"X", {"test_var"}}}, {}, attrs); + save_op->Run(scope, place); + + auto load_var = scope.Var("out_var"); + auto load_op = paddle::framework::OpRegistry::CreateOp( + "load", {}, {{"Out", {"out_var"}}}, attrs); + load_op->Run(scope, place); + + auto target = load_var->Get(); + paddle::platform::float16* actual = target.data(); + for (int64_t i = 0; i < tensor->numel(); ++i) { + EXPECT_EQ(expect[i], static_cast(actual[i])); + } + + auto& actual_lod = target.lod(); + EXPECT_EQ(expect_lod.size(), actual_lod.size()); + for (size_t i = 0; i < expect_lod.size(); ++i) { + for (size_t j = 0; j < expect_lod[i].size(); ++j) { + EXPECT_EQ(expect_lod[i][j], actual_lod[i][j]); + } + } } diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 1595cc9e8aad4d143ca62f84f812dbc791dc1d26..5bef232cd8fc44ded89ac56a790c8db0955b390a 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -398,7 +398,7 @@ function gen_dockerfile() { cat <> /paddle/build/Dockerfile <> ${PADDLE_ROOT}/build/Dockerfile <> ${PADDLE_ROOT}/build/Dockerfile <> ${PADDLE_ROOT}/build/Dockerfile < Variable(learning_rate) self._learning_rate_map = dict() @@ -77,7 +79,7 @@ class Optimizer(object): name=unique_name.generate("learning_rate"), shape=[1], value=float(self._learning_rate), - dtype='float32', + dtype='float32' if self._dtype == None else self._dtype, persistable=True) def global_learning_rate(self, program=None): @@ -200,6 +202,7 @@ class Optimizer(object): # Create any accumulators program = loss.block.program + self._dtype = loss.dtype with program_guard(program, startup_program): global_block = framework.default_main_program().global_block() start = len(global_block.ops) @@ -391,7 +394,7 @@ class AdamOptimizer(Optimizer): beta_shape = [1] self._beta1_pow_acc = self.helper.create_global_variable( name=unique_name.generate('beta1_pow_acc'), - dtype='float32', + dtype='float32' if self._dtype == None else self._dtype, shape=beta_shape, lod_level=0, persistable=True) @@ -400,7 +403,7 @@ class AdamOptimizer(Optimizer): self._beta2_pow_acc = self.helper.create_global_variable( name=unique_name.generate('beta2_pow_acc'), - dtype='float32', + dtype='float32' if self._dtype == None else self._dtype, shape=beta_shape, lod_level=0, persistable=True) @@ -493,7 +496,7 @@ class AdamaxOptimizer(Optimizer): beta_shape = [1] self._beta1_pow_acc = self.helper.create_global_variable( name=unique_name.generate('beta1_pow_acc'), - dtype='float32', + dtype='float32' if self._dtype == None else self._dtype, shape=beta_shape, lod_level=0, persistable=True) @@ -900,8 +903,10 @@ class ModelAverage(Optimizer): # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates) tmp = layers.sum(x=[num_accumulates, old_num_accumulates]) sum = layers.sum(x=[sum_1, sum_2, sum_3]) - tmp = layers.cast(x=tmp, dtype='float32') - sum = layers.cast(x=sum, dtype='float32') + tmp = layers.cast( + x=tmp, dtype='float32' if self._dtype == None else self._dtype) + sum = 
layers.cast( + x=sum, dtype='float32' if self._dtype == None else self._dtype) layers.elementwise_div(x=sum, y=tmp, out=param) def _add_average_restore_op(self, block, param_grad): diff --git a/python/paddle/fluid/tests/book/image_classification/notest_image_classification_resnet.py b/python/paddle/fluid/tests/book/high-level-api/image_classification/notest_image_classification_resnet.py similarity index 100% rename from python/paddle/fluid/tests/book/image_classification/notest_image_classification_resnet.py rename to python/paddle/fluid/tests/book/high-level-api/image_classification/notest_image_classification_resnet.py diff --git a/python/paddle/fluid/tests/book/image_classification/notest_image_classification_vgg.py b/python/paddle/fluid/tests/book/high-level-api/image_classification/notest_image_classification_vgg.py similarity index 100% rename from python/paddle/fluid/tests/book/image_classification/notest_image_classification_vgg.py rename to python/paddle/fluid/tests/book/high-level-api/image_classification/notest_image_classification_vgg.py diff --git a/python/paddle/fluid/tests/book/label_semantic_roles/no_test_label_semantic_roles.py b/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/no_test_label_semantic_roles.py similarity index 100% rename from python/paddle/fluid/tests/book/label_semantic_roles/no_test_label_semantic_roles.py rename to python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/no_test_label_semantic_roles.py diff --git a/python/paddle/fluid/tests/book/notest_recognize_digits/notest_recognize_digits_conv.py b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/notest_recognize_digits_conv.py similarity index 100% rename from python/paddle/fluid/tests/book/notest_recognize_digits/notest_recognize_digits_conv.py rename to python/paddle/fluid/tests/book/high-level-api/recognize_digits/notest_recognize_digits_conv.py diff --git a/python/paddle/fluid/tests/book/notest_recognize_digits/notest_recognize_digits_mlp.py b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/notest_recognize_digits_mlp.py similarity index 100% rename from python/paddle/fluid/tests/book/notest_recognize_digits/notest_recognize_digits_mlp.py rename to python/paddle/fluid/tests/book/high-level-api/recognize_digits/notest_recognize_digits_mlp.py diff --git a/python/paddle/fluid/tests/book/understand_sentiment/notest_understand_sentiment_stacked_lstm.py b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/notest_understand_sentiment_stacked_lstm.py similarity index 100% rename from python/paddle/fluid/tests/book/understand_sentiment/notest_understand_sentiment_stacked_lstm.py rename to python/paddle/fluid/tests/book/high-level-api/understand_sentiment/notest_understand_sentiment_stacked_lstm.py diff --git a/python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py b/python/paddle/fluid/tests/book/high-level-api/word2vec/no_test_word2vec_new_api.py similarity index 91% rename from python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py rename to python/paddle/fluid/tests/book/high-level-api/word2vec/no_test_word2vec_new_api.py index 35e163dc9df5a35ee5774b6b157366c4eabcb0f7..93f7757a66ea9b217c7831c7263936ece0aa8f18 100644 --- a/python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py +++ b/python/paddle/fluid/tests/book/high-level-api/word2vec/no_test_word2vec_new_api.py @@ -80,8 +80,11 @@ def inference_program(is_sparse): def train_program(is_sparse): - next_word = fluid.layers.data(name='nextw', 
shape=[1], dtype='int64') + # The declaration of 'next_word' must be after the invoking of inference_program, + # or the data input order of train program would be [next_word, firstw, secondw, + # thirdw, forthw], which is not correct. predict_word = inference_program(is_sparse) + next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64') cost = fluid.layers.cross_entropy(input=predict_word, label=next_word) avg_cost = fluid.layers.mean(cost) return avg_cost @@ -90,14 +93,17 @@ def train_program(is_sparse): def train(use_cuda, is_sparse, save_path): train_reader = paddle.batch( paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) + test_reader = paddle.batch( + paddle.dataset.imikolov.test(word_dict, N), BATCH_SIZE) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() def event_handler(event): - print type(event) + # print type(event) if isinstance(event, fluid.EndEpochEvent): - avg_cost = trainer.test(reader=paddle.dataset.imikolov.test( - word_dict, N)) + outs = trainer.test(reader=test_reader) + avg_cost = outs[0] + print("loss= ", avg_cost) if avg_cost < 5.0: trainer.save_params(save_path) diff --git a/python/paddle/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/fluid/tests/book/test_label_semantic_roles.py index 50ef29c4572f1b12fe9793bbf037cd7fe71a9e53..0faba33032d5dfc0b751a5191e7b2ae0c1f172bf 100644 --- a/python/paddle/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/fluid/tests/book/test_label_semantic_roles.py @@ -36,7 +36,7 @@ depth = 8 mix_hidden_lr = 1e-3 IS_SPARSE = True -PASS_NUM = 100 +PASS_NUM = 10 BATCH_SIZE = 10 embedding_name = 'emb' diff --git a/python/paddle/fluid/tests/unittests/test_matmul_op.py b/python/paddle/fluid/tests/unittests/test_matmul_op.py index 44ac4683891ffd3141a126740f4fddb47550e183..cae2c8fa87d9857de8f26cf4962d9370eca66243 100644 --- a/python/paddle/fluid/tests/unittests/test_matmul_op.py +++ b/python/paddle/fluid/tests/unittests/test_matmul_op.py @@ -111,21 +111,24 @@ class Generator(object): # Generate test cases for all possibilities -for dim_X in [1, 2, 3]: - for dim_Y in [1, 2, 3]: - for transpose_X in [False, True]: - for transpose_Y in [False, True]: - test_name = ( - 'TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format( - dim_X, dim_Y, transpose_X, transpose_Y)) - shape_X, shape_Y = generate_compatible_shapes( - dim_X, dim_Y, transpose_X, transpose_Y) - globals()[test_name] = type(test_name, (Generator, OpTest), { - 'shape_X': shape_X, - 'shape_Y': shape_Y, - 'transpose_X': transpose_X, - 'transpose_Y': transpose_Y, - }) +def inject_test(dim_x, dim_y, trans_x, trans_y): + test_name = ('TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format( + dim_x, dim_y, trans_x, trans_y)) + shape_x, shape_y = generate_compatible_shapes(dim_x, dim_y, trans_x, + trans_y) + globals()[test_name] = type(test_name, (Generator, OpTest), { + 'shape_X': shape_x, + 'shape_Y': shape_y, + 'transpose_X': trans_x, + 'transpose_Y': trans_y, + }) + + +for dim_X in (1, 2, 3): + for dim_Y in (1, 2, 3): + for transose_x in (False, True): + for transose_y in (False, True): + inject_test(dim_X, dim_Y, transose_x, transose_y) # Test case n-dim @@ -149,7 +152,7 @@ def generate_compatible_shapes(dim, transpose_X, transpose_Y): return shape_X, shape_Y -# Test case n-dim +# # Test case n-dim for dim in [4]: for transpose_X in [False, True]: for transpose_Y in [False, True]: diff --git a/python/paddle/fluid/tests/unittests/test_network_with_dtype.py b/python/paddle/fluid/tests/unittests/test_network_with_dtype.py new file 
mode 100644 index 0000000000000000000000000000000000000000..baafcdbb80238385752183ee0a8ff96a5da4659c --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_network_with_dtype.py @@ -0,0 +1,74 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import paddle +import paddle.fluid as fluid +import paddle.fluid.core as core +from paddle.fluid.executor import Executor + +BATCH_SIZE = 20 + + +class TestNetWithDtype(unittest.TestCase): + def setUp(self): + self.dtype = "float64" + self.init_dtype() + self.x = fluid.layers.data(name='x', shape=[13], dtype=self.dtype) + self.y = fluid.layers.data(name='y', shape=[1], dtype=self.dtype) + y_predict = fluid.layers.fc(input=self.x, size=1, act=None) + + cost = fluid.layers.square_error_cost(input=y_predict, label=self.y) + avg_cost = fluid.layers.mean(cost) + self.fetch_list = [avg_cost] + + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) + sgd_optimizer.minimize(avg_cost) + + def run_net_on_place(self, place): + train_reader = paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=BATCH_SIZE) + feeder = fluid.DataFeeder(place=place, feed_list=[self.x, self.y]) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + for data in train_reader(): + exe.run(fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=self.fetch_list) + # the main program is runable, the datatype is fully supported + break + + def init_dtype(self): + pass + + def test_cpu(self): + place = fluid.CPUPlace() + self.run_net_on_place(place) + + def test_gpu(self): + if not core.is_compiled_with_cuda(): + return + place = fluid.CUDAPlace(0) + self.run_net_on_place(place) + + +# TODO(dzhwinter): make sure the fp16 is runable +# class TestFloat16(SimpleNet): +# def init_dtype(self): +# self.dtype = "float16" + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/trainer.py b/python/paddle/fluid/trainer.py index d44cb16bfb1545fc840d1a38155ec407afd4473d..30b58b465ef2a7945ed87ce69397a050fc337623 100644 --- a/python/paddle/fluid/trainer.py +++ b/python/paddle/fluid/trainer.py @@ -75,11 +75,15 @@ class Trainer(object): self.train_program = framework.Program() with framework.program_guard(self.train_program, self.startup_program): - loss = program_func() + program_func_outs = program_func() + self.test_outputs = program_func_outs if isinstance( + program_func_outs, list) else [program_func_outs] + self.test_program = self.train_program.clone() if not isinstance(optimizer, opt_module.Optimizer): raise TypeError( "The optimizer should be an instance of Optimizer") - + # The fisrt element of program_func_outs is loss. 
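With this change the program function handed to `Trainer` may return a list whose first element is the loss; the un-optimized program is cloned as `test_program`, and `test()` averages every fetched output over the reader. A hedged usage sketch, assuming the constructor takes the program function first and the optimizer second as in the surrounding code, and using a toy regression model plus the `uci_housing` readers as placeholders:

```python
import paddle
import paddle.fluid as fluid

def train_program():
    # toy regression model; the first returned value must be the loss
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1)
    avg_cost = fluid.layers.mean(
        fluid.layers.square_error_cost(input=y_predict, label=y))
    return [avg_cost]                    # could also be [avg_cost, some_metric]

trainer = fluid.Trainer(
    train_program,                       # program func; loss comes first
    fluid.optimizer.SGD(learning_rate=0.001),
    place=fluid.CPUPlace())

test_reader = paddle.batch(paddle.dataset.uci_housing.test(), batch_size=20)

def event_handler(event):
    if isinstance(event, fluid.EndEpochEvent):
        # test() returns one averaged value per output of train_program()
        avg_loss = trainer.test(reader=test_reader, feed_order=['x', 'y'])[0]
        print("test loss: %f" % avg_loss)

trainer.train(
    num_epochs=1,
    event_handler=event_handler,
    reader=paddle.batch(paddle.dataset.uci_housing.train(), batch_size=20),
    feed_order=['x', 'y'])
```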
+ loss = self.test_outputs[0] optimize_ops, params_grads = optimizer.minimize(loss) self.place = Trainer._check_and_get_place(place) @@ -168,8 +172,17 @@ class Trainer(object): self._train_by_executor(num_epochs, event_handler, reader, feed_order) - def test(self, reader): - pass + def test(self, reader, feed_order=None): + """ + Test the model on given test data + + Args: + reader: The reader that yields test data. + feed_order: Feeding order of reader. None will following the defining + order in program + """ + + return self._test_by_executor(reader, feed_order, self.test_outputs) def save_params(self, param_path): # reference: save_persistables in io.py @@ -225,22 +238,10 @@ class Trainer(object): """ with self._prog_and_scope_guard(): - exe = executor.Executor(self.place) - if feed_order is None: - feed_var_list = [ - var - for var in self.train_program.global_block( - ).vars.itervalues() - if hasattr(var, 'is_data') and var.is_data - ] - else: - feed_var_list = [ - self.train_program.global_block().var(var_name) - for var_name in feed_order - ] - + feed_var_list = build_feed_var_list(self.train_program, feed_order) feeder = data_feeder.DataFeeder( feed_list=feed_var_list, place=self.place) + exe = executor.Executor(self.place) for epoch_id in range(num_epochs): event_handler(BeginEpochEvent(epoch_id)) for step_id, data in enumerate(reader()): @@ -248,3 +249,48 @@ class Trainer(object): exe.run(feed=feeder.feed(data), fetch_list=[]) event_handler(EndStepEvent(epoch_id, step_id)) event_handler(EndEpochEvent(epoch_id)) + + def _test_by_executor(self, reader, feed_order, fetch_list): + with executor.scope_guard(self.scope): + feed_var_list = build_feed_var_list(self.test_program, feed_order) + feeder = data_feeder.DataFeeder( + feed_list=feed_var_list, place=self.place) + exe = executor.Executor(self.place) + accumulated = len(fetch_list) * [0] + count = 0 + for data in reader(): + outs = exe.run(program=self.test_program, + feed=feeder.feed(data), + fetch_list=fetch_list) + accumulated = [x[0] + x[1][0] for x in zip(accumulated, outs)] + count += 1 + + return [x / count for x in accumulated] + + +def build_feed_var_list(program, feed_order): + if not isinstance(program, framework.Program): + raise TypeError("The 'program' should be an object of Program") + + if feed_order is None: + feed_var_list = [ + var for var in program.global_block().vars.itervalues() + if var.is_data + ] + elif isinstance(feed_order, list): + feed_var_list = [ + program.global_block().var(var_name) for var_name in feed_order + ] + else: + if not isinstance(feed_order, dict): + raise TypeError( + "The 'feed_order' should be either None, list or dict.") + if not sorted(feed_order.values()) == range(len(feed_order)): + raise ValueError( + "The values of 'feed_order' should be a permutation of [0, len(feed_order))" + ) + sorted_pair_list = sorted(feed_order.items(), key=lambda item: item[1]) + feed_var_list = [ + program.global_block().var(pair[0]) for pair in sorted_pair_list + ] + return feed_var_list diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py index 8cd7cd5d3a9f81bbf8f3bb0fcb2c28eaf6166a50..635763ed35270ddfafeba67246d566b7f3523b81 100644 --- a/python/paddle/fluid/transpiler/distribute_transpiler.py +++ b/python/paddle/fluid/transpiler/distribute_transpiler.py @@ -18,7 +18,9 @@ import math import distributed_splitter as splitter from .. 
import core -from ..framework import Program, default_main_program, Variable, Parameter +from ..framework import Program, default_main_program, \ + default_startup_program, \ + Variable, Parameter, grad_var_name LOOKUP_TABLE_TYPE = "lookup_table" LOOKUP_TABLE_GRAD_TYPE = "lookup_table_grad" @@ -153,43 +155,43 @@ class DistributeTranspiler: split_method=splitter.round_robin, sync_mode=True): """ - Transpile the program to distributed data-parallelism programs. - The main_program will be transformed to use a remote parameter server - to do parameter optimization. And the optimization graph will be put - into a parameter server program. - - Use different methods to split trainable variables to different - parameter servers. - - Steps to transpile trainer: - 1. split variable to multiple blocks, aligned by product(dim[1:]) (width). - 2. rename splited grad variables to add trainer_id suffix ".trainer_%d". - 3. modify trainer program add split_op to each grad variable. - 4. append send_op to send splited variables to server and fetch - params(splited blocks or origin param) from server. - 5. append concat_op to merge splited blocks to update local weights. - - Steps to transpile pserver: - 1. create new program for parameter server. - 2. create params and grad variables that assigned to current server instance. - 3. create a sub-block in the server side program - 4. append ops that should run on current server instance. - 5. add listen_and_serv op - - :param trainer_id: one unique id for each trainer in a job. - :type trainer_id: int - :param program: program to transpile, default is default_main_program - :type program: Program - :param pservers: parameter server endpoints like "m1:6174,m2:6174" - :type pservers: string - :param trainers: total number of workers/trainers in the job - :type trainers: int - :param split_method: A function to determin how to split variables - to different servers equally. - :type split_method: function - :type sync_mode: boolean default True - :param sync_mode: if sync_mode is set True, it means that dist transpiler - will transpile the program into sync_mode pserver and trainer program. + Transpile the program to distributed data-parallelism programs. + The main_program will be transformed to use a remote parameter server + to do parameter optimization. And the optimization graph will be put + into a parameter server program. + + Use different methods to split trainable variables to different + parameter servers. + + Steps to transpile trainer: + 1. split variable to multiple blocks, aligned by product(dim[1:]) (width). + 2. rename splited grad variables to add trainer_id suffix ".trainer_%d". + 3. modify trainer program add split_op to each grad variable. + 4. append send_op to send splited variables to server and fetch + params(splited blocks or origin param) from server. + 5. append concat_op to merge splited blocks to update local weights. + + Steps to transpile pserver: + 1. create new program for parameter server. + 2. create params and grad variables that assigned to current server instance. + 3. create a sub-block in the server side program + 4. append ops that should run on current server instance. + 5. add listen_and_serv op + + :param trainer_id: one unique id for each trainer in a job. 
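The docstring above lists the trainer-side and pserver-side transpile steps; a hedged end-to-end sketch of driving them is below. The environment-variable names are illustrative only, and a real job would build its own network before transpiling:

```python
import os
import paddle.fluid as fluid

# Build some network plus an optimizer first; the transpiler rewrites the
# resulting default main program.
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
cost = fluid.layers.mean(
    fluid.layers.square_error_cost(
        input=fluid.layers.fc(input=x, size=1), label=y))
fluid.optimizer.SGD(learning_rate=0.01).minimize(cost)

# Role, trainer id and endpoints would come from the cluster launcher.
role = os.getenv("TRAINING_ROLE", "TRAINER")
trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
pservers = os.getenv("PADDLE_PSERVER_EPS", "127.0.0.1:6174")
current_endpoint = os.getenv("PADDLE_CURRENT_EP", "127.0.0.1:6174")
trainers = int(os.getenv("PADDLE_TRAINERS", "1"))

t = fluid.DistributeTranspiler()
t.transpile(trainer_id, pservers=pservers, trainers=trainers, sync_mode=True)

if role == "PSERVER":
    pserver_prog = t.get_pserver_program(current_endpoint)
    pserver_startup = t.get_startup_program(current_endpoint, pserver_prog)
    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(pserver_startup)
    exe.run(pserver_prog)        # blocks inside listen_and_serv
else:
    trainer_prog = t.get_trainer_program()
    # ... feed data and run trainer_prog with an Executor as usual ...
```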
+ :type trainer_id: int + :param program: program to transpile, default is default_main_program + :type program: Program + :param pservers: parameter server endpoints like "m1:6174,m2:6174" + :type pservers: string + :param trainers: total number of workers/trainers in the job + :type trainers: int + :param split_method: A function to determin how to split variables + to different servers equally. + :type split_method: function + :type sync_mode: boolean default True + :param sync_mode: if sync_mode is set True, it means that dist transpiler + will transpile the program into sync_mode pserver and trainer program. """ assert (callable(split_method)) if program is None: @@ -244,7 +246,7 @@ class DistributeTranspiler: ] grad_list = [ grad for grad in grad_list - if grad.name != framework.grad_var_name(self.table_name) + if grad.name != grad_var_name(self.table_name) ] self.table_param_grad = [ param_grad for param_grad in params_grads @@ -495,7 +497,7 @@ class DistributeTranspiler: were split to several blocks. """ s_prog = Program() - orig_s_prog = framework.default_startup_program() + orig_s_prog = default_startup_program() params = self.param_grad_ep_mapping[endpoint]["params"] def _get_splited_name_and_shape(varname): @@ -620,7 +622,7 @@ class DistributeTranspiler: # 2. add split_ids_op and send_vars_op to send gradient to pservers # there should only be one table_name all_ops = program.global_block().ops - table_grad_name = framework.grad_var_name(self.table_name) + table_grad_name = grad_var_name(self.table_name) for op in all_ops: if table_grad_name in op.output_arg_names: op_index = list(all_ops).index(op) @@ -693,7 +695,7 @@ class DistributeTranspiler: persistable=True) grad_var = _clone_var( pserver_program.global_block(), - self.origin_program.global_block().vars[framework.grad_var_name( + self.origin_program.global_block().vars[grad_var_name( self.table_name)], persistable=False) diff --git a/tools/aws_benchmarking/server/cluster_master.py b/tools/aws_benchmarking/server/cluster_master.py index 1333a942bf013a8182585b56e5843803c56945b1..a9b24846544d8aca5e4c7bd5709e70564c088431 100644 --- a/tools/aws_benchmarking/server/cluster_master.py +++ b/tools/aws_benchmarking/server/cluster_master.py @@ -20,6 +20,7 @@ import time import threading import logging import copy +import csv import netaddr import boto3 @@ -136,6 +137,12 @@ parser.add_argument( parser.add_argument( '--master_server_ip', type=str, default="", help="master server private ip") +parser.add_argument( + '--metric_data_identifier', + type=str, + default="**metrics_data: ", + help="key string to identify metrics data") + parser.add_argument( '--no_clean_up', type=str2bool, @@ -155,6 +162,11 @@ logging.basicConfig( log_files = ["master.log"] +metrics = {} + +metrics_csv_file_name = "metrics.csv" +is_metrics_file_created = False + def create_subnet(): # if no vpc id provided, list vpcs @@ -329,12 +341,42 @@ def create_pservers(): cleanup(args.task_name) +def save_metrics_data(str_msg): + #parse msg + logging.info("found metrics data, saving it to csv file") + global is_metrics_file_created + metrics_raw = str_msg.split(",") + with open(args.log_path + metrics_csv_file_name, 'a') as csvfile: + csv_fieldnames = [] + csv_write_data = {} + for metric in metrics_raw: + metric_data = metric.split("=") + metric_key = metric_data[0].strip() + metric_val = float(metric_data[1].strip()) + if not metric_key in metrics: + metrics[metric_key] = [] + metric_repo = metrics[metric_key] + metric_repo.append(metric_val) + 
csv_fieldnames.append(metric_key) + csv_write_data[metric_key] = metric_val + writer = csv.DictWriter(csvfile, fieldnames=csv_fieldnames) + if not is_metrics_file_created: + writer.writeheader() + is_metrics_file_created = True + writer.writerow(csv_write_data) + logging.info("csv file appended") + + def log_to_file(source, filename): if not filename in log_files: log_files.append(filename) with open(args.log_path + filename, "a") as log_file: for line in iter(source.readline, ""): log_file.write(line) + if (line.startswith(args.metric_data_identifier)): + #found key data, trying to add to csv + line = line.replace(args.metric_data_identifier, "") + save_metrics_data(line) def parse_command(command_raw, defaults={}):
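With the changes above, the master tails every instance log and treats any line that starts with the `--metric_data_identifier` prefix (default `**metrics_data: `) as a comma-separated list of `key=value` pairs, appending them as one row of `metrics.csv`; note that every value must parse as a float. A benchmark script therefore only has to print in that format; a minimal sketch with made-up metric names:

```python
import time

step, loss, acc = 100, 0.253, 0.917
start = time.time()
# ... run one training step here ...
speed = 1.0 / max(time.time() - start, 1e-6)   # illustrative steps/sec

# The prefix must match --metric_data_identifier ("**metrics_data: " by default).
print("**metrics_data: step=%d, loss=%f, acc=%f, speed=%f" %
      (step, loss, acc, speed))
# cluster_master.py then appends a matching row to metrics.csv, e.g.
#   step,loss,acc,speed
#   100.0,0.253,0.917,...
```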