diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3c36cffcb4eeaaf7f8cff5167777628dd2697e7d..b1b02bcc2f4fd14297715bcf5bfd1617e3d5f0c9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -58,6 +58,8 @@ PaddlePaddle uses this [Git branching model](http://nvie.com/posts/a-successful- create mode 100644 233 ``` + NOTE: The `yapf` installed by `pip install pre-commit` and `conda install -c conda-forge pre-commit` is slightly different. Paddle developers use `pip install pre-commit`. + 1. Build and test Users can build PaddlePaddle natively on Linux and Mac OS X. But to unify the building environment and to make it easy for debugging, the recommended way is [using Docker](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/build_en.md). diff --git a/Dockerfile b/Dockerfile index 80a96983ec1ca6b9ec440f7e95de6c328eb1ed40..4d6165b79a1d94b8f27d7f3ee1b6e2cee5992d31 100644 --- a/Dockerfile +++ b/Dockerfile @@ -29,7 +29,7 @@ RUN apt-get update && \ wget unzip unrar tar xz-utils bzip2 gzip coreutils ntp \ curl sed grep graphviz libjpeg-dev zlib1g-dev \ python-matplotlib gcc-4.8 g++-4.8 \ - automake locales clang-format swig doxygen cmake \ + automake locales clang-format swig cmake \ liblapack-dev liblapacke-dev \ clang-3.8 llvm-3.8 libclang-3.8-dev \ net-tools libtool ccache && \ diff --git a/benchmark/fluid/fluid_benchmark.py b/benchmark/fluid/fluid_benchmark.py index 30b070e4acac60caa97a4e8ffd07462cb347ee93..c1d458970a58bfac2a3369e8964eb100568b28f2 100644 --- a/benchmark/fluid/fluid_benchmark.py +++ b/benchmark/fluid/fluid_benchmark.py @@ -98,6 +98,8 @@ def parse_args(): '--use_fake_data', action='store_true', help='If set ommit the actual read data operators.') + parser.add_argument( + '--profile', action='store_true', help='If set, profile a few steps.') parser.add_argument( '--update_method', type=str, @@ -108,8 +110,8 @@ def parse_args(): return args -def append_nccl2_prepare(): - if os.getenv("PADDLE_TRAINER_ID", None) != None: +def append_nccl2_prepare(trainer_id): + if trainer_id >= 0: # append gen_nccl_id at the end of startup program trainer_id = int(os.getenv("PADDLE_TRAINER_ID")) port = os.getenv("PADDLE_PSERVER_PORT") @@ -136,12 +138,12 @@ def append_nccl2_prepare(): }) return nccl_id_var, num_trainers, trainer_id else: - raise Exception( - "must set PADDLE_TRAINER_ID env variables for dist train.") + raise Exception("must set positive PADDLE_TRAINER_ID env variables for " + "nccl-based dist train.") -def dist_transpile(): - if "PADDLE_TRAINING_ROLE" not in os.environ: +def dist_transpile(trainer_id): + if trainer_id < 0: return None, None # the port of all pservers, needed by both trainer and pserver @@ -158,9 +160,6 @@ def dist_transpile(): trainers = int(os.getenv("PADDLE_TRAINERS")) # the IP of the local machine, needed by pserver only current_endpoint = os.getenv("PADDLE_CURRENT_IP", "") + ":" + port - # the unique trainer id, starting from 0, needed by trainer - # only - trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) # the role, should be either PSERVER or TRAINER training_role = os.getenv("PADDLE_TRAINING_ROLE") @@ -295,6 +294,11 @@ def train_parallel(avg_loss, infer_prog, optimizer, train_reader, test_reader, iters = 0 start_time = time.time() for batch_id, data in enumerate(train_reader()): + if args.profile and pass_id == 0 and batch_id == 5: + profiler.start_profiler("All") + elif args.profile and pass_id == 0 and batch_id == 10: + profiler.stop_profiler("total", "/tmp/profile_%d" % trainer_id) + if iters == args.skip_batch_num: start_time = time.time() 
num_samples = 0 @@ -334,7 +338,11 @@ def print_arguments(args): def main(): args = parse_args() print_arguments(args) - nccl_id_var, num_trainers, trainer_id = None, 1, 0 + + # the unique trainer id, starting from 0, needed by trainer + # only + nccl_id_var, num_trainers, trainer_id = ( + None, 1, int(os.getenv("PADDLE_TRAINER_ID", "-1"))) if args.use_cprof: pr = cProfile.Profile() @@ -348,7 +356,7 @@ def main(): fluid.memory_optimize(fluid.default_main_program()) if args.update_method == "pserver": - train_prog, startup_prog = dist_transpile() + train_prog, startup_prog = dist_transpile(trainer_id) if not train_prog: raise Exception( "Must configure correct environments to run dist train.") @@ -364,7 +372,7 @@ def main(): train_args.append(fluid.default_startup_program()) if args.update_method == "nccl2": - nccl_id_var, num_trainers, trainer_id = append_nccl2_prepare() + nccl_id_var, num_trainers, trainer_id = append_nccl2_prepare(trainer_id) if args.gpus == 1: # NOTE: parallel executor use profiler interanlly if args.use_nvprof and args.device == 'GPU': diff --git a/benchmark/fluid/kube_gen_job.py b/benchmark/fluid/kube_gen_job.py index 39ba207fd96f71563504017e77dc0e87c249b3f8..9da8a69af1d7b671b2648b1b3702776c1c0650b0 100644 --- a/benchmark/fluid/kube_gen_job.py +++ b/benchmark/fluid/kube_gen_job.py @@ -49,7 +49,7 @@ def parse_args(): parser.add_argument( '--fluid', default=1, type=int, help='whether is fluid job') parser.add_argument( - '--rdma', action='store_ture', help='whether mount rdma libs') + '--rdma', action='store_true', help='whether mount rdma libs') parser.add_argument( '--disttype', default="pserver", diff --git a/doc/fluid/getstarted/Developer's_Guide_to_Paddle_Fluid.md b/doc/fluid/getstarted/Developer's_Guide_to_Paddle_Fluid.md index 0c0156c8e46378e7bbeea8072938b8ccfb9ab6d7..79df6c59578e2acf495a3453ab61f069c3f09a49 100644 --- a/doc/fluid/getstarted/Developer's_Guide_to_Paddle_Fluid.md +++ b/doc/fluid/getstarted/Developer's_Guide_to_Paddle_Fluid.md @@ -86,7 +86,7 @@


--- @@ -123,12 +123,12 @@ - 在科学计算领域,计算图是一种描述计算的经典方式。下图展示了从前向计算图(蓝色)开始,通过添加反向(红色)和优化算法相关(绿色)操作,构建出整个计算图的过程: -- +-

- + - Fluid ==使用`Program`而不是计算图==来描述模型和优化过程。`Program`由`Block`、`Operator`和`Variable`构成,相关概念会在后文详细展开。 - 编译时 Fluid 接受前向计算(这里可以先简单的理解为是一段有序的计算流)`Program`,为这段前向计算按照:前向 -> 反向 -> 梯度 clip -> 正则 -> 优化 的顺序,添加相关 `Operator`和`Variable`到`Program`到完整的计算。 @@ -328,7 +328,7 @@
---- +--- ### 编译时概念 :==**[Transpiler](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/motivation/fluid_compiler.md)**== @@ -402,7 +402,7 @@ - `Scope` - 计算相关 - - `Block` + - `Block` - `Kernel`、`OpWithKernel`、`OpWithoutKernel` @@ -439,7 +439,7 @@
-- 执行相关 :`Executor` +- 执行相关 :`Executor`
@@ -798,7 +798,7 @@ class GPUAllocator : public SystemAllocator { - step 1:添加Place类型,由用户实现添加到框架 - 可以将Place类型理解为一个整数加上一个枚举型,包括:设备号 + 设备类型 - +

@@ -824,7 +824,7 @@ class GPUAllocator : public SystemAllocator { 1. DataType 执行数据类型 FP32/FP64/INT32/INT64 1. Memory layout: 运行时 Tensor 在内存中的排布格式 NCHW、 NHWC 1. 使用的库 - + 来区分Kernel,为同一个operator注册多个 Kernel。 ```cpp @@ -876,7 +876,7 @@ step 3: 运行时的 KernelType 推断和Kernel切换, --- @@ -1107,7 +1107,7 @@ void Run(const framework::Scope &scope,


@@ -1127,13 +1127,13 @@ void Run(const framework::Scope &scope, - 设计概览 - - 重构概览 [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/refactorization.md) - - fluid [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/fluid.md) + - 重构概览 [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/refactorization.md) + - fluid [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/fluid.md) - fluid_compiler [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/motivation/fluid_compiler.md) - 核心概念 - variable 描述 [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/var_desc.md) - Tensor [->](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/tensor.md) - - LoDTensor [->](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/lod_tensor.md) + - LoDTensor [->](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/lod_tensor.md) - TensorArray [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/tensor_array.md) - Program [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/program.md) - Block [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/block.md) @@ -1152,7 +1152,7 @@ void Run(const framework::Scope &scope, - 支持新设硬件设备库 [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/support_new_device.md) - 添加新的Operator [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/new_op_cn.md) - 添加新的Kernel [->]( -https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/new_op_kernel_en.md) +https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/new_op_kernel_en.md) @@ -1167,10 +1167,10 @@ https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/new_op_kernel_ Docker编译PaddlePaddle源码: [->](http://www.paddlepaddle.org/docs/develop/documentation/fluid/zh/build_and_install/docker_install_cn.html) - + PaddlePaddle 在 Dockerhub 地址:[->]( https://hub.docker.com/r/paddlepaddle/paddle/tags/) - + 1. 获取PaddlePaddle的Docker镜像 ```bash docker pull paddlepaddle/paddle:latest-dev @@ -1183,7 +1183,7 @@ PaddlePaddle 在 Dockerhub 地址:[->]( ``` 1. 进入docker container后,从源码编译,请参考文档 [->]( http://www.paddlepaddle.org/docs/develop/documentation/fluid/zh/build_and_install/build_from_source_cn.html) - + --- @@ -1196,7 +1196,7 @@ PaddlePaddle 在 Dockerhub 地址:[->]( 1. 开发推荐使用tag为`latest-dev`的镜像,其中打包了所有编译依赖。`latest`及`lastest-gpu`是production镜像,主要用于运行PaddlePaddle程序。 2. 在Docker中运行GPU程序,推荐使用nvidia-docker,[否则需要将CUDA库和设备挂载到Docker容器内](http://www.paddlepaddle.org/docs/develop/documentation/fluid/zh/build_and_install/docker_install_cn.html)。 - + ```bash nvidia-docker run -it -v $PWD/Paddle:/paddle paddlepaddle/paddle:latest-dev /bin/bash ``` @@ -1353,9 +1353,9 @@ Op注册实现在`.cc`文件;Kernel注册CPU实现在`.cc`文件中,CUDA实 } }; ``` - + - + --- ###### 实现带Kernel的Operator step2: 定义Operator类 @@ -1420,11 +1420,11 @@ class ClipOp : public framework::OperatorWithKernel { 2. override InferShape函数(参考 [clip_op](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/clip_op.cc#L24)) 1. 什么是`functor` ? 
- + - 类或结构体仅重载了`()`,一般是可被多个kernel复用的计算函数。 - + ```cpp template class CrossEntropyFunctor { @@ -1438,9 +1438,9 @@ class ClipOp : public framework::OperatorWithKernel { }; ``` - + - 在 clip_op 内也会看到将一段计算函数抽象为functor的使用法: [->](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/clip_op.h#L27)。 - + --- @@ -1504,7 +1504,7 @@ class ClipKernel : public framework::OpKernel { - 需要注意,Fluid中,不区分Cost Op和中间层Op,所有Op都必须正确处理接收到的梯度 2. 反向Op的输出 - 对可学习参数的求导结果 - - 对所有输入的求导结果 + - 对所有输入的求导结果 @@ -1520,7 +1520,7 @@ class ClipKernel : public framework::OpKernel { 1. 在`.cc`文件中注册前向、反向Op类,注册CPU Kernel。 - + ```cpp namespace ops = paddle::operators; REGISTER_OP(clip, ops::ClipOp, ops::ClipOpMaker, clip_grad, @@ -1530,13 +1530,13 @@ class ClipKernel : public framework::OpKernel { REGISTER_OP_CPU_KERNEL( clip_grad, ops::ClipGradKernel); ``` - + - 在上面的代码片段中: 1. `REGISTER_OP` : 注册`ops::ClipOp`类,类型名为`clip`,该类的`ProtoMaker`为`ops::ClipOpMaker`,注册`ops::ClipOpGrad`,类型名为`clip_grad` 1. `REGISTER_OP_WITHOUT_GRADIENT` : 用于注册没有反向的Op,例如:优化算法相关的Op 1. `REGISTER_OP_CPU_KERNEL` :注册`ops::ClipKernel`类,并特化模板参数为`paddle::platform::CPUPlace`和`float`类型,同理,注册`ops::ClipGradKernel`类 - + 1. 按照同样方法,在`.cu`文件中注册GPU Kernel - 如果CUDA Kernel的实现基于Eigen,需在 `.cu`的开始加上宏定义 `#define EIGEN_USE_GPU` @@ -1593,7 +1593,7 @@ class ClipKernel : public framework::OpKernel { ```bash make test ARGS="-R test_mul_op -V" ``` - + 或者: ``` @@ -1613,7 +1613,7 @@ class ClipKernel : public framework::OpKernel { - 如果多个Op依赖一些共用的函数,可以创建非`*_op.*`格式的文件来存放,如`gather.h`文件。 - + --- ### ==10.== 使用相关问题 @@ -1735,7 +1735,7 @@ class ClipKernel : public framework::OpKernel { y_data = np.random.randint(0, 8, [1]).astype("int32") y_tensor = core.Tensor() y_tensor.set(y_data, place) - + x_data = np.random.uniform(0.1, 1, [11, 8]).astype("float32") x_tensor = core.Tensor() x_tensor.set(x_data, place) diff --git a/doc/fluid/getstarted/index_cn.rst b/doc/fluid/getstarted/index_cn.rst index 75af7354be93a6eeabfa9ccf86903505402a7ca6..3daea71d0933a2774227ff2b5e744392ca6b1765 100644 --- a/doc/fluid/getstarted/index_cn.rst +++ b/doc/fluid/getstarted/index_cn.rst @@ -17,3 +17,4 @@ :maxdepth: 1 concepts/use_concepts_cn.rst + developer's_guide_to_paddle_fluid.md diff --git a/doc/fluid/getstarted/index_en.rst b/doc/fluid/getstarted/index_en.rst index 75a43f4af87c34830ec940068196e6ca72640501..fb20bb4f245281c3acf67c417979dc63c144fef3 100644 --- a/doc/fluid/getstarted/index_en.rst +++ b/doc/fluid/getstarted/index_en.rst @@ -16,3 +16,4 @@ Here is an example of linear regression. It introduces workflow of PaddlePaddle, :maxdepth: 1 concepts/index_en.rst + developer's_guide_to_paddle_fluid.md diff --git a/doc/fluid/getstarted/quickstart_cn.rst b/doc/fluid/getstarted/quickstart_cn.rst index 135beb75d0330f39d062753aa2aa83a077f36bb1..6a964d4f8561f30aa10936d2399698c51583442c 100644 --- a/doc/fluid/getstarted/quickstart_cn.rst +++ b/doc/fluid/getstarted/quickstart_cn.rst @@ -11,7 +11,7 @@ PaddlePaddle支持使用pip快速安装,目前支持CentOS 6以上, Ubuntu 14. pip install paddlepaddle -如果需要安装支持GPU的版本(cuda7.5_cudnn5_avx_openblas),需要执行: +如果需要安装支持GPU的版本(cuda8.0_cudnn5_avx_openblas),需要执行: .. code-block:: bash @@ -28,18 +28,18 @@ PaddlePaddle支持使用pip快速安装,目前支持CentOS 6以上, Ubuntu 14. 
import paddle.dataset.uci_housing as uci_housing import paddle.fluid as fluid - + with fluid.scope_guard(fluid.core.Scope()): # initialize executor with cpu exe = fluid.Executor(place=fluid.CPUPlace()) - # load inference model + # load inference model [inference_program, feed_target_names,fetch_targets] = \ fluid.io.load_inference_model(uci_housing.fluid_model(), exe) # run inference - result = exe.run(inference_program, - feed={feed_target_names[0]: uci_housing.predict_reader()}, + result = exe.run(inference_program, + feed={feed_target_names[0]: uci_housing.predict_reader()}, fetch_list=fetch_targets) - # print predicted price is $12,273.97 + # print predicted price is $12,273.97 print 'Predicted price: ${:,.2f}'.format(result[0][0][0] * 1000) 执行 :code:`python housing.py` 瞧! 它应该打印出预测住房数据的清单。 diff --git a/doc/fluid/getstarted/quickstart_en.rst b/doc/fluid/getstarted/quickstart_en.rst index df6619cfd039fc1fdca8cde57db9cc6aebf8f029..680122f25893a5a48fac103266bda4788f891f6d 100644 --- a/doc/fluid/getstarted/quickstart_en.rst +++ b/doc/fluid/getstarted/quickstart_en.rst @@ -12,7 +12,7 @@ Simply run the following command to install, the version is cpu_avx_openblas: pip install paddlepaddle -If you need to install GPU version (cuda7.5_cudnn5_avx_openblas), run: +If you need to install GPU version (cuda8.0_cudnn5_avx_openblas), run: .. code-block:: bash @@ -31,18 +31,18 @@ code: import paddle.dataset.uci_housing as uci_housing import paddle.fluid as fluid - + with fluid.scope_guard(fluid.core.Scope()): # initialize executor with cpu exe = fluid.Executor(place=fluid.CPUPlace()) - # load inference model + # load inference model [inference_program, feed_target_names,fetch_targets] = \ fluid.io.load_inference_model(uci_housing.fluid_model(), exe) # run inference - result = exe.run(inference_program, - feed={feed_target_names[0]: uci_housing.predict_reader()}, + result = exe.run(inference_program, + feed={feed_target_names[0]: uci_housing.predict_reader()}, fetch_list=fetch_targets) - # print predicted price is $12,273.97 + # print predicted price is $12,273.97 print 'Predicted price: ${:,.2f}'.format(result[0][0][0] * 1000) Run :code:`python housing.py` and voila! It should print out a list of predictions diff --git a/doc/v2/dev/contribute_to_paddle_cn.md b/doc/v2/dev/contribute_to_paddle_cn.md index d8bf093e09b53b302225739fa67146adc7976e4b..add06e42f1bbd221b48eb83e4e84d4a7c89e7483 100644 --- a/doc/v2/dev/contribute_to_paddle_cn.md +++ b/doc/v2/dev/contribute_to_paddle_cn.md @@ -51,6 +51,8 @@ Paddle 开发人员使用 [pre-commit](http://pre-commit.com/) 工具来管理 G Paddle 使用 `clang-format` 来调整 C/C++ 源代码格式,请确保 `clang-format` 版本在 3.8 以上。 +注:通过`pip install pre-commit`和`conda install -c conda-forge pre-commit`安装的`yapf`稍有不同的,Paddle 开发人员使用的是`pip install pre-commit`。 + ## 开始开发 在本例中,我删除了 README.md 中的一行,并创建了一个新文件。 diff --git a/paddle/contrib/inference/CMakeLists.txt b/paddle/contrib/inference/CMakeLists.txt index a4fe10f708e5bb8b28e34b2d91b2254c346c467f..3beb93c4e7fd1ce4dd2131cb53cb6e89e0f10ebd 100644 --- a/paddle/contrib/inference/CMakeLists.txt +++ b/paddle/contrib/inference/CMakeLists.txt @@ -13,7 +13,11 @@ # limitations under the License. 
# -function(inference_api_test TARGET_NAME TEST_SRC DEP_TEST) +if(APPLE) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=pessimizing-move") +endif(APPLE) + +function(inference_api_test TARGET_NAME TEST_SRC) set(options "") set(oneValueArgs "") set(multiValueArgs ARGS) @@ -32,8 +36,10 @@ function(inference_api_test TARGET_NAME TEST_SRC DEP_TEST) string(REGEX REPLACE "^_$" "" arg "${arg}") cc_test(${TARGET_NAME} SRCS ${TEST_SRC} - DEPS paddle_fluid_api paddle_inference_api paddle_inference_api_impl + DEPS paddle_fluid_api paddle_inference_api ARGS --dirname=${PYTHON_TESTS_DIR}/book/) + # TODO(panyx0178): Figure out how to add word2vec and image_classification + # as deps. # set_tests_properties(${TARGET_NAME} # PROPERTIES DEPENDS ${DEP_TEST}) endforeach() @@ -41,17 +47,12 @@ endfunction(inference_api_test) cc_library(paddle_inference_api - SRCS paddle_inference_api.cc + SRCS paddle_inference_api.cc paddle_inference_api_impl.cc DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB}) -cc_library(paddle_inference_api_impl - SRCS paddle_inference_api_impl.cc - DEPS paddle_inference_api paddle_fluid_api) - cc_test(test_paddle_inference_api SRCS test_paddle_inference_api.cc DEPS paddle_inference_api) inference_api_test(test_paddle_inference_api_impl - test_paddle_inference_api_impl.cc - test_word2vec) + test_paddle_inference_api_impl.cc) diff --git a/paddle/contrib/inference/paddle_inference_api.h b/paddle/contrib/inference/paddle_inference_api.h index f804d9b28697a6703d63d9a640c4ec337effaba6..b4c7f9bef4d2e83038ff223614a89e1b0493fc6f 100644 --- a/paddle/contrib/inference/paddle_inference_api.h +++ b/paddle/contrib/inference/paddle_inference_api.h @@ -45,10 +45,10 @@ struct PaddleTensor { }; /* -* A simple Inference API for Paddle. Currently this API might just be used by -* non-sequence scenerios. -* TODO(Superjomn) Prepare another API for NLP-related usages. -*/ + * A simple Inference API for Paddle. Currently this API can be used by + * non-sequence scenerios. + * TODO(Superjomn) Support another API for NLP-related usages. + */ class PaddlePredictor { public: struct Config; @@ -66,34 +66,38 @@ class PaddlePredictor { // be thread-safe. virtual std::unique_ptr Clone() = 0; - virtual bool InitShared() { return false; } // Destroy the Predictor. virtual ~PaddlePredictor() {} - friend std::unique_ptr CreatePaddlePredictor( - const PaddlePredictor::Config& config); + enum class EngineKind { + kNative = -1, // Use the native Fluid facility. + // TODO(Superjomn) support latter. + // kAnakin, // Use Anakin for inference. + // kTensorRT, // Use TensorRT for inference. + // kAutoMixedAnakin, // Automatically mix Fluid with Anakin. + // kAutoMixedTensorRT, // Automatically mix Fluid with TensorRT. + }; // The common configs for all the predictors. struct Config { - enum class EngineKind; - std::string model_dir; // path to the model directory. bool enable_engine{false}; // Enable to execute (part of) the model on - // third-party engines. - EngineKind engine_kind{Config::EngineKind::kNone}; - - enum class EngineKind { - kNone = -1, // Use the native Fluid facility. - kAnakin, // Use Anakin for inference. - kTensorRT, // Use TensorRT for inference. - kAutoMixedAnakin, // Automatically mix Fluid with Anakin. - kAutoMixedTensorRT, // Automatically mix Fluid with TensorRT. 
- }; }; }; +struct NativeConfig : public PaddlePredictor::Config { + bool use_gpu{false}; + int device; + float fraction_of_gpu_memory; + std::string prog_file; + std::string param_file; + bool share_variables; +}; + // A factory to help create difference predictor. -template +template < + typename ConfigT, + PaddlePredictor::EngineKind engine = PaddlePredictor::EngineKind::kNative> std::unique_ptr CreatePaddlePredictor(const ConfigT& config); } // namespace paddle diff --git a/paddle/contrib/inference/paddle_inference_api_impl.cc b/paddle/contrib/inference/paddle_inference_api_impl.cc index e7a0b341dda1ca8d2ccfc0d6c12a7ac3d4c691d5..989252f69e42778dfd791cdee02c550f2aa78803 100644 --- a/paddle/contrib/inference/paddle_inference_api_impl.cc +++ b/paddle/contrib/inference/paddle_inference_api_impl.cc @@ -54,7 +54,7 @@ std::string num2str(T a) { } } // namespace -bool PaddlePredictorImpl::Init() { +bool NativePaddlePredictor::Init() { VLOG(3) << "Predictor::init()"; // TODO(panyx0718): Should CPU vs GPU device be decided by id? @@ -96,14 +96,14 @@ bool PaddlePredictorImpl::Init() { return true; } -bool PaddlePredictorImpl::Run(const std::vector &inputs, - std::vector *output_data) { +bool NativePaddlePredictor::Run(const std::vector &inputs, + std::vector *output_data) { VLOG(3) << "Predictor::predict"; Timer timer; timer.tic(); // set feed variable - std::map feed_targets; - std::vector feeds; + std::map feed_targets; + std::vector feeds; if (!SetFeed(inputs, &feeds)) { LOG(ERROR) << "fail to set feed"; return false; @@ -112,8 +112,8 @@ bool PaddlePredictorImpl::Run(const std::vector &inputs, feed_targets[feed_target_names_[i]] = &feeds[i]; } // get fetch variable - std::map fetch_targets; - std::vector fetchs; + std::map fetch_targets; + std::vector fetchs; fetchs.resize(fetch_target_names_.size()); for (size_t i = 0; i < fetch_target_names_.size(); ++i) { fetch_targets[fetch_target_names_[i]] = &fetchs[i]; @@ -133,76 +133,33 @@ bool PaddlePredictorImpl::Run(const std::vector &inputs, return true; } -std::unique_ptr PaddlePredictorImpl::Clone() { +std::unique_ptr NativePaddlePredictor::Clone() { VLOG(3) << "Predictor::clone"; - std::unique_ptr cls(new PaddlePredictorImpl(config_)); - if (!cls->InitShared()) { - LOG(ERROR) << "fail to call InitShared"; + std::unique_ptr cls(new NativePaddlePredictor(config_)); + + if (!dynamic_cast(cls.get())->Init()) { + LOG(ERROR) << "fail to call Init"; return nullptr; } // fix manylinux compile error. return std::move(cls); } -// TODO(panyx0718): Consider merge with Init()? -bool PaddlePredictorImpl::InitShared() { - VLOG(3) << "Predictor::init_shared"; - // 1. Define place, executor, scope - if (this->config_.device >= 0) { - place_ = paddle::platform::CUDAPlace(); - } else { - place_ = paddle::platform::CPUPlace(); - } - this->executor_.reset(new paddle::framework::Executor(this->place_)); - this->scope_.reset(new paddle::framework::Scope()); - // Initialize the inference program - if (!this->config_.model_dir.empty()) { - // Parameters are saved in separate files sited in - // the specified `dirname`. - this->inference_program_ = paddle::inference::Load( - this->executor_.get(), this->scope_.get(), this->config_.model_dir); - } else if (!this->config_.prog_file.empty() && - !this->config_.param_file.empty()) { - // All parameters are saved in a single file. - // The file names should be consistent with that used - // in Python API `fluid.io.save_inference_model`. 
- this->inference_program_ = - paddle::inference::Load(this->executor_.get(), - this->scope_.get(), - this->config_.prog_file, - this->config_.param_file); - } - this->ctx_ = this->executor_->Prepare(*this->inference_program_, 0); - // 3. create variables - // TODO(panyx0718): why test share_variables. - if (config_.share_variables) { - this->executor_->CreateVariables( - *this->inference_program_, this->scope_.get(), 0); - } - // 4. Get the feed_target_names and fetch_target_names - this->feed_target_names_ = this->inference_program_->GetFeedTargetNames(); - this->fetch_target_names_ = this->inference_program_->GetFetchTargetNames(); - return true; -} - -bool PaddlePredictorImpl::SetFeed( - const std::vector &inputs, - std::vector *feeds) { +bool NativePaddlePredictor::SetFeed(const std::vector &inputs, + std::vector *feeds) { VLOG(3) << "Predictor::set_feed"; if (inputs.size() != feed_target_names_.size()) { LOG(ERROR) << "wrong feed input size."; return false; } for (size_t i = 0; i < feed_target_names_.size(); ++i) { - paddle::framework::LoDTensor input; - paddle::framework::DDim ddim = - paddle::framework::make_ddim(inputs[i].shape); + framework::LoDTensor input; + framework::DDim ddim = framework::make_ddim(inputs[i].shape); void *input_ptr; if (inputs[i].dtype == PaddleDType::INT64) { - input_ptr = - input.mutable_data(ddim, paddle::platform::CPUPlace()); + input_ptr = input.mutable_data(ddim, platform::CPUPlace()); } else if (inputs[i].dtype == PaddleDType::FLOAT32) { - input_ptr = input.mutable_data(ddim, paddle::platform::CPUPlace()); + input_ptr = input.mutable_data(ddim, platform::CPUPlace()); } else { LOG(ERROR) << "unsupported feed type " << inputs[i].dtype; return false; @@ -213,13 +170,12 @@ bool PaddlePredictorImpl::SetFeed( inputs[i].data.data, inputs[i].data.length); feeds->push_back(input); - LOG(ERROR) << "Actual feed type " << feeds->back().type().name(); } return true; } -bool PaddlePredictorImpl::GetFetch( - const std::vector &fetchs, +bool NativePaddlePredictor::GetFetch( + const std::vector &fetchs, std::vector *outputs) { VLOG(3) << "Predictor::get_fetch"; outputs->resize(fetchs.size()); @@ -284,27 +240,30 @@ bool PaddlePredictorImpl::GetFetch( return true; } -std::unique_ptr CreatePaddlePredictorImpl( - const VisConfig &config) { - VLOG(3) << "create PaddlePredictorImpl"; - // 1. GPU memeroy - std::vector flags; - if (config.fraction_of_gpu_memory >= 0.0f || - config.fraction_of_gpu_memory <= 0.95f) { - flags.push_back("dummpy"); - std::string flag = "--fraction_of_gpu_memory_to_use=" + - num2str(config.fraction_of_gpu_memory); - flags.push_back(flag); - VLOG(3) << "set flag: " << flag; - framework::InitGflags(flags); +template <> +std::unique_ptr +CreatePaddlePredictor( + const NativeConfig &config) { + VLOG(3) << "create NativePaddlePredictor"; + if (config.use_gpu) { + // 1. 
GPU memeroy + std::vector flags; + if (config.fraction_of_gpu_memory >= 0.0f || + config.fraction_of_gpu_memory <= 0.95f) { + flags.push_back("dummpy"); + std::string flag = "--fraction_of_gpu_memory_to_use=" + + num2str(config.fraction_of_gpu_memory); + flags.push_back(flag); + VLOG(3) << "set flag: " << flag; + framework::InitGflags(flags); + } } - std::unique_ptr predictor( - new PaddlePredictorImpl(config)); - if (!predictor->Init()) { + std::unique_ptr predictor(new NativePaddlePredictor(config)); + if (!dynamic_cast(predictor.get())->Init()) { return nullptr; } - return predictor; + return std::move(predictor); } } // namespace paddle diff --git a/paddle/contrib/inference/paddle_inference_api_impl.h b/paddle/contrib/inference/paddle_inference_api_impl.h index a0c7ff030735fc1c6b9d717f8f9e4addc7e0c6b0..84707e223d7aa3d1ebca933923e932b3973613ae 100644 --- a/paddle/contrib/inference/paddle_inference_api_impl.h +++ b/paddle/contrib/inference/paddle_inference_api_impl.h @@ -29,20 +29,10 @@ namespace paddle { -struct VisConfig : public PaddlePredictor::Config { - int device; - float fraction_of_gpu_memory; - std::string prog_file; - std::string param_file; - bool share_variables; -}; - -/* - * Do not use this, just a demo indicating how to customize a Predictor. - */ -class PaddlePredictorImpl : public PaddlePredictor { +class NativePaddlePredictor : public PaddlePredictor { public: - explicit PaddlePredictorImpl(const VisConfig &config) : config_(config) {} + explicit NativePaddlePredictor(const NativeConfig &config) + : config_(config) {} bool Init(); @@ -51,26 +41,22 @@ class PaddlePredictorImpl : public PaddlePredictor { std::unique_ptr Clone() override; - ~PaddlePredictorImpl() override{}; + ~NativePaddlePredictor() override{}; private: - bool InitShared() override; bool SetFeed(const std::vector &input_datas, - std::vector *feeds); - bool GetFetch(const std::vector &fetchs, + std::vector *feeds); + bool GetFetch(const std::vector &fetchs, std::vector *output_data); - VisConfig config_; - paddle::platform::Place place_; - std::unique_ptr executor_; - std::unique_ptr scope_; - std::unique_ptr ctx_; - std::unique_ptr inference_program_; + NativeConfig config_; + platform::Place place_; + std::unique_ptr executor_; + std::unique_ptr scope_; + std::unique_ptr ctx_; + std::unique_ptr inference_program_; std::vector feed_target_names_; std::vector fetch_target_names_; }; -std::unique_ptr CreatePaddlePredictorImpl( - const VisConfig &config); - } // namespace paddle diff --git a/paddle/contrib/inference/test_paddle_inference_api_impl.cc b/paddle/contrib/inference/test_paddle_inference_api_impl.cc index 2a58f6989d5dad23b2f267adafde2cc105bf5651..5240fc2f20211ac5d38c57b71db31d04a6dc536a 100644 --- a/paddle/contrib/inference/test_paddle_inference_api_impl.cc +++ b/paddle/contrib/inference/test_paddle_inference_api_impl.cc @@ -40,16 +40,20 @@ PaddleTensor LodTensorToPaddleTensor(framework::LoDTensor* t) { return pt; } -TEST(paddle_inference_api_impl, word2vec) { - VisConfig config; +NativeConfig GetConfig() { + NativeConfig config; config.model_dir = FLAGS_dirname + "word2vec.inference.model"; LOG(INFO) << "dirname " << config.model_dir; config.fraction_of_gpu_memory = 0.15; + config.use_gpu = true; config.device = 0; config.share_variables = true; + return config; +} - std::unique_ptr predictor = - CreatePaddlePredictorImpl(config); +TEST(paddle_inference_api_impl, word2vec) { + NativeConfig config = GetConfig(); + auto predictor = CreatePaddlePredictor(config); framework::LoDTensor first_word, 
second_word, third_word, fourth_word; framework::LoD lod{{0, 1}}; @@ -60,24 +64,90 @@ TEST(paddle_inference_api_impl, word2vec) { SetupLoDTensor(&third_word, lod, static_cast(0), dict_size - 1); SetupLoDTensor(&fourth_word, lod, static_cast(0), dict_size - 1); - std::vector cpu_feeds; - cpu_feeds.push_back(LodTensorToPaddleTensor(&first_word)); - cpu_feeds.push_back(LodTensorToPaddleTensor(&second_word)); - cpu_feeds.push_back(LodTensorToPaddleTensor(&third_word)); - cpu_feeds.push_back(LodTensorToPaddleTensor(&fourth_word)); + std::vector paddle_tensor_feeds; + paddle_tensor_feeds.push_back(LodTensorToPaddleTensor(&first_word)); + paddle_tensor_feeds.push_back(LodTensorToPaddleTensor(&second_word)); + paddle_tensor_feeds.push_back(LodTensorToPaddleTensor(&third_word)); + paddle_tensor_feeds.push_back(LodTensorToPaddleTensor(&fourth_word)); + + std::vector outputs; + ASSERT_TRUE(predictor->Run(paddle_tensor_feeds, &outputs)); + ASSERT_EQ(outputs.size(), 1UL); + size_t len = outputs[0].data.length; + float* data = static_cast(outputs[0].data.data); + for (int j = 0; j < len / sizeof(float); ++j) { + ASSERT_LT(data[j], 1.0); + ASSERT_GT(data[j], -1.0); + } + + std::vector cpu_feeds; + cpu_feeds.push_back(&first_word); + cpu_feeds.push_back(&second_word); + cpu_feeds.push_back(&third_word); + cpu_feeds.push_back(&fourth_word); + + framework::LoDTensor output1; + std::vector cpu_fetchs1; + cpu_fetchs1.push_back(&output1); + + TestInference(config.model_dir, cpu_feeds, cpu_fetchs1); + + float* lod_data = output1.data(); + for (size_t i = 0; i < output1.numel(); ++i) { + EXPECT_LT(lod_data[i] - data[i], 1e-3); + EXPECT_GT(lod_data[i] - data[i], -1e-3); + } + + free(outputs[0].data.data); +} + +TEST(paddle_inference_api_impl, image_classification) { + int batch_size = 2; + bool use_mkldnn = false; + bool repeat = false; + NativeConfig config = GetConfig(); + config.model_dir = + FLAGS_dirname + "image_classification_resnet.inference.model"; + + const bool is_combined = false; + std::vector> feed_target_shapes = + GetFeedTargetShapes(config.model_dir, is_combined); + + framework::LoDTensor input; + // Use normilized image pixels as input data, + // which should be in the range [0.0, 1.0]. 
+ feed_target_shapes[0][0] = batch_size; + framework::DDim input_dims = framework::make_ddim(feed_target_shapes[0]); + SetupTensor( + &input, input_dims, static_cast(0), static_cast(1)); + std::vector cpu_feeds; + cpu_feeds.push_back(&input); + + framework::LoDTensor output1; + std::vector cpu_fetchs1; + cpu_fetchs1.push_back(&output1); + + TestInference(config.model_dir, + cpu_feeds, + cpu_fetchs1, + repeat, + is_combined, + use_mkldnn); + + auto predictor = CreatePaddlePredictor(config); + std::vector paddle_tensor_feeds; + paddle_tensor_feeds.push_back(LodTensorToPaddleTensor(&input)); std::vector outputs; - ASSERT_TRUE(predictor->Run(cpu_feeds, &outputs)); + ASSERT_TRUE(predictor->Run(paddle_tensor_feeds, &outputs)); ASSERT_EQ(outputs.size(), 1UL); - for (size_t i = 0; i < outputs.size(); ++i) { - size_t len = outputs[i].data.length; - float* data = static_cast(outputs[i].data.data); - for (size_t j = 0; j < len / sizeof(float); ++j) { - ASSERT_LT(data[j], 1.0); - ASSERT_GT(data[j], -1.0); - } - free(outputs[i].data.data); + size_t len = outputs[0].data.length; + float* data = static_cast(outputs[0].data.data); + float* lod_data = output1.data(); + for (size_t j = 0; j < len / sizeof(float); ++j) { + EXPECT_NEAR(lod_data[j], data[j], 1e-3); } + free(data); } } // namespace paddle diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index e3d2e5377eac49003b0082c39c9dd0460e2acd92..f87d5521492418d2daf5b7fba1500c4bb31e10f5 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -469,6 +469,7 @@ class RuntimeInferShapeContext : public InferShapeContext { protected: DDim GetDim(const std::string& name) const override { Variable* var = scope_.FindVar(name); + PADDLE_ENFORCE_NOT_NULL(var); if (var->IsType()) { return var->Get().dims(); } else if (var->IsType()) { diff --git a/paddle/fluid/framework/reader.cc b/paddle/fluid/framework/reader.cc index 76126f3dc64d71770d13f9d66bb30f176c112629..0b36f1116d15004b355e854e101abb9ad3297836 100644 --- a/paddle/fluid/framework/reader.cc +++ b/paddle/fluid/framework/reader.cc @@ -25,8 +25,10 @@ void FileReader::ReadNext(std::vector *out) { if (out->empty()) { return; } + + PADDLE_ENFORCE_EQ(out->size(), dims_.size()); for (size_t i = 0; i < dims_.size(); ++i) { - auto &actual = out->at(i).dims(); + auto &actual = (*out)[i].dims(); auto &expect = dims_[i]; PADDLE_ENFORCE_EQ(actual.size(), expect.size()); diff --git a/paddle/fluid/framework/selected_rows.cc b/paddle/fluid/framework/selected_rows.cc index b4168f38949c7fcb057ec8c5c562d0529a6d9e48..06ed87e7e8a2d5324b48a466b05207042ec1b7fa 100644 --- a/paddle/fluid/framework/selected_rows.cc +++ b/paddle/fluid/framework/selected_rows.cc @@ -18,8 +18,8 @@ namespace paddle { namespace framework { struct ReAllocateVisitor { - ReAllocateVisitor(framework::Tensor* tensor, const framework::DDim& dims) - : tensor_(tensor), dims_(dims) {} + ReAllocateVisitor(const framework::DDim& dims, framework::Tensor* tensor) + : dims_(dims), tensor_(tensor) {} template void operator()() const { @@ -34,8 +34,8 @@ struct ReAllocateVisitor { tensor_->ShareDataWith(cpu_tensor); } - framework::Tensor* tensor_; framework::DDim dims_; + framework::Tensor* tensor_; }; struct TensorCopyVisitor { @@ -158,6 +158,7 @@ bool SelectedRows::Set(int64_t key, const framework::Tensor& value) { } PADDLE_ENFORCE_EQ(value.dims()[0], static_cast(1), "The first dim of value should be 1."); + std::lock_guard lock(*auto_grown_mutex_.get()); auto index = Index(key); bool is_new_key = false; 
if (index == -1) { @@ -169,7 +170,7 @@ bool SelectedRows::Set(int64_t key, const framework::Tensor& value) { auto dims = value_->dims(); dims[0] = (dims[0] + 1) << 1; framework::VisitDataType(framework::ToDataType(value.type()), - ReAllocateVisitor(value_.get(), dims)); + ReAllocateVisitor(dims, value_.get())); } } diff --git a/paddle/fluid/framework/selected_rows.h b/paddle/fluid/framework/selected_rows.h index c80b05eed9b1c50325316057a8afc26d5d52e82c..7160670ddd204c20021ea87cdd67ee4721d03451 100644 --- a/paddle/fluid/framework/selected_rows.h +++ b/paddle/fluid/framework/selected_rows.h @@ -15,6 +15,8 @@ limitations under the License. */ #pragma once #include +#include +#include // NOLINT #include #include @@ -46,11 +48,13 @@ class SelectedRows { SelectedRows(const std::vector& rows, const int64_t& height) : rows_(rows), height_(height) { value_.reset(new Tensor()); + auto_grown_mutex_.reset(new std::mutex); } SelectedRows() { height_ = 0; value_.reset(new Tensor()); + auto_grown_mutex_.reset(new std::mutex); } platform::Place place() const { return value_->place(); } @@ -125,6 +129,7 @@ class SelectedRows { Vector rows_; std::unique_ptr value_{nullptr}; int64_t height_; + std::unique_ptr auto_grown_mutex_{nullptr}; }; /* diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h index 0a1db7758bd9ec0dac133efcbf495de1d690021d..2f19ec0f0a9338e2b96d1f64eac45387bae4d1eb 100644 --- a/paddle/fluid/framework/tensor_impl.h +++ b/paddle/fluid/framework/tensor_impl.h @@ -39,7 +39,7 @@ template inline const T* Tensor::data() const { check_memory_size(); PADDLE_ENFORCE(std::is_same::value || - holder_->type().hash_code() == typeid(T).hash_code(), + holder_->type() == std::type_index(typeid(T)), "Tensor holds the wrong type, it holds %s", this->holder_->type().name()); @@ -53,7 +53,7 @@ template inline T* Tensor::data() { check_memory_size(); PADDLE_ENFORCE(std::is_same::value || - holder_->type().hash_code() == typeid(T).hash_code(), + holder_->type() == std::type_index(typeid(T)), "Tensor holds the wrong type, it holds %s", this->holder_->type().name()); return reinterpret_cast(reinterpret_cast(holder_->ptr()) + diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt index cc4a725dfb3b3e7723a3a3a4008b20acdb53899d..ec16a1c600a3bafc1c4cbbd920360253c106e3a1 100644 --- a/paddle/fluid/inference/CMakeLists.txt +++ b/paddle/fluid/inference/CMakeLists.txt @@ -5,14 +5,19 @@ cc_library(paddle_fluid_api SRCS io.cc DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB}) -# Create static library get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES) -cc_library(paddle_fluid DEPS ${fluid_modules}) +if(WITH_CONTRIB) + set(fluid_modules "${fluid_modules}" paddle_inference_api) +endif() + +# Create static library +cc_library(paddle_fluid DEPS ${fluid_modules} paddle_fluid_api) # Create shared library cc_library(paddle_fluid_shared SHARED SRCS io.cc - DEPS ${fluid_modules}) + DEPS ${fluid_modules} paddle_fluid_api) + set_target_properties(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid) if(NOT APPLE) # TODO(liuyiqun): Temporarily disable the link flag because it is not support on Mac. diff --git a/paddle/fluid/inference/analysis/data_flow_graph.h b/paddle/fluid/inference/analysis/data_flow_graph.h index 9f6ce40ede25248a4f779b379c132806a4ec06ba..913e344d371ddf3ea05a53c216e5b3bea8f11c7b 100644 --- a/paddle/fluid/inference/analysis/data_flow_graph.h +++ b/paddle/fluid/inference/analysis/data_flow_graph.h @@ -21,7 +21,10 @@ limitations under the License. 
*/ #include #include +#include #include +#include +#include #include "paddle/fluid/inference/analysis/graph_traits.h" #include "paddle/fluid/inference/analysis/node.h" diff --git a/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass_tester.cc b/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass_tester.cc index 60f159da9140516284449a0274906df004b23ac5..dcee75cee50ede1d2b660e88e06544440bd5ef77 100644 --- a/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass_tester.cc +++ b/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass_tester.cc @@ -44,6 +44,6 @@ TEST_F(DFG_Tester, Test) { LOG(INFO) << graph.nodes.size(); } -} // analysis -} // inference -} // paddle +}; // namespace analysis +}; // namespace inference +}; // namespace paddle diff --git a/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.cc b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.cc index f848a7d1add79c3032da7defc34a406dccf29d2e..9f67c989cca4a936cd320b73efaae277263fb3e2 100644 --- a/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.cc +++ b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.cc @@ -12,9 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h" +#include #include +#include "paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h" + namespace paddle { namespace inference { namespace analysis { diff --git a/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h index cd0d4fabaafe844bcc5bb8bfc2586971197d9167..33517e57becdffc0416f204247eac5feadb7ed82 100644 --- a/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h +++ b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h @@ -19,6 +19,8 @@ #pragma once +#include + #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/inference/analysis/data_flow_graph.h" #include "paddle/fluid/inference/analysis/pass.h" diff --git a/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass_tester.cc b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass_tester.cc index 851c98bef305fa9e20dced5f7c26e9d1b6ddf4f2..817d32c92cdbdc234eef9ed5156891c2b11ced4c 100644 --- a/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass_tester.cc +++ b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass_tester.cc @@ -32,6 +32,6 @@ TEST_F(DFG_Tester, Init) { LOG(INFO) << '\n' << graph.DotString(); } -} // analysis -} // inference -} // paddle +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/helper.h b/paddle/fluid/inference/analysis/helper.h index 24ea9a4bae7132eb1692b0ffb02f8ab5e02b21a9..153dca576bd6734d62f00c4a7cb9b503506b33e2 100644 --- a/paddle/fluid/inference/analysis/helper.h +++ b/paddle/fluid/inference/analysis/helper.h @@ -50,7 +50,7 @@ struct DataTypeNamer { return dic_.at(x); } - const std::string &repr(size_t &hash) const { + const std::string &repr(size_t &hash) const { // NOLINT PADDLE_ENFORCE(dic_.count(hash), "unknown type for representation"); return dic_.at(hash); } @@ -62,7 +62,9 @@ struct DataTypeNamer { SET_TYPE(float); } - std::unordered_map dic_; + std::unordered_map + dic_; }; #undef SET_TYPE diff --git a/paddle/fluid/inference/analysis/pass.h 
b/paddle/fluid/inference/analysis/pass.h index 5c89b1304d84abc9a4942f12da46b4bfe76f44f5..aa0e8667b5e4a9e6156c25fcad03bb8eee3287f6 100644 --- a/paddle/fluid/inference/analysis/pass.h +++ b/paddle/fluid/inference/analysis/pass.h @@ -16,6 +16,7 @@ limitations under the License. */ #include #include +#include #include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/inference/analysis/data_flow_graph.h" diff --git a/paddle/fluid/inference/analysis/subgraph_splitter.h b/paddle/fluid/inference/analysis/subgraph_splitter.h index ed90a0dcf31e154c4d82be08ce35e2f11d11c139..a31afbe6933da8d3c7a88142cc12d63b98b55796 100644 --- a/paddle/fluid/inference/analysis/subgraph_splitter.h +++ b/paddle/fluid/inference/analysis/subgraph_splitter.h @@ -18,6 +18,8 @@ limitations under the License. */ #pragma once +#include + #include "paddle/fluid/inference/analysis/data_flow_graph.h" #include "paddle/fluid/inference/analysis/node.h" diff --git a/paddle/fluid/inference/analysis/ut_helper.h b/paddle/fluid/inference/analysis/ut_helper.h index c86083d12153921672e15c172b874f77a8b46cde..722fa99a48a5f2b0e778904de0c35977d0ee3cc0 100644 --- a/paddle/fluid/inference/analysis/ut_helper.h +++ b/paddle/fluid/inference/analysis/ut_helper.h @@ -15,6 +15,7 @@ limitations under the License. */ #pragma once #include #include +#include #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/inference/analysis/data_flow_graph.h" #include "paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h" diff --git a/paddle/fluid/inference/tensorrt/convert/ut_helper.h b/paddle/fluid/inference/tensorrt/convert/ut_helper.h index 37fcb5c50309db0ad0924a057a6b481750665531..e46c577cdae145c0d4ceb6bfa307f03d313514ce 100644 --- a/paddle/fluid/inference/tensorrt/convert/ut_helper.h +++ b/paddle/fluid/inference/tensorrt/convert/ut_helper.h @@ -19,6 +19,9 @@ limitations under the License. */ #pragma once +#include +#include + #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/inference/analysis/helper.h" @@ -58,7 +61,7 @@ class TRTConvertValidation { public: TRTConvertValidation() = delete; - TRTConvertValidation(int batch_size, int workspace_size = 1 << 10) { + explicit TRTConvertValidation(int batch_size, int workspace_size = 1024) { // create engine. 
engine_.reset(new TensorRTEngine(10, 1 << 10, &stream_)); engine_->InitNetwork(); diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc index fb27c8394c1f94953093ed90627e63e6241130ed..a88236ae98e1816fc43796ead596c432b798d7de 100644 --- a/paddle/fluid/inference/tensorrt/engine.cc +++ b/paddle/fluid/inference/tensorrt/engine.cc @@ -131,6 +131,20 @@ void* TensorRTEngine::GetOutputInGPU(const std::string& name) { return buffer(name).buffer; } +void TensorRTEngine::GetOutputInGPU(const std::string& name, void* dst, + size_t max_size) { + // determine data size + auto it = buffer_sizes_.find(name); + PADDLE_ENFORCE(it != buffer_sizes_.end()); + PADDLE_ENFORCE_GT(it->second, 0); + PADDLE_ENFORCE_GE(max_size, it->second); + auto& buf = buffer(name); + PADDLE_ENFORCE_NOT_NULL(buf.buffer, "buffer should be allocated before"); + PADDLE_ENFORCE_EQ(cudaMemcpyAsync(dst, buf.buffer, it->second, + cudaMemcpyDeviceToDevice, *stream_), + 0); +} + void TensorRTEngine::GetOutputInCPU(const std::string& name, void* dst, size_t max_size) { // determine data size @@ -152,7 +166,7 @@ Buffer& TensorRTEngine::buffer(const std::string& name) { return buffers_[slot_offset]; } -void TensorRTEngine::SetInputFromCPU(const std::string& name, void* data, +void TensorRTEngine::SetInputFromCPU(const std::string& name, const void* data, size_t size) { auto& buf = buffer(name); PADDLE_ENFORCE_NOT_NULL(buf.buffer); @@ -162,6 +176,16 @@ void TensorRTEngine::SetInputFromCPU(const std::string& name, void* data, cudaMemcpyHostToDevice, *stream_)); } +void TensorRTEngine::SetInputFromGPU(const std::string& name, const void* data, + size_t size) { + auto& buf = buffer(name); + PADDLE_ENFORCE_NOT_NULL(buf.buffer); + PADDLE_ENFORCE_LE(size, buf.max_size, "buffer is too small"); + PADDLE_ENFORCE(buf.device == DeviceType::GPU); + PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(buf.buffer, data, size, + cudaMemcpyDeviceToDevice, *stream_)); +} + void TensorRTEngine::SetITensor(const std::string& name, nvinfer1::ITensor* tensor) { PADDLE_ENFORCE(tensor != nullptr); diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h index b8298c6059e8644327194a1fcf7a7438cc9a7286..d9d3163b66d4c4c302d12edcc42f00e1cdfa5a30 100644 --- a/paddle/fluid/inference/tensorrt/engine.h +++ b/paddle/fluid/inference/tensorrt/engine.h @@ -92,13 +92,15 @@ class TensorRTEngine : public EngineBase { cudaStream_t* stream() { return stream_; } // Fill an input from CPU memory with name and size. - void SetInputFromCPU(const std::string& name, void* data, size_t size); + void SetInputFromCPU(const std::string& name, const void* data, size_t size); // TODO(Superjomn) is this method necessary given that buffer(xxx) can be // accessed directly. Fill an input from GPU memory with name and size. - void SetInputFromGPU(const std::string& name, void* data, size_t size); + void SetInputFromGPU(const std::string& name, const void* data, size_t size); // Get an output called name, the output of tensorrt is in GPU, so this method - // will just return the output's GPU memory address. + // Return the output's GPU memory address without copy. void* GetOutputInGPU(const std::string& name); + // Copy data into dst inside the GPU device. + void GetOutputInGPU(const std::string& name, void* dst, size_t max_size); // LOW EFFICENCY! Get output to CPU, this will trigger a memory copy from GPU // to CPU. 
void GetOutputInCPU(const std::string& name, void* dst, size_t max_size); diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index e00cc73565fc98615090367606b6ba4f58feacfd..de6ff29c6f8edbcf930546ff157a1c226e1311db 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -168,6 +168,8 @@ function(op_library TARGET) file(APPEND ${pybind_file} "USE_OP(relu);\n") elseif(${TARGET} STREQUAL "reduce") file(APPEND ${pybind_file} "USE_OP(reduce_sum);\n") + elseif(${TARGET} STREQUAL "fake_dequantize") + file(APPEND ${pybind_file} "USE_OP(fake_dequantize_max_abs);\n") else() file(APPEND ${pybind_file} "USE_OP(${TARGET});\n") endif() @@ -223,6 +225,11 @@ op_library(cross_entropy_op DEPS cross_entropy) op_library(softmax_with_cross_entropy_op DEPS cross_entropy softmax) op_library(softmax_op DEPS softmax) op_library(sequence_softmax_op DEPS softmax) +if (WITH_GPU AND TENSORRT_FOUND) + op_library(tensorrt_engine_op DEPS tensorrt_engine) +else() + set(DEPS_OPS ${DEPS_OPS} tensorrt_engine_op) +endif() op_library(sum_op DEPS selected_rows_functor) op_library(sgd_op DEPS selected_rows_functor) op_library(print_op DEPS lod_tensor) diff --git a/paddle/fluid/operators/cast_op.cc b/paddle/fluid/operators/cast_op.cc index 84660d042c7b12283fabc316d29609f5eddb825d..8d6a498dc941e44688ec8a2b49a6e080608f9b85 100644 --- a/paddle/fluid/operators/cast_op.cc +++ b/paddle/fluid/operators/cast_op.cc @@ -89,4 +89,5 @@ REGISTER_OP_CPU_KERNEL(cast, ops::CastOpKernel, ops::CastOpKernel, ops::CastOpKernel, ops::CastOpKernel, + ops::CastOpKernel, ops::CastOpKernel); diff --git a/paddle/fluid/operators/cast_op.cu b/paddle/fluid/operators/cast_op.cu index c486c5850e25fcf4370f02cb145c244743a4cc4b..657d162878c108760585ca9bd58e2fd34bf1fef3 100644 --- a/paddle/fluid/operators/cast_op.cu +++ b/paddle/fluid/operators/cast_op.cu @@ -21,5 +21,5 @@ using CastOpKernel = REGISTER_OP_CUDA_KERNEL(cast, CastOpKernel, CastOpKernel, CastOpKernel, CastOpKernel, - CastOpKernel, + CastOpKernel, CastOpKernel, CastOpKernel); diff --git a/paddle/fluid/operators/detail/CMakeLists.txt b/paddle/fluid/operators/detail/CMakeLists.txt index b9a66474c9afc27462f9c47af1a0465e2cec70bc..cf20530513cf6cd420e56b2f6378225f73c2bc8b 100644 --- a/paddle/fluid/operators/detail/CMakeLists.txt +++ b/paddle/fluid/operators/detail/CMakeLists.txt @@ -1,6 +1,7 @@ if(WITH_DISTRIBUTE) grpc_library(sendrecvop_grpc SRCS bytebuffer_stream.cc sendrecvop_utils.cc grpc_client.cc - grpc_server.cc variable_response.cc PROTO send_recv.proto DEPS lod_tensor selected_rows) + request_handler_impl.cc rpc_server.cc grpc_server.cc variable_response.cc PROTO send_recv.proto DEPS lod_tensor + selected_rows memory) set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") set_source_files_properties(serde_test.cc grpc_server_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) cc_test(serde_test SRCS serde_test.cc variable_response.cc DEPS grpc++_unsecure grpc_unsecure gpr diff --git a/paddle/fluid/operators/detail/grpc_client.cc b/paddle/fluid/operators/detail/grpc_client.cc index f7ce7786874285795878b655365974f082c00b44..da9ca1a0c1d55018141f0e4285fe35d7c437fd55 100644 --- a/paddle/fluid/operators/detail/grpc_client.cc +++ b/paddle/fluid/operators/detail/grpc_client.cc @@ -205,6 +205,8 @@ void RPCClient::AsyncSendFetchBarrier(const std::string& ep, int64_t time_out) { } bool RPCClient::Wait() { + VLOG(3) << "RPCClient begin Wait()" + << " 
req_count_:" << req_count_; if (req_count_ <= 0) { return true; } diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index 361cc24b5ba11e2654f1282327730befaeca9f55..e73756d89004bc48339c0aa31dd0857c2ca6722d 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/*Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,19 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/detail/grpc_server.h" - #include #include -using ::grpc::ServerAsyncResponseWriter; +#include "paddle/fluid/operators/detail/grpc_server.h" -DEFINE_int32(rpc_server_handle_send_threads, 20, - "Number of threads used to handle send at rpc server."); -DEFINE_int32(rpc_server_handle_get_threads, 20, - "Number of threads used to handle get at rpc server."); -DEFINE_int32(rpc_server_handle_prefetch_threads, 1, - "Number of threads used to handle prefetch at rpc server."); +using ::grpc::ServerAsyncResponseWriter; namespace paddle { namespace operators { @@ -36,49 +29,40 @@ enum CallStatus { PROCESS = 0, FINISH }; class RequestBase { public: explicit RequestBase(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, bool sync_mode, - const platform::DeviceContext* dev_ctx) + ::grpc::ServerCompletionQueue* cq, + RequestHandler* request_handler, int req_id) : service_(service), cq_(cq), - sync_mode_(sync_mode), status_(PROCESS), - dev_ctx_(dev_ctx) { + request_handler_(request_handler), + req_id_(req_id) { PADDLE_ENFORCE(cq_); } virtual ~RequestBase() {} - virtual void Process() { assert(false); } + virtual void Process() = 0; CallStatus Status() { return status_; } void SetStatus(CallStatus status) { status_ = status; } - virtual std::string GetReqName() { - assert(false); - return ""; - } + virtual std::string GetReqName() = 0; protected: ::grpc::ServerContext ctx_; GrpcService::AsyncService* service_; ::grpc::ServerCompletionQueue* cq_; - const bool sync_mode_; CallStatus status_; - const platform::DeviceContext* dev_ctx_; + RequestHandler* request_handler_; + int req_id_; }; class RequestSend final : public RequestBase { public: explicit RequestSend(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, bool sync_mode, - framework::Scope* scope, ReceivedQueue* queue, - const platform::DeviceContext* dev_ctx, int req_id) - : RequestBase(service, cq, sync_mode, dev_ctx), - queue_(queue), - responder_(&ctx_), - req_id_(req_id) { - if (sync_mode_) { - request_.reset(new VariableResponse(scope, dev_ctx_, false)); - } else { - request_.reset(new VariableResponse(scope, dev_ctx_, true)); - } + ::grpc::ServerCompletionQueue* cq, + RequestHandler* request_handler, int req_id) + : RequestBase(service, cq, request_handler, req_id), responder_(&ctx_) { + request_.reset(new VariableResponse(request_handler->scope(), + request_handler->dev_ctx(), + !request_handler->sync_mode())); int method_id = static_cast(detail::GrpcMethod::kSendVariable); service_->RequestAsyncUnary( method_id, &ctx_, request_.get(), &responder_, cq_, cq_, @@ -87,12 +71,17 @@ class RequestSend final : public RequestBase { virtual 
~RequestSend() {} - virtual std::string GetReqName() { return request_->Varname(); } + std::string GetReqName() override { return request_->Varname(); } + + void Process() override { + std::string varname = GetReqName(); + VLOG(3) << "RequestSend var_name:" << varname; - virtual void Process() { - std::string var_name = GetReqName(); - VLOG(3) << "RequestSend " << var_name; - queue_->Push(std::make_pair(var_name, request_)); + auto scope = request_->GetMutableLocalScope(); + auto invar = request_->GetVar(); + framework::Variable* outvar = nullptr; + + request_handler_->Handle(varname, scope, invar, &outvar); status_ = FINISH; responder_.Finish(reply_, ::grpc::Status::OK, @@ -102,105 +91,85 @@ class RequestSend final : public RequestBase { protected: sendrecv::VoidMessage reply_; std::shared_ptr request_; - ReceivedQueue* queue_; ServerAsyncResponseWriter responder_; - int req_id_; }; class RequestGet final : public RequestBase { public: explicit RequestGet(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, bool sync_mode, - framework::Scope* scope, - const platform::DeviceContext* dev_ctx, - framework::BlockingQueue* queue, - int req_id) - : RequestBase(service, cq, sync_mode, dev_ctx), - responder_(&ctx_), - scope_(scope), - queue_(queue), - req_id_(req_id) { + ::grpc::ServerCompletionQueue* cq, + RequestHandler* request_handler, int req_id) + : RequestBase(service, cq, request_handler, req_id), responder_(&ctx_) { auto method_id = static_cast(detail::GrpcMethod::kGetVariable); service_->RequestAsyncUnary( method_id, &ctx_, &request_, &responder_, cq_, cq_, - reinterpret_cast(static_cast(req_id_))); + reinterpret_cast(static_cast(req_id))); } virtual ~RequestGet() {} - virtual std::string GetReqName() { return request_.varname(); } + std::string GetReqName() override { return request_.varname(); } - virtual void Process() { + void Process() override { // proc request. 
- std::string var_name = request_.varname(); - VLOG(3) << "RequestGet " << var_name; - auto* var = scope_->FindVar(var_name); + std::string varname = request_.varname(); + VLOG(3) << "RequestGet " << varname; + + auto scope = request_handler_->scope(); + auto invar = scope->FindVar(varname); + framework::Variable* outvar = nullptr; - if (var_name != FETCH_BARRIER_MESSAGE) { - SerializeToByteBuffer(var_name, var, *dev_ctx_, &reply_); + request_handler_->Handle(varname, scope, invar, &outvar); + + if (outvar) { + SerializeToByteBuffer(varname, outvar, *request_handler_->dev_ctx(), + &reply_); } status_ = FINISH; responder_.Finish(reply_, ::grpc::Status::OK, reinterpret_cast(static_cast(req_id_))); - - if (var_name == FETCH_BARRIER_MESSAGE) { - sendrecv::VariableMessage msg; - MessageWithName msg_with_name = std::make_pair(var_name, msg); - queue_->Push(msg_with_name); - } } protected: sendrecv::VariableMessage request_; ::grpc::ByteBuffer reply_; ServerAsyncResponseWriter<::grpc::ByteBuffer> responder_; - framework::Scope* scope_; - framework::BlockingQueue* queue_; - int req_id_; }; class RequestPrefetch final : public RequestBase { public: explicit RequestPrefetch(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, bool sync_mode, - framework::Scope* scope, - const platform::DeviceContext* dev_ctx, - framework::Executor* executor, - framework::ProgramDesc* program, - framework::ExecutorPrepareContext* prefetch_ctx, - int req_id) - : RequestBase(service, cq, sync_mode, dev_ctx), + ::grpc::ServerCompletionQueue* cq, + RequestHandler* request_handler, int req_id) + : RequestBase(service, cq, request_handler, req_id), responder_(&ctx_), - scope_(scope), - executor_(executor), - program_(program), - prefetch_ctx_(prefetch_ctx), - req_id_(req_id) { - // prefetch always create a new sub scope - request_.reset(new VariableResponse(scope, dev_ctx_, true)); + local_scope_(nullptr) { + request_.reset(new VariableResponse(request_handler->scope(), + request_handler->dev_ctx(), true)); int method_id = static_cast(detail::GrpcMethod::kPrefetchVariable); service_->RequestAsyncUnary( method_id, &ctx_, request_.get(), &responder_, cq_, cq_, - reinterpret_cast(static_cast(req_id_))); + reinterpret_cast(static_cast(req_id))); } virtual ~RequestPrefetch() {} - virtual std::string GetReqName() { return request_->Varname(); } + std::string GetReqName() override { return request_->Varname(); } - virtual void Process() { + void Process() override { // prefetch process... 
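The refactored RequestSend and RequestGet constructors above hand the request slot index to gRPC as the completion-queue tag, and HandleRequest later recovers it to find the matching RequestBase. The template arguments of those casts were stripped in this dump, so the following is a minimal standalone sketch of that round trip, assuming the usual intptr_t-based casts; none of these names are Paddle APIs.

```
// Standalone sketch (not Paddle code): round-tripping a small integer
// request id through an opaque void* completion-queue tag.
// The std::intptr_t casts are an assumption about the intent of the
// casts whose template arguments were lost in this dump.
#include <cassert>
#include <cstdint>
#include <iostream>

void* EncodeTag(int req_id) {
  // Widen to a pointer-sized integer first so the conversion is well defined.
  return reinterpret_cast<void*>(static_cast<std::intptr_t>(req_id));
}

int DecodeTag(void* tag) {
  return static_cast<int>(reinterpret_cast<std::intptr_t>(tag));
}

int main() {
  for (int req_id = 0; req_id < 100; ++req_id) {
    void* tag = EncodeTag(req_id);
    assert(DecodeTag(tag) == req_id);
  }
  std::cout << "all request ids round-trip through the tag\n";
  return 0;
}
```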
+ std::string varname = request_->OutVarname(); + VLOG(3) << "RequestPrefetch " << varname; + + auto scope = request_->GetMutableLocalScope(); + auto invar = scope->FindVar(varname); + framework::Variable* outvar = nullptr; - std::string var_name = request_->OutVarname(); - VLOG(3) << "RequestPrefetch " << var_name; - auto var_desc = program_->Block(0).FindVar(var_name); - framework::Scope* local_scope = request_->GetMutableLocalScope(); - auto* var = local_scope->FindVar(var_name); - InitializeVariable(var, var_desc->GetType()); - executor_->RunPreparedContext(prefetch_ctx_, local_scope); + request_handler_->Handle(varname, scope, invar, &outvar); - SerializeToByteBuffer(var_name, var, *dev_ctx_, &reply_); + SerializeToByteBuffer(varname, outvar, *request_handler_->dev_ctx(), + &reply_); status_ = FINISH; responder_.Finish(reply_, ::grpc::Status::OK, @@ -211,202 +180,169 @@ class RequestPrefetch final : public RequestBase { std::shared_ptr request_; ::grpc::ByteBuffer reply_; ServerAsyncResponseWriter<::grpc::ByteBuffer> responder_; - framework::Scope* scope_; - framework::Executor* executor_; - framework::ProgramDesc* program_; - framework::ExecutorPrepareContext* prefetch_ctx_; - int req_id_; + framework::Scope* local_scope_; }; -void AsyncGRPCServer::WaitClientGet(int count) { - int fetch_barriers = 0; - while (fetch_barriers < count) { - auto msg = var_get_queue_.Pop(); - if (msg.first == FETCH_BARRIER_MESSAGE) { - fetch_barriers++; - } - } -} - void AsyncGRPCServer::WaitServerReady() { + VLOG(3) << "AsyncGRPCServer is wait server ready"; std::unique_lock lock(this->mutex_ready_); condition_ready_.wait(lock, [=] { return this->ready_ == 1; }); + VLOG(3) << "AsyncGRPCServer WaitSeverReady"; } -void AsyncGRPCServer::RunSyncUpdate() { +void AsyncGRPCServer::StartServer() { ::grpc::ServerBuilder builder; - builder.AddListeningPort(address_, ::grpc::InsecureServerCredentials(), + builder.AddListeningPort(bind_address_, ::grpc::InsecureServerCredentials(), &selected_port_); + builder.SetMaxSendMessageSize(std::numeric_limits::max()); builder.SetMaxReceiveMessageSize(std::numeric_limits::max()); builder.RegisterService(&service_); - cq_send_ = builder.AddCompletionQueue(); - cq_get_ = builder.AddCompletionQueue(); - cq_prefetch_ = builder.AddCompletionQueue(); + for (auto t : rpc_call_map_) { + rpc_cq_[t.first].reset(builder.AddCompletionQueue().release()); + } server_ = builder.BuildAndStart(); - LOG(INFO) << "Server listening on " << address_ + LOG(INFO) << "Server listening on " << bind_address_ << " selected port: " << selected_port_; - std::function send_register = std::bind( - &AsyncGRPCServer::TryToRegisterNewSendOne, this, std::placeholders::_1); - std::function get_register = std::bind( - &AsyncGRPCServer::TryToRegisterNewGetOne, this, std::placeholders::_1); - std::function prefetch_register = - std::bind(&AsyncGRPCServer::TryToRegisterNewPrefetchOne, this, - std::placeholders::_1); + std::function f = + std::bind(&AsyncGRPCServer::TryToRegisterNewOne, this, + std::placeholders::_1, std::placeholders::_2); - for (int i = 0; i < kSendReqsBufSize; ++i) { - TryToRegisterNewSendOne(i); - } - for (int i = 0; i < kGetReqsBufSize; ++i) { - TryToRegisterNewGetOne(i); - } - for (int i = 0; i < kPrefetchReqsBufSize; ++i) { - TryToRegisterNewPrefetchOne(i); - } + for (auto& t : rpc_call_map_) { + auto& rpc_name = t.first; + auto& cq = rpc_cq_[rpc_name]; + auto threadnum = rpc_thread_num_[rpc_name]; + auto& reqs = rpc_reqs_[rpc_name]; - for (int i = 0; i < FLAGS_rpc_server_handle_send_threads; 
++i) { - t_sends_.emplace_back( - new std::thread(std::bind(&AsyncGRPCServer::HandleRequest, this, - cq_send_.get(), "cq_send", send_register))); - } - for (int i = 0; i < FLAGS_rpc_server_handle_get_threads; ++i) { - t_gets_.emplace_back( - new std::thread(std::bind(&AsyncGRPCServer::HandleRequest, this, - cq_get_.get(), "cq_get", get_register))); - } - for (int i = 0; i < FLAGS_rpc_server_handle_prefetch_threads; ++i) { - t_prefetchs_.emplace_back(new std::thread( - std::bind(&AsyncGRPCServer::HandleRequest, this, cq_prefetch_.get(), - "cq_prefetch", prefetch_register))); + reqs.reserve(kRequestBufSize); + + for (int i = 0; i < kRequestBufSize; i++) { + TryToRegisterNewOne(rpc_name, i); + } + + for (int i = 0; i < threadnum; i++) { + rpc_threads_[rpc_name].emplace_back(new std::thread(std::bind( + &AsyncGRPCServer::HandleRequest, this, cq.get(), rpc_name, f))); + VLOG(3) << t.first << " creates threads!"; + } } + { std::lock_guard lock(this->mutex_ready_); ready_ = 1; } condition_ready_.notify_all(); + // wait server server_->Wait(); - for (int i = 0; i < FLAGS_rpc_server_handle_send_threads; ++i) { - t_sends_[i]->join(); - } - for (int i = 0; i < FLAGS_rpc_server_handle_get_threads; ++i) { - t_gets_[i]->join(); - } - for (int i = 0; i < FLAGS_rpc_server_handle_prefetch_threads; ++i) { - t_prefetchs_[i]->join(); + + for (auto& t : rpc_threads_) { + auto& threads = t.second; + for (size_t i = 0; i < threads.size(); ++i) { + threads[i]->join(); + VLOG(3) << t.first << " threads ends!"; + } } } void AsyncGRPCServer::ShutdownQueue() { - std::unique_lock lock(cq_mutex_); - cq_send_->Shutdown(); - cq_get_->Shutdown(); - cq_prefetch_->Shutdown(); + for (auto& t : rpc_cq_) { + t.second->Shutdown(); + VLOG(3) << t.first << " shutdown!"; + } } -// This URL explains why shutdown is complicate: -void AsyncGRPCServer::ShutDown() { +void AsyncGRPCServer::ShutDownImpl() { + std::unique_lock lock(cq_mutex_); is_shut_down_ = true; ShutdownQueue(); + + VLOG(3) << "server_ shutdown!"; server_->Shutdown(); } -void AsyncGRPCServer::TryToRegisterNewSendOne(int i) { +void AsyncGRPCServer::TryToRegisterNewOne(const std::string& rpc_name, + int req_id) { std::unique_lock lock(cq_mutex_); if (is_shut_down_) { VLOG(3) << "shutdown, do not TryToRegisterNewSendOne"; return; } - RequestSend* send = new RequestSend(&service_, cq_send_.get(), sync_mode_, - scope_, &var_recv_queue_, dev_ctx_, i); - send_reqs_[i] = static_cast(send); - VLOG(4) << "Create RequestSend status:" << send->Status(); -} -void AsyncGRPCServer::TryToRegisterNewGetOne(int req_id) { - std::unique_lock lock(cq_mutex_); - if (is_shut_down_) { - VLOG(3) << "shutdown, do not TryToRegisterNewGetOne"; - return; + VLOG(4) << "register send rpc_name:" << rpc_name + << ", handler:" << rpc_call_map_[kRequestSend]; + + auto& reqs = rpc_reqs_[rpc_name]; + auto& handler = rpc_call_map_[rpc_name]; + auto& cq = rpc_cq_[rpc_name]; + + RequestBase* b = nullptr; + if (rpc_name == kRequestSend) { + b = new RequestSend(&service_, cq.get(), handler, req_id); + } else if (rpc_name == kRequestGet) { + b = new RequestGet(&service_, cq.get(), handler, req_id); + } else if (rpc_name == kRequestPrefetch) { + b = new RequestPrefetch(&service_, cq.get(), handler, req_id); + } else { + PADDLE_ENFORCE(false, "not surpported rpc"); } - RequestGet* get = new RequestGet(&service_, cq_get_.get(), sync_mode_, scope_, - dev_ctx_, &var_get_queue_, req_id); - get_reqs_[req_id] = static_cast(get); - VLOG(4) << "Create RequestGet status:" << get->Status(); -} -void 
AsyncGRPCServer::TryToRegisterNewPrefetchOne(int req_id) { - std::unique_lock lock(cq_mutex_); - if (is_shut_down_) { - VLOG(3) << "shutdown, do not TryToRegisterNewPrefetchOne"; - return; - } - RequestPrefetch* prefetch = new RequestPrefetch( - &service_, cq_prefetch_.get(), sync_mode_, scope_, dev_ctx_, executor_, - program_, prefetch_ctx_.get(), req_id); - prefetch_reqs_[req_id] = static_cast(prefetch); + reqs[req_id] = b; - VLOG(4) << "Create RequestPrefetch status:" << prefetch->Status(); + VLOG(4) << "Create RequestSend status:" << b->Status(); } -// FIXME(typhoonzero): change cq_name to enum. void AsyncGRPCServer::HandleRequest( - ::grpc::ServerCompletionQueue* cq, const std::string& cq_name, - std::function TryToRegisterNewOne) { + ::grpc::ServerCompletionQueue* cq, const std::string& rpc_name, + std::function TryToRegisterNewOne) { void* tag = NULL; bool ok = false; while (true) { - VLOG(3) << "HandleRequest for " << cq_name << " wait Next"; + VLOG(3) << "HandleRequest " << rpc_name << " wait next"; if (!cq->Next(&tag, &ok)) { - LOG(INFO) << cq_name << " CompletionQueue shutdown!"; + LOG(INFO) << "CompletionQueue " << rpc_name << " shutdown!"; break; } - VLOG(3) << "HandleRequest for " << cq_name << " get Next"; - int req_id = static_cast(reinterpret_cast(tag)); - if (sync_mode_) { - // FIXME(typhoonzero): de-couple the barriers with recv_op - if (!is_shut_down_ && cq_name == "cq_get") WaitCond(1); - if (!is_shut_down_ && cq_name == "cq_send") WaitCond(0); - VLOG(3) << "HandleRequest for " << cq_name << " after WaitCond"; - } + int req_id = static_cast(reinterpret_cast(tag)); + VLOG(3) << "HandleRequest " << rpc_name << ", req_id:" << req_id + << " get next"; + auto& reqs = rpc_reqs_[rpc_name]; RequestBase* base = nullptr; { - std::lock_guard l(cq_mutex_); - if (cq_name == "cq_get") { - base = get_reqs_[req_id]; - } else if (cq_name == "cq_send") { - base = send_reqs_[req_id]; - } else if (cq_name == "cq_prefetch") { - base = prefetch_reqs_[req_id]; - } + PADDLE_ENFORCE(req_id >= 0 && req_id < kRequestBufSize); + std::unique_lock lock(cq_mutex_); + base = reqs[req_id]; } + // reference: // https://github.com/tensorflow/tensorflow/issues/5596 // https://groups.google.com/forum/#!topic/grpc-io/xftlRy-IQwM // https://groups.google.com/forum/#!topic/grpc-io/ywATt88Ef_I if (!ok) { - LOG(WARNING) << cq_name << " recv no regular event:argument name[" + LOG(WARNING) << "completion queue:" << rpc_name + << " recv no regular event:argument name[" << base->GetReqName() << "]"; - TryToRegisterNewOne(req_id); + TryToRegisterNewOne(rpc_name, req_id); delete base; continue; } + VLOG(3) << "queue id:" << rpc_name << ", req_id:" << req_id + << ", status:" << base->Status(); + switch (base->Status()) { case PROCESS: { base->Process(); - VLOG(4) << cq_name << " PROCESS status:" << base->Status(); break; } case FINISH: { - TryToRegisterNewOne(req_id); - VLOG(4) << cq_name << " FINISH status:" << base->Status(); + TryToRegisterNewOne(rpc_name, req_id); delete base; break; } @@ -415,20 +351,6 @@ void AsyncGRPCServer::HandleRequest( } } -void AsyncGRPCServer::WaitCond(int cond) { - std::unique_lock lock(this->barrier_mutex_); - barrier_condition_.wait(lock, - [=] { return this->barrier_cond_step_ == cond; }); -} - -void AsyncGRPCServer::SetCond(int cond) { - { - std::lock_guard lock(this->barrier_mutex_); - barrier_cond_step_ = cond; - } - barrier_condition_.notify_all(); -} - } // namespace detail } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/detail/grpc_server.h 
b/paddle/fluid/operators/detail/grpc_server.h index bdff9801a928699f8391bfb68c1c7bd2d75aa642..d1fcbc414f123c5c4810d9cecf807a406aa2c405 100644 --- a/paddle/fluid/operators/detail/grpc_server.h +++ b/paddle/fluid/operators/detail/grpc_server.h @@ -14,6 +14,8 @@ limitations under the License. */ #pragma once +#include +#include #include #include // NOLINT #include @@ -28,6 +30,8 @@ limitations under the License. */ #include "paddle/fluid/framework/selected_rows.h" #include "paddle/fluid/framework/var_type.h" #include "paddle/fluid/operators/detail/grpc_service.h" +#include "paddle/fluid/operators/detail/request_handler.h" +#include "paddle/fluid/operators/detail/rpc_server.h" #include "paddle/fluid/operators/detail/send_recv.grpc.pb.h" #include "paddle/fluid/operators/detail/send_recv.pb.h" #include "paddle/fluid/operators/detail/sendrecvop_utils.h" @@ -37,106 +41,48 @@ namespace paddle { namespace operators { namespace detail { -typedef std::pair> - ReceivedMessage; -typedef framework::BlockingQueue ReceivedQueue; - -typedef std::pair MessageWithName; class RequestBase; -class AsyncGRPCServer final { +class AsyncGRPCServer final : public RPCServer { public: - explicit AsyncGRPCServer(const std::string &address, bool sync_mode) - : address_(address), sync_mode_(sync_mode), ready_(0) {} - - ~AsyncGRPCServer() {} - void WaitServerReady(); - void RunSyncUpdate(); - - // functions to sync server barrier status. - void WaitCond(int cond); - void SetCond(int cond); - void WaitClientGet(int count); - - void SetScope(framework::Scope *scope) { scope_ = scope; } - - void SetDevCtx(const platform::DeviceContext *dev_ctx) { dev_ctx_ = dev_ctx; } - - void SetProgram(framework::ProgramDesc *program) { program_ = program; } - - void SetExecutor(framework::Executor *executor) { executor_ = executor; } - - void SetPrefetchPreparedCtx( - std::unique_ptr prepared) { - prefetch_ctx_.reset(prepared.release()); - } - - int GetSelectedPort() const { return selected_port_; } - - const ReceivedMessage Get() { return this->var_recv_queue_.Pop(); } + explicit AsyncGRPCServer(const std::string& address, int client_num) + : RPCServer(address, client_num), ready_(0) {} - void Push(const std::string &msg_name) { - this->var_recv_queue_.Push(std::make_pair(msg_name, nullptr)); - } + virtual ~AsyncGRPCServer() {} + void WaitServerReady() override; + void StartServer() override; - void ShutDown(); + private: + void HandleRequest( + ::grpc::ServerCompletionQueue* cq, const std::string& rpc_name, + std::function TryToRegisterNewOne); - protected: - void HandleRequest(::grpc::ServerCompletionQueue *cq, - const std::string &cq_name, - std::function TryToRegisterNewOne); - void TryToRegisterNewSendOne(int req_id); - void TryToRegisterNewGetOne(int req_id); - void TryToRegisterNewPrefetchOne(int req_id); + void TryToRegisterNewOne(const std::string& rpc_name, int req_id); void ShutdownQueue(); + void ShutDownImpl() override; private: - static const int kSendReqsBufSize = 100; - static const int kGetReqsBufSize = 100; - static const int kPrefetchReqsBufSize = 10; + static const int kRequestBufSize = 100; std::mutex cq_mutex_; volatile bool is_shut_down_ = false; - std::unique_ptr<::grpc::ServerCompletionQueue> cq_send_; - std::unique_ptr<::grpc::ServerCompletionQueue> cq_get_; - std::unique_ptr<::grpc::ServerCompletionQueue> cq_prefetch_; - - RequestBase *send_reqs_[kSendReqsBufSize]; - RequestBase *get_reqs_[kGetReqsBufSize]; - RequestBase *prefetch_reqs_[kPrefetchReqsBufSize]; GrpcService::AsyncService service_; 
std::unique_ptr<::grpc::Server> server_; - std::string address_; - const bool sync_mode_; - framework::Scope *scope_; - const platform::DeviceContext *dev_ctx_; - - // received variable from RPC, operators fetch variable from this queue. - framework::BlockingQueue var_get_queue_; - // client send variable to this queue. - ReceivedQueue var_recv_queue_; - // condition of the sub program std::mutex barrier_mutex_; mutable int barrier_cond_step_; std::condition_variable barrier_condition_; - std::vector> t_sends_; - std::vector> t_gets_; - std::vector> t_prefetchs_; - - std::unique_ptr t_prefetch_; - - std::unique_ptr prefetch_ctx_; - framework::ProgramDesc *program_; - framework::Executor *executor_; - int selected_port_; - std::mutex mutex_ready_; std::condition_variable condition_ready_; + int ready_; + + std::map> rpc_cq_; + std::map>> rpc_threads_; + std::map> rpc_reqs_; }; }; // namespace detail diff --git a/paddle/fluid/operators/detail/grpc_server_test.cc b/paddle/fluid/operators/detail/grpc_server_test.cc index 350a7ee1234da5b88d09ea955ce14b7c161d804e..f97f638701cfb263f28dddbdc3bc80fb16468744 100644 --- a/paddle/fluid/operators/detail/grpc_server_test.cc +++ b/paddle/fluid/operators/detail/grpc_server_test.cc @@ -24,13 +24,16 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/operators/detail/request_handler_impl.h" + namespace framework = paddle::framework; namespace platform = paddle::platform; namespace detail = paddle::operators::detail; USE_OP(lookup_table); -std::unique_ptr rpc_service_; +std::unique_ptr g_rpc_service; +std::unique_ptr g_req_handler; framework::BlockDesc* AppendPrefetchBlcok(framework::ProgramDesc* program) { auto root_block = program->MutableBlock(0); @@ -88,8 +91,7 @@ void InitTensorsOnServer(framework::Scope* scope, platform::CPUPlace* place, } } -void StartServer(const std::string& endpoint) { - rpc_service_.reset(new detail::AsyncGRPCServer(endpoint, true)); +void StartServer() { framework::ProgramDesc program; framework::Scope scope; platform::CPUPlace place; @@ -99,42 +101,59 @@ void StartServer(const std::string& endpoint) { auto prepared = exe.Prepare(program, block->ID()); InitTensorsOnServer(&scope, &place, 10); - rpc_service_->SetProgram(&program); - rpc_service_->SetPrefetchPreparedCtx(std::move(prepared)); - rpc_service_->SetDevCtx(&ctx); - rpc_service_->SetScope(&scope); - rpc_service_->SetExecutor(&exe); + g_req_handler->SetProgram(&program); + g_req_handler->SetPrefetchPreparedCtx(std::move(prepared)); + g_req_handler->SetDevCtx(&ctx); + g_req_handler->SetScope(&scope); + g_req_handler->SetExecutor(&exe); + + g_rpc_service->RegisterRPC(detail::kRequestPrefetch, g_req_handler.get()); + g_req_handler->SetRPCServer(g_rpc_service.get()); + + std::thread server_thread( + std::bind(&detail::AsyncGRPCServer::StartServer, g_rpc_service.get())); - rpc_service_->RunSyncUpdate(); + // FIXME(gongwb): don't use hard time. 
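The rewritten AsyncGRPCServer above replaces the three hard-coded send/get/prefetch completion queues with maps keyed by the RPC name (rpc_cq_, rpc_threads_, rpc_reqs_), each with its own worker-thread count. Below is a simplified, standard-C++-only sketch of that registry pattern; the class and method names are illustrative and there is no real gRPC dependency.

```
// Simplified sketch: one registry entry per RPC name, each with its own
// worker-thread count, mirroring how the rewritten server keys its
// completion queues, threads and request buffers by RPC name.
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <thread>
#include <vector>

class MiniRpcServer {
 public:
  void RegisterRPC(const std::string& rpc_name, std::function<void()> handler,
                   int thread_num = 2) {
    handlers_[rpc_name] = std::move(handler);
    thread_num_[rpc_name] = thread_num;
  }

  void StartServer() {
    for (auto& kv : handlers_) {
      const std::string& rpc_name = kv.first;
      std::function<void()> handler = kv.second;  // copy for the workers
      for (int i = 0; i < thread_num_[rpc_name]; ++i) {
        threads_[rpc_name].emplace_back(
            std::make_unique<std::thread>([handler] {
              // A real server would loop on its completion queue here.
              handler();
            }));
      }
      std::cout << rpc_name << " creates threads!\n";
    }
    for (auto& kv : threads_) {
      for (auto& t : kv.second) t->join();
    }
  }

 private:
  std::map<std::string, std::function<void()>> handlers_;
  std::map<std::string, int> thread_num_;
  std::map<std::string, std::vector<std::unique_ptr<std::thread>>> threads_;
};

int main() {
  MiniRpcServer server;
  server.RegisterRPC("RequestSend", [] { std::cout << "handle send\n"; }, 2);
  server.RegisterRPC("RequestGet", [] { std::cout << "handle get\n"; }, 1);
  server.StartServer();
  return 0;
}
```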
+ sleep(10); + LOG(INFO) << "got nccl id and stop server..."; + g_rpc_service->ShutDown(); + server_thread.join(); } -TEST(PREFETCH, DISABLED_CPU) { - // start up a server instance backend - std::thread server_thread(StartServer, "127.0.0.1:8889"); - sleep(2); +TEST(PREFETCH, CPU) { + g_req_handler.reset(new detail::RequestPrefetchHandler(true)); + g_rpc_service.reset(new detail::AsyncGRPCServer("127.0.0.1:0", 1)); + + std::thread server_thread(StartServer); + g_rpc_service->WaitServerReady(); + + detail::RPCClient client; + int port = g_rpc_service->GetSelectedPort(); + std::string ep = paddle::string::Sprintf("127.0.0.1:%d", port); + framework::Scope scope; platform::CPUPlace place; platform::CPUDeviceContext ctx(place); - // create var on local scope - int64_t rows_numel = 5; - InitTensorsOnClient(&scope, &place, rows_numel); - std::string in_var_name("ids"); - std::string out_var_name("out"); - - auto client = detail::RPCClient::GetInstance(); - client->AsyncPrefetchVariable("127.0.0.1:8889", ctx, scope, in_var_name, - out_var_name); - client->Wait(); - - auto var = scope.Var(out_var_name); - auto value = var->GetMutable()->value(); - auto ptr = value.mutable_data(place); - - rpc_service_->ShutDown(); - server_thread.join(); - rpc_service_.reset(nullptr); - - for (int64_t i = 0; i < rows_numel; ++i) { - EXPECT_EQ(ptr[0 + i * value.dims()[1]], static_cast(i * 2)); + { + // create var on local scope + int64_t rows_numel = 5; + InitTensorsOnClient(&scope, &place, rows_numel); + std::string in_var_name("ids"); + std::string out_var_name("out"); + + client.AsyncPrefetchVariable(ep, ctx, scope, in_var_name, out_var_name); + client.Wait(); + auto var = scope.Var(out_var_name); + auto value = var->GetMutable()->value(); + auto ptr = value.mutable_data(place); + + for (int64_t i = 0; i < rows_numel; ++i) { + EXPECT_EQ(ptr[0 + i * value.dims()[1]], static_cast(i * 2)); + } } + + server_thread.join(); + LOG(INFO) << "begin reset"; + g_rpc_service.reset(nullptr); + g_req_handler.reset(nullptr); } diff --git a/paddle/fluid/operators/detail/request_handler.h b/paddle/fluid/operators/detail/request_handler.h new file mode 100644 index 0000000000000000000000000000000000000000..4bc5e7f10ee2a8939d230fe96517bd9f56c13933 --- /dev/null +++ b/paddle/fluid/operators/detail/request_handler.h @@ -0,0 +1,127 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include + +#include +#include +#include +#include + +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/framework/var_type.h" +#include "paddle/fluid/operators/detail/sendrecvop_utils.h" + +namespace paddle { +namespace operators { +namespace detail { + +constexpr char kRequestSend[] = "RequestSend"; +constexpr char kRequestGet[] = "RequestGet"; +constexpr char kRequestPrefetch[] = "RequestPrefetch"; + +class RPCServer; + +class RequestHandler { + public: + explicit RequestHandler(bool sync_mode) + : sync_mode_(sync_mode), + dev_ctx_(nullptr), + executor_(nullptr), + scope_(nullptr), + program_(nullptr), + rpc_server_(nullptr) {} + + virtual ~RequestHandler() {} + + // Set attributes. + void SetScope(framework::Scope* scope) { scope_ = scope; } + void SetDevCtx(const platform::DeviceContext* dev_ctx) { dev_ctx_ = dev_ctx; } + void SetProgram(framework::ProgramDesc* program) { program_ = program; } + void SetExecutor(framework::Executor* executor) { executor_ = executor; } + void SetPrefetchPreparedCtx( + std::unique_ptr prepared) { + prefetch_ctx_.reset(prepared.release()); + } + + // Used for async. + void SetGradToPreparedCtx( + std::unordered_map< + std::string, std::shared_ptr>* g) { + grad_to_prepared_ctx_ = g; + } + + void SetRPCServer(RPCServer* rpc_server) { rpc_server_ = rpc_server; } + + // Get attributes. + bool sync_mode() { return sync_mode_; } + framework::Scope* scope() { return scope_; } + const platform::DeviceContext* dev_ctx() { return dev_ctx_; } + framework::ExecutorPrepareContext* prefetch_ctx() { + return prefetch_ctx_.get(); + } + framework::ProgramDesc* program() { return program_; } + framework::Executor* executor() { return executor_; } + std::vector& sparse_vars() { return sparse_vars_; } + + // This function processes user's rpc request. + // The implemention is in request_handler_impl. + // example: + // std::string varname = request_.varname(); + // + // auto scope = request_handler_->scope(); + // auto invar = scope->FindVar(varname); + // framework::Variable* outvar = nullptr; + // + // request_handler_->Handle(varname, scope, invar, &outvar); + // if (outvar) { + // SerializeToByteBuffer(varname, outvar, + // *request_handler_->dev_ctx(), &reply_); + // } + virtual bool Handle(const std::string& varname, framework::Scope* scope, + framework::Variable* var, + framework::Variable** outvar) = 0; + + protected: + const bool sync_mode_; + + const platform::DeviceContext* dev_ctx_; + framework::Executor* executor_; + framework::Scope* scope_; + framework::ProgramDesc* program_; + std::unique_ptr prefetch_ctx_; + + // Used for async. 
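The RequestHandler interface above reduces every RPC to one virtual Handle(varname, scope, invar, outvar) call; the caller serializes *outvar back to the client only when the handler sets it (RequestGet and RequestPrefetch do, RequestSend does not). A toy model of that contract, using plain standard-library types as stand-ins for framework::Scope and framework::Variable:

```
// Toy model of the Handle() contract; not Paddle types.
#include <iostream>
#include <map>
#include <string>

using Variable = std::string;                    // stand-in for framework::Variable
using Scope = std::map<std::string, Variable>;   // stand-in for framework::Scope

class ToyRequestHandler {
 public:
  virtual ~ToyRequestHandler() = default;
  virtual bool Handle(const std::string& varname, Scope* scope,
                      Variable* invar, Variable** outvar) = 0;
};

class ToyGetHandler final : public ToyRequestHandler {
 public:
  bool Handle(const std::string& varname, Scope* scope, Variable* /*invar*/,
              Variable** outvar) override {
    auto it = scope->find(varname);
    if (it == scope->end()) return false;  // server-side var missing
    *outvar = &it->second;                 // caller serializes *outvar back
    return true;
  }
};

int main() {
  Scope scope{{"w", "weights-v1"}};
  ToyGetHandler handler;
  Variable* out = nullptr;
  if (handler.Handle("w", &scope, nullptr, &out) && out) {
    std::cout << "reply with: " << *out << "\n";
  }
  return 0;
}
```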
+ std::unordered_map>* + grad_to_prepared_ctx_; + + // Record received sparse variables, so that + // we could reset those after execute optimize program + std::vector sparse_vars_; + RPCServer* rpc_server_; + + std::mutex sparse_var_mutex_; +}; + +} // namespace detail +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/detail/request_handler_impl.cc b/paddle/fluid/operators/detail/request_handler_impl.cc new file mode 100644 index 0000000000000000000000000000000000000000..f16c06d52f4fb86d51083a8b3b98d05a64c1af74 --- /dev/null +++ b/paddle/fluid/operators/detail/request_handler_impl.cc @@ -0,0 +1,115 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include + +#include "paddle/fluid/framework/blocking_queue.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/operators/detail/request_handler_impl.h" +#include "paddle/fluid/operators/detail/rpc_server.h" +#include "paddle/fluid/operators/detail/sendrecvop_utils.h" +#include "paddle/fluid/operators/detail/variable_response.h" + +namespace paddle { +namespace operators { +namespace detail { + +bool RequestSendHandler::Handle(const std::string& varname, + framework::Scope* scope, + framework::Variable* invar, + framework::Variable** outvar) { + VLOG(4) << "RequestSendHandler:" << varname; + + // Async + if (!sync_mode_) { + try { + executor_->RunPreparedContext((*grad_to_prepared_ctx_)[varname].get(), + scope); + } catch (std::exception& e) { + LOG(ERROR) << "async: run sub program error " << e.what(); + return false; + } + return true; + } + + // Sync + if (varname == BATCH_BARRIER_MESSAGE) { + VLOG(3) << "sync: recv batch barrier message"; + rpc_server_->IncreaseBatchBarrier(kRequestSend); + } else { + VLOG(3) << "sync: received var_name: " << varname; + if (sync_mode_) { + rpc_server_->WaitCond(kRequestSend); + } + + if (invar == nullptr) { + LOG(ERROR) << "sync: Can not find server side var: " << varname; + PADDLE_THROW("sync: Can not find server side var"); + return false; + } + + if (invar->IsType()) { + std::unique_lock lock(sparse_var_mutex_); + sparse_vars_.push_back(invar); + } + } + + return true; +} + +bool RequestGetHandler::Handle(const std::string& varname, + framework::Scope* scope, + framework::Variable* invar, + framework::Variable** outvar) { + VLOG(4) << "RequestGetHandler:" << varname; + + if (varname != FETCH_BARRIER_MESSAGE) { + if (sync_mode_) { + rpc_server_->WaitCond(kRequestGet); + } + *outvar = scope_->FindVar(varname); + return true; + } + + // FETCH_BARRIER_MESSAGE + if (sync_mode_) { + VLOG(3) << "sync: recv fetch barrier message"; + rpc_server_->IncreaseBatchBarrier(kRequestGet); + } + + return true; +} + +bool RequestPrefetchHandler::Handle(const std::string& varname, + framework::Scope* scope, + framework::Variable* 
invar, + framework::Variable** outvar) { + VLOG(4) << "RequestPrefetchHandler " << varname; + + auto var_desc = program_->Block(0).FindVar(varname); + *outvar = scope->FindVar(varname); + InitializeVariable(*outvar, var_desc->GetType()); + executor_->RunPreparedContext(prefetch_ctx_.get(), scope); + + return true; +} + +} // namespace detail +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/detail/request_handler_impl.h b/paddle/fluid/operators/detail/request_handler_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..8d0c62232b68ad6c05e751c25103802ee12db57e --- /dev/null +++ b/paddle/fluid/operators/detail/request_handler_impl.h @@ -0,0 +1,64 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +#include +#include +#include +#include + +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/framework/var_type.h" +#include "paddle/fluid/operators/detail/request_handler.h" +#include "paddle/fluid/operators/detail/sendrecvop_utils.h" + +namespace paddle { +namespace operators { +namespace detail { + +class RequestSendHandler final : public RequestHandler { + public: + explicit RequestSendHandler(bool sync_mode) : RequestHandler(sync_mode) {} + virtual ~RequestSendHandler() {} + bool Handle(const std::string& varname, framework::Scope* scope, + framework::Variable* var, framework::Variable** outvar) override; +}; + +class RequestGetHandler final : public RequestHandler { + public: + explicit RequestGetHandler(bool sync_mode) : RequestHandler(sync_mode) {} + virtual ~RequestGetHandler() {} + bool Handle(const std::string& varname, framework::Scope* scope, + framework::Variable* var, framework::Variable** outvar) override; +}; + +class RequestPrefetchHandler final : public RequestHandler { + public: + explicit RequestPrefetchHandler(bool sync_mode) : RequestHandler(sync_mode) {} + virtual ~RequestPrefetchHandler() {} + bool Handle(const std::string& varname, framework::Scope* scope, + framework::Variable* var, framework::Variable** outvar) override; +}; + +} // namespace detail +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/detail/rpc_server.cc b/paddle/fluid/operators/detail/rpc_server.cc new file mode 100644 index 0000000000000000000000000000000000000000..448763372a8c224cc68319a4a444915896b68234 --- /dev/null +++ b/paddle/fluid/operators/detail/rpc_server.cc @@ -0,0 +1,113 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include + +#include "paddle/fluid/operators/detail/rpc_server.h" + +namespace paddle { +namespace operators { +namespace detail { + +void RPCServer::ShutDown() { + LOG(INFO) << "RPCServer ShutDown "; + ShutDownImpl(); + + exit_flag_ = true; + barrier_cond_.notify_all(); + rpc_cond_.notify_all(); +} + +void RPCServer::SavePort() const { + auto file_path = string::Sprintf("/tmp/paddle.%d.port", ::getpid()); + std::ofstream port_file; + port_file.open(file_path); + port_file << selected_port_; + port_file.close(); + VLOG(4) << "selected port written to " << file_path; +} + +void RPCServer::WaitBarrier(const std::string& rpc_name) { + std::unique_lock lock(this->mutex_); + barrier_cond_.wait(lock, [=] { + return (barrier_counter_[rpc_name] >= client_num_ || exit_flag_.load()); + }); + + VLOG(3) << "batch_barrier_:" << barrier_counter_[rpc_name]; +} + +void RPCServer::IncreaseBatchBarrier(const std::string rpc_name) { + VLOG(3) << "RPCServer begin IncreaseBatchBarrier " << rpc_name; + int b = 0; + { + std::unique_lock lock(mutex_); + b = ++barrier_counter_[rpc_name]; + } + + VLOG(3) << "RPCServer IncreaseBatchBarrier " << rpc_name + << ", barrier_count:" << b << ", fan_in" << client_num_; + + if (b >= client_num_) { + barrier_cond_.notify_all(); + } +} + +void RPCServer::ResetBarrierCounter() { + VLOG(3) << "RPCServer ResetBarrierCounter "; + std::unique_lock lock(mutex_); + for (auto& t : barrier_counter_) { + t.second = 0; + } +} + +void RPCServer::RegisterRPC(const std::string& rpc_name, + RequestHandler* handler, int thread_num) { + rpc_call_map_[rpc_name] = handler; + rpc_thread_num_[rpc_name] = thread_num; + + static int cond = -1; + rpc_cond_map_[rpc_name] = ++cond; + VLOG(4) << "RegisterRPC rpc_name:" << rpc_name << ", handler:" << handler + << ", cond:" << rpc_cond_map_[rpc_name]; +} + +void RPCServer::SetCond(const std::string& rpc_name) { + VLOG(3) << "RPCServer SetCond " << rpc_name; + { + std::unique_lock lock(mutex_); + cur_cond_ = rpc_cond_map_[rpc_name]; + } + + rpc_cond_.notify_all(); +} + +void RPCServer::WaitCond(const std::string& rpc_name) { + VLOG(3) << "RPCServer WaitCond " << rpc_name; + int cond = 0; + { + std::unique_lock lock(mutex_); + cond = rpc_cond_map_[rpc_name]; + } + + std::unique_lock lock(mutex_); + rpc_cond_.wait( + lock, [=] { return (cur_cond_.load() == cond || exit_flag_.load()); }); +} + +} // namespace detail +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/detail/rpc_server.h b/paddle/fluid/operators/detail/rpc_server.h new file mode 100644 index 0000000000000000000000000000000000000000..c2e7ae706c9dc6776e09b25e424b30f110c3855d --- /dev/null +++ b/paddle/fluid/operators/detail/rpc_server.h @@ -0,0 +1,91 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
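The new rpc_server.cc above implements the per-RPC barrier with a counter, a mutex and a condition variable: each trainer's barrier message bumps the counter through IncreaseBatchBarrier, and WaitBarrier blocks until the counter reaches client_num_. A minimal standalone sketch of that mechanism (single RPC name, no exit flag, illustrative names):

```
// Minimal sketch of the barrier pattern used by RPCServer.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

class MiniBarrier {
 public:
  explicit MiniBarrier(int client_num) : client_num_(client_num) {}

  void Increase() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      ++counter_;
    }
    cond_.notify_all();
  }

  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cond_.wait(lock, [this] { return counter_ >= client_num_; });
  }

  void Reset() {
    std::lock_guard<std::mutex> lock(mutex_);
    counter_ = 0;
  }

 private:
  const int client_num_;
  int counter_ = 0;
  std::mutex mutex_;
  std::condition_variable cond_;
};

int main() {
  MiniBarrier barrier(3);
  std::vector<std::thread> clients;
  for (int i = 0; i < 3; ++i) {
    clients.emplace_back([&barrier, i] {
      std::cout << "client " << i << " sent its gradient\n";
      barrier.Increase();
    });
  }
  barrier.Wait();  // returns only after all three clients checked in
  std::cout << "all gradients received, run the optimize block\n";
  for (auto& c : clients) c.join();
  barrier.Reset();
  return 0;
}
```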
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include // NOLINT +#include +#include +#include "paddle/fluid/operators/detail/request_handler.h" + +namespace paddle { +namespace operators { +namespace detail { + +class RPCServer { + public: + explicit RPCServer(const std::string& address, int client_num) + : cur_cond_(0), + bind_address_(address), + exit_flag_(false), + selected_port_(0), + client_num_(client_num) {} + + virtual ~RPCServer() {} + virtual void StartServer() = 0; + virtual void WaitServerReady() = 0; + + void ShutDown(); + + bool IsExit() { return exit_flag_.load(); } + + int GetSelectedPort() const { return selected_port_; } + void SavePort() const; + + // RegisterRPC, register the rpc method name to a handler + // class, and auto generate a condition id for this call + // to be used for the barrier. + void RegisterRPC(const std::string& rpc_name, RequestHandler* handler, + int thread_num = 5); + + // Wait util all the clients have reached the barrier for one + // rpc method. This function should be called in the + // RequestHandler if you want to run the server/client in a + // synchronous mode. + void WaitBarrier(const std::string& rpc_name); + + void SetCond(const std::string& rpc_name); + void WaitCond(const std::string& rpc_name); + void IncreaseBatchBarrier(const std::string rpc_name); + void ResetBarrierCounter(); + + protected: + virtual void ShutDownImpl() = 0; + + private: + std::mutex mutex_; + std::unordered_map barrier_counter_; + std::condition_variable barrier_cond_; + + std::unordered_map rpc_cond_map_; + std::atomic cur_cond_; + std::condition_variable rpc_cond_; + + protected: + std::string bind_address_; + std::atomic exit_flag_; + int selected_port_; + + const int client_num_; + + std::unordered_map rpc_call_map_; + std::unordered_map rpc_thread_num_; + friend class RequestHandler; +}; + +}; // namespace detail +}; // namespace operators +}; // namespace paddle diff --git a/paddle/fluid/operators/detail/variable_response.h b/paddle/fluid/operators/detail/variable_response.h index bf624da2a6c26472e47711b3c6409f78afba0a64..69cfd784f8dd4f129f50c6882061e53e8535b949 100644 --- a/paddle/fluid/operators/detail/variable_response.h +++ b/paddle/fluid/operators/detail/variable_response.h @@ -67,8 +67,8 @@ class VariableResponse { framework::Scope* GetMutableLocalScope() const { return local_scope_; } - inline std::string Varname() { return meta_.varname(); } - inline std::string OutVarname() { return meta_.out_varname(); } + inline std::string Varname() const { return meta_.varname(); } + inline std::string OutVarname() const { return meta_.out_varname(); } // should call parse first. framework::Variable* GetVar() { diff --git a/paddle/fluid/operators/fake_dequantize_op.cc b/paddle/fluid/operators/fake_dequantize_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..43f949111104ee56efc8625bdd609e412ef7f37d --- /dev/null +++ b/paddle/fluid/operators/fake_dequantize_op.cc @@ -0,0 +1,76 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/fake_dequantize_op.h" +#include + +namespace paddle { +namespace operators { + +class FakeDequantizeMaxAbsOp : public framework::OperatorWithKernel { + public: + FakeDequantizeMaxAbsOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of FakeDequantizeMaxAbsOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of FakeDequantizeMaxAbsOp should not be null."); + ctx->SetOutputDim("Out", ctx->GetInputDim("X")); + ctx->ShareLoD("X", /*->*/ "Out"); + } +}; + +class FakeDequantizeMaxAbsOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", + "(Tensor) The input with float-32/64 type is the " + "low precision tensor."); + AddOutput("Out", + "(Tensor) The output is the dequantized high " + "precision tensor."); + AddAttr("num_bits", + "(int) `num_bits` is the quantization level bits, " + "such as 2, 5, 8."); + AddAttr("scale", + "(float) The maximum absolute value of low precision tensor." + "It is usually calculated by the fake_quantize_max_abs_op."); + AddComment(R"DOC( +FakeDequantizeMaxAbsOp operator. + +This calculation is an opposite operation of FakeQuantizeMaxAbsOp: + +$$Out = \frac{scale*X}{2^{num_bits} - 1}$$ + +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +using CPU = paddle::platform::CPUDeviceContext; + +REGISTER_OPERATOR(fake_dequantize_max_abs, ops::FakeDequantizeMaxAbsOp, + ops::FakeDequantizeMaxAbsOpMaker, + paddle::framework::EmptyGradOpMaker); +REGISTER_OP_CPU_KERNEL(fake_dequantize_max_abs, + ops::FakeDequantizeMaxAbsKernel, + ops::FakeDequantizeMaxAbsKernel); diff --git a/paddle/fluid/operators/fake_dequantize_op.cu b/paddle/fluid/operators/fake_dequantize_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..1bd38d1bd2c3a6f90d2fbad415d61efaead3afe9 --- /dev/null +++ b/paddle/fluid/operators/fake_dequantize_op.cu @@ -0,0 +1,21 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/fake_dequantize_op.h" + +namespace ops = paddle::operators; +using CUDA = paddle::platform::CUDADeviceContext; +REGISTER_OP_CUDA_KERNEL(fake_dequantize_max_abs, + ops::FakeDequantizeMaxAbsKernel, + ops::FakeDequantizeMaxAbsKernel); diff --git a/paddle/fluid/operators/fake_dequantize_op.h b/paddle/fluid/operators/fake_dequantize_op.h new file mode 100644 index 0000000000000000000000000000000000000000..0901e68b3761159c3cc9c6684567bee38ec3f16d --- /dev/null +++ b/paddle/fluid/operators/fake_dequantize_op.h @@ -0,0 +1,42 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { +template +class FakeDequantizeMaxAbsKernel : public framework::OpKernel { + public: + virtual void Compute(const framework::ExecutionContext& ctx) const { + auto* in = ctx.Input("X"); + auto* out = ctx.Output("Out"); + out->mutable_data(in->place()); + + int num_bits = ctx.Attr("num_bits"); + T scale = static_cast(ctx.Attr("scale")); + int range = std::pow(2, num_bits) - 1; + + auto eigen_out = framework::EigenVector::Flatten(*out); + auto eigen_in = framework::EigenVector::Flatten(*in); + auto& dev = *ctx.template device_context().eigen_device(); + eigen_out.device(dev) = (scale / range) * eigen_in; + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/gen_nccl_id_op.cc b/paddle/fluid/operators/gen_nccl_id_op.cc index a5678f63466d368b3dd59380c18f9625cabd368b..4bce2d322d825110a446c9bc5eccdacf0ba3c943 100644 --- a/paddle/fluid/operators/gen_nccl_id_op.cc +++ b/paddle/fluid/operators/gen_nccl_id_op.cc @@ -23,6 +23,7 @@ limitations under the License. */ #include "paddle/fluid/framework/threadpool.h" #include "paddle/fluid/operators/detail/grpc_client.h" #include "paddle/fluid/operators/detail/grpc_server.h" +#include "paddle/fluid/operators/detail/request_handler_impl.h" #include "paddle/fluid/platform/nccl_helper.h" namespace paddle { @@ -75,19 +76,23 @@ class GenNCCLIdOp : public framework::OperatorBase { // NOTE: Can not use unique_ptr here because the default // deleter will call GRPC Server's base class's dtor and // that will cause a wired crash. 
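For the new fake_dequantize_max_abs operator above, the kernel computes Out = scale * X / (2^num_bits - 1), i.e. the inverse of max-abs fake quantization. A small standalone check of that formula with assumed example values (num_bits = 8, scale = 12.5):

```
// Standalone check of the dequantize formula from FakeDequantizeMaxAbsOp:
// out = scale * x / (2^num_bits - 1). With num_bits = 8 the quantized
// range is 0..255, so a quantized value of 255 maps back to the full scale.
#include <cmath>
#include <iostream>
#include <vector>

int main() {
  const int num_bits = 8;
  const float scale = 12.5f;                          // max |value| of the original tensor
  const float range = std::pow(2.0f, num_bits) - 1;   // 255 for 8 bits

  std::vector<float> quantized = {0.0f, 51.0f, 127.5f, 255.0f};
  for (float q : quantized) {
    float dequantized = scale * q / range;            // same as (scale / range) * q
    std::cout << q << " -> " << dequantized << "\n";
  }
  // 255 -> 12.5, i.e. the maximum absolute value is recovered exactly.
  return 0;
}
```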
- detail::AsyncGRPCServer rpc_service(endpoint, true); + detail::RequestSendHandler rpc_h(true); + detail::AsyncGRPCServer rpc_service(endpoint, 1); + rpc_service.RegisterRPC(detail::kRequestSend, &rpc_h); + rpc_h.SetRPCServer(&rpc_service); + framework::ProgramDesc empty_program; framework::Executor executor(dev_ctx.GetPlace()); - rpc_service.SetScope(scope); - rpc_service.SetDevCtx(&dev_ctx); - rpc_service.SetProgram(&empty_program); - rpc_service.SetExecutor(&executor); + rpc_h.SetScope(scope); + rpc_h.SetDevCtx(&dev_ctx); + rpc_h.SetProgram(&empty_program); + rpc_h.SetExecutor(&executor); std::thread server_thread( - std::bind(&detail::AsyncGRPCServer::RunSyncUpdate, &rpc_service)); - rpc_service.SetCond(0); + std::bind(&detail::AsyncGRPCServer::StartServer, &rpc_service)); + rpc_service.SetCond(detail::kRequestSend); VLOG(3) << "start getting nccl id from trainer 0..."; - auto recv = rpc_service.Get(); + rpc_service.WaitBarrier(detail::kRequestSend); VLOG(3) << "got nccl id and stop server..."; rpc_service.ShutDown(); VLOG(3) << "rpc server stopped"; diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index df5f229acd75ee3df55d46444a63d9f1915f9d22..71e75c25321812c849e205460217b174d80654be 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -19,14 +19,16 @@ limitations under the License. */ #include // NOLINT #include +#include "paddle/fluid/operators/detail/grpc_server.h" +#include "paddle/fluid/operators/detail/request_handler_impl.h" #include "paddle/fluid/operators/listen_and_serv_op.h" #include "paddle/fluid/platform/profiler.h" namespace paddle { namespace operators { -void RunServer(std::shared_ptr service) { - service->RunSyncUpdate(); +void RunServer(std::shared_ptr service) { + service->StartServer(); VLOG(4) << "RunServer thread end"; } static void split(const std::string &str, char sep, @@ -67,8 +69,6 @@ static void ParallelExecuteBlocks( for (size_t i = 0; i < fs.size(); ++i) fs[i].wait(); } -std::atomic_int ListenAndServOp::selected_port_{0}; - ListenAndServOp::ListenAndServOp(const std::string &type, const framework::VariableNameMap &inputs, const framework::VariableNameMap &outputs, @@ -78,7 +78,6 @@ ListenAndServOp::ListenAndServOp(const std::string &type, ListenAndServOp::~ListenAndServOp() { Stop(); } void ListenAndServOp::Stop() { - rpc_service_->Push(LISTEN_TERMINATE_MESSAGE); rpc_service_->ShutDown(); server_thread_->join(); auto file_path = string::Sprintf("/tmp/paddle.%d.port", ::getpid()); @@ -87,26 +86,13 @@ void ListenAndServOp::Stop() { void ListenAndServOp::SavePort() const { // NOTE: default write file to /tmp/paddle.selected_port - selected_port_ = rpc_service_->GetSelectedPort(); - auto file_path = string::Sprintf("/tmp/paddle.%d.port", ::getpid()); - std::ofstream port_file; - port_file.open(file_path); - port_file << selected_port_.load(); - port_file.close(); - VLOG(4) << "selected port written to " << file_path; -} - -void ListenAndServOp::WaitServerReady() { - while (selected_port_.load() == 0) { - } + rpc_service_->SavePort(); } void ListenAndServOp::RunSyncLoop(framework::Executor *executor, framework::ProgramDesc *program, framework::Scope *recv_scope, framework::BlockDesc *prefetch_block) const { - auto fan_in = Attr("Fanin"); - size_t num_blocks = program->Size(); PADDLE_ENFORCE_GE(num_blocks, 2, "server program should have at least 2 blocks"); @@ -121,49 +107,24 @@ void ListenAndServOp::RunSyncLoop(framework::Executor *executor, 
optimize_prepared.begin(), std::shared_ptr(nullptr)); - bool exit_flag = false; + rpc_service_->ResetBarrierCounter(); // Record received sparse variables, so that // we could reset those after execute optimize program std::vector sparse_vars; - while (!exit_flag && !SignalHandler::IsProgramExit()) { + while (true) { // Get from multiple trainers, we don't care about the order in which // the gradients arrives, just add suffix 0~n and merge the gradient. - rpc_service_->SetCond(0); - size_t recv_var_cnt = 0; - int batch_barrier = 0; - while (batch_barrier != fan_in) { - const detail::ReceivedMessage v = rpc_service_->Get(); - auto recv_var_name = v.first; - if (recv_var_name == LISTEN_TERMINATE_MESSAGE) { - LOG(INFO) << "received terminate message and exit"; - exit_flag = true; - break; - } else if (recv_var_name == BATCH_BARRIER_MESSAGE) { - VLOG(3) << "recv batch barrier message"; - batch_barrier++; - continue; - } else { - VLOG(3) << "received grad: " << recv_var_name; - recv_var_cnt++; - auto var = v.second->GetVar(); - if (var == nullptr) { - LOG(ERROR) << "Can not find server side var: " << recv_var_name; - PADDLE_THROW("Can not find server side var"); - } - if (var->IsType()) { - sparse_vars.push_back(var); - } - } - } - if (exit_flag) { - rpc_service_->SetCond(1); - rpc_service_->ShutDown(); + rpc_service_->SetCond(detail::kRequestSend); + rpc_service_->WaitBarrier(detail::kRequestSend); + + if (rpc_service_->IsExit()) { + LOG(WARNING) << "get exit!rpc_processor break!"; + rpc_service_->SetCond(detail::kRequestGet); break; } // NOTE: if is_gpu_place, CUDA kernels are launched by multiple threads // and this will still work. - // The optimize blocks which have the same parent ID would run parallel // TODO(Yancey1989): need to use ParallelExecutor for future int32_t last_parent_blkid = program->Block(1).Parent(); @@ -194,52 +155,18 @@ void ListenAndServOp::RunSyncLoop(framework::Executor *executor, var->GetMutable()->mutable_rows()->clear(); } - rpc_service_->SetCond(1); - // FIXME(typhoonzero): use another condition to sync wait clients get. 
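The RunSyncLoop changes in this hunk drive one training step entirely through the new barrier API: open the send barrier, wait for all trainers, run the optimize blocks, then open the get barrier and reset the counters. A compilable sketch of that control flow against a fake server that only logs; in Paddle the same calls go to detail::RPCServer.

```
// Sketch of the rewritten synchronous training loop. FakeRpcServer only
// logs; the method names mirror the RPCServer interface shown above.
#include <iostream>
#include <string>

class FakeRpcServer {
 public:
  void SetCond(const std::string& rpc) { std::cout << "SetCond " << rpc << "\n"; }
  void WaitBarrier(const std::string& rpc) { std::cout << "WaitBarrier " << rpc << "\n"; }
  void ResetBarrierCounter() { std::cout << "ResetBarrierCounter\n"; }
  bool IsExit() { return ++steps_ > 2; }  // pretend the server exits after two steps

 private:
  int steps_ = 0;
};

int main() {
  FakeRpcServer rpc;
  while (true) {
    rpc.SetCond("RequestSend");      // let trainers push gradients
    rpc.WaitBarrier("RequestSend");  // wait for every batch barrier
    if (rpc.IsExit()) break;

    std::cout << "run optimize blocks\n";

    rpc.SetCond("RequestGet");       // let trainers fetch updated params
    rpc.WaitBarrier("RequestGet");   // wait until every trainer has fetched
    rpc.ResetBarrierCounter();
  }
  return 0;
}
```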
- rpc_service_->WaitClientGet(fan_in); - sparse_vars.clear(); + rpc_service_->SetCond(detail::kRequestGet); + rpc_service_->WaitBarrier(detail::kRequestGet); + rpc_service_->ResetBarrierCounter(); } // while(true) } -static void AsyncUpdateThread( - const std::string &var_name, const bool &exit_flag, - const std::shared_ptr &queue, - framework::Executor *executor, - framework::ExecutorPrepareContext *prepared) { - VLOG(3) << "update thread for " << var_name << " started"; - while (!exit_flag && !SignalHandler::IsProgramExit()) { - const detail::ReceivedMessage v = queue->Pop(); - if (SignalHandler::IsProgramExit()) { - VLOG(3) << "update thread for " << var_name << " exit"; - break; - } - auto recv_var_name = v.first; - VLOG(4) << "async update " << recv_var_name; - auto var = v.second->GetVar(); - if (var == nullptr) { - LOG(ERROR) << "Can not find server side var: " << recv_var_name; - PADDLE_THROW("Can not find server side var"); - } - auto fs = framework::Async([var_name, &executor, &v, prepared] { - try { - executor->RunPreparedContext(prepared, - v.second->GetMutableLocalScope()); - } catch (const std::exception &e) { - LOG(ERROR) << "run sub program error " << e.what(); - } - }); - fs.wait(); - } -} - void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, framework::ProgramDesc *program) const { VLOG(3) << "RunAsyncLoop in"; // grad name to block id std::unordered_map grad_to_block_id; std::unordered_map id_to_grad; - std::unordered_map> - grad_to_queue; auto grad_to_block_id_str = Attr>("grad_to_block_id"); @@ -249,13 +176,9 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, VLOG(3) << "after split, grad = " << pieces[0] << ", id=" << pieces[1]; PADDLE_ENFORCE_EQ(pieces.size(), 2); PADDLE_ENFORCE_EQ(grad_to_block_id.count(pieces[0]), 0); + int block_id = std::stoi(pieces[1]); grad_to_block_id[pieces[0]] = block_id; - std::shared_ptr queue = - std::make_shared(); - grad_to_queue[pieces[0]] = queue; - // record blocking queue in SignalHandler - SignalHandler::RegisterBlockingQueue(queue); id_to_grad[block_id] = pieces[0]; } size_t num_blocks = program->Size(); @@ -274,39 +197,36 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, grad_to_prepared_ctx[id_to_grad[block_list[i]]] = optimize_prepared[i]; } - bool exit_flag = false; + request_send_handler_->SetGradToPreparedCtx(&grad_to_prepared_ctx); + request_get_handler_->SetGradToPreparedCtx(&grad_to_prepared_ctx); + request_prefetch_handler_->SetGradToPreparedCtx(&grad_to_prepared_ctx); - VLOG(3) << "start async optimize threads"; - std::vector> fs; - for (auto iter = grad_to_queue.begin(); iter != grad_to_queue.end(); iter++) { - std::string grad_name = iter->first; - VLOG(3) << "create async update thread for " << grad_name; - fs.push_back(framework::AsyncIO([grad_name, &exit_flag, &executor, - &grad_to_queue, &grad_to_prepared_ctx]() { - AsyncUpdateThread(grad_name, exit_flag, grad_to_queue[grad_name], - executor, grad_to_prepared_ctx[grad_name].get()); - })); - } VLOG(3) << "RunAsyncLoop into while"; - while (!exit_flag && !SignalHandler::IsProgramExit()) { - const detail::ReceivedMessage v = rpc_service_->Get(); - auto recv_var_name = v.first; - if (recv_var_name == LISTEN_TERMINATE_MESSAGE) { - LOG(INFO) << "received terminate message and exit"; - exit_flag = true; + while (true) { + if (rpc_service_->IsExit()) { + LOG(INFO) << "get exit!rpc_processor break!"; break; - } else { - VLOG(3) << "received grad: " << recv_var_name; - grad_to_queue[recv_var_name]->Push(v); } - if 
(exit_flag) { - rpc_service_->ShutDown(); - break; - } + sleep(1); } // while(true) } +static void FillRequestCtx(detail::RequestHandler *h, framework::Scope *scope, + platform::DeviceContext *dev_ctx, + framework::Executor *executor, + framework::ProgramDesc *program, + framework::ExecutorPrepareContext *prefetch_ctx, + detail::RPCServer *rpc_server) { + h->SetScope(scope); + h->SetDevCtx(dev_ctx); + h->SetExecutor(executor); + h->SetProgram(program); + h->SetPrefetchPreparedCtx(std::move( + std::unique_ptr(prefetch_ctx))); + h->SetRPCServer(rpc_server); +} + void ListenAndServOp::RunImpl(const framework::Scope &scope, const platform::Place &dev_place) const { // Mark this as PS that it should decide profiling by listening from trainer. @@ -316,27 +236,42 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, framework::Scope &recv_scope = scope.NewScope(); bool sync_mode = Attr("sync_mode"); + auto fan_in = Attr("Fanin"); PADDLE_ENFORCE(!rpc_service_); std::string endpoint = Attr("endpoint"); - rpc_service_.reset(new detail::AsyncGRPCServer(endpoint, sync_mode)); + LOG(INFO) << "sync_mode:" << sync_mode << ", fan_in:" << fan_in + << ", end_point:" << endpoint; + + // request_handler_.reset(new detail::GRPCRequestSendHandler(sync_mode)); + rpc_service_.reset(new detail::AsyncGRPCServer(endpoint, fan_in)); + request_send_handler_.reset(new detail::RequestSendHandler(sync_mode)); + request_get_handler_.reset(new detail::RequestGetHandler(sync_mode)); + request_prefetch_handler_.reset( + new detail::RequestPrefetchHandler(sync_mode)); + + rpc_service_->RegisterRPC(detail::kRequestSend, request_send_handler_.get()); + rpc_service_->RegisterRPC(detail::kRequestGet, request_get_handler_.get()); + rpc_service_->RegisterRPC(detail::kRequestPrefetch, + request_prefetch_handler_.get()); auto *optimize_block = Attr(kOptimizeBlock); auto *prefetch_block = Attr(kPrefetchBlock); auto *program = optimize_block->Program(); framework::Executor executor(dev_place); - // prepare rpc_service - rpc_service_->SetScope(&recv_scope); - rpc_service_->SetDevCtx(&dev_ctx); - rpc_service_->SetProgram(program); - rpc_service_->SetExecutor(&executor); - // prepare for prefetch VLOG(3) << "prefetch block id is " << prefetch_block->ID(); auto prefetch_prepared = executor.Prepare(*program, prefetch_block->ID()); - rpc_service_->SetPrefetchPreparedCtx(std::move(prefetch_prepared)); + + auto f = std::bind(FillRequestCtx, std::placeholders::_1, &recv_scope, + &dev_ctx, &executor, program, prefetch_prepared.release(), + rpc_service_.get()); + + f(request_send_handler_.get()); + f(request_get_handler_.get()); + f(request_prefetch_handler_.get()); // start the server listening after all member initialized. server_thread_.reset(new std::thread(RunServer, rpc_service_)); @@ -348,8 +283,6 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, signal(SIGTERM, SignalHandler::StopAndExit); // Write to a file of server selected port for python use. 
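The new FillRequestCtx helper above pushes the same server-side context (scope, device context, executor, program, prefetch context, RPC server) into each request handler through one bound function, instead of setting it on the gRPC server itself. A simplified sketch of that pattern with placeholder types; only the shape of the wiring is taken from the patch.

```
// Sketch of the FillRequestCtx pattern with placeholder types.
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Context {             // stand-in for scope/dev_ctx/executor/program/...
  std::string scope_name;
};

struct Handler {             // stand-in for detail::RequestHandler
  std::string name;
  const Context* ctx = nullptr;
};

void FillRequestCtx(Handler* h, const Context* ctx) { h->ctx = ctx; }

int main() {
  Context ctx{"recv_scope"};
  Handler send{"RequestSend"}, get{"RequestGet"}, prefetch{"RequestPrefetch"};

  // Bind the shared context once, then apply the filler to every handler.
  auto fill = std::bind(FillRequestCtx, std::placeholders::_1, &ctx);
  for (Handler* h : std::vector<Handler*>{&send, &get, &prefetch}) fill(h);

  for (const Handler* h : {&send, &get, &prefetch}) {
    std::cout << h->name << " uses scope " << h->ctx->scope_name << "\n";
  }
  return 0;
}
```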
- std::string file_path = string::Sprintf("/tmp/paddle.%d.selected_port", - static_cast(::getpid())); SavePort(); if (sync_mode) { RunSyncLoop(&executor, program, &recv_scope, prefetch_block); @@ -385,27 +318,9 @@ class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker { } }; -bool SignalHandler::program_exit_flag_ = false; - -SignalHandler::BlockingQueueSet SignalHandler::blocking_queue_set_{}; - void SignalHandler::StopAndExit(int signal_num) { VLOG(3) << "Catch interrupt signal: " << signal_num << ", program will exit"; - - program_exit_flag_ = true; - - // awake all blocking queues - for (BlockingQueueSet::iterator iter = blocking_queue_set_.begin(); - iter != blocking_queue_set_.end(); iter++) { - iter->get()->Push( - std::make_pair(std::string(LISTEN_TERMINATE_MESSAGE), nullptr)); - } - - exit(EXIT_SUCCESS); -} - -void SignalHandler::RegisterBlockingQueue(BlockingQueue &queue) { - blocking_queue_set_.insert(queue); + exit(0); } } // namespace operators diff --git a/paddle/fluid/operators/listen_and_serv_op.h b/paddle/fluid/operators/listen_and_serv_op.h index 6f868369dcf2067fd71f4107d20c79ead0cf9f56..87952cb0e683596b2b0395890b6e25b15f74d7e2 100644 --- a/paddle/fluid/operators/listen_and_serv_op.h +++ b/paddle/fluid/operators/listen_and_serv_op.h @@ -23,7 +23,8 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/threadpool.h" -#include "paddle/fluid/operators/detail/grpc_server.h" +#include "paddle/fluid/operators/detail/request_handler.h" +#include "paddle/fluid/operators/detail/rpc_server.h" namespace paddle { namespace operators { @@ -31,7 +32,7 @@ namespace operators { constexpr char kOptimizeBlock[] = "OptimizeBlock"; constexpr char kPrefetchBlock[] = "PrefetchBlock"; -void RunServer(std::shared_ptr service); +void RunServer(std::shared_ptr service); class ListenAndServOp : public framework::OperatorBase { public: @@ -52,41 +53,27 @@ class ListenAndServOp : public framework::OperatorBase { void SavePort() const; - void WaitServerReady(); - - int GetSelectedPort() { return selected_port_; } + int GetSelectedPort() { return rpc_service_->GetSelectedPort(); } void Stop() override; void RunImpl(const framework::Scope& scope, const platform::Place& dev_place) const override; - static void ResetPort() { selected_port_ = 0; } - protected: - mutable std::shared_ptr rpc_service_; + mutable std::shared_ptr rpc_service_; + mutable std::shared_ptr request_send_handler_; + mutable std::shared_ptr request_get_handler_; + mutable std::shared_ptr request_prefetch_handler_; + mutable std::shared_ptr server_thread_; - // FIXME(wuyi): it's static so that the operator can be cloned. 
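The header change above replaces the tight coupling to `AsyncGRPCServer` with a generic `RPCServer` plus one handler per request kind (`Send`, `Get`, `Prefetch`). A minimal sketch of that registration pattern (illustrative Python, not the actual C++ classes):

```
class ToyRequestRegistry(object):
    def __init__(self):
        self._handlers = {}

    def register_rpc(self, rpc_name, handler):
        self._handlers[rpc_name] = handler

    def handle(self, rpc_name, request):
        # dispatch purely by name, so new request kinds need no server changes
        return self._handlers[rpc_name](request)


server = ToyRequestRegistry()
server.register_rpc("RequestSend", lambda req: "store gradient")
server.register_rpc("RequestGet", lambda req: "return parameter")
server.register_rpc("RequestPrefetch", lambda req: "run prefetch block")
print(server.handle("RequestGet", None))
```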
- static std::atomic_int selected_port_; }; class SignalHandler { - public: - typedef std::shared_ptr BlockingQueue; - typedef std::unordered_set BlockingQueueSet; - public: static void StopAndExit(int signal_num); - static void RegisterBlockingQueue(BlockingQueue&); - - static inline bool IsProgramExit() { return program_exit_flag_; } - private: - static bool program_exit_flag_; - - static BlockingQueueSet blocking_queue_set_; - DISABLE_COPY_AND_ASSIGN(SignalHandler); }; diff --git a/paddle/fluid/operators/mul_mkldnn_op.cc b/paddle/fluid/operators/mul_mkldnn_op.cc deleted file mode 100644 index a5f3a98f678a870d30eebfc4cf329de7c93266ee..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/mul_mkldnn_op.cc +++ /dev/null @@ -1,197 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "mkldnn.hpp" -#include "paddle/fluid/framework/tensor.h" -#include "paddle/fluid/operators/mul_op.h" -#include "paddle/fluid/platform/device_context.h" -#include "paddle/fluid/platform/mkldnn_helper.h" - -namespace paddle { -namespace operators { - -using paddle::framework::Tensor; -using paddle::platform::MKLDNNDeviceContext; - -template -mkldnn::memory::desc type(const std::vector& dims, Format&& f) { - return platform::MKLDNNMemDesc(dims, mkldnn::memory::data_type::f32, f); -} - -template -class MulMKLDNNOpKernel : public paddle::framework::OpKernel { - void Compute(const paddle::framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), - "It must use CPUPlace."); - - auto& dev_ctx = ctx.template device_context(); - auto mkldnn_engine = dev_ctx.GetEngine(); - - auto input = ctx.Input("X"); - auto weight = ctx.Input("Y"); - - PADDLE_ENFORCE(input->dims().size() & (2 | 4), - "Input must be with 2 or 4 dimensions, i.e. NC or NCHW"); - PADDLE_ENFORCE(weight->dims().size() & (2 | 4), - "Weights must be with 2 or 4 dimensions, i.e. OI or OIHW"); - - std::vector w_tz = paddle::framework::vectorize2int(weight->dims()); - std::vector src_tz = paddle::framework::vectorize2int(input->dims()); - - auto src_md = - src_tz.size() != 2 - ? type(src_tz, mkldnn::memory::format::nchw) - : type({src_tz[0], src_tz[1]}, mkldnn::memory::format::nc); - - auto dst_md = type({src_tz[0], w_tz[1]}, mkldnn::memory::format::nc); - - auto weights_md = - src_tz.size() != 2 - ? 
type({w_tz[1], src_tz[1], src_tz[2], src_tz[3]}, - mkldnn::memory::format::oihw) - : type({w_tz[1], src_tz[1]}, mkldnn::memory::format::oi); - - auto output = ctx.Output("Out"); - T* output_data = output->mutable_data(ctx.GetPlace()); - - const std::string key = ctx.op().Output("Out"); - const std::string key_fc_pd = key + "@mul_pd"; - - const T* input_data = input->data(); - const T* w_data = weight->data(); - - auto dst_memory = mkldnn::memory({dst_md, mkldnn_engine}, output_data); - - auto src_memory = mkldnn::memory({src_md, mkldnn_engine}, - platform::to_void_cast(input_data)); - - auto weights_memory = mkldnn::memory({weights_md, mkldnn_engine}, - platform::to_void_cast(w_data)); - - auto pd = platform::MKLDNNFwdPrimitiveDesc( - mkldnn_engine, src_md, weights_md, dst_md); - - dev_ctx.SetBlob(key_fc_pd, pd); - - auto forward = mkldnn::inner_product_forward(*pd, src_memory, - weights_memory, dst_memory); - - std::vector pipeline = {forward}; - mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); - } -}; - -template -class MulMKLDNNGradOpKernel : public paddle::framework::OpKernel { - public: - void Compute(const paddle::framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), - "It must use CPUPlace."); - - auto& dev_ctx = ctx.template device_context(); - auto mkldnn_engine = dev_ctx.GetEngine(); - - const Tensor* input = ctx.Input("X"); - const Tensor* w = ctx.Input("Y"); - - const Tensor* out_grad = ctx.Input(framework::GradVarName("Out")); - Tensor* input_grad = ctx.Output(framework::GradVarName("X")); - Tensor* w_grad = ctx.Output(framework::GradVarName("Y")); - - const std::string key = ctx.op().Input("Out"); - const std::string key_fc_pd = key + "@mul_pd"; - - const T* input_data = input->data(); - const T* w_data = w->data(); - const T* out_grad_data = out_grad->data(); - T* input_grad_data = nullptr; - T* w_grad_data = nullptr; - - if (input_grad) { - input_grad_data = input_grad->mutable_data(ctx.GetPlace()); - } - if (w_grad) { - w_grad_data = w_grad->mutable_data(ctx.GetPlace()); - } - - std::vector src_tz = paddle::framework::vectorize2int(input->dims()); - std::vector w_tz = paddle::framework::vectorize2int(w->dims()); - - auto src_md = - src_tz.size() != 2 - ? type(src_tz, mkldnn::memory::format::nchw) - : type({src_tz[0], src_tz[1]}, mkldnn::memory::format::nc); - - auto dst_md = type({src_tz[0], w_tz[1]}, mkldnn::memory::format::nc); - - auto weights_md = - src_tz.size() != 2 - ? 
type({w_tz[1], src_tz[1], src_tz[2], src_tz[3]}, - mkldnn::memory::format::oihw) - : type({w_tz[1], src_tz[1]}, mkldnn::memory::format::oi); - - auto src_memory = mkldnn::memory({src_md, mkldnn_engine}, - platform::to_void_cast(input_data)); - - auto dst_memory = mkldnn::memory({dst_md, mkldnn_engine}, - platform::to_void_cast(out_grad_data)); - - auto weight_memory = mkldnn::memory({weights_md, mkldnn_engine}, - platform::to_void_cast(w_data)); - - auto pd = - std::static_pointer_cast( - dev_ctx.GetBlob(key_fc_pd)); - - PADDLE_ENFORCE(pd != nullptr, "Fail to find pd in device context"); - - if (w_grad) { - auto weights_grad_memory = mkldnn::memory( - {weights_md, mkldnn_engine}, platform::to_void_cast(w_grad_data)); - - auto bwd_weight_pd = platform::MKLDNNBwdPrimitiveDesc< - mkldnn::inner_product_backward_weights>(mkldnn_engine, *pd, src_md, - weights_md, dst_md); - - auto bwd_weights_prim = mkldnn::inner_product_backward_weights( - bwd_weight_pd, src_memory, dst_memory, weights_grad_memory); - - std::vector pipeline{bwd_weights_prim}; - mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); - } - - if (input_grad) { - auto src_grad_memory = mkldnn::memory( - {src_md, mkldnn_engine}, platform::to_void_cast(input_grad_data)); - - auto bwd_data_pd = - platform::MKLDNNBwdPrimitiveDesc( - mkldnn_engine, *pd, src_md, weights_md, dst_md); - - auto bwd_data_prim = mkldnn::inner_product_backward_data( - bwd_data_pd, dst_memory, weight_memory, src_grad_memory); - - std::vector pipeline{bwd_data_prim}; - mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); - } - } -}; -} // namespace operators -} // namespace paddle - -REGISTER_OP_KERNEL(mul, MKLDNN, ::paddle::platform::CPUPlace, - paddle::operators::MulMKLDNNOpKernel); - -REGISTER_OP_KERNEL(mul_grad, MKLDNN, ::paddle::platform::CPUPlace, - paddle::operators::MulMKLDNNGradOpKernel); diff --git a/paddle/fluid/operators/mul_op.cc b/paddle/fluid/operators/mul_op.cc index a43739463c85b38e1dba04c6ec1bfcf4b6cbfa63..51993398bd3427e1f0da155918395bc50fa65e45 100644 --- a/paddle/fluid/operators/mul_op.cc +++ b/paddle/fluid/operators/mul_op.cc @@ -16,10 +16,6 @@ limitations under the License. 
*/ #include #include -#ifdef PADDLE_WITH_MKLDNN -#include "paddle/fluid/platform/mkldnn_helper.h" -#endif - namespace paddle { namespace operators { @@ -76,22 +72,6 @@ class MulOp : public framework::OperatorWithKernel { ctx->SetOutputDim("Out", framework::make_ddim(output_dims)); ctx->ShareLoD("X", /*->*/ "Out"); } - - private: - framework::OpKernelType GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { - framework::LibraryType library{framework::LibraryType::kPlain}; -#ifdef PADDLE_WITH_MKLDNN - if (library == framework::LibraryType::kPlain && - platform::CanMKLDNNBeUsed(ctx)) { - library = framework::LibraryType::kMKLDNN; - } -#endif - framework::DataLayout layout{framework::DataLayout::kAnyLayout}; - return framework::OpKernelType( - framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), - layout, library); - } }; class MulOpMaker : public framework::OpProtoAndCheckerMaker { @@ -120,9 +100,6 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker { )DOC") .SetDefault(1) .EqualGreaterThan(1); - AddAttr("use_mkldnn", - "(bool, default false) Only used in mkldnn kernel") - .SetDefault(false); AddAttr( "y_num_col_dims", R"DOC((int, default 1), The mul_op can take tensors with more than two, @@ -177,22 +154,6 @@ class MulGradOp : public framework::OperatorWithKernel { ctx->SetOutputDim(y_grad_name, y_dims); } } - - private: - framework::OpKernelType GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { - framework::LibraryType library{framework::LibraryType::kPlain}; -#ifdef PADDLE_WITH_MKLDNN - if (library == framework::LibraryType::kPlain && - platform::CanMKLDNNBeUsed(ctx)) { - library = framework::LibraryType::kMKLDNN; - } -#endif - framework::DataLayout layout{framework::DataLayout::kAnyLayout}; - return framework::OpKernelType( - framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), - layout, library); - } }; } // namespace operators diff --git a/paddle/fluid/operators/random_crop_op.cc b/paddle/fluid/operators/random_crop_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..b14b559e31dd422f8ebe4002988a9746dfdf28a2 --- /dev/null +++ b/paddle/fluid/operators/random_crop_op.cc @@ -0,0 +1,81 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#include "paddle/fluid/operators/random_crop_op.h" + +namespace paddle { +namespace operators { + +class RandomCropOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), + ctx.device_context()); + } +}; + +class RandomCropOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "A batch of instances to random crop."); + AddInput("Seed", "The random seed."); + AddOutput("Out", "The cropped instance batch."); + AddOutput("SeedOut", "The random seed after random cropping.") + .AsDispensable(); + AddAttr>("shape", "The shape of a cropped instance."); + AddComment(R"DOC( + This operator takes a batch of instance, and do random cropping on each instance. + It means that cropping positions differs on each instance, which is determined + by an uniform random generator. All cropped instances have the same shape, which + is determined by the operator's attribute 'shape'. + )DOC"); + } +}; + +class RandomCropOpInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext* ctx) const override { + auto seed_dim = ctx->GetInputDim("Seed"); + PADDLE_ENFORCE(seed_dim.size() == 1 && seed_dim[0] == 1); + auto shape = ctx->Attrs().Get>("shape"); + auto x_dim = ctx->GetInputDim("X"); + PADDLE_ENFORCE_GT(x_dim.size(), static_cast(shape.size())); + auto out_dim = framework::vectorize2int(x_dim); + for (size_t i = 1; i <= shape.size(); ++i) { + size_t x_i = x_dim.size() - i; + size_t shape_i = shape.size() - i; + PADDLE_ENFORCE_GE(x_dim[x_i], shape[shape_i]); + out_dim[x_i] = shape[shape_i]; + } + ctx->SetOutputDim("Out", framework::make_ddim(out_dim)); + ctx->SetOutputDim("SeedOut", framework::make_ddim({1})); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +namespace f = paddle::framework; +REGISTER_OPERATOR(random_crop, ops::RandomCropOp, ops::RandomCropOpMaker, + ops::RandomCropOpInferShape, f::EmptyGradOpMaker); + +template +using Kernel = ops::RandomCropKernel; +REGISTER_OP_CPU_KERNEL(random_crop, Kernel, Kernel, Kernel, + Kernel, Kernel); diff --git a/paddle/fluid/operators/random_crop_op.cu b/paddle/fluid/operators/random_crop_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..6fc9bedc55b4d349ddf3d109c7f9049113235f0c --- /dev/null +++ b/paddle/fluid/operators/random_crop_op.cu @@ -0,0 +1,21 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
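A numpy reference for what the new `random_crop` operator computes in the common single-batch-dimension case (semantics only, not the CPU/GPU kernel; the function name is illustrative):

```
import numpy as np


def random_crop_ref(x, shape, rng):
    # x: [batch, d1, d2, ...]; shape: per-instance output shape
    out = np.empty((x.shape[0],) + tuple(shape), dtype=x.dtype)
    for i in range(x.shape[0]):
        # each instance draws its own offsets, uniform over the valid range
        offs = [rng.randint(0, d - s + 1) for d, s in zip(x.shape[1:], shape)]
        out[i] = x[i][tuple(slice(o, o + s) for o, s in zip(offs, shape))]
    return out


x = np.arange(2 * 3 * 8 * 8, dtype=np.float32).reshape(2, 3, 8, 8)
print(random_crop_ref(x, (3, 6, 6), np.random.RandomState(1)).shape)  # (2, 3, 6, 6)
```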
+ +#include "paddle/fluid/operators/random_crop_op.h" + +namespace ops = paddle::operators; +template +using Kernel = ops::RandomCropKernel; +REGISTER_OP_CUDA_KERNEL(random_crop, Kernel, Kernel, Kernel, + Kernel, Kernel); diff --git a/paddle/fluid/operators/random_crop_op.h b/paddle/fluid/operators/random_crop_op.h new file mode 100644 index 0000000000000000000000000000000000000000..f3261cbdc986b0cc724315c1eb92b8b84e18c742 --- /dev/null +++ b/paddle/fluid/operators/random_crop_op.h @@ -0,0 +1,181 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/for_range.h" +#ifdef PADDLE_WITH_CUDA +#include +#endif + +namespace paddle { +namespace operators { + +template +struct Random; + +template <> +struct Random { + using Engine = std::minstd_rand; + + template + using UniformIntDist = std::uniform_int_distribution; +}; + +#ifdef PADDLE_WITH_CUDA +template <> +struct Random { + using Engine = thrust::minstd_rand; + + template + using UniformIntDist = thrust::uniform_int_distribution; +}; +#endif + +template +HOSTDEVICE inline void StridedMemcpy(const T* x, const size_t* x_dims, T* out, + const size_t* out_dims, int i, int rank, + size_t prod_x_remain, + size_t prod_out_remain, + const size_t* offsets) { + size_t x_dim_i = x_dims[i]; + size_t out_dim_i = out_dims[i]; + size_t x_stride = prod_x_remain / x_dim_i; + size_t out_stride = prod_out_remain / out_dim_i; + size_t offset_i = offsets[i]; + + if (i == rank - 1) { + PADDLE_ASSERT(x_stride == 1 && out_stride == 1); + x += offset_i; + for (size_t j = 0; j < out_dim_i; ++j) { + *out++ = *x++; + } + } else { + x += offset_i * x_stride; + for (size_t j = 0; j < out_dim_i; ++j) { + StridedMemcpy(x, x_dims, out, out_dims, i + 1, rank, x_stride, + out_stride, offsets); + x += x_stride; + out += out_stride; + } + } +} + +template +struct RandomCropFunctor { + const T* x_; + T* out_; + size_t x_dims_[9]; + size_t out_dims_[9]; + int num_batchsize_dims_; + int rank_; + int64_t seed_; + + size_t prod_batchsize_dims_; + size_t prod_x_ins_dims_; + size_t prod_out_ins_dims_; + + RandomCropFunctor(const T* x, T* out, const framework::DDim& x_dims, + const framework::DDim& out_dims, int num_batchsize_dims, + int64_t seed) + : x_(x), + out_(out), + num_batchsize_dims_(num_batchsize_dims), + rank_(x_dims.size()), + seed_(seed) { + PADDLE_ENFORCE_EQ(x_dims.size(), out_dims.size()); + PADDLE_ENFORCE_GT(rank_, num_batchsize_dims_); + prod_batchsize_dims_ = 1; + prod_x_ins_dims_ = 1; + prod_out_ins_dims_ = 1; + for (size_t i = 0; i < static_cast(rank_); ++i) { + size_t x_dim_i = x_dims[i]; + size_t out_dim_i = out_dims[i]; + x_dims_[i] = x_dim_i; + out_dims_[i] = out_dim_i; + if (i < static_cast(num_batchsize_dims_)) { + PADDLE_ENFORCE_EQ(x_dim_i, out_dim_i); + prod_batchsize_dims_ *= 
x_dim_i; + } else { + prod_x_ins_dims_ *= x_dim_i; + prod_out_ins_dims_ *= out_dim_i; + } + } + } + + HOSTDEVICE void operator()(size_t ins_idx) { + typename Random::Engine engine(seed_); + engine.discard(ins_idx * (rank_ - num_batchsize_dims_)); + size_t offsets[9]; + for (int i = num_batchsize_dims_; i < rank_; ++i) { + typename Random::template UniformIntDist dist( + 0, x_dims_[i] - out_dims_[i]); + offsets[i - num_batchsize_dims_] = dist(engine); + } + + const T* x = x_ + ins_idx * prod_x_ins_dims_; + T* out = out_ + ins_idx * prod_out_ins_dims_; + + StridedMemcpy(x, x_dims_ + num_batchsize_dims_, out, + out_dims_ + num_batchsize_dims_, 0, + rank_ - num_batchsize_dims_, prod_x_ins_dims_, + prod_out_ins_dims_, offsets); + } +}; + +template +class RandomCropKernel : public framework::OpKernel { + public: + virtual void Compute(const framework::ExecutionContext& ctx) const { + auto& seed_tensor = detail::Ref(ctx.Input("Seed")); + int64_t seed = 0; + if (platform::is_cpu_place(seed_tensor.place())) { + seed = *seed_tensor.data(); + } else { + LOG(WARNING) << "It is slow to place seed in GPU memory. Please verify " + "your program"; + framework::LoDTensor cpu_seed; + framework::TensorCopySync(seed_tensor, platform::CPUPlace(), &cpu_seed); + seed = *cpu_seed.data(); + } + auto shape = ctx.Attr>("shape"); + auto& x = detail::Ref(ctx.Input("X")); + auto& out = detail::Ref(ctx.Output("Out")); + + int num_batchsize_dims = x.dims().size() - shape.size(); + RandomCropFunctor functor( + x.data(), out.mutable_data(ctx.GetPlace()), x.dims(), out.dims(), + num_batchsize_dims, seed); + platform::ForRange for_range( + ctx.template device_context(), + functor.prod_batchsize_dims_); + + for_range(functor); + + Random::Engine engine(seed); + engine.discard(functor.prod_batchsize_dims_ * + (functor.rank_ - functor.num_batchsize_dims_)); + *ctx.Output("SeedOut")->mutable_data( + platform::CPUPlace()) = engine(); + } +}; + +// TODO(fengjiayi): Backward of random crop op + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/reader/create_custom_reader_op.cc b/paddle/fluid/operators/reader/create_custom_reader_op.cc index 4ecbf8ed4f0473a552b778fd6c64c92b946cd458..331224a59899b4a7d517ca4f7141fb5b8f4f5168 100644 --- a/paddle/fluid/operators/reader/create_custom_reader_op.cc +++ b/paddle/fluid/operators/reader/create_custom_reader_op.cc @@ -23,13 +23,12 @@ namespace reader { class CustomReader : public framework::DecoratedReader { public: CustomReader(ReaderBase* reader, const framework::BlockDesc& sub_block, - const platform::Place& dev_place, const std::vector& source_var_names, const std::vector& sink_var_names) : DecoratedReader(reader), program_(*sub_block.Program()), sub_block_id_(sub_block.ID()), - exe_(framework::Executor(dev_place)), + exe_(framework::Executor(platform::CPUPlace())), source_var_names_(source_var_names), sink_var_names_(sink_var_names) {} @@ -60,7 +59,7 @@ class CreateCustomReaderOp : public framework::OperatorBase { const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get(); out->Reset( - new CustomReader(underlying_reader.Get(), *sub_block, dev_place, + new CustomReader(underlying_reader.Get(), *sub_block, Attr>("source_var_names"), Attr>("sink_var_names"))); } @@ -85,9 +84,10 @@ class CreateCustomReaderOpMaker : public DecoratedReaderMakerBase { CreateCustomReader Operator A custom reader can be used for input data preprocessing. - A custom reader holds its own sub-block, which will be executed in its - 'ReadNext()' function. 
Users can configurate their own preprocessing - pipelines by inserting operators into custom reader's sub-block. + A custom reader holds its own sub-block, which will be executed in CPU + in its 'ReadNext()' function. Users can configurate their own + preprocessing pipelines by inserting operators into custom reader's + sub-block. )DOC"); } }; diff --git a/paddle/fluid/operators/send_barrier_op.cc b/paddle/fluid/operators/send_barrier_op.cc index 2c77ee2e2792d6fdd76bacd68b6c3b4a296b2e3a..bcd8e81609a37cc544f5a5cc4188400c1632a668 100644 --- a/paddle/fluid/operators/send_barrier_op.cc +++ b/paddle/fluid/operators/send_barrier_op.cc @@ -46,6 +46,8 @@ class SendBarrierOp : public framework::OperatorBase { auto rpc_client = detail::RPCClient::GetInstance(); + VLOG(3) << "SendBarrierOp sync_mode:" << sync_mode; + // need to wait before sending send_barrier message PADDLE_ENFORCE(rpc_client->Wait()); if (sync_mode) { diff --git a/paddle/fluid/operators/tensorrt_engine_op.cc b/paddle/fluid/operators/tensorrt_engine_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..83e768b4dc9c607b0f73d7183462d772ae7ab994 --- /dev/null +++ b/paddle/fluid/operators/tensorrt_engine_op.cc @@ -0,0 +1,70 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#ifdef PADDLE_WITH_CUDA + +#include "paddle/fluid/operators/tensorrt_engine_op.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/inference/tensorrt/convert/op_converter.h" +#include "paddle/fluid/inference/utils/singleton.h" + +namespace paddle { +namespace operators { + +template +void paddle::operators::TensorRTEngineKernel::Prepare( + const framework::ExecutionContext &context) const { + // Get the ProgramDesc and pass to convert. 
+ const auto &block = context.Attr("subgraph"); + max_batch_ = context.Attr("max_batch"); + auto max_workspace = context.Attr("max_workspace"); + engine_.reset(new inference::tensorrt::TensorRTEngine( + max_batch_, max_workspace, nullptr)); + inference::Singleton::Global().ConvertBlock( + block, engine_.get()); + engine_->FreezeNetwork(); +} + +class TensorRTEngineOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("Xs", "A list of inputs.").AsDuplicable(); + AddOutput("Ys", "A list of outputs").AsDuplicable(); + AddAttr("subgraph", "the subgraph"); + AddComment("TensorRT engine operator."); + } +}; + +class TensorRTEngineInferVarType : public framework::VarTypeInference { + public: + void operator()(const framework::OpDesc &op_desc, + framework::BlockDesc *block) const override {} +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(tensorrt_engine, ops::TensorRTEngineOp, + ops::TensorRTEngineOpMaker, ops::TensorRTEngineOpMaker); + +REGISTER_OP_CPU_KERNEL( + tensorrt_engine, + ops::TensorRTEngineKernel, + ops::TensorRTEngineKernel, + ops::TensorRTEngineKernel, + ops::TensorRTEngineKernel); + +#endif // PADDLE_WITH_CUDA diff --git a/paddle/fluid/operators/tensorrt_engine_op.h b/paddle/fluid/operators/tensorrt_engine_op.h new file mode 100644 index 0000000000000000000000000000000000000000..fe273d386c529be3df05a955f492e2c39d4d8812 --- /dev/null +++ b/paddle/fluid/operators/tensorrt_engine_op.h @@ -0,0 +1,110 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#ifdef PADDLE_WITH_CUDA + +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/inference/analysis/helper.h" +#include "paddle/fluid/inference/tensorrt/engine.h" + +namespace paddle { +namespace operators { + +class TensorRTEngineOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext* ctx) const override {} + + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + framework::OpKernelType kt = framework::OpKernelType( + framework::ToDataType( + ctx.Input("pre_ids")->type()), + platform::CPUPlace()); + return kt; + } +}; + +template +class TensorRTEngineKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + if (!engine_) { + Prepare(context); + } + auto input_names = context.op().Inputs("Xs"); + PADDLE_ENFORCE(!input_names.empty(), "should pass more than one inputs"); + // Try to determine a batch_size + auto* tensor0 = context.Input(input_names.front()); + PADDLE_ENFORCE_NOT_NULL(tensor0); + int batch_size = tensor0->dims()[0]; + PADDLE_ENFORCE_LE(batch_size, max_batch_); + + // Convert input tensor from fluid to engine. 
+ for (const auto& x : context.Inputs("Xs")) { + // convert input and copy to TRT engine's buffer + auto* v = context.scope().FindVar(x); + PADDLE_ENFORCE_NOT_NULL(v, "no variable called %s", x); + auto& t = v->Get(); + if (platform::is_cpu_place(t.place())) { + engine_->SetInputFromCPU(x, static_cast(t.data()), + t.memory_size()); + } else { + engine_->SetInputFromGPU(x, static_cast(t.data()), + t.memory_size()); + } + } + // Execute the engine. + PADDLE_ENFORCE_GT(batch_size, 0); + engine_->Execute(batch_size); + // Convert output tensor from engine to fluid + for (const auto& y : context.Outputs("Ys")) { + // convert output and copy to fluid. + nvinfer1::ITensor* trt_t = engine_->GetITensor(y); + auto dims = trt_t->getDimensions(); + // Use the output ITensor's dims to reshape the Fluid Tensor. + std::vector ddim(dims.d, dims.d + dims.nbDims); + + auto* fluid_v = context.scope().FindVar(y); + PADDLE_ENFORCE_NOT_NULL(fluid_v, "no output variable called %s", y); + auto* fluid_t = fluid_v->GetMutable(); + fluid_t->Resize(framework::make_ddim(ddim)); + auto size = inference::analysis::AccuDims(dims.d, dims.nbDims); + if (platform::is_cpu_place(fluid_t->place())) { + engine_->GetOutputInCPU( + y, fluid_t->mutable_data(platform::CPUPlace()), size); + } else { + engine_->GetOutputInGPU( + y, fluid_t->mutable_data(platform::CUDAPlace()), size); + } + } + } + + protected: + // Build the engine. + void Prepare(const framework::ExecutionContext& context) const; + + private: + mutable std::unique_ptr engine_; + mutable int max_batch_{0}; +}; + +} // namespace operators +} // namespace paddle + +#endif // PADDLE_WITH_CUDA diff --git a/paddle/fluid/operators/test_send_nccl_id.cc b/paddle/fluid/operators/test_send_nccl_id.cc index 719f039a0f5fcd7445bf1589a683f122e6d62ba0..a845ba2eb038fa6a8e70dfbac06c31c19dbb9e3e 100644 --- a/paddle/fluid/operators/test_send_nccl_id.cc +++ b/paddle/fluid/operators/test_send_nccl_id.cc @@ -21,6 +21,8 @@ limitations under the License. 
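The Compute path of the new `tensorrt_engine` op reduces to a set-inputs / execute / fetch-outputs cycle around the frozen engine. A rough pseudo-Python outline of that flow (the `engine` object and its methods here are placeholders, not a real API):

```
def trt_engine_compute(engine, scope, input_names, output_names, max_batch):
    batch_size = scope[input_names[0]].shape[0]   # batch size taken from the first input
    assert 0 < batch_size <= max_batch
    for name in input_names:                      # copy fluid tensors into engine buffers
        engine.set_input(name, scope[name])
    engine.execute(batch_size)                    # run the frozen TensorRT network
    for name in output_names:                     # reshape by ITensor dims and copy back
        dims = engine.get_output_dims(name)
        scope[name] = engine.get_output(name).reshape(dims)
```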
*/ #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/operators/detail/grpc_client.h" +#include "paddle/fluid/operators/detail/grpc_server.h" +#include "paddle/fluid/operators/detail/request_handler_impl.h" #include "paddle/fluid/operators/listen_and_serv_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" @@ -35,42 +37,44 @@ namespace m = paddle::operators::math; namespace detail = paddle::operators::detail; namespace string = paddle::string; -std::unique_ptr rpc_service; +std::unique_ptr g_rpc_service; +std::unique_ptr g_req_handler; -void StartServer(std::atomic* initialized) { +void StartServer() { f::Scope scope; p::CPUPlace place; scope.Var(NCCL_ID_VARNAME); p::DeviceContextPool& pool = p::DeviceContextPool::Instance(); auto& dev_ctx = *pool.Get(p::CPUPlace()); - rpc_service.reset(new detail::AsyncGRPCServer("127.0.0.1:0", true)); - f::ProgramDesc empty_program; f::Executor executor(dev_ctx.GetPlace()); - rpc_service->SetScope(&scope); - rpc_service->SetDevCtx(&dev_ctx); - rpc_service->SetProgram(&empty_program); - rpc_service->SetExecutor(&executor); + g_req_handler->SetScope(&scope); + g_req_handler->SetDevCtx(&dev_ctx); + g_req_handler->SetProgram(&empty_program); + g_req_handler->SetExecutor(&executor); + + g_rpc_service->RegisterRPC(detail::kRequestSend, g_req_handler.get()); + g_req_handler->SetRPCServer(g_rpc_service.get()); std::thread server_thread( - std::bind(&detail::AsyncGRPCServer::RunSyncUpdate, rpc_service.get())); - *initialized = true; - rpc_service->SetCond(0); - auto recv = rpc_service->Get(); + std::bind(&detail::AsyncGRPCServer::StartServer, g_rpc_service.get())); + + g_rpc_service->SetCond(detail::kRequestSend); + std::cout << "before WaitFanInOfSend" << std::endl; + g_rpc_service->WaitBarrier(detail::kRequestSend); + LOG(INFO) << "got nccl id and stop server..."; - rpc_service->ShutDown(); + g_rpc_service->ShutDown(); server_thread.join(); } -TEST(SendNcclId, DISABLED_Normal) { - std::atomic initialized{false}; - std::thread server_thread(StartServer, &initialized); - while (!initialized) { - } - // wait server to start - // sleep(2); - rpc_service->WaitServerReady(); +TEST(SendNcclId, GrpcServer) { + g_req_handler.reset(new detail::RequestSendHandler(true)); + g_rpc_service.reset(new detail::AsyncGRPCServer("127.0.0.1:0", 1)); + + std::thread server_thread(StartServer); + g_rpc_service->WaitServerReady(); f::Scope scope; p::CPUPlace place; @@ -78,17 +82,20 @@ TEST(SendNcclId, DISABLED_Normal) { auto& dev_ctx = *pool.Get(p::CPUPlace()); auto var = scope.Var(NCCL_ID_VARNAME); - // var->SetType(f::proto::VarType_Type_RAW); auto id = var->GetMutable(); p::dynload::ncclGetUniqueId(id); - int port = rpc_service->GetSelectedPort(); + int port = g_rpc_service->GetSelectedPort(); + std::string ep = string::Sprintf("127.0.0.1:%d", port); detail::RPCClient client; - + LOG(INFO) << "connect to server" << ep; client.AsyncSendVariable(ep, dev_ctx, scope, NCCL_ID_VARNAME); client.Wait(); + client.AsyncSendBatchBarrier(ep); + client.Wait(); + server_thread.join(); - auto* ptr = rpc_service.release(); - delete ptr; + g_rpc_service.reset(nullptr); + g_req_handler.reset(nullptr); } diff --git a/paddle/fluid/operators/top_k_op.h b/paddle/fluid/operators/top_k_op.h index d44eeae8e6ff9ac87ab093d04e3f5427743f0c08..7ddb82ef6ff063868a4b9b603b8ab89700b9dd13 100644 --- a/paddle/fluid/operators/top_k_op.h +++ b/paddle/fluid/operators/top_k_op.h @@ 
-55,6 +55,9 @@ class TopkKernel : public framework::OpKernel { // NOTE: eigen shape doesn't affect paddle tensor. eg_input.reshape(flat2dims); +#ifdef PADDLE_WITH_MKLML +#pragma omp parallel for +#endif for (size_t i = 0; i < row; i++) { std::vector> vec; for (size_t j = 0; j < col; j++) { diff --git a/paddle/fluid/platform/nccl_helper.h b/paddle/fluid/platform/nccl_helper.h index 09367889a9517956ad01ad2847c31e2633cc643d..6f8e3f22db54d166cf97cfdd3d009058207a7ca5 100644 --- a/paddle/fluid/platform/nccl_helper.h +++ b/paddle/fluid/platform/nccl_helper.h @@ -15,6 +15,7 @@ #pragma once #include +#include #include // NOLINT #include #include diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index 2fb5c6dc6b8ad25fa1ad5fcf7c2acfedd5be4a83..3d8d64e4c2758675067834810ebb9aee1e88fdb9 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -38,6 +38,7 @@ struct EventList; static int64_t profiler_lister_id = 0; static bool should_send_profile_state = false; +std::mutex profiler_mu; // The profiler state, the initial value is ProfilerState::kDisabled static ProfilerState g_state = ProfilerState::kDisabled; @@ -228,6 +229,8 @@ void EnableProfiler(ProfilerState state) { PADDLE_ENFORCE(state != ProfilerState::kDisabled, "Can't enbale profling, since the input state is ", "ProfilerState::kDisabled"); + + std::lock_guard l(profiler_mu); if (state == g_state) { return; } @@ -295,7 +298,7 @@ void PrintProfiler(const std::vector>& events_table, } else if (g_state == ProfilerState::kAll) { place = "All"; } else { - PADDLE_THROW("Invalid profiler state"); + PADDLE_THROW("Invalid profiler state", g_state); } std::cout << "Place: " << place << std::endl; @@ -443,6 +446,7 @@ void ParseEvents(const std::vector>& events, void DisableProfiler(EventSortingKey sorted_key, const std::string& profile_path) { + std::lock_guard l(profiler_mu); if (g_state == ProfilerState::kDisabled) return; // Mark the profiling stop. Mark("_stop_profiler_", nullptr); @@ -466,7 +470,7 @@ void SetProfileListener() { std::mt19937 rng; rng.seed(std::random_device()()); std::uniform_int_distribution dist6( - 1, std::numeric_limits::max()); + 1, std::numeric_limits::max()); profiler_lister_id = dist6(rng); } int64_t ListenerId() { return profiler_lister_id; } diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 50a1c07251b5bc4e7cc27de63f5457d3f94daef5..3af8941be69fe507bc105e26b608ec768e4b5998 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -117,6 +117,7 @@ PYBIND11_PLUGIN(core) { .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) + .def("set", PyCPUTensorSetFromArray) #ifdef PADDLE_WITH_CUDA .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) @@ -124,12 +125,14 @@ PYBIND11_PLUGIN(core) { .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) + .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDAPinnedTensorSetFromArray) .def("set", PyCUDAPinnedTensorSetFromArray) .def("set", PyCUDAPinnedTensorSetFromArray) .def("set", PyCUDAPinnedTensorSetFromArray) .def("set", PyCUDAPinnedTensorSetFromArray) .def("set", PyCUDAPinnedTensorSetFromArray) + .def("set", PyCUDAPinnedTensorSetFromArray) #endif .def("shape", [](Tensor &self) { return vectorize(self.dims()); }) .def("set_float_element", TensorSetElement) @@ -492,6 +495,7 @@ All parameter, weight, gradient are variables in Paddle. 
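With the `uint8` overloads of `set` added to the tensor bindings above, byte data (e.g. raw image pixels) can be fed from numpy directly. A small sketch, assuming the usual `fluid.core` tensor API:

```
import numpy as np
import paddle.fluid as fluid

place = fluid.CPUPlace()
t = fluid.core.LoDTensor()
t.set(np.random.randint(0, 256, size=(2, 3, 8, 8)).astype('uint8'), place)
print(t.shape())  # [2, 3, 8, 8]
```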
m.def("enable_profiler", platform::EnableProfiler); m.def("disable_profiler", platform::DisableProfiler); + m.def("is_profiler_enabled", platform::IsProfileEnabled); m.def("reset_profiler", platform::ResetProfiler); // -- python binds for parallel executor. diff --git a/paddle/function/neon/NeonDepthwiseConv.cpp b/paddle/function/neon/NeonDepthwiseConv.cpp index 85bc95bb88ca606e289fb6dad4946a77faf3d5fb..d7ac83da41aaba5cd38b042d0381dea527f9c42d 100644 --- a/paddle/function/neon/NeonDepthwiseConv.cpp +++ b/paddle/function/neon/NeonDepthwiseConv.cpp @@ -66,18 +66,18 @@ class NeonDepthwiseConvFunction : public ConvFunctionBase { float* inputPadding = inputData; int padInputHeight = inputHeight + 2 * paddingH(); int padInputWidth = inputWidth + 2 * paddingW(); - if (paddingH() > 0 || paddingW() > 0) { - int newSize = batchSize * inputChannels * padInputHeight * padInputWidth; - resizeBuffer(newSize); - inputPadding = reinterpret_cast(memory_->getBuf()); - neon::Padding::run(inputData, - inputPadding, - batchSize * inputChannels, - inputHeight, - inputWidth, - padInputHeight, - padInputWidth); - } + int newSize = + batchSize * (inputChannels + 1) * padInputHeight * padInputWidth; + + resizeBuffer(newSize); + inputPadding = reinterpret_cast(memory_->getBuf()); + neon::Padding::run(inputData, + inputPadding, + batchSize * inputChannels, + inputHeight, + inputWidth, + padInputHeight, + padInputWidth); std::function diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index fd3834ee21d8858016c3039cfea152904ac573e2..8eeea1805d8610f6f27f422337f3526688b73de3 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -183,7 +183,7 @@ function build() { ============================================ EOF make clean - make -j `nproc` + make install -j `nproc` } function build_android() { diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py index a44e078d0c13717643a6cfc6dd8bff5901ee9c97..7940dabcfb03cc9eb46f678365685a6e99bcceec 100644 --- a/python/paddle/fluid/data_feeder.py +++ b/python/paddle/fluid/data_feeder.py @@ -36,9 +36,11 @@ class DataToLoDTensorConverter(object): self.dtype = 'float64' elif dtype == core.VarDesc.VarType.INT32: self.dtype = 'int32' + elif dtype == core.VarDesc.VarType.UINT8: + self.dtype = 'uint8' else: raise ValueError("dtype must be any of [int32, float32, int64, " - "float64]") + "float64, uint8]") self.data = [] self.lod = [] diff --git a/python/paddle/fluid/debuger.py b/python/paddle/fluid/debugger.py similarity index 100% rename from python/paddle/fluid/debuger.py rename to python/paddle/fluid/debugger.py diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 21d74deab70182b52ccf60537d85d2359cc0ceb7..63ec83151477770ea64070cae4f5e4fcc497f7af 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -82,6 +82,7 @@ __all__ = [ 'roi_pool', 'dice_loss', 'upsampling_bilinear2d', + 'random_crop', ] @@ -154,7 +155,8 @@ def fc(input, Examples: .. 
code-block:: python - data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") + data = fluid.layers.data( + name="data", shape=[32, 32], dtype="float32") fc = fluid.layers.fc(input=data, size=1000, act="tanh") """ @@ -177,11 +179,8 @@ def fc(input, inputs={"X": input_var, "Y": w}, outputs={"Out": tmp}, - attrs={ - "x_num_col_dims": num_flatten_dims, - "y_num_col_dims": 1, - "use_mkldnn": use_mkldnn - }) + attrs={"x_num_col_dims": num_flatten_dims, + "y_num_col_dims": 1}) mul_results.append(tmp) if len(mul_results) == 1: @@ -349,7 +348,8 @@ def dynamic_lstm(input, cell_activation(str): The activation for cell output. Choices = ["sigmoid", "tanh", "relu", "identity"], default "tanh". candidate_activation(str): The activation for candidate hidden state. - Choices = ["sigmoid", "tanh", "relu", "identity"], + Choices = ["sigmoid", "tanh", + "relu", "identity"], default "tanh". dtype(str): Data type. Choices = ["float32", "float64"], default "float32". name(str|None): A name for this layer(optional). If set None, the layer @@ -516,10 +516,12 @@ def dynamic_lstmp(input, cell_activation(str): The activation for cell output. Choices = ["sigmoid", "tanh", "relu", "identity"], default "tanh". candidate_activation(str): The activation for candidate hidden state. - Choices = ["sigmoid", "tanh", "relu", "identity"], + Choices = ["sigmoid", "tanh", + "relu", "identity"], default "tanh". proj_activation(str): The activation for projection output. - Choices = ["sigmoid", "tanh", "relu", "identity"], + Choices = ["sigmoid", "tanh", + "relu", "identity"], default "tanh". dtype(str): Data type. Choices = ["float32", "float64"], default "float32". name(str|None): A name for this layer(optional). If set None, the layer @@ -855,7 +857,7 @@ def cos_sim(X, Y): return out -def dropout(x, dropout_prob, is_test=False, seed=None): +def dropout(x, dropout_prob, is_test=False, seed=None, name=None): """ Computes dropout. @@ -873,6 +875,8 @@ def dropout(x, dropout_prob, is_test=False, seed=None): parameter is set to None, a random seed is used. NOTE: If an integer seed is given, always the same output units will be dropped. DO NOT use a fixed seed in training. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: Variable: A tensor variable. 
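The `dropout` layer above gains an optional `name` argument for explicitly naming the layer. A usage sketch with hypothetical shapes:

```
import paddle.fluid as fluid

x = fluid.layers.data(name="x", shape=[32], dtype="float32")
hidden = fluid.layers.fc(input=x, size=64, act="relu")
dropped = fluid.layers.dropout(hidden, dropout_prob=0.5, name="drop1")
```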
@@ -1117,7 +1121,7 @@ def sequence_softmax(input, param_attr=None, bias_attr=None, use_cudnn=True): return softmax_out -def softmax(input, param_attr=None, bias_attr=None, use_cudnn=True): +def softmax(input, param_attr=None, bias_attr=None, use_cudnn=True, name=None): helper = LayerHelper('softmax', **locals()) dtype = helper.input_dtype() softmax_out = helper.create_tmp_variable(dtype) @@ -2172,7 +2176,8 @@ def reduce_mean(input, dim=None, keep_dim=False, name=None): fluid.layers.reduce_mean(x) # [0.4375] fluid.layers.reduce_mean(x, dim=0) # [0.15, 0.25, 0.55, 0.8] fluid.layers.reduce_mean(x, dim=-1) # [0.475, 0.4] - fluid.layers.reduce_mean(x, dim=1, keep_dim=True) # [[0.475], [0.4]] + fluid.layers.reduce_mean( + x, dim=1, keep_dim=True) # [[0.475], [0.4]] # x is a Tensor variable with shape [2, 2, 2] and elements as below: # [[[1.0, 2.0], [3.0, 4.0]], @@ -2391,7 +2396,8 @@ def split(input, num_or_sections, dim=-1, name=None): x0.shape # [3, 3, 5] x1.shape # [3, 3, 5] x2.shape # [3, 3, 5] - x0, x1, x2 = fluid.layers.split(x, num_or_sections=[2, 3, 4], dim=1) + x0, x1, x2 = fluid.layers.split( + x, num_or_sections=[2, 3, 4], dim=1) x0.shape # [3, 2, 5] x1.shape # [3, 3, 5] x2.shape # [3, 4, 5] @@ -2610,7 +2616,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None): return out -def topk(input, k): +def topk(input, k, name=None): """ This operator is used to find values and indices of the k largest entries for the last dimension. @@ -2626,6 +2632,8 @@ def topk(input, k): input(Variable): The input variable which can be a vector or Tensor with higher rank. k(int): An integer value to specify the top k largest elements. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: values(Variable): The k largest elements along each last dimensional @@ -3301,7 +3309,8 @@ def softmax_with_cross_entropy(logits, label, soft_label=False): data = fluid.layers.data(name='data', shape=[128], dtype='float32') label = fluid.layers.data(name='label', shape=[1], dtype='int64') fc = fluid.layers.fc(input=data, size=100) - out = fluid.layers.softmax_with_cross_entropy(logits=fc, label=label) + out = fluid.layers.softmax_with_cross_entropy( + logits=fc, label=label) """ helper = LayerHelper('softmax_with_cross_entropy', **locals()) softmax = helper.create_tmp_variable(dtype=logits.dtype) @@ -3348,7 +3357,8 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None): .. code-block:: python data = fluid.layers.data(name='data', shape=[128], dtype='float32') - label = fluid.layers.data(name='label', shape=[100], dtype='float32') + label = fluid.layers.data( + name='label', shape=[100], dtype='float32') fc = fluid.layers.fc(input=data, size=100) out = fluid.layers.smooth_l1(x=fc, y=label) """ @@ -3670,7 +3680,8 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None): Examples: .. code-block:: python - data = fluid.layers.data(name="data", shape=[3, 112, 112], dtype="float32") + data = fluid.layers.data( + name="data", shape=[3, 112, 112], dtype="float32") lrn = fluid.layers.lrn(input=data) """ helper = LayerHelper('lrn', **locals()) @@ -3925,10 +3936,10 @@ def upsampling_bilinear2d(input, out_shape=None, scale=None, name=None): Bilinear interpolation is an extension of linear interpolation for interpolating functions of two variables (e.g. H-direction and W-direction in this layer) on a rectilinear 2D grid. 
- + For details, please refer to Wikipedia: https://en.wikipedia.org/wiki/Bilinear_interpolation - + Args: input (Variable): The input tensor of bilinear interpolation, This is a 4-D tensor of the shape @@ -3946,7 +3957,7 @@ def upsampling_bilinear2d(input, out_shape=None, scale=None, name=None): Returns: out (Variable): The output is a 4-D tensor of the shape (num_batches, channls, out_h, out_w). - + Examples: .. code-block:: python @@ -3979,3 +3990,33 @@ def upsampling_bilinear2d(input, out_shape=None, scale=None, name=None): attrs={"out_h": out_h, "out_w": out_w}) return out + + +def random_crop(input, shape, seed=1): + helper = LayerHelper("random_crop", **locals()) + dtype = helper.input_dtype() + out = helper.create_tmp_variable(dtype) + if isinstance(seed, int): + seed_value = seed + seed = helper.create_tmp_variable(dtype="int64") + helper.append_op( + type="fill_constant", + inputs={}, + outputs={"Out": seed}, + attrs={ + "dtype": seed.dtype, + "shape": [1], + "value": float(seed_value), + "force_cpu": True + }) + elif not isinstance(seed, Variable): + raise ValueError("'seed' must be a Variable or an int.") + seed_out = helper.create_tmp_variable(dtype="int64") + helper.append_op( + type="random_crop", + inputs={"X": input, + "Seed": seed}, + outputs={"Out": out, + "SeedOut": seed_out}, + attrs={"shape": shape}) + return out diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 4be0dc6a6bfeed3ac254f5c363d3560973c031b4..be34cc81a5d5ca0e781e5984b6c3eeaa4e25eb90 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -112,7 +112,7 @@ def cast(x, dtype): return out -def concat(input, axis=0): +def concat(input, axis=0, name=None): """ **Concat** @@ -122,6 +122,8 @@ def concat(input, axis=0): Args: input(list): List of tensors to be concatenated axis(int): Integer axis along which the tensors will be concatenated + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: Variable: Output variable of the concatenation diff --git a/python/paddle/fluid/profiler.py b/python/paddle/fluid/profiler.py index 04fd05cc33cff3d720be75923d4af3767942669f..e2bd1d4c9a1ea5ddc0dfd19c769dcb40bfd6d04c 100644 --- a/python/paddle/fluid/profiler.py +++ b/python/paddle/fluid/profiler.py @@ -16,7 +16,10 @@ import core from contextlib import contextmanager import os -__all__ = ['cuda_profiler', 'reset_profiler', 'profiler'] +__all__ = [ + 'cuda_profiler', 'reset_profiler', 'profiler', 'start_profiler', + 'stop_profiler' +] NVPROF_CONFIG = [ "gpustarttimestamp", @@ -72,20 +75,31 @@ def reset_profiler(): core.reset_profiler() -@contextmanager -def profiler(state, sorted_key=None, profile_path='/tmp/profile'): - """The profiler interface. - Different from cuda_profiler, this profiler can be used to profile both CPU - and GPU program. By defalut, it records the CPU and GPU operator kernels, - if you want to profile other program, you can refer the profiling tutorial - to add more records. +def start_profiler(state): + """Enable the profiler. + + Args: + state (string) : The profiling state, which should be 'CPU', 'GPU' + or 'All'. 'CPU' means only profile CPU. 'GPU' means profiling + GPU as well. 'All' also generates timeline. 
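Usage sketch for the new `fluid.layers.random_crop` wrapper added above (shapes are hypothetical):

```
import paddle.fluid as fluid

img = fluid.layers.data(name="img", shape=[3, 256, 256], dtype="float32")
# crop each instance in the batch to 3 x 224 x 224 at its own random offset
cropped = fluid.layers.random_crop(img, shape=[3, 224, 224])
```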
+ """ + if core.is_profiler_enabled(): + return + if state not in ['CPU', 'GPU', "All"]: + raise ValueError("The state must be 'CPU' or 'GPU' or 'All'.") + if state == "GPU": + prof_state = core.ProfilerState.kCUDA + elif state == "CPU": + prof_state = core.ProfilerState.kCPU + else: + prof_state = core.ProfilerState.kAll + core.enable_profiler(prof_state) + + +def stop_profiler(sorted_key=None, profile_path='/tmp/profile'): + """Stop the profiler. Args: - state (string) : The profiling state, which should be 'CPU' or 'GPU', - telling the profiler to use CPU timer or GPU timer for profiling. - Although users may have already specified the execution place - (CPUPlace/CUDAPlace) in the begining, for flexibility the profiler - would not inherit this place. sorted_key (string) : If None, the profiling results will be printed in the order of first end time of events. Otherwise, the profiling results will be sorted by the this flag. This flag should be one @@ -98,17 +112,8 @@ def profiler(state, sorted_key=None, profile_path='/tmp/profile'): profile_path (string) : If state == 'All', it will write a profile proto output file. """ - if state not in ['CPU', 'GPU', "All"]: - raise ValueError("The state must be 'CPU' or 'GPU' or 'All'.") - if state == "GPU": - prof_state = core.ProfilerState.kCUDA - elif state == "CPU": - prof_state = core.ProfilerState.kCPU - else: - prof_state = core.ProfilerState.kAll - core.enable_profiler(prof_state) - yield - + if not core.is_profiler_enabled(): + return sorted_key = 'default' if sorted_key is None else sorted_key if sorted_key not in ['default', 'calls', 'total', 'max', 'min', 'ave']: raise ValueError("The sorted_key must be None or in 'calls', 'total', " @@ -124,3 +129,34 @@ def profiler(state, sorted_key=None, profile_path='/tmp/profile'): # TODO(qingqing) : redirect C++ ostream to Python stream. # with core.ostream_redirect(stdout=True, stderr=True): core.disable_profiler(key_map[sorted_key], profile_path) + + +@contextmanager +def profiler(state, sorted_key=None, profile_path='/tmp/profile'): + """The profiler interface. + Different from cuda_profiler, this profiler can be used to profile both CPU + and GPU program. By defalut, it records the CPU and GPU operator kernels, + if you want to profile other program, you can refer the profiling tutorial + to add more records. + + Args: + state (string) : The profiling state, which should be 'CPU' or 'GPU', + telling the profiler to use CPU timer or GPU timer for profiling. + Although users may have already specified the execution place + (CPUPlace/CUDAPlace) in the begining, for flexibility the profiler + would not inherit this place. + sorted_key (string) : If None, the profiling results will be printed + in the order of first end time of events. Otherwise, the profiling + results will be sorted by the this flag. This flag should be one + of 'calls', 'total', 'max', 'min' or 'ave'. + The `calls` means sorting by the number of calls. + The `total` means sorting by the total execution time. + The `max` means sorting by the maximum execution time. + The `min` means sorting by the minimum execution time. + The `ave` means sorting by the average execution time. + profile_path (string) : If state == 'All', it will write a profile + proto output file. 
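Splitting the profiler into `start_profiler`/`stop_profiler` lets profiling be switched on and off from inside a training loop, with the `profiler` context manager rebuilt on top of the pair. A usage sketch (`run_one_step` is a placeholder):

```
import paddle.fluid.profiler as profiler


def run_one_step():
    pass  # placeholder for one training iteration


profiler.start_profiler('CPU')
for step in range(20):
    run_one_step()
profiler.stop_profiler(sorted_key='total', profile_path='/tmp/profile')
```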
+ """ + start_profiler(state) + yield + stop_profiler(sorted_key, profile_path) diff --git a/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py b/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py index 9464df59797c0b8c35611ee56de6bf362ac7a4a5..8cce398ff33695dc15ae6fb01a887194596af001 100755 --- a/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py +++ b/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py @@ -217,8 +217,6 @@ def infer(use_cuda, inference_program, params_dirname): # The range of random integers is [low, high] word = fluid.create_random_int_lodtensor( lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1) - pred = fluid.create_random_int_lodtensor( - lod, base_shape, place, low=0, high=PRED_DICT_LEN - 1) ctx_n2 = fluid.create_random_int_lodtensor( lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1) ctx_n1 = fluid.create_random_int_lodtensor( @@ -229,18 +227,20 @@ def infer(use_cuda, inference_program, params_dirname): lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1) ctx_p2 = fluid.create_random_int_lodtensor( lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1) + pred = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=PRED_DICT_LEN - 1) mark = fluid.create_random_int_lodtensor( lod, base_shape, place, low=0, high=MARK_DICT_LEN - 1) results = inferencer.infer( { 'word_data': word, - 'verb_data': pred, 'ctx_n2_data': ctx_n2, 'ctx_n1_data': ctx_n1, 'ctx_0_data': ctx_0, 'ctx_p1_data': ctx_p1, 'ctx_p2_data': ctx_p2, + 'verb_data': pred, 'mark_data': mark }, return_numpy=False) diff --git a/python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py b/python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py index 7204c7b3c7648a24de89d41e205db5b18ed2a5fc..d4b723d3e6b619709ab3dc76a32ae87f1cdec274 100644 --- a/python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py +++ b/python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py @@ -53,7 +53,7 @@ def encoder(is_sparse): return encoder_out -def decoder_train(context, is_sparse): +def train_decoder(context, is_sparse): # decoder trg_language_word = pd.data( name="target_language_word", shape=[1], dtype='int64', lod_level=1) @@ -81,7 +81,7 @@ def decoder_train(context, is_sparse): return rnn() -def decoder_decode(context, is_sparse): +def decode(context, is_sparse): init_state = context array_len = pd.fill_constant(shape=[1], dtype='int64', value=max_length) counter = pd.zeros(shape=[1], dtype='int64', force_cpu=True) @@ -148,31 +148,9 @@ def decoder_decode(context, is_sparse): return translation_ids, translation_scores -def set_init_lod(data, lod, place): - res = fluid.LoDTensor() - res.set(data, place) - res.set_lod(lod) - return res - - -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = fluid.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - def train_program(is_sparse): context = encoder(is_sparse) - rnn_out = decoder_train(context, is_sparse) + rnn_out = 
train_decoder(context, is_sparse) label = pd.data( name="target_language_next_word", shape=[1], dtype='int64', lod_level=1) cost = pd.cross_entropy(input=rnn_out, label=label) @@ -218,13 +196,12 @@ def train(use_cuda, is_sparse, is_local=True): def decode_main(use_cuda, is_sparse): - if use_cuda and not fluid.core.is_compiled_with_cuda(): return place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() context = encoder(is_sparse) - translation_ids, translation_scores = decoder_decode(context, is_sparse) + translation_ids, translation_scores = decode(context, is_sparse) exe = Executor(place) exe.run(framework.default_startup_program()) @@ -234,26 +211,32 @@ def decode_main(use_cuda, is_sparse): [1. for _ in range(batch_size)], dtype='float32') init_ids_data = init_ids_data.reshape((batch_size, 1)) init_scores_data = init_scores_data.reshape((batch_size, 1)) - init_lod = [i for i in range(batch_size)] + [batch_size] + init_lod = [1] * batch_size init_lod = [init_lod, init_lod] + init_ids = fluid.create_lod_tensor(init_ids_data, init_lod, place) + init_scores = fluid.create_lod_tensor(init_scores_data, init_lod, place) + train_data = paddle.batch( paddle.reader.shuffle( paddle.dataset.wmt14.train(dict_size), buf_size=1000), batch_size=batch_size) - for _, data in enumerate(train_data()): - init_ids = set_init_lod(init_ids_data, init_lod, place) - init_scores = set_init_lod(init_scores_data, init_lod, place) - src_word_data = to_lodtensor(map(lambda x: x[0], data), place) + feed_order = ['src_word_id'] + feed_list = [ + framework.default_main_program().global_block().var(var_name) + for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + + for data in train_data(): + feed_dict = feeder.feed(map(lambda x: [x[0]], data)) + feed_dict['init_ids'] = init_ids + feed_dict['init_scores'] = init_scores result_ids, result_scores = exe.run( framework.default_main_program(), - feed={ - 'src_word_id': src_word_data, - 'init_ids': init_ids, - 'init_scores': init_scores - }, + feed=feed_dict, fetch_list=[translation_ids, translation_scores], return_numpy=False) print result_ids.lod() diff --git a/python/paddle/fluid/tests/book/test_machine_translation.py b/python/paddle/fluid/tests/book/test_machine_translation.py index e8a75f473f62df528b7f39bf5f9085076e005c25..23e5900f127a7a3253c551f8f7fbceba08382209 100644 --- a/python/paddle/fluid/tests/book/test_machine_translation.py +++ b/python/paddle/fluid/tests/book/test_machine_translation.py @@ -147,28 +147,6 @@ def decoder_decode(context, is_sparse): return translation_ids, translation_scores -def set_init_lod(data, lod, place): - res = fluid.LoDTensor() - res.set(data, place) - res.set_lod(lod) - return res - - -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = fluid.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - def train_main(use_cuda, is_sparse, is_local=True): if use_cuda and not fluid.core.is_compiled_with_cuda(): return @@ -192,23 +170,25 @@ def train_main(use_cuda, is_sparse, is_local=True): paddle.dataset.wmt14.train(dict_size), buf_size=1000), batch_size=batch_size) + feed_order = [ + 'src_word_id', 'target_language_word', 'target_language_next_word' + ] + exe = Executor(place) def train_loop(main_program): 
exe.run(framework.default_startup_program()) + feed_list = [ + main_program.global_block().var(var_name) for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + batch_id = 0 for pass_id in xrange(1): for data in train_data(): - word_data = to_lodtensor(map(lambda x: x[0], data), place) - trg_word = to_lodtensor(map(lambda x: x[1], data), place) - trg_word_next = to_lodtensor(map(lambda x: x[2], data), place) outs = exe.run(main_program, - feed={ - 'src_word_id': word_data, - 'target_language_word': trg_word, - 'target_language_next_word': trg_word_next - }, + feed=feeder.feed(data), fetch_list=[avg_cost]) avg_cost_val = np.array(outs[0]) print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + @@ -258,26 +238,32 @@ def decode_main(use_cuda, is_sparse): [1. for _ in range(batch_size)], dtype='float32') init_ids_data = init_ids_data.reshape((batch_size, 1)) init_scores_data = init_scores_data.reshape((batch_size, 1)) - init_lod = [i for i in range(batch_size)] + [batch_size] + init_lod = [1] * batch_size init_lod = [init_lod, init_lod] + init_ids = fluid.create_lod_tensor(init_ids_data, init_lod, place) + init_scores = fluid.create_lod_tensor(init_scores_data, init_lod, place) + train_data = paddle.batch( paddle.reader.shuffle( paddle.dataset.wmt14.train(dict_size), buf_size=1000), batch_size=batch_size) - for _, data in enumerate(train_data()): - init_ids = set_init_lod(init_ids_data, init_lod, place) - init_scores = set_init_lod(init_scores_data, init_lod, place) - src_word_data = to_lodtensor(map(lambda x: x[0], data), place) + feed_order = ['src_word_id'] + feed_list = [ + framework.default_main_program().global_block().var(var_name) + for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + + for data in train_data(): + feed_dict = feeder.feed(map(lambda x: [x[0]], data)) + feed_dict['init_ids'] = init_ids + feed_dict['init_scores'] = init_scores result_ids, result_scores = exe.run( framework.default_main_program(), - feed={ - 'src_word_id': src_word_data, - 'init_ids': init_ids, - 'init_scores': init_scores - }, + feed=feed_dict, fetch_list=[translation_ids, translation_scores], return_numpy=False) print result_ids.lod() diff --git a/python/paddle/fluid/tests/book/notest_rnn_encoder_decoder.py b/python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py similarity index 87% rename from python/paddle/fluid/tests/book/notest_rnn_encoder_decoder.py rename to python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py index ce640dece8a5067bd10f410a2bb58874b7cc0908..7ada57def6bfedb113ea1a56f9677116b80488ea 100644 --- a/python/paddle/fluid/tests/book/notest_rnn_encoder_decoder.py +++ b/python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py @@ -152,29 +152,6 @@ def seq_to_seq_net(): return avg_cost, prediction -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = core.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - -def create_random_lodtensor(lod, place, low, high): - data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64") - res = fluid.LoDTensor() - res.set(data, place) - res.set_lod([lod]) - return res - - def train(use_cuda, save_dirname=None): [avg_cost, prediction] = seq_to_seq_net() @@ -188,22 +165,20 @@ def train(use_cuda, 
save_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = Executor(place) - exe.run(framework.default_startup_program()) + feed_order = ['source_sequence', 'target_sequence', 'label_sequence'] + feed_list = [ + framework.default_main_program().global_block().var(var_name) + for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + batch_id = 0 for pass_id in xrange(2): for data in train_data(): - word_data = to_lodtensor(map(lambda x: x[0], data), place) - trg_word = to_lodtensor(map(lambda x: x[1], data), place) - trg_word_next = to_lodtensor(map(lambda x: x[2], data), place) - outs = exe.run(framework.default_main_program(), - feed={ - 'source_sequence': word_data, - 'target_sequence': trg_word, - 'label_sequence': trg_word_next - }, + feed=feeder.feed(data), fetch_list=[avg_cost]) avg_cost_val = np.array(outs[0]) @@ -237,9 +212,23 @@ def infer(use_cuda, save_dirname=None): [inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) - lod = [0, 4, 10] - word_data = create_random_lodtensor(lod, place, low=0, high=1) - trg_word = create_random_lodtensor(lod, place, low=0, high=1) + # Setup input by creating LoDTensor to represent sequence of words. + # Here each word is the basic element of the LoDTensor and the shape of + # each word (base_shape) should be [1] since it is simply an index to + # look up for the corresponding word vector. + # Suppose the length_based level of detail (lod) info is set to [[4, 6]], + # which has only one lod level. Then the created LoDTensor will have only + # one higher level structure (sequence of words, or sentence) than the basic + # element (word). Hence the LoDTensor will hold data for two sentences of + # length 4 and 6, respectively. + # Note that lod info should be a list of lists. + lod = [[4, 6]] + base_shape = [1] + # The range of random integers is [low, high] + word_data = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=1) + trg_word = fluid.create_random_int_lodtensor( + lod, base_shape, place, low=0, high=1) # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. diff --git a/python/paddle/fluid/tests/unittests/test_debugger.py b/python/paddle/fluid/tests/unittests/test_debugger.py index 67b03f635b6f8a3003efabe5425325080d47f61c..870952f2f916dcdec5991ac5c10d2da3a7ab18a8 100644 --- a/python/paddle/fluid/tests/unittests/test_debugger.py +++ b/python/paddle/fluid/tests/unittests/test_debugger.py @@ -15,7 +15,7 @@ import unittest import paddle.fluid as fluid import paddle.fluid.core as core -from paddle.fluid import debuger +from paddle.fluid import debugger from paddle.fluid.framework import Program @@ -51,9 +51,9 @@ class TestDebugger(unittest.TestCase): outputs={"Out": mul_out}, attrs={"x_num_col_dims": 1}) - print(debuger.pprint_program_codes(p)) + print(debugger.pprint_program_codes(p)) - debuger.draw_block_graphviz(p.block(0), path="./test.dot") + debugger.draw_block_graphviz(p.block(0), path="./test.dot") if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py b/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py new file mode 100644 index 0000000000000000000000000000000000000000..281068e945e76a42635868d19573498f79fde1f3 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py @@ -0,0 +1,60 @@ +# Copyright (c) 2018 PaddlePaddle Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +import math +from op_test import OpTest + + +def quantize_max_abs(x, num_bits): + range = math.pow(2, num_bits) - 1 + scale = np.max(np.abs(x).flatten()) + y = np.round(x / scale * range) + return y, scale + + +def dequantize_max_abs(x, num_bits, scale): + range = math.pow(2, num_bits) - 1 + y = (scale / range) * x + return y + + +class TestFakeDequantizeMaxAbsOp(OpTest): + def set_args(self): + self.num_bits = 8 + + def setUp(self): + self.set_args() + self.op_type = "fake_dequantize_max_abs" + x = np.random.randn(31, 65).astype("float32") + yq, scale = quantize_max_abs(x, self.num_bits) + print 'scale ', scale + ydq = dequantize_max_abs(yq, self.num_bits, scale) + + self.inputs = {'X': yq} + self.attrs = {'num_bits': self.num_bits, 'scale': float(scale)} + self.outputs = {'Out': ydq} + + def test_check_output(self): + self.check_output() + + +class TestFakeDequantizeMaxAbsOp5Bits(OpTest): + def set_args(self): + self.num_bits = 5 + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_mul_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_mul_mkldnn_op.py deleted file mode 100644 index 42d68ef376dc4a664a96ff5a24545c1997ee924a..0000000000000000000000000000000000000000 --- a/python/paddle/fluid/tests/unittests/test_mul_mkldnn_op.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
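For reference, a minimal standalone sketch of the max-abs round trip exercised by the new test_fake_dequantize_op.py above; the helper names mirror that test, while the tolerance check is an illustrative assumption, not something the test asserts:

    import numpy as np

    def quantize_max_abs(x, num_bits):
        # Map x onto integer steps in [-(2^num_bits - 1), 2^num_bits - 1]
        # using the tensor's maximum absolute value as the scale.
        qrange = 2 ** num_bits - 1
        scale = np.max(np.abs(x))
        return np.round(x / scale * qrange), scale

    def dequantize_max_abs(y, num_bits, scale):
        # Invert the mapping; the reconstruction error is bounded by
        # roughly scale / (2^num_bits - 1) / 2 per element.
        qrange = 2 ** num_bits - 1
        return y * (scale / qrange)

    x = np.random.randn(4, 5).astype("float32")
    yq, scale = quantize_max_abs(x, 8)
    x_rec = dequantize_max_abs(yq, 8, scale)
    assert np.max(np.abs(x - x_rec)) <= scale / (2 ** 8 - 1) / 2 + 1e-6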
- -import unittest -from test_mul_op import TestMulOp, TestMulOp2, TestFP16MulOp1, TestFP16MulOp2 - - -class TestMKLDNNMulOp(TestMulOp): - def init_op_test(self): - super(TestMKLDNNMulOp, self).setUp() - self.attrs = {"use_mkldnn": True} - - -class TestMKLDNNMulOp2(TestMulOp2): - def init_op_test(self): - super(TestMKLDNNMulOp2, self).setUp() - self.attrs = {"use_mkldnn": True} - - -class TestMKLDNNFP16MulOp1(TestFP16MulOp1): - def init_op_test(self): - super(TestMKLDNNFP16MulOp1, self).setUp() - self.attrs = {"use_mkldnn": True} - - -class TestMKLDNNFP16MulOp2(TestFP16MulOp2): - def init_op_test(self): - super(TestMKLDNNFP16MulOp2, self).setUp() - self.attrs = {"use_mkldnn": True} - - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_mul_op.py b/python/paddle/fluid/tests/unittests/test_mul_op.py index d984393c89f44f5b9679a22bf7bb6182599233e3..862b7f8cb93620da4dd4673028776cfe565eeb0b 100644 --- a/python/paddle/fluid/tests/unittests/test_mul_op.py +++ b/python/paddle/fluid/tests/unittests/test_mul_op.py @@ -21,12 +21,10 @@ from op_test import OpTest class TestMulOp(OpTest): def setUp(self): self.op_type = "mul" - self.use_mkldnn = False self.inputs = { 'X': np.random.random((32, 84)).astype("float32"), 'Y': np.random.random((84, 100)).astype("float32") } - self.attrs = {'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} def test_check_output(self): @@ -47,16 +45,11 @@ class TestMulOp(OpTest): class TestMulOp2(OpTest): def setUp(self): self.op_type = "mul" - self.use_mkldnn = False self.inputs = { 'X': np.random.random((15, 4, 12, 10)).astype("float32"), 'Y': np.random.random((4, 30, 8, 2, 9)).astype("float32") } - self.attrs = { - 'x_num_col_dims': 2, - 'y_num_col_dims': 2, - 'use_mkldnn': self.use_mkldnn - } + self.attrs = {'x_num_col_dims': 2, 'y_num_col_dims': 2} result = np.dot(self.inputs['X'].reshape(15 * 4, 12 * 10), self.inputs['Y'].reshape(4 * 30, 8 * 2 * 9)) result = result.reshape(15, 4, 8, 2, 9) @@ -80,11 +73,9 @@ class TestMulOp2(OpTest): class TestFP16MulOp1(OpTest): def setUp(self): self.op_type = "mul" - self.use_mkldnn = False x = np.random.random((32, 84)).astype("float16") y = np.random.random((84, 100)).astype("float16") self.inputs = {'X': x.view(np.uint16), 'Y': y.view(np.uint16)} - self.attrs = {'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': np.dot(x, y)} def test_check_output(self): @@ -97,15 +88,10 @@ class TestFP16MulOp1(OpTest): class TestFP16MulOp2(OpTest): def setUp(self): self.op_type = "mul" - self.use_mkldnn = False x = np.random.random((15, 4, 12, 10)).astype("float16") y = np.random.random((4, 30, 8, 2, 9)).astype("float16") self.inputs = {'X': x.view(np.uint16), 'Y': y.view(np.uint16)} - self.attrs = { - 'x_num_col_dims': 2, - 'y_num_col_dims': 2, - 'use_mkldnn': self.use_mkldnn - } + self.attrs = {'x_num_col_dims': 2, 'y_num_col_dims': 2} result = np.dot( x.reshape(15 * 4, 12 * 10), y.reshape(4 * 30, 8 * 2 * 9)) result = result.reshape(15, 4, 8, 2, 9) diff --git a/python/paddle/fluid/tests/unittests/test_operator_desc.py b/python/paddle/fluid/tests/unittests/test_operator_desc.py index 8b15aa6822aee7bb4d53dcf1d87565fae5504821..c098a5a0cb0364f9ec93c95c1ef50912e574b3d9 100644 --- a/python/paddle/fluid/tests/unittests/test_operator_desc.py +++ b/python/paddle/fluid/tests/unittests/test_operator_desc.py @@ -63,10 +63,7 @@ class TestOperator(unittest.TestCase): self.assertEqual(mul_op.output("Out"), ["mul.out"]) self.assertEqual( set(mul_op.attr_names), - 
set([ - "x_num_col_dims", "y_num_col_dims", "use_mkldnn", "op_role", - "op_role_var" - ])) + set(["x_num_col_dims", "y_num_col_dims", "op_role", "op_role_var"])) self.assertEqual(mul_op.has_attr("x_num_col_dims"), True) self.assertEqual(mul_op.attr_type("x_num_col_dims"), core.AttrType.INT) self.assertEqual(mul_op.attr("x_num_col_dims"), 1) diff --git a/python/paddle/fluid/tests/unittests/test_random_crop_op.py b/python/paddle/fluid/tests/unittests/test_random_crop_op.py new file mode 100644 index 0000000000000000000000000000000000000000..1c708d0386da4028f1f3d177d0a3fd494c077c6e --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_random_crop_op.py @@ -0,0 +1,46 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +import paddle.fluid.core as core +from op_test import OpTest + + +class TestRandomCropOp(OpTest): + def setUp(self): + to_crop = np.array([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]] * + 5).astype("float32") + self.possible_res = [ + np.array([[1, 2, 3], [5, 6, 7]]), np.array([[2, 3, 4], [6, 7, 8]]), + np.array([[5, 6, 7], [9, 10, 11]]), + np.array([[6, 7, 8], [10, 11, 12]]) + ] + self.op_type = "random_crop" + self.inputs = {'X': to_crop, 'Seed': np.array([10])} + self.outputs = {'Out': np.array([]), 'SeedOut': np.array([])} + self.attrs = {'shape': [2, 3]} + + def test_check_output(self): + self.check_output_customized(self.verify_output) + + def verify_output(self, outs): + out = np.array(outs[1]) + for ins in out[:]: + is_equal = [(ins == res).all() for res in self.possible_res] + self.assertIn(True, is_equal) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_split_var.py b/python/paddle/fluid/tests/unittests/test_split_var.py index 0c5e8901b903375c7d4de32943e657b205d8fae9..157def9b56e44092a86023035d1ab444c938aa07 100644 --- a/python/paddle/fluid/tests/unittests/test_split_var.py +++ b/python/paddle/fluid/tests/unittests/test_split_var.py @@ -14,7 +14,7 @@ import math import unittest -from paddle.fluid.transpiler.distribute_transpiler import split_dense_variable +from paddle.fluid.transpiler.distribute_transpiler import split_variable import paddle.fluid as fluid import paddle.fluid.core as core import random @@ -31,7 +31,7 @@ class TestSplitVar(unittest.TestCase): # dtype=core.VarDesc.VarType.LOD_TENSOR, shape=shape) var_list.append(var) - blocks = split_dense_variable(var_list, 10, min_size) + blocks = split_variable(var_list, 10, min_size) all_sizes = [] for s in expected_sizes: for s2 in s: diff --git a/python/paddle/fluid/transpiler/details/__init__.py b/python/paddle/fluid/transpiler/details/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dc597c33849dc06cc975b245099672f64c3539d3 --- /dev/null +++ b/python/paddle/fluid/transpiler/details/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from program_utils import * +from ufind import * diff --git a/python/paddle/fluid/transpiler/details/program_utils.py b/python/paddle/fluid/transpiler/details/program_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f10b496306a002ee131d01798a0698b807d379ca --- /dev/null +++ b/python/paddle/fluid/transpiler/details/program_utils.py @@ -0,0 +1,37 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def delete_ops(block, ops): + try: + start = list(block.ops).index(ops[0]) + end = list(block.ops).index(ops[-1]) + [block.remove_op(start) for _ in xrange(end - start + 1)] + except Exception, e: + raise e + block.program.sync_with_cpp() + + +def find_op_by_input_arg(block, arg_name): + for index, op in enumerate(block.ops): + if arg_name in op.input_arg_names: + return index + return -1 + + +def find_op_by_output_arg(block, arg_name): + for index, op in enumerate(block.ops): + if arg_name in op.output_arg_names: + return index + return -1 diff --git a/python/paddle/fluid/transpiler/details/ufind.py b/python/paddle/fluid/transpiler/details/ufind.py new file mode 100644 index 0000000000000000000000000000000000000000..0e30d0e3f9c5712c494daf17b2b4bcec86f69c23 --- /dev/null +++ b/python/paddle/fluid/transpiler/details/ufind.py @@ -0,0 +1,64 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class UnionFind(object): + """ Union-find data structure. + + Union-find is a data structure that keeps track of a set of elements partitioned + into a number of disjoint (non-overlapping) subsets. + + Reference: + https://en.wikipedia.org/wiki/Disjoint-set_data_structure + + Args: + elements(list): The initialize element list. 
+ """ + + def __init__(self, elementes=None): + self._parents = [] # index -> parent index + self._index = {} # element -> index + self._curr_idx = 0 + if not elementes: + elementes = [] + for ele in elementes: + self._parents.append(self._curr_idx) + self._index.update({ele: self._curr_idx}) + self._curr_idx += 1 + + def find(self, x): + # Find the root index of given element x, + # execute the path compress while findind the root index + if not x in self._index: + return -1 + idx = self._index[x] + while idx != self._parents[idx]: + t = self._parents[idx] + self._parents[idx] = self._parents[t] + idx = t + return idx + + def union(self, x, y): + # Union two given element + x_root = self.find(x) + y_root = self.find(y) + + if x_root == y_root: + return + self._parents[x_root] = y_root + + def is_connected(self, x, y): + # If two given elements have the same root index, + # then they are connected. + return self.find(x) == self.find(y) diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py index e9b7d9e9d2dea54a33068d5c3fe3fbf22620d1ea..06b0a1375ce6568cca864cd8a2dd69ee46b223a7 100644 --- a/python/paddle/fluid/transpiler/distribute_transpiler.py +++ b/python/paddle/fluid/transpiler/distribute_transpiler.py @@ -11,6 +11,30 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +""" +Transpile the program to distributed data-parallelism programs. +The main_program will be transformed to use a remote parameter server +to do parameter optimization. And the optimization graph will be put +into a parameter server program. + +Use different methods to split trainable variables to different +parameter servers. + +Steps to transpile trainer: +1. split variable to multiple blocks, aligned by product(dim[1:]) (width). +2. rename splited grad variables to add trainer_id suffix ".trainer_%d". +3. modify trainer program add split_op to each grad variable. +4. append send_op to send splited variables to server and fetch + params(splited blocks or origin param) from server. +5. append concat_op to merge splited blocks to update local weights. + +Steps to transpile pserver: +1. create new program for parameter server. +2. create params and grad variables that assigned to current server instance. +3. create a sub-block in the server side program +4. append ops that should run on current server instance. +5. add listen_and_serv op +""" from __future__ import print_function @@ -21,9 +45,11 @@ from .. import core, framework from ..framework import Program, default_main_program, \ default_startup_program, \ Variable, Parameter, grad_var_name +from details import * LOOKUP_TABLE_TYPE = "lookup_table" LOOKUP_TABLE_GRAD_TYPE = "lookup_table_grad" +OP_ROLE_VAR_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleVarAttrName() RPC_OP_ROLE_ATTR_NAME = op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName( ) RPC_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.RPC @@ -40,62 +66,11 @@ class VarBlock: return "%s:%d:%d" % (self.varname, self.offset, self.size) -class UnionFind(object): - """ Union-find data structure. - - Union-find is a data structure that keeps track of a set of elements partitioned - into a number of disjoint (non-overlapping) subsets. - - Reference: - https://en.wikipedia.org/wiki/Disjoint-set_data_structure - - Args: - elements(list): The initialize element list. 
- """ - - def __init__(self, elementes=None): - self._parents = [] # index -> parent index - self._index = {} # element -> index - self._curr_idx = 0 - if not elementes: - elementes = [] - for ele in elementes: - self._parents.append(self._curr_idx) - self._index.update({ele: self._curr_idx}) - self._curr_idx += 1 - - def find(self, x): - # Find the root index of given element x, - # execute the path compress while findind the root index - if not x in self._index: - return -1 - idx = self._index[x] - while idx != self._parents[idx]: - t = self._parents[idx] - self._parents[idx] = self._parents[t] - idx = t - return idx - - def union(self, x, y): - # Union two given element - x_root = self.find(x) - y_root = self.find(y) - - if x_root == y_root: - return - self._parents[x_root] = y_root - - def is_connected(self, x, y): - # If two given elements have the same root index, - # then they are connected. - return self.find(x) == self.find(y) - - def same_or_split_var(p_name, var_name): return p_name == var_name or p_name.startswith(var_name + ".block") -def split_dense_variable(var_list, service_count, min_block_size=8192): +def split_variable(var_list, service_count, min_block_size=8192): """ We may need to split dense tensor to one or more blocks and put them equally onto parameter server. One block is a sub-tensor @@ -141,99 +116,15 @@ def split_dense_variable(var_list, service_count, min_block_size=8192): return blocks -def delete_ops(block, ops): - try: - start = list(block.ops).index(ops[0]) - end = list(block.ops).index(ops[-1]) - [block.remove_op(start) for _ in xrange(end - start + 1)] - except Exception, e: - raise e - block.program.sync_with_cpp() - - -def find_op_by_input_arg(block, arg_name): - for index, op in enumerate(block.ops): - if arg_name in op.input_arg_names: - return index - return -1 - - -def find_op_by_output_arg(block, arg_name): - for index, op in enumerate(block.ops): - if arg_name in op.output_arg_names: - return index - return -1 - - class DistributeTranspiler: - def transpile(self, - trainer_id, - program=None, - pservers="127.0.0.1:6174", - trainers=1, - split_method=RoundRobin, - sync_mode=True): - """ - Transpile the program to distributed data-parallelism programs. - The main_program will be transformed to use a remote parameter server - to do parameter optimization. And the optimization graph will be put - into a parameter server program. - - Use different methods to split trainable variables to different - parameter servers. - - Steps to transpile trainer: - 1. split variable to multiple blocks, aligned by product(dim[1:]) (width). - 2. rename splited grad variables to add trainer_id suffix ".trainer_%d". - 3. modify trainer program add split_op to each grad variable. - 4. append send_op to send splited variables to server and fetch - params(splited blocks or origin param) from server. - 5. append concat_op to merge splited blocks to update local weights. - - Steps to transpile pserver: - 1. create new program for parameter server. - 2. create params and grad variables that assigned to current server instance. - 3. create a sub-block in the server side program - 4. append ops that should run on current server instance. - 5. add listen_and_serv op - - :param trainer_id: one unique id for each trainer in a job. 
- :type trainer_id: int - :param program: program to transpile, default is default_main_program - :type program: Program - :param pservers: parameter server endpoints like "m1:6174,m2:6174" - :type pservers: string - :param trainers: total number of workers/trainers in the job - :type trainers: int - :param split_method: A function to determin how to split variables - to different servers equally. - :type split_method: function - :type sync_mode: boolean default True - :param sync_mode: if sync_mode is set True, it means that dist transpiler - will transpile the program into sync_mode pserver and trainer program. - """ - assert (split_method.__bases__[0] == PSDispatcher) - if program is None: - program = default_main_program() - self.origin_program = program - self.trainer_num = trainers - self.sync_mode = sync_mode - # TODO(typhoonzero): currently trainer_id is fetched from cluster system - # like Kubernetes, we should port this to use etcd later when developing - # fluid distributed training with fault-tolerance. - self.trainer_id = trainer_id - pserver_endpoints = pservers.split(",") - self.pserver_endpoints = pserver_endpoints - self.optimize_ops, params_grads = self._get_optimize_pass() - ps_dispatcher = split_method(pserver_endpoints) - + def _has_distributed_lookup_table(self): # process lookup_table_op # 1. check all lookup_table_op is distributed # 2. check all lookup_table_op share the same table. distributed_lookup_table_ops = [] # support only one distributed_lookup_table now self.table_name = None - for op in program.global_block().ops: + for op in self.origin_program.global_block().ops: if op.type == LOOKUP_TABLE_TYPE: if op.attrs['is_distributed'] is True: if self.table_name is None: @@ -246,20 +137,13 @@ class DistributeTranspiler: if self.table_name is not None: assert op.input("W")[0] != self.table_name - self.has_distributed_lookup_table = len( - distributed_lookup_table_ops) > 0 - - # step1: For large parameters and gradients, split them into smaller - # blocks. - param_list = [] - grad_list = [] - for p, g in params_grads: - # skip parameter marked not trainable - if type(p) == Parameter and p.trainable == False: - continue - param_list.append(p) - grad_list.append(g) + return len(distributed_lookup_table_ops) > 0 + def _update_dist_lookup_table_vars(self, param_list, grad_list, + params_grads): + # TODO(wuyi): put find a way to put dist lookup table stuff all together. + # update self.table_param_grad and self.trainer_side_table_grad_list + program = self.origin_program if self.has_distributed_lookup_table: param_list = [ param for param in param_list if param.name != self.table_name @@ -277,7 +161,7 @@ class DistributeTranspiler: self.trainer_side_table_grad_list = [ program.global_block().create_var( name="%s.trainer_%d.pserver_%d" % - (table_grad_var.name, trainer_id, index), + (table_grad_var.name, self.trainer_id, index), type=table_grad_var.type, shape=table_grad_var.shape, dtype=table_grad_var.dtype) @@ -293,23 +177,41 @@ class DistributeTranspiler: for index in range(len(self.pserver_endpoints)) ] - grad_blocks = split_dense_variable(grad_list, len(pserver_endpoints)) - param_blocks = split_dense_variable(param_list, len(pserver_endpoints)) + def _init_splited_vars(self, split_method): + # update these mappings for further transpile: + # 1. param_var_mapping: param var name -> [splited params vars] + # 2. grad_var_mapping: grad var name -> [splited grads vars] + # 3. grad_param_mapping: grad.blockx -> param.blockx + # 4. 
param_grad_ep_mapping: ep -> {"params": [], "grads": []} + + param_list = [] + grad_list = [] + for p, g in self.params_grads: + # skip parameter marked not trainable + if type(p) == Parameter and p.trainable == False: + continue + param_list.append(p) + grad_list.append(g) + + self._update_dist_lookup_table_vars(param_list, grad_list, + self.params_grads) + + grad_blocks = split_variable(grad_list, len(self.pserver_endpoints)) + param_blocks = split_variable(param_list, len(self.pserver_endpoints)) assert (len(grad_blocks) == len(param_blocks)) - # step2: Create new vars for the parameters and gradients blocks and - # add ops to do the split. - param_var_mapping = self._create_vars_from_blocklist(program, - param_blocks) - grad_var_mapping = self._create_vars_from_blocklist( - program, grad_blocks, add_trainer_suffix=self.trainer_num > 1) - grad_param_mapping = dict() + # origin_varname -> [splited_var] + self.param_var_mapping = self._create_vars_from_blocklist( + self.origin_program, param_blocks) + self.grad_var_mapping = self._create_vars_from_blocklist( + self.origin_program, + grad_blocks, + add_trainer_suffix=self.trainer_num > 1) + self.grad_param_mapping = dict() for g, p in zip(grad_blocks, param_blocks): g_name, g_bid, _ = g.split(":") p_name, p_bid, _ = p.split(":") - grad_param_mapping[grad_var_mapping[g_name][int(g_bid)]] = \ - param_var_mapping[p_name][int(p_bid)] - - # step 3: transpile trainer side program, insert recv op and send op. + self.grad_param_mapping[self.grad_var_mapping[g_name][int(g_bid)]] = \ + self.param_var_mapping[p_name][int(p_bid)] # create mapping of endpoint -> split var to create pserver side program self.param_grad_ep_mapping = dict() @@ -322,10 +224,50 @@ class DistributeTranspiler: }) for ep in self.pserver_endpoints ] + def transpile(self, + trainer_id, + program=None, + pservers="127.0.0.1:6174", + trainers=1, + split_method=RoundRobin, + sync_mode=True): + """ + :param trainer_id: one unique id for each trainer in a job. + :type trainer_id: int + :param program: program to transpile, default is default_main_program + :type program: Program + :param pservers: parameter server endpoints like "m1:6174,m2:6174" + :type pservers: string + :param trainers: total number of workers/trainers in the job + :type trainers: int + :param split_method: A function to determin how to split variables + to different servers equally. + :type split_method: function + :type sync_mode: boolean default True + :param sync_mode: if sync_mode is set True, it means that dist transpiler + will transpile the program into sync_mode pserver and trainer program. + """ + assert (split_method.__bases__[0] == PSDispatcher) + if program is None: + program = default_main_program() + self.origin_program = program + self.trainer_num = trainers + self.sync_mode = sync_mode + self.trainer_id = trainer_id + pserver_endpoints = pservers.split(",") + self.pserver_endpoints = pserver_endpoints + self.optimize_ops, self.params_grads = self._get_optimize_pass() + + ps_dispatcher = split_method(self.pserver_endpoints) + self.has_distributed_lookup_table = self._has_distributed_lookup_table() + + # split and create vars, then put splited vars in dicts for later use. 
+ self._init_splited_vars(split_method) + # step 3.1: insert send op to send gradient vars to parameter servers ps_dispatcher.reset() send_vars = [] - for orig_varname, splited_vars in grad_var_mapping.items(): + for orig_varname, splited_vars in self.grad_var_mapping.items(): eplist = ps_dispatcher.dispatch(splited_vars) if len(splited_vars) == 1: orig_varname = splited_vars[0].name @@ -367,7 +309,7 @@ class DistributeTranspiler: # step 3.2: insert recv op to receive parameters from parameter server recv_vars = [] for _, var in enumerate(send_vars): - recv_vars.append(grad_param_mapping[var]) + recv_vars.append(self.grad_param_mapping[var]) ps_dispatcher.reset() eplist = ps_dispatcher.dispatch(recv_vars) @@ -375,7 +317,7 @@ class DistributeTranspiler: self.param_grad_ep_mapping[ep]["params"].append(recv_vars[i]) self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i]) # step4: Concat the parameters splits together after recv. - for varname, splited_var in param_var_mapping.iteritems(): + for varname, splited_var in self.param_var_mapping.iteritems(): eps = [] for var in splited_var: index = [v.name for v in recv_vars].index(var.name) @@ -399,7 +341,7 @@ class DistributeTranspiler: RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE }) - for varname, splited_var in param_var_mapping.iteritems(): + for varname, splited_var in self.param_var_mapping.iteritems(): if len(splited_var) <= 1: continue orig_param = program.global_block().vars[varname] @@ -440,7 +382,6 @@ class DistributeTranspiler: # we don't need to create them when grad arrives. # change client side var name to origin name by # removing ".trainer_%d" suffix - suff_idx = v.name.find(".trainer_") if suff_idx >= 0: orig_var_name = v.name[:suff_idx] @@ -477,24 +418,14 @@ class DistributeTranspiler: # located on current pserver opt_op_on_pserver = [] for _, op in enumerate(self.optimize_ops): - if self._is_opt_op(op) and self._is_opt_op_on_pserver(endpoint, op): + if self._is_optimizer_op(op) and self._is_opt_op_on_pserver( + endpoint, op): opt_op_on_pserver.append(op) # step 3.3 # Iterate through the ops, and if an op and the optimize ops # which located on current pserver are in one set, then # append it into the sub program. - # We try to put optimization program run parallelly, assume - # optimization program always looks like: - # - # prevop -> prevop -> opt op -> following op -> following op; -> - # prevop -> prevop -> opt op -> following op -> following op; -> - # global op -> global op - # - # we put operators that can run parallelly to many program blocks. - # in above example, we seperate ops by the ";". Global ops must run - # after all the optimize ops finished. - global_ops = [] # HACK: optimization global ops only used to scale beta1 and beta2 # replace it with dependency engine. 
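For orientation, a minimal sketch of how the transpiler described above is typically driven end to end. The environment variable names, the fluid.DistributeTranspiler entry point and the get_trainer_program / get_pserver_program / get_startup_program calls reflect conventional Fluid usage of this era and are assumptions here, not taken from this patch:

    import os
    import paddle.fluid as fluid

    # Role and endpoints normally come from the cluster launcher (assumed env names).
    role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
    trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
    pservers = os.getenv("PADDLE_PSERVER_EPS", "127.0.0.1:6174")
    trainers = int(os.getenv("PADDLE_TRAINERS", "1"))

    t = fluid.DistributeTranspiler()
    # Rewrites the default main program in place: splits params/grads into
    # blocks, inserts send/recv ops on the trainer side and records which
    # blocks each parameter server owns.
    t.transpile(trainer_id, pservers=pservers, trainers=trainers)

    exe = fluid.Executor(fluid.CPUPlace())
    if role == "PSERVER":
        current_endpoint = os.getenv("PADDLE_CURRENT_IP", "127.0.0.1") + ":6174"
        pserver_prog = t.get_pserver_program(current_endpoint)
        pserver_startup = t.get_startup_program(current_endpoint, pserver_prog)
        exe.run(pserver_startup)
        exe.run(pserver_prog)   # blocks in listen_and_serv, serving this endpoint
    else:
        trainer_prog = t.get_trainer_program()
        exe.run(fluid.default_startup_program())
        # ... then run trainer_prog batch by batch as usual ...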
@@ -502,12 +433,18 @@ class DistributeTranspiler: if self._is_adam_connected_op(op): global_ops.append(op) - def __append_optimize_op__(op, block, grad_to_block_id): - if self._is_opt_op(op): + def __append_optimize_op__(op, block, grad_to_block_id, merged_var): + if self._is_optimizer_op(op): self._append_pserver_ops(block, op, endpoint, grad_to_block_id, - self.origin_program) + self.origin_program, merged_var) else: - self._append_pserver_non_opt_ops(block, op) + self._append_pserver_non_opt_ops(block, op, endpoint) + + def __op_have_grad_input__(op): + for varname in op.input_arg_names: + if varname.find("@GRAD") >= 0: + return varname + return "" # append lr decay ops to the child block if exists lr_ops = self._get_lr_ops() @@ -515,17 +452,26 @@ class DistributeTranspiler: lr_decay_block = pserver_program.create_block( pserver_program.num_blocks - 1) for _, op in enumerate(lr_ops): - self._append_pserver_non_opt_ops(lr_decay_block, op) + self._append_pserver_non_opt_ops(lr_decay_block, op, endpoint) # append op to the current block grad_to_block_id = [] pre_block_idx = pserver_program.num_blocks - 1 for idx, opt_op in enumerate(opt_op_on_pserver): per_opt_block = pserver_program.create_block(pre_block_idx) + # append grad merging ops before clip and weight decay + for _, op in enumerate(self.optimize_ops): + # find the origin @GRAD var before clipping + grad_varname_for_block = __op_have_grad_input__(op) + if ufind.is_connected(op, opt_op) and grad_varname_for_block: + merged_var = self._append_pserver_grad_merge_ops( + per_opt_block, grad_varname_for_block, endpoint, + grad_to_block_id, self.origin_program) for _, op in enumerate(self.optimize_ops): # optimizer is connected to itself if ufind.is_connected(op, opt_op) and op not in global_ops: - __append_optimize_op__(op, per_opt_block, grad_to_block_id) + __append_optimize_op__(op, per_opt_block, grad_to_block_id, + merged_var) # append global ops if global_ops: @@ -533,15 +479,7 @@ class DistributeTranspiler: pserver_program.num_blocks - 1) for glb_op in global_ops: __append_optimize_op__(glb_op, opt_state_block, - grad_to_block_id) - - # NOT USED: single block version: - # - # for _, op in enumerate(self.optimize_ops): - # for _, opt_op in enumerate(opt_op_on_pserver): - # if ufind.is_connected(op, opt_op): - # __append_optimize_op__(glb_op, optimize_block) - # break + grad_to_block_id, None) # process distributed lookup_table prefetch_block = None @@ -631,6 +569,8 @@ class DistributeTranspiler: attrs=op.attrs) return s_prog + # ====================== private transpiler functions ===================== + # transpiler function for dis lookup_table def _replace_lookup_table_op_with_prefetch(self, program, pserver_endpoints): @@ -836,7 +776,6 @@ class DistributeTranspiler: return table_opt_block - # ====================== private transpiler functions ===================== def _create_vars_from_blocklist(self, program, block_list, @@ -979,17 +918,74 @@ class DistributeTranspiler: pass return orig_shape - def _orig_varname(self, varname): - suff_idx = varname.find(".trainer_") + def _get_varname_parts(self, varname): + # returns origin, blockid, trainerid orig_var_name = "" - if suff_idx >= 0: - orig_var_name = varname[:suff_idx] + trainer_part = "" + block_part = "" + trainer_idx = varname.find(".trainer_") + if trainer_idx >= 0: + trainer_part = varname[trainer_idx + 1:] + else: + trainer_idx = len(varname) + block_index = varname.find(".block") + if block_index >= 0: + block_part = varname[block_index + 1:trainer_idx] else: - 
orig_var_name = varname - return orig_var_name + block_index = len(varname) + orig_var_name = varname[0:min(block_index, trainer_idx)] + return orig_var_name, block_part, trainer_part + + def _orig_varname(self, varname): + orig, _, _ = self._get_varname_parts(varname) + return orig + + def _append_pserver_grad_merge_ops(self, optimize_block, + grad_varname_for_block, endpoint, + grad_to_block_id, origin_program): + program = optimize_block.program + pserver_block = program.global_block() + grad_block = None + for g in self.param_grad_ep_mapping[endpoint]["grads"]: + if self._orig_varname(g.name) == \ + self._orig_varname(grad_varname_for_block): + grad_block = g + break + if not grad_block: + # do not append this op if current endpoint + # is not dealing with this grad block + return + orig_varname, block_name, trainer_name = self._get_varname_parts( + grad_block.name) + if block_name: + merged_var_name = '.'.join([orig_varname, block_name]) + else: + merged_var_name = orig_varname + merged_var = \ + pserver_block.vars[merged_var_name] + grad_to_block_id.append(merged_var.name + ":" + str(optimize_block.idx)) + if self.sync_mode and self.trainer_num > 1: + vars2merge = [] + for i in xrange(self.trainer_num): + per_trainer_name = "%s.trainer_%d" % \ + (merged_var_name, i) + vars2merge.append(pserver_block.vars[per_trainer_name]) + + optimize_block.append_op( + type="sum", + inputs={"X": vars2merge}, + outputs={"Out": merged_var}) + # TODO(panyx0718): What if it's SELECTED_ROWS. + if not merged_var.type == core.VarDesc.VarType.SELECTED_ROWS: + optimize_block.append_op( + type="scale", + inputs={"X": merged_var}, + outputs={"Out": merged_var}, + attrs={"scale": 1.0 / float(self.trainer_num)}) + return merged_var def _append_pserver_ops(self, optimize_block, opt_op, endpoint, - grad_to_block_id, origin_program): + grad_to_block_id, origin_program, merged_var): program = optimize_block.program pserver_block = program.global_block() new_inputs = dict() @@ -997,40 +993,6 @@ class DistributeTranspiler: # moment can use the updated shape for key in opt_op.input_names: if key == "Grad": - grad_block = None - for g in self.param_grad_ep_mapping[endpoint]["grads"]: - if same_or_split_var( - self._orig_varname(g.name), - self._orig_varname(opt_op.input(key)[0])): - grad_block = g - break - if not grad_block: - # do not append this op if current endpoint - # is not dealing with this grad block - return - merged_var = \ - pserver_block.vars[self._orig_varname(grad_block.name)] - grad_to_block_id.append(merged_var.name + ":" + str( - optimize_block.idx)) - if self.sync_mode and self.trainer_num > 1: - vars2merge = [] - for i in xrange(self.trainer_num): - per_trainer_name = "%s.trainer_%d" % \ - (self._orig_varname(grad_block.name), i) - vars2merge.append(pserver_block.vars[per_trainer_name]) - - optimize_block.append_op( - type="sum", - inputs={"X": vars2merge}, - outputs={"Out": merged_var}) - # TODO(panyx0718): What if it's SELECTED_ROWS. 
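To make the naming convention used by the merge logic above concrete, here is how _get_varname_parts is expected to decompose a few sample names; the variable names themselves are hypothetical:

    # "<origin>[.block<k>][.trainer_<i>]" -> (origin, block_part, trainer_part)
    #   "fc_0.w_0@GRAD.block0.trainer_1"  -> ("fc_0.w_0@GRAD", "block0", "trainer_1")
    #   "fc_0.w_0@GRAD.trainer_0"         -> ("fc_0.w_0@GRAD", "",       "trainer_0")
    #   "fc_0.b_0"                        -> ("fc_0.b_0",      "",       "")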
- if not merged_var.type == core.VarDesc.VarType.SELECTED_ROWS: - optimize_block.append_op( - type="scale", - inputs={"X": merged_var}, - outputs={"Out": merged_var}, - attrs={"scale": 1.0 / float(self.trainer_num)}) - new_inputs[key] = merged_var elif key == "Param": # param is already created on global program @@ -1089,17 +1051,31 @@ class DistributeTranspiler: outputs=outputs, attrs=opt_op.attrs) - def _append_pserver_non_opt_ops(self, optimize_block, opt_op): + def _is_splited_grad_var(self, var, var_dict): + grad_block = None + for _, g in var_dict.iteritems(): + if self._orig_varname(g.name) == self._orig_varname(var.name): + if g.name.find(".trainer_") == -1: + grad_block = g + break + return grad_block + + def _append_pserver_non_opt_ops(self, optimize_block, opt_op, endpoint): program = optimize_block.program # Append the ops for parameters that do not need to be optimized/updated inputs = self._get_input_map_from_op( self.origin_program.global_block().vars, opt_op) - for varlist in inputs.itervalues(): + for key, varlist in inputs.iteritems(): if not isinstance(varlist, list): varlist = [varlist] - for var in varlist: - if not program.global_block().vars.has_key(var.name): + # for ops like clipping and weight decay, get the splited var + # for inputs/outputs + grad_block = self._is_splited_grad_var( + var, program.global_block().vars) + if grad_block: + inputs[key] = grad_block + elif not program.global_block().vars.has_key(var.name): program.global_block().create_var( name=var.name, persistable=var.persistable, @@ -1108,13 +1084,16 @@ class DistributeTranspiler: outputs = self._get_output_map_from_op( self.origin_program.global_block().vars, opt_op) - - for varlist in outputs.itervalues(): + for key, varlist in outputs.iteritems(): if not isinstance(varlist, list): varlist = [varlist] - for var in varlist: - program.global_block().clone_variable(var) + grad_block = self._is_splited_grad_var( + var, program.global_block().vars) + if grad_block: + outputs[key] = grad_block + elif not program.global_block().vars.has_key(var.name): + program.global_block().clone_variable(var) optimize_block.append_op( type=opt_op.type, @@ -1160,9 +1139,17 @@ class DistributeTranspiler: ufind.union(op1, op2) return ufind - def _is_opt_op(self, op): - # NOTE: It's a HACK implement. - # optimize op: SGDOptimize, MomentumOptimizer, AdamOptimizer and etc... + def _is_opt_role_op(self, op): + # NOTE: depend on oprole to find out whether this op is for + # optimize + op_maker = core.op_proto_and_checker_maker + optimize_role = core.op_proto_and_checker_maker.OpRole.Optimize + if op_maker.kOpRoleAttrName() in op.attrs and \ + int(op.attrs[op_maker.kOpRoleAttrName()]) == int(optimize_role): + return True + return False + + def _is_optimizer_op(self, op): if "Param" in op.input_names and \ "LearningRate" in op.input_names: return True @@ -1212,7 +1199,7 @@ class DistributeTranspiler: # find learning rate variables by optimize op lr_vars = set() for op in self.optimize_ops: - if self._is_opt_op(op): + if self._is_optimizer_op(op): lr_vars.add(op.input("LearningRate")[0]) find_ops = [] @@ -1229,7 +1216,7 @@ class DistributeTranspiler: # NOTE: we need to skip all optimize ops, since it is connected # with forward/backward ops and lr ops, we only need the lr ops. 
if op1 != op2 and self._is_op_connected(op1, op2) and \ - not self._is_opt_op(op1) and not self._is_opt_op(op2): + not self._is_optimizer_op(op1) and not self._is_optimizer_op(op2): ufind.union(op1, op2) # find all ops which is related with lr var for op1 in block.ops: @@ -1250,13 +1237,21 @@ class DistributeTranspiler: block = self.origin_program.global_block() opt_ops = [] params_grads = [] + origin_var_dict = self.origin_program.global_block().vars for op in block.ops: - if self._is_opt_op(op): + if self._is_opt_role_op(op): opt_ops.append(op) - params_grads.append((self.origin_program.global_block().var( - op.input("Param")[0]), - self.origin_program.global_block().var( - op.input("Grad")[0]))) + # HACK(wuyi): if we find grad vars from input of optimize + # ops, we may get the output of clip op. Use syntax "@GRAD" + # and op_role_var to get the pair. + for input_name in op.input_arg_names: + if input_name.find("@GRAD") != -1 and \ + op.attrs[RPC_OP_ROLE_ATTR_NAME]: + param_name = op.attrs[OP_ROLE_VAR_ATTR_NAME][0] + params_grads.append([ + origin_var_dict[param_name], + origin_var_dict[input_name] + ]) elif self._is_adam_connected_op(op): opt_ops.append(op) else: diff --git a/python/setup.py.in b/python/setup.py.in index c42601d335f01491156dc3591341c1a3213aecfe..8257f1d5e212a84188a4c51bc2d0f4d4c7af91fb 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -69,7 +69,8 @@ packages=['paddle', 'paddle.fluid.proto', 'paddle.fluid.proto.profiler', 'paddle.fluid.layers', - 'paddle.fluid.transpiler'] + 'paddle.fluid.transpiler', + 'paddle.fluid.transpiler.details'] if '${WITH_FLUID_ONLY}'== 'OFF': packages+=['paddle.proto',