diff --git a/.copyright.hook b/.copyright.hook
index 09afff2072df3384a429d01d06188218ae6e85d1..86b16ebdc46047c7cb3d7731a71cbf9647a1f2fe 100644
--- a/.copyright.hook
+++ b/.copyright.hook
@@ -9,7 +9,7 @@ import subprocess
 import platform

 COPYRIGHT = '''
- Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/contrib/inference/README.md b/contrib/inference/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..20969fac6c8f894ffb4a02b48f795e2a0dcbd096
--- /dev/null
+++ b/contrib/inference/README.md
@@ -0,0 +1,27 @@
+# Embed Paddle Inference in Your Application
+
+Paddle inference offers APIs in the `C` and `C++` languages.
+
+One can easily deploy a model trained by Paddle by following the steps below:
+
+1. Optimize the native model;
+2. Write some code for deployment.
+
+Let's explain the steps in detail.
+
+## Optimize the native Fluid Model
+
+The native model obtained from the training phase needs to be optimized for inference:
+
+- Clean out noise, such as cost operators that are not needed for inference;
+- Prune unnecessary computation branches that have nothing to do with the output;
+- Remove extraneous variables;
+- Reuse memory in the native Fluid executor;
+- Translate the model storage format to a third-party engine's, so that the inference API can utilize the engine for acceleration.
+
+We have an official tool to do the optimization; call `paddle_inference_optimize --help` for more information.
+
+## Write some code
+
+Read `paddle_inference_api.h` for more information.
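+
+As a minimal, untested sketch (the model directory, variable names, and
+shapes below are made up for illustration; see the `Predictor` declaration
+in `paddle_inference_api.h` for the actual API), a deployment program could
+look like:
+
+```c++
+#include <vector>
+
+#include "paddle_inference_api.h"
+
+int main() {
+  paddle::Predictor::Attr attr;
+  attr.model_dir = "./mobilenet";  // hypothetical optimized model directory
+
+  paddle::Predictor predictor;
+  if (!predictor.Init(attr)) return 1;
+
+  // Feed one zero-filled record named "image"; fetch the output "prob".
+  std::vector<std::vector<float>> output_data;
+  if (!predictor.Run({"image"}, {"prob"},
+                     /*input_shapes=*/{{1, 3, 224, 224}},
+                     /*output_shapes=*/{{1, 1000}},
+                     /*input_data=*/{std::vector<float>(3 * 224 * 224, 0.f)},
+                     &output_data)) {
+    return 1;
+  }
+  return 0;
+}
+```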
diff --git a/contrib/inference/paddle_inference_api.h b/contrib/inference/paddle_inference_api.h
new file mode 100644
index 0000000000000000000000000000000000000000..dbaa7c95b97e954537707566e5b7458e6afd14c8
--- /dev/null
+++ b/contrib/inference/paddle_inference_api.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#pragma once
+
+#include <string>
+#include <vector>
+
+namespace paddle {
+
+class Predictor {
+public:
+  struct Attr;
+  Predictor() = default;
+
+  // Build the network before inference.
+  bool Init(const Attr& attr);
+
+  // Predict a record.
+  // Arguments:
+  //   inputs: the names of the input variables.
+  //   outputs: the names of the output variables.
+  //   input_shapes: the shapes of the input variables.
+  //   output_shapes: the shapes of the output variables.
+  //   input_data: the data of the input variables.
+  //   output_data: the data of the output variables.
+  bool Run(const std::vector<std::string>& inputs,
+           const std::vector<std::string>& outputs,
+           const std::vector<std::vector<int>>& input_shapes,
+           const std::vector<std::vector<int>>& output_shapes,
+           const std::vector<std::vector<float>>& input_data,
+           std::vector<std::vector<float>>* output_data);
+
+  // Clone a predictor that shares the model weights.
+  Predictor* Clone();
+
+  // Destroy the Predictor.
+  ~Predictor();
+
+  struct Attr {
+    enum class EngineKind;
+
+    std::string model_dir;      // path to the model directory.
+    bool enable_engine{false};  // Enable to execute (part of) the model on
+                                // third-party engines.
+    EngineKind engine_kind{Attr::EngineKind::kNone};
+
+    enum class EngineKind {
+      kNone = -1,          // Use the native Fluid facility.
+      kAnakin,             // Use Anakin for inference.
+      kTensorRT,           // Use TensorRT for inference.
+      kAutoMixedAnakin,    // Automatically mix Fluid with Anakin.
+      kAutoMixedTensorRT,  // Automatically mix Fluid with TensorRT.
+    };
+  };
+};
+
+}  // namespace paddle
diff --git a/doc/fluid/design/motivation/api.md b/doc/fluid/design/motivation/api.md
index e6a4638d9100d9b07c3ee6b92b530a17eae1c162..bc222564e3ec28e306ca0572b6a23104f6e9cbc5 100644
--- a/doc/fluid/design/motivation/api.md
+++ b/doc/fluid/design/motivation/api.md
@@ -77,8 +77,7 @@ print "The sematic-vector of testA: ", paddle.infer(fA, parameters, testA)

 ### Example 2. Sharing Parameters between "Models"

-We use [GAN](https://github.com/PaddlePaddle/book/tree/develop/gan) in
-this example. In the following example program, `d0` and `d1`
+We use GAN in this example. In the following example program, `d0` and `d1`
 correspond to the two networks in the following figure:
diff --git a/doc/fluid/design/multi_devices/operator_kernel_type.md b/doc/fluid/design/multi_devices/operator_kernel_type.md
index 8c1bc8f76a337006497e5ab5e5a710f9f49261b8..5e391bd62b4f4e123a9a6f35b7adf5726f205635 100644
--- a/doc/fluid/design/multi_devices/operator_kernel_type.md
+++ b/doc/fluid/design/multi_devices/operator_kernel_type.md
@@ -75,7 +75,7 @@ Different layout leads to different implementation of the operator kernel. There

 - The inference of Layout is at run-time, not at compile-time.

-- Every operator has to implement different kernels for different layouts. Let's take MKLDNN as an example. If we want to implement an MKLDNN convolution operator, we have to implement all the kernels for different layouts, which are listed [here](http://01org.github.io/mkl-dnn/structmkldnn_1_1memory.html). And we will have a special macro to register kernels for MKLDNN operators.
+- Every operator has to implement different kernels for different layouts. Let's take MKLDNN as an example. If we want to implement an MKLDNN convolution operator, we have to implement all the kernels for different layouts, which are listed [here](http://intel.github.io/mkl-dnn/structmkldnn_1_1memory.html). And we will have a special macro to register kernels for MKLDNN operators.

 `Layout` is also defined as a enum variable:
diff --git a/doc/fluid/howto/cluster/nccl2_rdma_training.md b/doc/fluid/howto/cluster/nccl2_rdma_training.md
new file mode 100644
index 0000000000000000000000000000000000000000..cecd5c3a7a7339e3be6772543a534728ec132105
--- /dev/null
+++ b/doc/fluid/howto/cluster/nccl2_rdma_training.md
@@ -0,0 +1,110 @@
+# Distributed Training with NCCL2 and RDMA
+
+When doing distributed multi-GPU training, network bandwidth often becomes the
+bottleneck. We introduce a way to use NCCL2 to run such training jobs and
+achieve the best performance.
+
+## Prepare Hardware with RDMA and Multiple GPUs
+
+I'm using two Linux servers, each installed with 8 GPUs and
+one 100Gb RDMA card.
+The base environment is:
+
+* OS: CentOS 7.4
+* RDMA device: "Mellanox Technologies MT27700 Family [ConnectX-4]"
+* Kernel version: `4.4.88-1.el7.elrepo.x86_64`
+* Docker version: `1.12.6`
+* Docker storage driver: `overlay2`
+* IP addresses: 192.168.16.30, 192.168.16.34
+
+In general, the steps include:
+
+1. Install GPU drivers
+1. Install RDMA drivers
+1. Install "InfiniBand Support"
+1. Use docker to run tests and make sure GPUs and RDMA can work inside
+   the container.
+
+I'll omit the section "Install GPU drivers" because it is easy to find
+elsewhere.
+
+### Install RDMA drivers
+
+In my case, I've got two machines with the device
+"Mellanox Technologies MT27700 Family [ConnectX-4]" installed. The OS was
+"CentOS 7.4" and I updated the kernel to version 4.4 so that docker can
+work with the latest overlay2 filesystem.
+
+***NOTE: before you start, make sure you have a way to get a console
+of the server other than ssh, because we may need to re-configure the
+network device.***
+
+1. Go to http://www.mellanox.com/page/products_dyn?product_family=26,
+   download the `MLNX_OFED` software at the bottom of the page, and upload it
+   onto the server.
+1. Run `./mlnxofedinstall --add-kernel-support` in the software package.
+1. Run `/etc/init.d/openibd restart` to make everything work. Note that
+   this operation may cause the network to go down if you are using this
+   RDMA device as the default network device and ssh to log in to the server.
+1. Re-configure the network interface, for example:
+   `ifconfig eth2 192.168.16.30/20 up`, then add routes if needed:
+   `ip route add default via 192.168.16.1 dev eth2`.
+1. Do the same thing on the other node.
+1. Use `ping` to test that the two nodes have a working ICMP connection.
+1. Use either `udaddy` or `ib_write_bw` to test that the network connection is
+   ready and has the desired bandwidth.
+
+### Prepare Docker Image to Run RDMA Programs
+
+1. Build a docker image using a cuda base image such as
+   `nvidia/cuda:8.0-cudnn5-devel-ubuntu16.04` and install the paddlepaddle whl
+   package in it.
+1. Start a docker container and mount the GPU driver libs into it (you can
+   skip this step if you are using nvidia-docker).
+1. Mount RDMA drivers and libs into the docker container (see the section
+   below), along with `udaddy` and `ib_write_bw` if needed.
+1. Mount GPU devices and RDMA devices into the container using `--device`,
+   or just use privileged mode `--privileged`.
+1. Start the container using host network mode: `--net=host`. A sketch of a
+   combined command is shown after this list.
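+
+As a rough sketch (the image name `paddle-rdma` is a placeholder, and the
+exact library paths depend on your installation), steps 2-5 could be combined
+into a command like:
+
+```bash
+docker run -it --privileged --net=host \
+    -v /usr/lib64/mlnx_ofed/valgrind:/usr/lib64/mlnx_ofed/valgrind \
+    paddle-rdma /bin/bash
+```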
+
+### RDMA Library Files Needed
+
+Usually, `MLNX_OFED` installs the latest supported libs under
+`/usr/lib64/mlnx_ofed/valgrind`. Other libs needed to run RDMA programs
+are listed below. These libs must be mounted into the docker container.
+
+* Libs under `/usr/lib64/mlnx_ofed/valgrind`
+  * libibcm.so
+  * libibverbs.so
+  * libmlx4.so
+  * libmlx5.so
+  * libmlx5-rdmav2.so
+  * librdmacm.so
+* Other libs:
+  * libnl-3.so.200
+  * libnl-route-3.so.200
+  * libnuma.so.1
+
+## Start to Run the Training Job
+
+Set NCCL environment variables to turn NCCL switches on and off, as shown
+below:
+
+| Env Name | Description |
+| --- | --- |
+| NCCL_SOCKET_IFNAME | The RDMA device, e.g. eth2 |
+| NCCL_P2P_DISABLE | Set to 1 to disable P2P transfer between GPUs |
+| NCCL_IB_DISABLE | Set to 1 to disable using RDMA |
+| NCCL_IB_CUDA_SUPPORT | Set to 1 to enable GPU Direct if supported |
+| NCCL_DEBUG | Set debug level: VERSION, WARN, INFO |
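+
+For example, before launching the trainer you could export (the values here
+are illustrative; pick the switches you need from the table above):
+
+```bash
+export NCCL_SOCKET_IFNAME=eth2   # the RDMA interface configured earlier
+export NCCL_IB_CUDA_SUPPORT=1    # enable GPU Direct if supported
+export NCCL_DEBUG=INFO           # verify which transport NCCL selects
+```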
+
+My two servers are `192.168.16.30` and `192.168.16.34`. On node 1, run:
+
+```bash
+PADDLE_TRAINER_ID=0 PADDLE_PORT=48372 PADDLE_WORKERS=192.168.16.30,192.168.16.34 POD_IP=192.168.16.30 stdbuf -oL python vgg16.py
+```
+
+On node 2, run:
+
+```bash
+PADDLE_TRAINER_ID=1 PADDLE_PORT=48372 PADDLE_WORKERS=192.168.16.30,192.168.16.34 POD_IP=192.168.16.34 stdbuf -oL python vgg16.py
+```
diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt
index 340b891e41671df7e61a4a66ec538d4603bb9842..ab71e0e63ce18e4f221a046eeb2c39499c1c3816 100644
--- a/paddle/fluid/framework/CMakeLists.txt
+++ b/paddle/fluid/framework/CMakeLists.txt
@@ -57,7 +57,7 @@ cc_library(data_transform SRCS data_transform.cc DEPS math_function tensor
 cc_library(attribute SRCS attribute.cc DEPS framework_proto boost)
 cc_test(program_desc_test SRCS program_desc_test.cc DEPS proto_desc device_context)
-cc_library(op_proto_maker SRCS op_proto_maker.cc DEPS framework_proto attribute)
+cc_library(op_proto_maker SRCS op_proto_maker.cc DEPS framework_proto attribute glog)
 cc_test(op_proto_maker_test SRCS op_proto_maker_test.cc DEPS op_proto_maker)
 cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto)
 cc_library(shape_inference SRCS shape_inference.cc DEPS ddim attribute device_context)
diff --git a/paddle/fluid/framework/block_desc.cc b/paddle/fluid/framework/block_desc.cc
index 1b6f656a006489485a55b5c13b5e2de93c3da0ed..fd409ed4c0f7a504686765909e9c71692aab8824 100644
--- a/paddle/fluid/framework/block_desc.cc
+++ b/paddle/fluid/framework/block_desc.cc
@@ -134,6 +134,11 @@ OpDesc *BlockDesc::PrependOp() {
   return ops_.front().get();
 }

+void BlockDesc::PrependAllocatedOp(std::unique_ptr<OpDesc> &&op_desc) {
+  need_update_ = true;
+  ops_.emplace_front(std::move(op_desc));
+}
+
 OpDesc *BlockDesc::InsertOp(size_t index) {
   need_update_ = true;
   auto it = ops_.begin() + index;
diff --git a/paddle/fluid/framework/block_desc.h b/paddle/fluid/framework/block_desc.h
index eef19c4f09c60b9df18f154c85c421f5bff9413f..600601669c5d56a3ffc2fb9c804ffad5fde58f0b 100644
--- a/paddle/fluid/framework/block_desc.h
+++ b/paddle/fluid/framework/block_desc.h
@@ -88,6 +88,8 @@ class BlockDesc {

   OpDesc *PrependOp();

+  void PrependAllocatedOp(std::unique_ptr<OpDesc> &&op_desc);
+
   OpDesc *InsertOp(size_t index);

   /*
diff --git a/paddle/fluid/framework/data_device_transform_test.cu b/paddle/fluid/framework/data_device_transform_test.cu
index df4caa45eba2470f7528d2fbd99cca39cae0b596..a91fe5c99d397ef1bf04f6d22e988b6d3f33e500 100644
--- a/paddle/fluid/framework/data_device_transform_test.cu
+++ b/paddle/fluid/framework/data_device_transform_test.cu
@@ -32,8 +32,7 @@ struct AddFunctor {

 class OpKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
  public:
-  OpKernelTestProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() {
     AddInput("input", "input1 of test op");
     AddOutput("output", "output of test op");
     AddAttr<bool>("use_gpu", "force to use gpu kernel").SetDefault(false);
diff --git a/paddle/fluid/framework/details/broadcast_op_handle.cc b/paddle/fluid/framework/details/broadcast_op_handle.cc
index 2afa47c81bead6fb104f49886713bf75dc1b4dc0..d5ca061944f33939cea59a5275e691b1966194fa 100644
---
a/paddle/fluid/framework/details/broadcast_op_handle.cc +++ b/paddle/fluid/framework/details/broadcast_op_handle.cc @@ -38,9 +38,7 @@ void BroadcastOpHandle::RunImpl() { out_var_handles.size(), places_.size(), "The number of output should equal to the number of places."); - // Wait input done, this Wait is asynchronous operation platform::Place - // &in_place; - WaitInputVarGenerated(*in_var_handle); + WaitInputVarGenerated(); std::vector var_scopes; for (auto *s : local_scopes_) { @@ -50,29 +48,9 @@ void BroadcastOpHandle::RunImpl() { auto *in_var = var_scopes.at(in_var_handle->scope_idx_)->FindVar(in_var_handle->name_); PADDLE_ENFORCE_NOT_NULL(in_var); - Tensor &in_tensor = VariableVisitor::GetMutableTensor(in_var); - // NOTE: The tensors' Place of input and output must be all on GPU or all on - // CPU. - for (auto *out_var_handle : out_var_handles) { - if (out_var_handle->IsTheSameVar(*in_var_handle)) { - continue; - } - auto t_out_p = out_var_handle->place_; - auto *out_var = var_scopes.at(out_var_handle->scope_idx_) - ->FindVar(out_var_handle->name_); - PADDLE_ENFORCE_NOT_NULL(out_var); - if (platform::is_gpu_place(in_tensor.place())) { - PADDLE_ENFORCE(platform::is_gpu_place(t_out_p), - "Places of input and output must be all on GPU."); - } else { - t_out_p = platform::CPUPlace(); - } - VariableVisitor::ShareDimsAndLoD(*in_var, out_var); - VariableVisitor::GetMutableTensor(out_var).mutable_data(t_out_p, - in_tensor.type()); - } + InitOutputValue(*in_var_handle, out_var_handles); if (platform::is_cpu_place(in_tensor.place())) { for (auto *out_var_handle : out_var_handles) { @@ -147,11 +125,37 @@ void BroadcastOpHandle::RunImpl() { } } -void BroadcastOpHandle::WaitInputVarGenerated(const VarHandle &in_var) { - if (in_var.generated_op_) { - for (auto &pair : dev_ctxes_) { - in_var.generated_op_->Wait(pair.second); +void BroadcastOpHandle::InitOutputValue( + const VarHandle &in_var_handle, + const std::vector &out_var_handles) const { + std::vector var_scopes; + for (auto *s : local_scopes_) { + var_scopes.emplace_back(s->FindVar(kLocalExecScopeName)->Get()); + } + auto *in_var = + var_scopes.at(in_var_handle.scope_idx_)->FindVar(in_var_handle.name_); + + Tensor &in_tensor = VariableVisitor::GetMutableTensor(in_var); + + // NOTE: The tensors' Place of input and output must be all on GPU or all on + // CPU. 
+ for (auto *out_var_handle : out_var_handles) { + if (out_var_handle->IsTheSameVar(in_var_handle)) { + continue; } + auto t_out_p = out_var_handle->place_; + auto *out_var = var_scopes.at(out_var_handle->scope_idx_) + ->FindVar(out_var_handle->name_); + PADDLE_ENFORCE_NOT_NULL(out_var); + if (is_gpu_place(in_tensor.place())) { + PADDLE_ENFORCE(platform::is_gpu_place(t_out_p), + "Places of input and output must be all on GPU."); + } else { + t_out_p = platform::CPUPlace(); + } + VariableVisitor::ShareDimsAndLoD(*in_var, out_var); + VariableVisitor::GetMutableTensor(out_var).mutable_data(t_out_p, + in_tensor.type()); } } diff --git a/paddle/fluid/framework/details/broadcast_op_handle.h b/paddle/fluid/framework/details/broadcast_op_handle.h index 984a95008c0393eff01c2d419cc98949aed14980..629aa00cb817c4b1446e7b750ca62a7c6b1db670 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle.h +++ b/paddle/fluid/framework/details/broadcast_op_handle.h @@ -57,7 +57,6 @@ struct BroadcastOpHandle : public OpHandleBase { protected: void RunImpl() override; - void WaitInputVarGenerated(const VarHandle &in_var); private: const std::vector &local_scopes_; @@ -65,6 +64,9 @@ struct BroadcastOpHandle : public OpHandleBase { #ifdef PADDLE_WITH_CUDA const platform::NCCLContextMap *nccl_ctxs_; #endif + + void InitOutputValue(const VarHandle &in_var_handle, + const std::vector &out_var_handles) const; }; } // namespace details } // namespace framework diff --git a/paddle/fluid/framework/details/computation_op_handle.cc b/paddle/fluid/framework/details/computation_op_handle.cc index 7ff0efe09387b7e5d7cfe0dfe5e129ca9914d90b..df05bb06333d6b964f2f5434c3d43214e5d2cb7a 100644 --- a/paddle/fluid/framework/details/computation_op_handle.cc +++ b/paddle/fluid/framework/details/computation_op_handle.cc @@ -26,20 +26,20 @@ ComputationOpHandle::ComputationOpHandle(const OpDesc &op_desc, Scope *scope, place_(place) {} void ComputationOpHandle::RunImpl() { - auto *cur_ctx = dev_ctxes_[place_]; - for (auto *in : inputs_) { - bool need_wait = in->generated_op_ && - in->generated_op_->DeviceContext(place_) != cur_ctx; - if (need_wait) { - in->generated_op_->Wait(cur_ctx); - } - } + WaitInputVarGenerated(place_); this->RunAndRecordEvent([this] { op_->Run(*scope_->FindVar(kLocalExecScopeName)->Get(), place_); }); } +bool ComputationOpHandle::NeedWait(VarHandleBase *in_var) { + bool need_wait = + in_var && in_var->generated_op_ && + in_var->generated_op_->DeviceContext(place_) != dev_ctxes_[place_]; + return need_wait; +} + std::string ComputationOpHandle::Name() const { return op_->Type(); } } // namespace details } // namespace framework diff --git a/paddle/fluid/framework/details/computation_op_handle.h b/paddle/fluid/framework/details/computation_op_handle.h index c363b973d9abbae6bea76c2458fbe82a37a342ca..36e6f1bf59a7646e1dff6c4844f2a36a5caf363a 100644 --- a/paddle/fluid/framework/details/computation_op_handle.h +++ b/paddle/fluid/framework/details/computation_op_handle.h @@ -36,6 +36,8 @@ struct ComputationOpHandle : public OpHandleBase { protected: void RunImpl() override; + virtual bool NeedWait(VarHandleBase *in_var); + private: std::unique_ptr op_; Scope *scope_; diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc index a3cae8c64cdff8594c8971b0458c443f54375f11..b1c9dd0d15223f7d1bf6ea44144589f1de927e3e 100644 --- a/paddle/fluid/framework/details/fetch_op_handle.cc +++ b/paddle/fluid/framework/details/fetch_op_handle.cc @@ -31,7 +31,7 @@ 
FetchOpHandle::~FetchOpHandle() { } } -void FetchOpHandle::Wait(platform::DeviceContext *waited_dev) { +void FetchOpHandle::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) { PADDLE_THROW("Nobody should wait FetchOp. Unexpceted Error"); } @@ -45,14 +45,8 @@ void FetchOpHandle::WaitAndMergeCPUTensors() const { } void FetchOpHandle::RunImpl() { - auto cpu_ctx = - platform::DeviceContextPool::Instance().Get(platform::CPUPlace()); - for (auto *input : inputs_) { - auto *var = static_cast(input); - if (var->generated_op_) { - var->generated_op_->Wait(cpu_ctx); - } - } + WaitInputVarGenerated(platform::CPUPlace()); + tensors_.resize(inputs_.size()); auto *var_handle = static_cast(inputs_[0]); auto &var_name = var_handle->name_; @@ -79,6 +73,15 @@ void FetchOpHandle::RunImpl() { this->WaitAndMergeCPUTensors(); } +void FetchOpHandle::WaitInputVarGenerated(const platform::Place &place) { + auto cpu_ctx = platform::DeviceContextPool::Instance().Get(place); + for (auto *input : inputs_) { + if (input->generated_op_) { + input->generated_op_->RecordWaitEventOnCtx(cpu_ctx); + } + } +} + std::string FetchOpHandle::Name() const { return "Fetch"; } } // namespace details diff --git a/paddle/fluid/framework/details/fetch_op_handle.h b/paddle/fluid/framework/details/fetch_op_handle.h index b49f3df338dc11310a4a0c27c8aaae3602373fcc..e696a7a9ce562e7f1b7fe6633623cb940810fbe1 100644 --- a/paddle/fluid/framework/details/fetch_op_handle.h +++ b/paddle/fluid/framework/details/fetch_op_handle.h @@ -33,7 +33,7 @@ struct FetchOpHandle : public OpHandleBase { ~FetchOpHandle(); - void Wait(platform::DeviceContext *waited_dev) override; + void RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) override; void WaitAndMergeCPUTensors() const; @@ -42,6 +42,8 @@ struct FetchOpHandle : public OpHandleBase { protected: void RunImpl() override; + virtual void WaitInputVarGenerated(const platform::Place &place); + private: FeedFetchList *data_; size_t offset_; diff --git a/paddle/fluid/framework/details/gather_op_handle.cc b/paddle/fluid/framework/details/gather_op_handle.cc index 3dfc972a44c62bd2adfc1331f29ffb1cca537652..2be02304566cf5dbe348fa01fc4171990eafd158 100644 --- a/paddle/fluid/framework/details/gather_op_handle.cc +++ b/paddle/fluid/framework/details/gather_op_handle.cc @@ -55,7 +55,7 @@ void GatherOpHandle::RunImpl() { "Currently, gather_op only can gather SelectedRows."); // Wait input done, this Wait is asynchronous operation - WaitInputVarGenerated(in_var_handles); + WaitInputVarGenerated(); auto &pre_in_value = pre_in_var->Get(); std::vector out_rows; @@ -111,17 +111,6 @@ void GatherOpHandle::RunImpl() { }); } -void GatherOpHandle::WaitInputVarGenerated( - const std::vector &in_var_handles) { - for (auto *in : in_var_handles) { - if (in->generated_op_) { - for (auto pair : dev_ctxes_) { - in->generated_op_->Wait(pair.second); - } - } - } -} - std::string GatherOpHandle::Name() const { return "gather"; } } // namespace details } // namespace framework diff --git a/paddle/fluid/framework/details/gather_op_handle.h b/paddle/fluid/framework/details/gather_op_handle.h index c394dd7a14b07cb956aa1aedfc0df4fa25744dd7..d11ef8556aa8840949ca8dc7aa176413f70b9f22 100644 --- a/paddle/fluid/framework/details/gather_op_handle.h +++ b/paddle/fluid/framework/details/gather_op_handle.h @@ -39,7 +39,6 @@ struct GatherOpHandle : public OpHandleBase { protected: void RunImpl() override; - void WaitInputVarGenerated(const std::vector &in_var_handles); private: const std::vector &local_scopes_; diff --git 
a/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc b/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc index 16aa5d067ab7a222af8fbb6ca8ec18222ecd799b..95aa599cd3e403e9cc66b2b5ad35d0d214d1ab5b 100644 --- a/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc +++ b/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc @@ -34,12 +34,7 @@ void NCCLAllReduceOpHandle::RunImpl() { return; // No need to all reduce when GPU count = 1; } else { // Wait input done - for (auto *in : inputs_) { - auto &p = static_cast(in)->place_; - if (in->generated_op_) { - in->generated_op_->Wait(dev_ctxes_[p]); - } - } + WaitInputVarGenerated(); auto &var_name = static_cast(this->inputs_[0])->name_; int dtype = -1; diff --git a/paddle/fluid/framework/details/op_handle_base.cc b/paddle/fluid/framework/details/op_handle_base.cc index 534d77860f87be08c8834efd373d90eb199ed6a2..6b064650b4f09737836bda4a43fa421720077929 100644 --- a/paddle/fluid/framework/details/op_handle_base.cc +++ b/paddle/fluid/framework/details/op_handle_base.cc @@ -56,15 +56,15 @@ void OpHandleBase::Run(bool use_event) { RunImpl(); } -void OpHandleBase::Wait(platform::DeviceContext *waited_dev) { +void OpHandleBase::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) { #ifdef PADDLE_WITH_CUDA - if (platform::is_cpu_place(waited_dev->GetPlace()) || events_.empty()) { + if (platform::is_cpu_place(waited_ctx->GetPlace()) || events_.empty()) { for (auto &dev_ctx : dev_ctxes_) { dev_ctx.second->Wait(); } } else { auto stream = - static_cast(waited_dev)->stream(); + static_cast(waited_ctx)->stream(); for (auto &ev : events_) { PADDLE_ENFORCE(cudaStreamWaitEvent(stream, ev.second, 0)); } @@ -86,6 +86,28 @@ void OpHandleBase::AddOutput(VarHandleBase *out) { out->generated_op_ = this; } +void OpHandleBase::WaitInputVarGenerated() { + for (auto in_var : inputs_) { + if (NeedWait(in_var)) { + for (auto &pair : dev_ctxes_) { + in_var->generated_op_->RecordWaitEventOnCtx(pair.second); + } + } + } +} + +void OpHandleBase::WaitInputVarGenerated(const platform::Place &place) { + for (auto *in : inputs_) { + if (NeedWait(in)) { + in->generated_op_->RecordWaitEventOnCtx(dev_ctxes_[place]); + } + } +} + +bool OpHandleBase::NeedWait(VarHandleBase *in_var) { + return in_var && in_var->generated_op_; +} + void OpHandleBase::RunAndRecordEvent(const std::function &callback) { #ifdef PADDLE_WITH_CUDA if (!events_.empty()) { // Use event diff --git a/paddle/fluid/framework/details/op_handle_base.h b/paddle/fluid/framework/details/op_handle_base.h index 00f213f3ed294adcce7c540e3ff346de8e2be7fb..fe1735d05dde5f09d5c72c68e5002d16f0083eb5 100644 --- a/paddle/fluid/framework/details/op_handle_base.h +++ b/paddle/fluid/framework/details/op_handle_base.h @@ -38,12 +38,24 @@ class OpHandleBase { void Run(bool use_event); - virtual void Wait(platform::DeviceContext *waited_dev); + virtual void RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx); void AddInput(VarHandleBase *in); void AddOutput(VarHandleBase *out); + // This method adds the wait events of all the input on all the device + // context. + // NODE: This Wait is asynchronous operation. + virtual void WaitInputVarGenerated(); + + // This method adds the wait events of all the input on the specified device + // context. + // NODE: This Wait is asynchronous operation. 
+ virtual void WaitInputVarGenerated(const platform::Place &place); + + virtual bool NeedWait(VarHandleBase *in_var); + // If the Op involves data transfer of multiple devices that // will likely block other computations. virtual bool IsMultiDeviceTransfer() { return false; } diff --git a/paddle/fluid/framework/details/op_registry.h b/paddle/fluid/framework/details/op_registry.h index 06603db31e0092382c0cc05482a038473d647ef1..1c4b059cd0aeff803ca7436d3f198e97a06cd012 100644 --- a/paddle/fluid/framework/details/op_registry.h +++ b/paddle/fluid/framework/details/op_registry.h @@ -95,7 +95,10 @@ struct OpInfoFiller { void operator()(const char* op_type, OpInfo* info) const { info->proto_ = new proto::OpProto; info->checker_ = new OpAttrChecker(); - auto maker = T(info->proto_, info->checker_); + T maker; + maker.SetProto(info->proto_); + maker.SetChecker(info->checker_); + maker.Make(); maker.Validate(); info->proto_->set_type(op_type); PADDLE_ENFORCE( diff --git a/paddle/fluid/framework/details/reduce_op_handle.cc b/paddle/fluid/framework/details/reduce_op_handle.cc index 1bb04c1dfca107f4b7ce4c599e9aa132de3e5985..7160e346dad0615e2fd32b70c096880af0359e1a 100644 --- a/paddle/fluid/framework/details/reduce_op_handle.cc +++ b/paddle/fluid/framework/details/reduce_op_handle.cc @@ -51,7 +51,7 @@ void ReduceOpHandle::RunImpl() { PADDLE_ENFORCE_NOT_NULL(pre_in_var); // Wait input done, this Wait is asynchronous operation - WaitInputVarGenerated(in_var_handles); + WaitInputVarGenerated(); // NOTE: The Places of all input tensor must be all on CPU or all on GPU. std::vector in_places; // used to get dev_ctx @@ -80,19 +80,21 @@ void ReduceOpHandle::RunImpl() { } if (pre_in_var->IsType()) { - std::vector in_selected_rows = - GetInputValues(in_var_handles, var_scopes); - - GatherSelectedRows(in_selected_rows, in_places, dev_ctxes_, t_out_p, - out_var->GetMutable()); + this->RunAndRecordEvent([&] { + std::vector in_selected_rows = + GetInputValues(in_var_handles, var_scopes); + GatherSelectedRows(in_selected_rows, in_places, dev_ctxes_, t_out_p, + out_var->GetMutable()); + }); } else { std::vector lod_tensors = GetInputValues(in_var_handles, var_scopes); - if (paddle::platform::is_cpu_place(lod_tensors[0]->place())) { - ReduceLoDTensor func(lod_tensors, - out_var->GetMutable()); - VisitDataType(ToDataType(lod_tensors[0]->type()), func); + this->RunAndRecordEvent([&] { + ReduceLoDTensor func(lod_tensors, + out_var->GetMutable()); + VisitDataType(ToDataType(lod_tensors[0]->type()), func); + }); } else if (paddle::platform::is_gpu_place(lod_tensors[0]->place())) { #ifdef PADDLE_WITH_CUDA auto pre_in = pre_in_var->Get(); @@ -157,17 +159,6 @@ std::vector ReduceOpHandle::GetInputValues( return in_selected_rows; } -void ReduceOpHandle::WaitInputVarGenerated( - const std::vector &in_var_handles) { - for (auto *in : in_var_handles) { - if (in->generated_op_) { - for (auto pair : dev_ctxes_) { - in->generated_op_->Wait(pair.second); - } - } - } -} - std::string ReduceOpHandle::Name() const { return "reduce"; } } // namespace details } // namespace framework diff --git a/paddle/fluid/framework/details/reduce_op_handle.h b/paddle/fluid/framework/details/reduce_op_handle.h index 59731d348d17755fbd8bf3b6fa29b32bdefaf71e..c652a2f4eb0f9b73cb19ebbd9d0809210b280ad3 100644 --- a/paddle/fluid/framework/details/reduce_op_handle.h +++ b/paddle/fluid/framework/details/reduce_op_handle.h @@ -60,8 +60,6 @@ struct ReduceOpHandle : public OpHandleBase { protected: void RunImpl() override; - void WaitInputVarGenerated(const 
std::vector &in_var_handles); - template std::vector GetInputValues( const std::vector &in_var_handles, diff --git a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc index 1cd3113030086104e7fc5c4ba3364a5ff027632b..d9c387e79dc71288e7330597fed57171d447f31b 100644 --- a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc +++ b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc @@ -29,6 +29,7 @@ ScaleLossGradOpHandle::ScaleLossGradOpHandle(size_t num_dev, Scope *scope, ScaleLossGradOpHandle::~ScaleLossGradOpHandle() {} void ScaleLossGradOpHandle::RunImpl() { + // Doesn't wait any event std::string var_name = static_cast(this->outputs_[0])->name_; auto &local_scope = *scope_->FindVar(kLocalExecScopeName)->Get(); diff --git a/paddle/fluid/framework/details/send_op_handle.cc b/paddle/fluid/framework/details/send_op_handle.cc index bd97c5260dbba935e422793e0aa6aac8b6875627..7109659dd7001f91e7674ac7bebbe3a59794cfc0 100644 --- a/paddle/fluid/framework/details/send_op_handle.cc +++ b/paddle/fluid/framework/details/send_op_handle.cc @@ -26,6 +26,7 @@ SendOpHandle::SendOpHandle(const framework::OpDesc &op_desc, place_(place) {} void SendOpHandle::RunImpl() { + // TODO(wuyi): need further analysis whether wait VarDummyHandle. // Wait input done for (auto *in : inputs_) { auto &p = static_cast(in)->place_; @@ -33,7 +34,7 @@ void SendOpHandle::RunImpl() { continue; } if (in->generated_op_) { - in->generated_op_->Wait(dev_ctxes_[p]); + in->generated_op_->RecordWaitEventOnCtx(dev_ctxes_[p]); } } auto &tmp_scope = local_scope_->FindVar(kLocalExecScopeName)->Get(); diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index 5e6ed5cb7cdc534332d402380458f39aecd841b8..e90523ebe8dc720d10034e3af9b0e51bb7a2fde9 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -14,8 +14,6 @@ #include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h" -#include "paddle/fluid/framework/details/fetch_op_handle.h" - namespace paddle { namespace framework { namespace details { @@ -45,73 +43,33 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( // Should revisit it if overlapping is available. std::unordered_set delayed_ops; - auto InsertPendingVar = [&pending_vars, &ready_vars](VarHandleBase &var) { - pending_vars.insert(&var); - if (var.generated_op_ == nullptr) { - ready_vars.Push(&var); - } - }; - - auto InsertPendingOp = [&pending_ops](OpHandleBase &op_instance) { - pending_ops.insert({&op_instance, op_instance.Inputs().size()}); - }; - // Transform SSAGraph to pending_ops & pending_vars for (auto &var_map : graph_->vars_) { for (auto &name_pair : var_map) { for (auto &version_pair : name_pair.second) { - InsertPendingVar(*version_pair); + InsertPendingVar(&pending_vars, &ready_vars, version_pair.get()); } } } for (auto &var : graph_->dep_vars_) { - InsertPendingVar(*var); + InsertPendingVar(&pending_vars, &ready_vars, var.get()); } for (auto &op : graph_->ops_) { if (op->Inputs().empty()) { // Special case, Op has no input. ready_ops.insert(op.get()); } else { - InsertPendingOp(*op); + InsertPendingOp(&pending_ops, op.get()); } } // Step 2. 
Insert FetchOps std::vector> fetch_ops; - FeedFetchList fetch_data(fetch_tensors.size()); - - std::unordered_map> fetched_vars; - - for (auto &fetch_var_name : fetch_tensors) { - for (auto &var_map : graph_->vars_) { - auto it = var_map.find(fetch_var_name); - if (it != var_map.end()) { - fetched_vars[fetch_var_name].push_back(it->second.rbegin()->get()); - } - } - } - std::unordered_set> fetch_dependencies; - for (size_t i = 0; i < fetch_tensors.size(); ++i) { - auto &var_name = fetch_tensors[i]; - auto &vars = fetched_vars.at(var_name); - auto *op = new FetchOpHandle(&fetch_data, i, &local_scopes_); - fetch_ops.emplace_back(op); - - for (auto &p : places_) { - op->SetDeviceContext(p, fetch_ctxs_.Get(p)); - } - - for (auto *var : vars) { - op->AddInput(var); - } + FeedFetchList fetch_data(fetch_tensors.size()); - auto *fetch_dummy = new DummyVarHandle(); - op->AddOutput(fetch_dummy); - fetch_dependencies.emplace(fetch_dummy); - InsertPendingVar(*fetch_dummy); - InsertPendingOp(*op); - } + InsertFetchOps(fetch_tensors, &fetch_ops, &fetch_dependencies, &pending_ops, + &pending_vars, &ready_vars, &fetch_data); auto run_all_ops = [&](std::unordered_set &set) { for (auto *op : set) { @@ -174,6 +132,60 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( return fetch_data; } +void ThreadedSSAGraphExecutor::InsertFetchOps( + const std::vector &fetch_tensors, + std::vector> *fetch_ops, + std::unordered_set> *fetch_dependencies, + std::unordered_map *pending_ops, + std::unordered_set *pending_vars, + BlockingQueue *ready_vars, FeedFetchList *fetch_data) { + std::unordered_map> fetched_vars; + + for (auto &fetch_var_name : fetch_tensors) { + for (auto &var_map : graph_->vars_) { + auto it = var_map.find(fetch_var_name); + if (it != var_map.end()) { + fetched_vars[fetch_var_name].push_back(it->second.rbegin()->get()); + } + } + } + + for (size_t i = 0; i < fetch_tensors.size(); ++i) { + auto &var_name = fetch_tensors[i]; + auto &vars = fetched_vars.at(var_name); + auto *op = new FetchOpHandle(fetch_data, i, &local_scopes_); + fetch_ops->emplace_back(op); + + for (auto &p : places_) { + op->SetDeviceContext(p, fetch_ctxs_.Get(p)); + } + + for (auto *var : vars) { + op->AddInput(var); + } + + auto *fetch_dummy = new DummyVarHandle(); + op->AddOutput(fetch_dummy); + fetch_dependencies->emplace(fetch_dummy); + this->InsertPendingVar(pending_vars, ready_vars, fetch_dummy); + this->InsertPendingOp(pending_ops, op); + } +} + +void ThreadedSSAGraphExecutor::InsertPendingOp( + std::unordered_map *pending_ops, + OpHandleBase *op_instance) const { + pending_ops->insert({op_instance, op_instance->Inputs().size()}); +} + +void ThreadedSSAGraphExecutor::InsertPendingVar( + std::unordered_set *pending_vars, + BlockingQueue *ready_vars, VarHandleBase *var) const { + pending_vars->insert(var); + if (var->generated_op_ == nullptr) { + ready_vars->Push(var); + } +} void ThreadedSSAGraphExecutor::RunOp( BlockingQueue *ready_var_q, details::OpHandleBase *op) { auto op_run = [ready_var_q, op, this] { diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h index d089b79d91327e38408439a8019ec5189ff6d189..f18a88526b3238220fc56fd07299643d32c8b58b 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h @@ -23,6 +23,7 @@ #include #include "ThreadPool.h" // ThreadPool in thrird party #include "paddle/fluid/framework/blocking_queue.h" +#include 
"paddle/fluid/framework/details/fetch_op_handle.h" #include "paddle/fluid/framework/details/ssa_graph_executor.h" namespace paddle { @@ -58,6 +59,21 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor { std::unique_ptr exception_; std::atomic running_ops_; bool allow_op_delay_; + + void InsertPendingOp(std::unordered_map *pending_ops, + OpHandleBase *op_instance) const; + + void InsertPendingVar(std::unordered_set *pending_vars, + BlockingQueue *ready_vars, + VarHandleBase *var) const; + + void InsertFetchOps( + const std::vector &fetch_tensors, + std::vector> *fetch_ops, + std::unordered_set> *fetch_dependencies, + std::unordered_map *pending_ops, + std::unordered_set *pending_vars, + BlockingQueue *ready_vars, FeedFetchList *fetch_data); }; } // namespace details diff --git a/paddle/fluid/framework/op_proto_maker.h b/paddle/fluid/framework/op_proto_maker.h index 0beb57ce1609d2e90c05d3255647bd321bc1f6a9..b01a520bba19c1be32363a1a5c381666c82e6afc 100644 --- a/paddle/fluid/framework/op_proto_maker.h +++ b/paddle/fluid/framework/op_proto_maker.h @@ -14,56 +14,57 @@ limitations under the License. */ #pragma once #include +#include "glog/logging.h" #include "paddle/fluid/framework/attribute.h" #include "paddle/fluid/framework/framework.pb.h" - namespace paddle { namespace framework { // this class not only make proto but also init attribute checkers. class OpProtoAndCheckerMaker { public: - using OpProto = proto::OpProto; - using OpAttrChecker = framework::OpAttrChecker; - OpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) - : proto_(proto), op_checker_(op_checker) {} + virtual void Make() = 0; virtual ~OpProtoAndCheckerMaker() { - PADDLE_ENFORCE(validated_, "should call Validate after build"); + CHECK(validated_) << "should call Validate after build"; } + void SetProto(proto::OpProto *proto) { proto_ = proto; } + + void SetChecker(OpAttrChecker *attr_checker) { op_checker_ = attr_checker; } + void Validate(); protected: struct VariableBuilder { - OpProto::Var* var_; + proto::OpProto::Var *var_; - VariableBuilder& AsDuplicable() { + VariableBuilder &AsDuplicable() { var_->set_duplicable(true); return *this; } - VariableBuilder& AsIntermediate() { + VariableBuilder &AsIntermediate() { var_->set_intermediate(true); return *this; } - VariableBuilder& AsDispensable() { + VariableBuilder &AsDispensable() { var_->set_dispensable(true); return *this; } }; - VariableBuilder AddInput(const std::string& name, const std::string& comment); + VariableBuilder AddInput(const std::string &name, const std::string &comment); - VariableBuilder AddOutput(const std::string& name, - const std::string& comment); + VariableBuilder AddOutput(const std::string &name, + const std::string &comment); template - TypedAttrChecker& AddAttr(const std::string& name, - const std::string& comment, + TypedAttrChecker &AddAttr(const std::string &name, + const std::string &comment, bool generated = false) { - auto* attr = proto_->add_attrs(); + auto *attr = proto_->add_attrs(); attr->set_name(name); attr->set_comment(comment); attr->set_generated(generated); @@ -71,21 +72,14 @@ class OpProtoAndCheckerMaker { return op_checker_->AddAttrChecker(name); } - void AddComment(const std::string& comment) { proto_->set_comment(comment); } + void AddComment(const std::string &comment) { proto_->set_comment(comment); } private: void CheckNoDuplicatedInOutAttrs(); - OpProto* proto_; - OpAttrChecker* op_checker_; + proto::OpProto *proto_; + OpAttrChecker *op_checker_; bool validated_{false}; }; - -class NOPMaker : public 
OpProtoAndCheckerMaker { - public: - NOPMaker(OpProto* proto, framework::OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) {} -}; - } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/op_proto_maker_test.cc b/paddle/fluid/framework/op_proto_maker_test.cc index a8d8c6386af940d4a14016b30de344e1c7877b22..9b5badbc81f9ddf083c81f57f5355e07a8e5e4a2 100644 --- a/paddle/fluid/framework/op_proto_maker_test.cc +++ b/paddle/fluid/framework/op_proto_maker_test.cc @@ -18,9 +18,7 @@ limitations under the License. */ class TestAttrProtoMaker : public paddle::framework::OpProtoAndCheckerMaker { public: - TestAttrProtoMaker(paddle::framework::proto::OpProto* proto, - paddle::framework::OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddAttr("scale", "scale of test op"); AddAttr("scale", "scale of test op"); } @@ -29,15 +27,16 @@ class TestAttrProtoMaker : public paddle::framework::OpProtoAndCheckerMaker { TEST(ProtoMaker, DuplicatedAttr) { paddle::framework::proto::OpProto op_proto; paddle::framework::OpAttrChecker op_checker; - auto proto_maker = TestAttrProtoMaker(&op_proto, &op_checker); + TestAttrProtoMaker proto_maker; + proto_maker.SetProto(&op_proto); + proto_maker.SetChecker(&op_checker); + proto_maker.Make(); ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet); } class TestInOutProtoMaker : public paddle::framework::OpProtoAndCheckerMaker { public: - TestInOutProtoMaker(paddle::framework::proto::OpProto* proto, - paddle::framework::OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddInput("input", "input of test op"); AddInput("input", "input of test op"); } @@ -46,6 +45,9 @@ class TestInOutProtoMaker : public paddle::framework::OpProtoAndCheckerMaker { TEST(ProtoMaker, DuplicatedInOut) { paddle::framework::proto::OpProto op_proto; paddle::framework::OpAttrChecker op_checker; - auto proto_maker = TestInOutProtoMaker(&op_proto, &op_checker); + TestAttrProtoMaker proto_maker; + proto_maker.SetProto(&op_proto); + proto_maker.SetChecker(&op_checker); + proto_maker.Make(); ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet); } diff --git a/paddle/fluid/framework/op_registry_test.cc b/paddle/fluid/framework/op_registry_test.cc index 6dc4cf261bad3c004aa53fba5502fe166e3a47f7..18b1649cc71d5edd5b07740bbad1fe8f81128898 100644 --- a/paddle/fluid/framework/op_registry_test.cc +++ b/paddle/fluid/framework/op_registry_test.cc @@ -33,8 +33,7 @@ class CosineOp : public OperatorBase { class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { public: - CosineOpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddInput("input", "input of cosine op"); AddOutput("output", "output of cosine op"); AddAttr("scale", "scale of cosine op") @@ -55,8 +54,7 @@ class MyTestOp : public OperatorBase { class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { public: - MyTestOpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddInput("input", "input of cosine op").AsDuplicable(); AddOutput("output", "output of cosine op").AsIntermediate(); auto my_checker = [](int i) { @@ -212,10 +210,7 @@ namespace framework { class OpKernelTestMaker : public OpProtoAndCheckerMaker { public: - OpKernelTestMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { 
- AddComment("NoGradOp, same input output. no Grad"); - } + void Make() { AddComment("NoGradOp, same input output. no Grad"); } }; class OpWithKernelTest : public OperatorWithKernel { @@ -275,9 +270,9 @@ TEST(OperatorRegistrar, CUDA) { static int op_test_value = 0; -using paddle::platform::DeviceContext; using paddle::platform::CPUDeviceContext; using paddle::platform::CUDADeviceContext; +using paddle::platform::DeviceContext; namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/operator_test.cc b/paddle/fluid/framework/operator_test.cc index 1bf8c81469bb4afdd00921cfa0acf6089dedbbaa..74043b5d7990178976baf2fad991ae03f9c8dd25 100644 --- a/paddle/fluid/framework/operator_test.cc +++ b/paddle/fluid/framework/operator_test.cc @@ -46,8 +46,7 @@ class OpWithoutKernelTest : public OperatorBase { class OpWithoutKernelCheckerMaker : public OpProtoAndCheckerMaker { public: - OpWithoutKernelCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddInput("input", "input of test op"); AddOutput("output", "output of test op"); AddAttr("scale", "scale of cosine op"); @@ -98,8 +97,7 @@ namespace framework { class OpKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker { public: - OpKernelTestProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddInput("x", "input of test op"); AddOutput("y", "output of test op"); AddAttr("scale", "scale of cosine op") @@ -137,9 +135,7 @@ class CPUKernelTest : public OpKernel { class OpKernelTestMultiInputsProtoAndCheckerMaker : public OpProtoAndCheckerMaker { public: - OpKernelTestMultiInputsProtoAndCheckerMaker(OpProto* proto, - OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddInput("xs", "inputs of test op").AsDuplicable(); AddInput("k", "input of test op"); AddOutput("ys", "outputs of test op").AsDuplicable(); diff --git a/paddle/fluid/framework/var_type_inference_test.cc b/paddle/fluid/framework/var_type_inference_test.cc index 9e33003b442762210c990b35f30bc3524963b8b4..14b81ddfecb8c996ae8709910c022a074e91eb3c 100644 --- a/paddle/fluid/framework/var_type_inference_test.cc +++ b/paddle/fluid/framework/var_type_inference_test.cc @@ -24,8 +24,7 @@ namespace framework { class SumOpMaker : public OpProtoAndCheckerMaker { public: - SumOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddInput("X", "").AsDuplicable(); AddOutput("Out", ""); AddComment(""); diff --git a/paddle/fluid/inference/tensorrt/test_engine.cc b/paddle/fluid/inference/tensorrt/test_engine.cc index 69dbb9a3f2b92c97813f31e179a35a753bbb62d9..e635f0f87d577a1f1ac74687ee60f762be525418 100644 --- a/paddle/fluid/inference/tensorrt/test_engine.cc +++ b/paddle/fluid/inference/tensorrt/test_engine.cc @@ -98,7 +98,7 @@ TEST_F(TensorRTEngineTest, add_layer_multi_dim) { float x_v[2] = {1.0, 2.0}; engine_->SetInputFromCPU("x", reinterpret_cast(&x_v), - 2 * sizeof(float)); + 2 * sizeof(float)); engine_->Execute(1); LOG(INFO) << "to get output"; diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 256aded8ca234a24229e11f27b9e3e25728ad293..fbeacb66cefab84c342ee11bcb7bf0fd55da0645 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -166,6 +166,8 @@ function(op_library TARGET) # NOTE(*): activation use macro to regist the kernels, set use_op manually. 
if(${TARGET} STREQUAL "activation") file(APPEND ${pybind_file} "USE_OP(relu);\n") + elseif(${TARGET} STREQUAL "reduce") + file(APPEND ${pybind_file} "USE_OP(reduce_sum);\n") else() file(APPEND ${pybind_file} "USE_OP(${TARGET});\n") endif() diff --git a/paddle/fluid/operators/accuracy_op.cc b/paddle/fluid/operators/accuracy_op.cc index ac10d759fecb56635d1303fd383a5f9ea18f0a4d..42fcace17926641b5caf677eb3c8ba5222e37190 100644 --- a/paddle/fluid/operators/accuracy_op.cc +++ b/paddle/fluid/operators/accuracy_op.cc @@ -63,8 +63,7 @@ class AccuracyOp : public framework::OperatorWithKernel { class AccuracyOpMaker : public framework::OpProtoAndCheckerMaker { public: - AccuracyOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { // TODO(typhoonzero): support both inference value and indices. AddInput("Out", "The network output of topk (inferences)"); AddInput("Indices", "The the network output of topk (indices)"); diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc index 87ef55c50b0be46492a695928625d140345d415d..55482abdf09516077a94ca99140ae7961f0915aa 100644 --- a/paddle/fluid/operators/activation_op.cc +++ b/paddle/fluid/operators/activation_op.cc @@ -19,19 +19,18 @@ limitations under the License. */ namespace paddle { namespace operators { -#define REGISTER_ACTIVATION_OP_MAKER(OP_NAME, OP_COMMENT) \ - class OP_NAME##OpMaker \ - : public ::paddle::framework::OpProtoAndCheckerMaker { \ - public: \ - OP_NAME##OpMaker(OpProto *proto, OpAttrChecker *op_checker) \ - : ::paddle::framework::OpProtoAndCheckerMaker(proto, op_checker) { \ - AddInput("X", "Input of " #OP_NAME "operator"); \ - AddOutput("Out", "Output of" #OP_NAME "operator"); \ - AddAttr("use_mkldnn", \ - "(bool, default false) Only used in mkldnn kernel") \ - .SetDefault(false); \ - AddComment(#OP_COMMENT); \ - } \ +#define REGISTER_ACTIVATION_OP_MAKER(OP_NAME, OP_COMMENT) \ + class OP_NAME##OpMaker \ + : public ::paddle::framework::OpProtoAndCheckerMaker { \ + public: \ + void Make() override { \ + AddInput("X", "Input of " #OP_NAME "operator"); \ + AddOutput("Out", "Output of" #OP_NAME "operator"); \ + AddAttr("use_mkldnn", \ + "(bool, default false) Only used in mkldnn kernel") \ + .SetDefault(false); \ + AddComment(#OP_COMMENT); \ + } \ } #define REGISTER_ACTIVATION_OP_GRAD_MAKER(OP_NAME, KERNEL_TYPE) \ @@ -204,8 +203,7 @@ $$out = \frac{x}{1 + |x|}$$ class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker { public: - LeakyReluOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of LeakyRelu operator"); AddOutput("Out", "Output of LeakyRelu operator"); AddAttr("alpha", "The small negative slope").SetDefault(0.02f); @@ -220,8 +218,7 @@ $out = \max(x, \alpha * x)$ class SoftShrinkOpMaker : public framework::OpProtoAndCheckerMaker { public: - SoftShrinkOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of Softshrink operator"); AddOutput("Out", "Output of Softshrink operator"); AddAttr("lambda", "non-negative offset").SetDefault(0.5f); @@ -242,8 +239,7 @@ $$ class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker { public: - HardShrinkOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of HardShrink 
operator"); AddOutput("Out", "Output of HardShrink operator"); AddAttr("threshold", "The value of threshold for HardShrink") @@ -265,8 +261,7 @@ $$ class BReluOpMaker : public framework::OpProtoAndCheckerMaker { public: - BReluOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of BRelu operator"); AddOutput("Out", "Output of BRelu operator"); AddAttr("t_min", "The min marginal value of BRelu") @@ -284,8 +279,7 @@ $out = \max(\min(x, t_{min}), t_{max})$ class SoftReluOpMaker : public framework::OpProtoAndCheckerMaker { public: - SoftReluOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of SoftRelu operator"); AddOutput("Out", "Output of SoftRelu operator"); AddAttr("threshold", "The threshold value of SoftRelu") @@ -301,8 +295,7 @@ $out = \ln(1 + \exp(\max(\min(x, threshold), threshold))$ class ELUOpMaker : public framework::OpProtoAndCheckerMaker { public: - ELUOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of ELU operator"); AddOutput("Out", "Output of ELU operator"); AddAttr("alpha", "The alpha value of ELU").SetDefault(1.0f); @@ -320,8 +313,7 @@ $out = \max(0, x) + \min(0, \alpha * (e^x - 1))$ class Relu6OpMaker : public framework::OpProtoAndCheckerMaker { public: - Relu6OpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of Relu6 operator"); AddOutput("Out", "Output of Relu6 operator"); AddAttr("threshold", "The threshold value of Relu6") @@ -337,8 +329,7 @@ $out = \min(\max(0, x), 6)$ class PowOpMaker : public framework::OpProtoAndCheckerMaker { public: - PowOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of Pow operator"); AddOutput("Out", "Output of Pow operator"); AddAttr("factor", "The exponential factor of Pow").SetDefault(1.0f); @@ -353,8 +344,7 @@ $out = x^{factor}$ class STanhOpMaker : public framework::OpProtoAndCheckerMaker { public: - STanhOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of STanh operator"); AddOutput("Out", "Output of STanh operator"); AddAttr("scale_a", "The scale parameter of a for the input") @@ -372,8 +362,7 @@ $$out = b * \frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}$$ class ThresholdedReluOpMaker : public framework::OpProtoAndCheckerMaker { public: - ThresholdedReluOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of ThresholdedRelu operator"); AddOutput("Out", "Output of ThresholdedRelu operator"); AddAttr("threshold", "The threshold location of activation") @@ -394,8 +383,7 @@ $$ class HardSigmoidOpMaker : public framework::OpProtoAndCheckerMaker { public: - HardSigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of HardSigmoid operator"); AddOutput("Out", "Output of HardSigmoid operator"); AddAttr("slope", "Slope for linear approximation of sigmoid") @@ -420,8 +408,7 @@ It is recommended to use the 
defaults for this activation. class SwishOpMaker : public framework::OpProtoAndCheckerMaker { public: - SwishOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of Swish operator"); AddOutput("Out", "Output of Swish operator"); AddAttr("beta", "Constant beta of swish operator").SetDefault(1.0f); diff --git a/paddle/fluid/operators/adadelta_op.cc b/paddle/fluid/operators/adadelta_op.cc index 7bdb3f274aa9bacb6b261e0d0cd00b72f1d409ae..d1970515f58969948b1d2db5847e4344112f77f9 100644 --- a/paddle/fluid/operators/adadelta_op.cc +++ b/paddle/fluid/operators/adadelta_op.cc @@ -66,8 +66,7 @@ class AdadeltaOp : public framework::OperatorWithKernel { class AdadeltaOpMaker : public framework::OpProtoAndCheckerMaker { public: - AdadeltaOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor) Input parameter"); AddInput("Grad", "(Tensor) Input gradient"); AddInput("AvgSquaredGrad", "(Tensor) Input average of squared gradient"); diff --git a/paddle/fluid/operators/adagrad_op.cc b/paddle/fluid/operators/adagrad_op.cc index 1227129429addb0ed412c7f1755fd39c9ca77157..a3ef9ad9f91f1f626bd33876693ecc17ad76b96b 100644 --- a/paddle/fluid/operators/adagrad_op.cc +++ b/paddle/fluid/operators/adagrad_op.cc @@ -67,8 +67,7 @@ class AdagradOp : public framework::OperatorWithKernel { class AdagradOpMaker : public framework::OpProtoAndCheckerMaker { public: - AdagradOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor) Input parameter"); AddInput("Grad", "(Tensor) Input gradient"); AddInput("Moment", "(Tensor) Second moment"); diff --git a/paddle/fluid/operators/adam_op.cc b/paddle/fluid/operators/adam_op.cc index f12f0c6663d1785b8af852244ffe32358fb1b693..99b0239855d6241b064a5883c2be3d58078b3b61 100644 --- a/paddle/fluid/operators/adam_op.cc +++ b/paddle/fluid/operators/adam_op.cc @@ -80,8 +80,7 @@ class AdamOp : public framework::OperatorWithKernel { class AdamOpMaker : public framework::OpProtoAndCheckerMaker { public: - AdamOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor) Input parameter"); AddInput("Grad", "(Tensor) Input gradient"); AddInput("LearningRate", "(Tensor) Learning rate"); diff --git a/paddle/fluid/operators/adamax_op.cc b/paddle/fluid/operators/adamax_op.cc index 608b855d58a2f701fbb8631cb5f24768a61f3deb..32062574bcf71ff96e451eaa6865b6bbfc3b1c80 100644 --- a/paddle/fluid/operators/adamax_op.cc +++ b/paddle/fluid/operators/adamax_op.cc @@ -74,8 +74,7 @@ class AdamaxOp : public framework::OperatorWithKernel { class AdamaxOpMaker : public framework::OpProtoAndCheckerMaker { public: - AdamaxOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor) Input parameter"); AddInput("Grad", "(Tensor) Input gradient"); AddInput("LearningRate", "(Tensor) Learning rate"); diff --git a/paddle/fluid/operators/array_to_lod_tensor_op.cc b/paddle/fluid/operators/array_to_lod_tensor_op.cc index 5db2e4540ef170079328f24ac8d30f7b1901fa1e..149226e92d4d08a25c211bce686ff03c5d7ddf40 100644 --- a/paddle/fluid/operators/array_to_lod_tensor_op.cc +++ b/paddle/fluid/operators/array_to_lod_tensor_op.cc @@ -123,8 +123,7 @@ class ArrayToLoDTensorOp : 
public framework::OperatorBase { class ArrayToLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - ArrayToLoDTensorOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(std::vector) A vector of tensors that is going to " "be casted to a big LoDTensor."); diff --git a/paddle/fluid/operators/assign_op.cc b/paddle/fluid/operators/assign_op.cc index d372213e1b6008b0c4227103dd40730f86a84301..d9294048a9e89662958fd5c6af4fcbe5da3814c2 100644 --- a/paddle/fluid/operators/assign_op.cc +++ b/paddle/fluid/operators/assign_op.cc @@ -94,8 +94,7 @@ class AssignOp : public framework::OperatorBase { class AssignOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - AssignOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor, SelectedRows or LoDTensorArray) The input variable " "could be LoDTensor, SelectedRows or LoDTensorArray.") diff --git a/paddle/fluid/operators/assign_value_op.cc b/paddle/fluid/operators/assign_value_op.cc index 993610fdedde4bafd99f59a0adeeeef4526eb089..4ad6f3443db33fd14b67091d14fd877b951730ff 100644 --- a/paddle/fluid/operators/assign_value_op.cc +++ b/paddle/fluid/operators/assign_value_op.cc @@ -45,8 +45,7 @@ class AssignValueOp : public framework::OperatorWithKernel { class AssignValueOpMaker : public framework::OpProtoAndCheckerMaker { public: - AssignValueOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddOutput("Out", "(Tensor) Output tensor of assign_value operator."); AddAttr>("shape", "(vector) " diff --git a/paddle/fluid/operators/auc_op.cc b/paddle/fluid/operators/auc_op.cc index a168eaeab56128b75bbe97d7ccf843a081b5dced..c9871a9fe6b3b0d0cf671c2d155715f92c94fd8f 100644 --- a/paddle/fluid/operators/auc_op.cc +++ b/paddle/fluid/operators/auc_op.cc @@ -50,8 +50,7 @@ class AucOp : public framework::OperatorWithKernel { class AucOpMaker : public framework::OpProtoAndCheckerMaker { public: - AucOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Out", "A floating point 2D tensor, values are in the range [0, 1]." "Each row is sorted in descending order. 
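The refactor applied in each of these hunks is uniform: the two-argument `XOpMaker(OpProto*, OpAttrChecker*)` constructor is dropped and the proto description moves into an overridden `Make()`, which the framework now invokes itself once the proto and attribute checker are bound. A minimal sketch of a maker under the new interface (the `MyScaleOpMaker` and its attribute are hypothetical, not part of this diff):

class MyScaleOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of MyScale operator");
    AddOutput("Out", "Output of MyScale operator");
    // Hypothetical attribute, for illustration only.
    AddAttr<float>("scale", "Scaling factor applied to X").SetDefault(1.0f);
    AddComment(R"DOC(
MyScale Operator.

$out = scale * x$
)DOC");
  }
};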
This input should be the" diff --git a/paddle/fluid/operators/average_accumulates_op.cc b/paddle/fluid/operators/average_accumulates_op.cc index b21deaf9258567c05a8816b14ac7d6462964e8ba..25864e95d7e290c7f684501893e99c828c511979 100644 --- a/paddle/fluid/operators/average_accumulates_op.cc +++ b/paddle/fluid/operators/average_accumulates_op.cc @@ -111,8 +111,7 @@ class AverageAccumulatesOp : public framework::OperatorWithKernel { class AverageAccumulatesOpMaker : public framework::OpProtoAndCheckerMaker { public: - AverageAccumulatesOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("param", "(Tensor), The parameter to be accumulated."); AddInput("in_sum_1", "(Tensor), A tensor used to store the parameter " diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc index b4bd40d0311bf10ec1fddabab2ee131fe02baf52..6ec8c9d18b466142acdb46b0f46826a2aca7a47e 100644 --- a/paddle/fluid/operators/batch_norm_op.cc +++ b/paddle/fluid/operators/batch_norm_op.cc @@ -126,8 +126,7 @@ class BatchNormOp : public framework::OperatorWithKernel { class BatchNormOpMaker : public framework::OpProtoAndCheckerMaker { public: - BatchNormOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddAttr("is_test", "").SetDefault(false); AddAttr("momentum", "").SetDefault(0.9); AddAttr("epsilon", "") diff --git a/paddle/fluid/operators/batch_size_like.h b/paddle/fluid/operators/batch_size_like.h index dd51a11fbe6ad5e528197b67536518c4b31fa355..483c9f8c2191fa4eb98b91112f9d6753e2fbddc3 100644 --- a/paddle/fluid/operators/batch_size_like.h +++ b/paddle/fluid/operators/batch_size_like.h @@ -53,8 +53,7 @@ class BatchSizeLikeOp : public framework::OperatorWithKernel { class BatchSizeLikeOpMaker : public framework::OpProtoAndCheckerMaker { public: - BatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() final { AddInput("Input", "(Tensor) Tensor " "whose input_dim_idx'th dimension specifies the batch_size"); @@ -68,7 +67,11 @@ class BatchSizeLikeOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("output_dim_idx", "(int, default 0) The index of output's batch size dimension") .SetDefault(0); + Apply(); } + + protected: + virtual void Apply() = 0; }; } // namespace operators diff --git a/paddle/fluid/operators/beam_search_decode_op.cc b/paddle/fluid/operators/beam_search_decode_op.cc index 68fb988afd8af4e9ac3acb4506c1c31fcf85e5a3..c3dd22119ddab8ecf9213ee274e4cbd4f05e78fd 100644 --- a/paddle/fluid/operators/beam_search_decode_op.cc +++ b/paddle/fluid/operators/beam_search_decode_op.cc @@ -134,8 +134,7 @@ class BeamSearchDecodeOp : public framework::OperatorBase { class BeamSearchDecodeOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - BeamSearchDecodeOpProtoMaker(OpProto* proto, OpAttrChecker* op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Ids", "(LodTensorArray)" "score of the candidate words in each step"); diff --git a/paddle/fluid/operators/beam_search_op.cc b/paddle/fluid/operators/beam_search_op.cc index cff097cca13f3b92c7efe4b69259fdf7c75b3760..df0b50881f4e3ec6f57bdb2b63033931059c486e 100644 --- a/paddle/fluid/operators/beam_search_op.cc +++ b/paddle/fluid/operators/beam_search_op.cc @@ -197,8 +197,7 @@ std::string ItemToString(const BeamSearch::Item &item) { class 
BeamSearchOpMaker : public framework::OpProtoAndCheckerMaker { public: - BeamSearchOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { // inputs and outputs stored in proto AddInput("pre_ids", "ids in previous step"); AddInput("ids", "a LoDTensor of shape of [None,k]"); diff --git a/paddle/fluid/operators/bilinear_interp_op.cc b/paddle/fluid/operators/bilinear_interp_op.cc index 69f79bf93be8ac7df9cab43b84cf755f2f3dfeaa..d46fda54e7a9d5bc737a7ec2116daca33ffa015f 100644 --- a/paddle/fluid/operators/bilinear_interp_op.cc +++ b/paddle/fluid/operators/bilinear_interp_op.cc @@ -41,8 +41,7 @@ class BilinearInterpOp : public framework::OperatorWithKernel { class BilinearInterpOpMaker : public framework::OpProtoAndCheckerMaker { public: - BilinearInterpOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) The input tensor of bilinear interpolation, " "This is a 4-D tensor with shape of (N x C x h x w)"); diff --git a/paddle/fluid/operators/bilinear_tensor_product_op.cc b/paddle/fluid/operators/bilinear_tensor_product_op.cc index e910ad92d1051aa89fdb3290a977ff376378a227..8d261a118a75ee16027faf60341cefd30c3cdbba 100644 --- a/paddle/fluid/operators/bilinear_tensor_product_op.cc +++ b/paddle/fluid/operators/bilinear_tensor_product_op.cc @@ -65,8 +65,7 @@ class BilinearTensorProductOp : public framework::OperatorWithKernel { class BilinearTensorProductOpMaker : public framework::OpProtoAndCheckerMaker { public: - BilinearTensorProductOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The first input of bilinear_tensor_product operator."); AddInput("Y", "The second input of bilinear_tensor_product operator."); AddInput("Weight", diff --git a/paddle/fluid/operators/bipartite_match_op.cc b/paddle/fluid/operators/bipartite_match_op.cc index 1218d9fdc1e6101d17bc09a4ae769f5fbf8e7b15..d437ad5c19828331c749244404ba80d0f3acda2a 100644 --- a/paddle/fluid/operators/bipartite_match_op.cc +++ b/paddle/fluid/operators/bipartite_match_op.cc @@ -182,8 +182,7 @@ class BipartiteMatchKernel : public framework::OpKernel { class BipartiteMatchOpMaker : public framework::OpProtoAndCheckerMaker { public: - BipartiteMatchOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "DistMat", "(LoDTensor or Tensor) this input is a 2-D LoDTensor with shape " diff --git a/paddle/fluid/operators/box_coder_op.cc b/paddle/fluid/operators/box_coder_op.cc index ec416f725e75fae57484751ee8a066c0b9da8a70..ce9bf10dc3c4718c5adfc0d5034d06c22f9d54e1 100644 --- a/paddle/fluid/operators/box_coder_op.cc +++ b/paddle/fluid/operators/box_coder_op.cc @@ -60,8 +60,7 @@ class BoxCoderOp : public framework::OperatorWithKernel { class BoxCoderOpMaker : public framework::OpProtoAndCheckerMaker { public: - BoxCoderOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "PriorBox", "(Tensor, default Tensor) " diff --git a/paddle/fluid/operators/cast_op.cc b/paddle/fluid/operators/cast_op.cc index dd0068d571f72c9c22334e523cd091fe4c8da5a6..84660d042c7b12283fabc316d29609f5eddb825d 100644 --- a/paddle/fluid/operators/cast_op.cc +++ b/paddle/fluid/operators/cast_op.cc @@ -21,8 +21,7 @@ namespace operators { class CastOpProtoMaker : public 
framework::OpProtoAndCheckerMaker { public: - CastOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input tensor of cast op"); AddOutput("Out", "The output tensor of cast op"); AddAttr("out_dtype", "output data type"); diff --git a/paddle/fluid/operators/channel_close_op.cc b/paddle/fluid/operators/channel_close_op.cc index 5892650c49e2e9d7345fb94465d124cff57f0a6f..8e2db250a069c488ee98f618bc03df6485022456 100644 --- a/paddle/fluid/operators/channel_close_op.cc +++ b/paddle/fluid/operators/channel_close_op.cc @@ -50,8 +50,7 @@ class ChannelCloseOpOpInferShape : public framework::InferShapeBase { class ChannelCloseOpMaker : public framework::OpProtoAndCheckerMaker { public: - ChannelCloseOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(kChannel, "The Channel Variable that should be closed by" " the ChannelClose Op."); diff --git a/paddle/fluid/operators/channel_create_op.cc b/paddle/fluid/operators/channel_create_op.cc index b2fdfd0e1f24ed071bb57b7de8f99b2d5e1d3196..a7f59e4088e3fb328e5b5a83eed65f0f90edb9f0 100644 --- a/paddle/fluid/operators/channel_create_op.cc +++ b/paddle/fluid/operators/channel_create_op.cc @@ -91,8 +91,7 @@ class ChannelCreateOpOpInferShape : public framework::InferShapeBase { class ChannelCreateOpMaker : public framework::OpProtoAndCheckerMaker { public: - ChannelCreateOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddOutput(kOutput, "The object of a Channel type created by ChannelCreate Op."); AddAttr("capacity", "The size of the buffer of Channel.") diff --git a/paddle/fluid/operators/channel_recv_op.cc b/paddle/fluid/operators/channel_recv_op.cc index 25c5c3c95ef6899589c98570df6ecbf9b3241d89..101015e837e28b504b71d919abd5f908a102c812 100644 --- a/paddle/fluid/operators/channel_recv_op.cc +++ b/paddle/fluid/operators/channel_recv_op.cc @@ -72,8 +72,7 @@ class ChannelRecvOp : public framework::OperatorBase { class ChannelRecvOpMaker : public framework::OpProtoAndCheckerMaker { public: - ChannelRecvOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(Channel, "(Channel) A variable which \"receives\" the a value sent" "to it by a channel_send op.") diff --git a/paddle/fluid/operators/channel_send_op.cc b/paddle/fluid/operators/channel_send_op.cc index 66d33617ede5bef8a95de14f5b447c0910fe3eb4..67d6deb511d883ac69426ddd34be2199367cd4c7 100644 --- a/paddle/fluid/operators/channel_send_op.cc +++ b/paddle/fluid/operators/channel_send_op.cc @@ -57,8 +57,7 @@ class ChannelSendOp : public framework::OperatorBase { class ChannelSendOpMaker : public framework::OpProtoAndCheckerMaker { public: - ChannelSendOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(Channel, "(Channel) A variable which \"sends\" the passed in value to " "a listening receiver.") diff --git a/paddle/fluid/operators/chunk_eval_op.cc b/paddle/fluid/operators/chunk_eval_op.cc index 95440ff89e883e754795c67cd58a08f1131df368..62636bb2f9078768180ab1e0016e3565617d24d2 100644 --- a/paddle/fluid/operators/chunk_eval_op.cc +++ b/paddle/fluid/operators/chunk_eval_op.cc @@ -66,8 +66,7 @@ class ChunkEvalOp : public framework::OperatorWithKernel { class ChunkEvalOpMaker : public framework::OpProtoAndCheckerMaker 
{ public: - ChunkEvalOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Inference", "(Tensor, default: Tensor). " "Predictions from the network."); diff --git a/paddle/fluid/operators/clip_by_norm_op.cc b/paddle/fluid/operators/clip_by_norm_op.cc index f43726b4793f284f14226f90c94ac6eebf632bd5..c87bded034e382c981d119e8499d6780e288031f 100644 --- a/paddle/fluid/operators/clip_by_norm_op.cc +++ b/paddle/fluid/operators/clip_by_norm_op.cc @@ -37,8 +37,7 @@ class ClipByNormOp : public framework::OperatorWithKernel { class ClipByNormOpMaker : public framework::OpProtoAndCheckerMaker { public: - ClipByNormOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) The input of clip_by_norm op." "The number of dimensions must be between [1, 9]."); diff --git a/paddle/fluid/operators/clip_op.cc b/paddle/fluid/operators/clip_op.cc index c71139fc7c01a696299296e43d06cf195fb3d03f..a679f7e2536a0a44148193f423f5ffe11b5e35fc 100644 --- a/paddle/fluid/operators/clip_op.cc +++ b/paddle/fluid/operators/clip_op.cc @@ -38,8 +38,7 @@ class ClipOp : public framework::OperatorWithKernel { template class ClipOpMaker : public framework::OpProtoAndCheckerMaker { public: - ClipOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor)The input of clip op." "The number of dimensions must be between [1, 9]."); diff --git a/paddle/fluid/operators/compare_op.cc b/paddle/fluid/operators/compare_op.cc index 3a6a357e81949014a70e5bae1ee0e1c8b9d0c2ce..3a4819f3dec9704a4a7c8910dd22e80fda082335 100644 --- a/paddle/fluid/operators/compare_op.cc +++ b/paddle/fluid/operators/compare_op.cc @@ -21,8 +21,7 @@ namespace operators { template class CompareOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - CompareOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { OpComment comment; AddInput("X", string::Sprintf("(LoDTensor) the left hand operand of %s operator", diff --git a/paddle/fluid/operators/concat_op.cc b/paddle/fluid/operators/concat_op.cc index 3bb3bd4eb15881afb5ae42beb944b76b5e8207cb..38337f9aa52435c445420047957500d21069506a 100644 --- a/paddle/fluid/operators/concat_op.cc +++ b/paddle/fluid/operators/concat_op.cc @@ -63,8 +63,7 @@ class ConcatOp : public framework::OperatorWithKernel { class ConcatOpMaker : public framework::OpProtoAndCheckerMaker { public: - ConcatOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input tensors of concat operator.").AsDuplicable(); AddOutput("Out", "Output tensor of concat operator."); AddAttr("axis", diff --git a/paddle/fluid/operators/conditional_block_op.cc b/paddle/fluid/operators/conditional_block_op.cc index 27f74a789beef02d31ebceb9b909e97ebd68232a..5984f80d04bdeb232f8e24264ae979725af24ef4 100644 --- a/paddle/fluid/operators/conditional_block_op.cc +++ b/paddle/fluid/operators/conditional_block_op.cc @@ -108,8 +108,7 @@ class ConditionalBlockOp : public ConditionalOp { class ConditionalBlockOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - ConditionalBlockOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The conditional variable of this 
operator. If X is empty, the " "whole sub-block will not be executed.") diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index 92748993c32ffb93ae25db8d9916798e657cc804..697d91484257984b104a13b0572cf19b16f8d37e 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -106,8 +106,7 @@ framework::OpKernelType ConvOp::GetExpectedKernelType( library); } -Conv2DOpMaker::Conv2DOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { +void Conv2DOpMaker::Make() { AddInput( "Input", "(Tensor) The input tensor of convolution operator. " @@ -200,8 +199,7 @@ $$ )DOC"); } -Conv3DOpMaker::Conv3DOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { +void Conv3DOpMaker::Make() { AddInput( "Input", "(Tensor) The input tensor of convolution operator. " diff --git a/paddle/fluid/operators/conv_op.h b/paddle/fluid/operators/conv_op.h index f462f00c0803c12ee2f2b0f94dc90afdca500da3..b3140116dfe6a17a400bb88219ff43b249ecb32a 100644 --- a/paddle/fluid/operators/conv_op.h +++ b/paddle/fluid/operators/conv_op.h @@ -60,12 +60,12 @@ inline bool IsExpand(const std::vector& filter_dim, // operator implementations can reuse the code. class Conv2DOpMaker : public framework::OpProtoAndCheckerMaker { public: - Conv2DOpMaker(OpProto* proto, OpAttrChecker* op_checker); + void Make() override; }; class Conv3DOpMaker : public framework::OpProtoAndCheckerMaker { public: - Conv3DOpMaker(OpProto* proto, OpAttrChecker* op_checker); + void Make() override; }; class ConvOp : public framework::OperatorWithKernel { diff --git a/paddle/fluid/operators/conv_shift_op.cc b/paddle/fluid/operators/conv_shift_op.cc index 82fdd308207adb159632dbb9decd67fd2d1c4646..f2549e814d6f3b5674fe2eec1139f1c3dc6fa0b4 100644 --- a/paddle/fluid/operators/conv_shift_op.cc +++ b/paddle/fluid/operators/conv_shift_op.cc @@ -75,8 +75,7 @@ class ConvShiftGradOp : public framework::OperatorWithKernel { class ConvShiftOpMaker : public framework::OpProtoAndCheckerMaker { public: - ConvShiftOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor, default Tensor), a 2-D tensor with shape B x M, " "where B is the batch size and M is the data dimension."); diff --git a/paddle/fluid/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc index d699dcafa4e2c7e0a3ffb62ec3985e4961fa2133..c27c8e273168407d3aacb05cd6628887cc5760ad 100644 --- a/paddle/fluid/operators/conv_transpose_op.cc +++ b/paddle/fluid/operators/conv_transpose_op.cc @@ -84,9 +84,7 @@ framework::OpKernelType ConvTransposeOp::GetExpectedKernelType( layout_, library_); } -Conv2DTransposeOpMaker::Conv2DTransposeOpMaker(OpProto* proto, - OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { +void Conv2DTransposeOpMaker::Make() { AddInput( "Input", "(Tensor) The input tensor of convolution transpose operator. " @@ -168,9 +166,7 @@ Example: )DOC"); } -Conv3DTransposeOpMaker::Conv3DTransposeOpMaker(OpProto* proto, - OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { +void Conv3DTransposeOpMaker::Make() { AddInput("Input", "(Tensor) The input tensor of convolution transpose operator." "The format of input tensor is NCDHW. 
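As the conv_op.h/conv_op.cc pair above shows, makers shared by several operators keep their declaration/definition split: the header now declares only `void Make() override;` and the source file defines it out of line. A sketch of the same split, with a hypothetical MyConvOpMaker:

// my_conv_op.h (hypothetical): declare the override only.
class MyConvOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override;
};

// my_conv_op.cc (hypothetical): define it out of line.
void MyConvOpMaker::Make() {
  AddInput("Input", "Input tensor of the hypothetical convolution operator");
  AddOutput("Output", "Output tensor of the hypothetical convolution operator");
  AddAttr<std::vector<int>>("strides", "Strides of the convolution")
      .SetDefault(std::vector<int>{1, 1});
}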
Where N is batch size, C is " diff --git a/paddle/fluid/operators/conv_transpose_op.h b/paddle/fluid/operators/conv_transpose_op.h index 898121412b17cd6fbbbeb57e9d63842e592703ac..f9d205a5b5c4cff74d02a6c89b83f7584e4a6824 100644 --- a/paddle/fluid/operators/conv_transpose_op.h +++ b/paddle/fluid/operators/conv_transpose_op.h @@ -30,12 +30,12 @@ using DDim = framework::DDim; // operator implementations can reuse the code. class Conv2DTransposeOpMaker : public framework::OpProtoAndCheckerMaker { public: - Conv2DTransposeOpMaker(OpProto* proto, OpAttrChecker* op_checker); + void Make() override; }; class Conv3DTransposeOpMaker : public framework::OpProtoAndCheckerMaker { public: - Conv3DTransposeOpMaker(OpProto* proto, OpAttrChecker* op_checker); + void Make() override; }; class ConvTransposeOp : public framework::OperatorWithKernel { diff --git a/paddle/fluid/operators/cos_sim_op.cc b/paddle/fluid/operators/cos_sim_op.cc index 04ca878e687f9b8e5239d8c4aad7e5f262fda0fa..046dd11910bb0ff46b567c3b89883582782205d3 100644 --- a/paddle/fluid/operators/cos_sim_op.cc +++ b/paddle/fluid/operators/cos_sim_op.cc @@ -62,8 +62,7 @@ class CosSimOp : public framework::OperatorWithKernel { class CosSimOpMaker : public framework::OpProtoAndCheckerMaker { public: - CosSimOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The 1st input of cos_sim op."); AddInput("Y", "The 2nd input of cos_sim op."); AddOutput("Out", "The output of cos_sim op."); diff --git a/paddle/fluid/operators/crf_decoding_op.cc b/paddle/fluid/operators/crf_decoding_op.cc index a83013c428a77a0ead545d87852e1017bc927edf..40f43936db662f2b18ffa540da4794755b5d6fc7 100644 --- a/paddle/fluid/operators/crf_decoding_op.cc +++ b/paddle/fluid/operators/crf_decoding_op.cc @@ -18,8 +18,7 @@ namespace paddle { namespace operators { class CRFDecodingOpMaker : public framework::OpProtoAndCheckerMaker { public: - CRFDecodingOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Emission", "(LoDTensor, default: LoDTensor). A LoDTensor with shape " "[N x D] where N is the size of the mini-batch and D is the total " diff --git a/paddle/fluid/operators/crop_op.cc b/paddle/fluid/operators/crop_op.cc index a8f1fbd529c71d1915c75fa90b7e4e8239d2fa3f..669b3bbe9df4cae1aa381184092dfa51157ab6a3 100644 --- a/paddle/fluid/operators/crop_op.cc +++ b/paddle/fluid/operators/crop_op.cc @@ -52,8 +52,7 @@ class CropOp : public framework::OperatorWithKernel { class CropOpMaker : public framework::OpProtoAndCheckerMaker { public: - CropOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of pad op. 
" "The input should be a k-D tensor(k > 0 and k < 7)."); diff --git a/paddle/fluid/operators/cross_entropy_op.cc b/paddle/fluid/operators/cross_entropy_op.cc index 2b2a9dc8319f964875371214168ce04cb67fc818..a3bec3da45136bca5cb2763e7ffd6b67703a1813 100644 --- a/paddle/fluid/operators/cross_entropy_op.cc +++ b/paddle/fluid/operators/cross_entropy_op.cc @@ -111,8 +111,7 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel { class CrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker { public: - CrossEntropyOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor, default Tensor), a 2-D tensor with shape [N x D]," " where N is the batch size and D is the number of classes. " diff --git a/paddle/fluid/operators/ctc_align_op.cc b/paddle/fluid/operators/ctc_align_op.cc index 19e7649660edd0bc90bc6a9537b1cdbb2e7e8ebc..d2b440d9d2e50340af7a7bb4e76e55beea1bcb46 100644 --- a/paddle/fluid/operators/ctc_align_op.cc +++ b/paddle/fluid/operators/ctc_align_op.cc @@ -44,8 +44,7 @@ class CTCAlignOp : public framework::OperatorWithKernel { class CTCAlignOpMaker : public framework::OpProtoAndCheckerMaker { public: - CTCAlignOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Input", "(LodTensor, default: LoDTensor), Its shape is " "[Lp, 1], where Lp is the sum of all input sequences' length."); diff --git a/paddle/fluid/operators/cumsum_op.cc b/paddle/fluid/operators/cumsum_op.cc index f7c516a0ba375a68e3adeb44c99f2808dc0418bb..92bb835e8f18e17ae1355fdec29f43b8ffb70460 100644 --- a/paddle/fluid/operators/cumsum_op.cc +++ b/paddle/fluid/operators/cumsum_op.cc @@ -29,8 +29,7 @@ class CumOp : public framework::OperatorWithKernel { class CumsumOpMaker : public framework::OpProtoAndCheckerMaker { public: - CumsumOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of Cumsum operator"); AddOutput("Out", "Output of Cumsum operator"); AddAttr("axis", diff --git a/paddle/fluid/operators/decayed_adagrad_op.cc b/paddle/fluid/operators/decayed_adagrad_op.cc index 5a1315fb2a80bf7f7f57388d0d6832686442c4ff..c0f2b49a04d9e88502c4b63bca493cd2b7ad1c5c 100644 --- a/paddle/fluid/operators/decayed_adagrad_op.cc +++ b/paddle/fluid/operators/decayed_adagrad_op.cc @@ -62,8 +62,7 @@ class DecayedAdagradOp : public framework::OperatorWithKernel { class DecayedAdagradOpMaker : public framework::OpProtoAndCheckerMaker { public: - DecayedAdagradOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor) Input parameter"); AddInput("Grad", "(Tensor) Input gradient"); AddInput("Moment", "(Tensor) Second moment"); diff --git a/paddle/fluid/operators/delete_var_op.cc b/paddle/fluid/operators/delete_var_op.cc index 1fe9404c00335edbe3594486f8c403e69f2ab08f..d7a9bfbc437dbf4c723b9c87ff62ec6b62c38638 100644 --- a/paddle/fluid/operators/delete_var_op.cc +++ b/paddle/fluid/operators/delete_var_op.cc @@ -34,8 +34,7 @@ class DeleteVarOp : public framework::OperatorBase { class DeleteVarOpInfoMaker : public framework::OpProtoAndCheckerMaker { public: - DeleteVarOpInfoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of delete op").AsDuplicable(); AddComment(R"DOC( Delete 
Operator. diff --git a/paddle/fluid/operators/detail/sendrecvop_utils.cc b/paddle/fluid/operators/detail/sendrecvop_utils.cc index d68cf467f7b0c6157dc1f69571e5d0c0b3c70348..1a8a1af20fa446dbd537944409ef0ca1e3e9116f 100644 --- a/paddle/fluid/operators/detail/sendrecvop_utils.cc +++ b/paddle/fluid/operators/detail/sendrecvop_utils.cc @@ -29,129 +29,127 @@ namespace paddle { namespace operators { namespace detail { +using VarMsg = sendrecv::VariableMessage; + +void GetTensorPayload(framework::Variable* var, + const platform::DeviceContext& ctx, VarMsg* request, + void** payload, size_t* payload_size) { + auto tensor = var->Get(); + // FIXME(wuyi): data types in send_recv.proto are copied from + // framework.proto + request->set_data_type( + static_cast(framework::ToDataType(tensor.type()))); + for (auto& dim : framework::vectorize(tensor.dims())) { + request->add_dims(dim); + } + const framework::LoD lod = tensor.lod(); + if (lod.size() > 0) { + request->set_lod_level(lod.size()); + for (auto& each : lod) { + VarMsg::LodData* lod_inner = request->add_lod(); + for (auto& d : each) { + lod_inner->add_lod_data(d); + } + } + } + if (platform::is_gpu_place(ctx.GetPlace())) { +#ifdef PADDLE_WITH_CUDA + PADDLE_ENFORCE(platform::is_gpu_place(tensor.place())); + platform::CPUPlace cpu; + auto& gpu_dev_ctx = static_cast(ctx); + auto copy_size = tensor.numel() * framework::SizeOfType(tensor.type()); + *payload = memory::Alloc(cpu, copy_size); + + memory::Copy(cpu, *payload, boost::get(tensor.place()), + reinterpret_cast(tensor.data()), copy_size, + gpu_dev_ctx.stream()); + ctx.Wait(); +#endif + } else { + *payload = tensor.data(); + } + *payload_size = tensor.numel() * framework::SizeOfType(tensor.type()); +} + +void GetSelectedRowsPayload(framework::Variable* var, + const platform::DeviceContext& ctx, VarMsg* request, + void** payload, size_t* payload_size) { + auto* slr = var->GetMutable(); + request->set_data_type( + static_cast(framework::ToDataType(slr->value().type()))); + request->set_lod_level(0); + request->set_slr_height(slr->height()); + + for (auto& dim : framework::vectorize(slr->value().dims())) { + request->add_dims(dim); + } + + auto* tensor = slr->mutable_value(); + if (platform::is_gpu_place(ctx.GetPlace())) { +#ifdef PADDLE_WITH_CUDA + platform::CPUPlace cpu; + auto& gpu_dev_ctx = static_cast(ctx); + auto copy_size = tensor->numel() * framework::SizeOfType(tensor->type()); + *payload = memory::Alloc(cpu, copy_size); + memory::Copy(cpu, *payload, + boost::get(tensor->place()), + reinterpret_cast(tensor->data()), copy_size, + gpu_dev_ctx.stream()); + ctx.Wait(); +#endif + } else { + *payload = slr->mutable_value()->data(); + } + *payload_size = tensor->numel() * framework::SizeOfType(tensor->type()); +} + void SerializeToByteBuffer(const std::string& name, framework::Variable* var, const platform::DeviceContext& ctx, ::grpc::ByteBuffer* msg, const std::string& out_name) { - using VarMsg = sendrecv::VariableMessage; - // When using GPU, need to free the copied CPU buffer - // when the ByteBuffer destroies - // TODO(typhoonzero): add unref here, if we have dependent - // parallelism execution, need to know when to free the tensor. + // Default DestroyCallback does nothing. When using GPU, + // the CPU buffer needs to be freed.
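// (Sketch) Wire format produced by the rewritten serializer, assuming the
// standard protobuf length-delimited framing that ProtoEncodeHelper emits:
//
//   [VariableMessage metadata, proto-encoded via request.AppendToString()]
//   [tag kSerializedFieldNumber][varint payload_size][payload bytes]
//   [tag kRowsFieldNumber][varint rows size][rows bytes]  // SelectedRows only
//
// The payload and rows buffers are attached as zero-copy gRPC slices
// (::grpc::Slice::STEAL_REF) rather than being copied into the proto message.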
DestroyCallback destroy_callback = [](void* backing) {}; - - auto buffer = std::unique_ptr(new char[1024]); - void* buf = buffer.get(); - + VarMsg request; void* payload = nullptr; size_t payload_size; - ProtoEncodeHelper e(static_cast(buf), 1024); + + request.set_varname(name); // Note: normally the profiler is enabled in 1 trainer, hence only // 1 trainer returns true for ShouldSendProfileState(). It tells PS // servers the trainer's profiling state so that PS can follow the // trainer. - if (platform::ShouldSendProfileState()) { - e.WriteBool(VarMsg::kProfileFieldNumber, platform::IsProfileEnabled()); + request.set_profile(platform::IsProfileEnabled()); + if (!out_name.empty()) { + request.set_out_varname(out_name); } - e.WriteString(VarMsg::kVarnameFieldNumber, name); if (var->IsType()) { - e.WriteUint64(VarMsg::kTypeFieldNumber, 0); + request.set_type(::sendrecv::LOD_TENSOR); + GetTensorPayload(var, ctx, &request, &payload, &payload_size); } else if (var->IsType()) { - e.WriteUint64(VarMsg::kTypeFieldNumber, 1); + request.set_type(::sendrecv::SELECTED_ROWS); + GetSelectedRowsPayload(var, ctx, &request, &payload, &payload_size); + } else { + PADDLE_THROW("Serialize does not support type: %s", + typeid(var->Type()).name()); } - if (!out_name.empty()) { - e.WriteString(VarMsg::kOutVarnameFieldNumber, out_name); + if (platform::is_gpu_place(ctx.GetPlace())) { + // GPU data is copied to CPU buffer when sending, + // free the buffer when possible. + destroy_callback = [](void* backing) { + platform::CPUPlace cpu; + memory::Free(cpu, backing); + }; } - switch (framework::ToVarType(var->Type())) { - case framework::proto::VarType_Type_LOD_TENSOR: { - auto tensor = var->Get(); - e.WriteUint64(VarMsg::kDataTypeFieldNumber, - framework::ToDataType(tensor.type())); - for (auto& dim : framework::vectorize(tensor.dims())) { - e.WriteUint64(VarMsg::kDimsFieldNumber, dim); - } - auto lod = tensor.lod(); // std::vector> - if (lod.size() > 0) { - e.WriteUint64(VarMsg::kLodLevelFieldNumber, lod.size()); - - for (auto& each : lod) { - e.WriteVarlengthBeginning(VarMsg::kLodFieldNumber, - 2 + // tag + varintlength of submessage - 1 + // kLodDataFieldNumber - each.size()); - // auto copied from GPU - for (auto& d : each) { - e.WriteUint64(VarMsg::LodData::kLodDataFieldNumber, d); - } - } - } - if (platform::is_gpu_place(ctx.GetPlace())) { -#ifdef PADDLE_WITH_CUDA - PADDLE_ENFORCE(platform::is_gpu_place(tensor.place())); - platform::CPUPlace cpu; - auto& gpu_dev_ctx = - static_cast(ctx); - auto copy_size = tensor.numel() * framework::SizeOfType(tensor.type()); - payload = memory::Alloc(cpu, copy_size); - - memory::Copy(cpu, payload, - boost::get(tensor.place()), - reinterpret_cast(tensor.data()), - copy_size, gpu_dev_ctx.stream()); - ctx.Wait(); - destroy_callback = [](void* backing) { - platform::CPUPlace cpu; - memory::Free(cpu, backing); - }; -#endif - } else { - payload = tensor.data(); - } - payload_size = tensor.numel() * framework::SizeOfType(tensor.type()); - e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber, payload_size); - } break; - case framework::proto::VarType_Type_SELECTED_ROWS: { - // TODO(typhoonzero): selectedrows implement should not use unique_ptr - auto* slr = var->GetMutable(); - e.WriteUint64(VarMsg::kDataTypeFieldNumber, - framework::ToDataType(slr->value().type())); - for (auto& dim : framework::vectorize(slr->value().dims())) { - e.WriteUint64(VarMsg::kDimsFieldNumber, dim); - } - e.WriteUint64(VarMsg::kLodLevelFieldNumber, 0); - e.WriteUint64(VarMsg::kSlrHeightFieldNumber, 
slr->height()); - auto* tensor = slr->mutable_value(); - if (platform::is_gpu_place(ctx.GetPlace())) { -#ifdef PADDLE_WITH_CUDA - platform::CPUPlace cpu; - auto& gpu_dev_ctx = - static_cast(ctx); - auto copy_size = - tensor->numel() * framework::SizeOfType(tensor->type()); - payload = memory::Alloc(cpu, copy_size); - memory::Copy(cpu, payload, - boost::get(tensor->place()), - reinterpret_cast(tensor->data()), - copy_size, gpu_dev_ctx.stream()); - ctx.Wait(); - destroy_callback = [](void* backing) { - platform::CPUPlace cpu; - memory::Free(cpu, backing); - }; -#endif - } else { - payload = slr->mutable_value()->data(); - } - payload_size = tensor->numel() * framework::SizeOfType(tensor->type()); - e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber, payload_size); - } break; - default: - PADDLE_THROW("Serialize does not support type: %s", - typeid(var->Type()).name()); - break; - } + std::string header; + request.AppendToString(&header); + auto buffer = std::unique_ptr(new char[1024]); + void* buf = buffer.get(); + ProtoEncodeHelper e(static_cast(buf), 1024); + e.WriteRawBytes(std::string(header.data(), header.size())); + e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber, payload_size); // steal reference of tensor data ::grpc::Slice slices[4]; // metadata, tensor, rows meta, rows int num_slices = 2; // only SelectedRows have rows buffer @@ -162,12 +160,9 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, static_cast(payload)), ::grpc::Slice::STEAL_REF); - if (framework::ToVarType(var->Type()) == - framework::proto::VarType_Type_SELECTED_ROWS) { + if (var->IsType()) { auto* slr = var->GetMutable(); - ProtoEncodeHelper e2(static_cast(buf), 128); - // NOTE: rows is of type int64_t size_t rows_memory_size = slr->rows().size() * framework::SizeOfType(typeid(int64_t)); e2.WriteVarlengthBeginning(VarMsg::kRowsFieldNumber, rows_memory_size); @@ -178,10 +173,7 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, grpc_slice_new_with_user_data( const_cast( reinterpret_cast(slr->rows().data())), - rows_memory_size, - [](void* backing) { - // TODO(typhoonzero): add unref here, same as above. 
- }, + rows_memory_size, [](void* backing) {}, const_cast( reinterpret_cast(slr->rows().data()))), ::grpc::Slice::STEAL_REF); diff --git a/paddle/fluid/operators/detail/serde_test.cc b/paddle/fluid/operators/detail/serde_test.cc index e9eaaf1cbcd07ed1c8d6fb0b025bc1f1500718fd..15892295e6901fe649788c9e34604008fc8cbdfa 100644 --- a/paddle/fluid/operators/detail/serde_test.cc +++ b/paddle/fluid/operators/detail/serde_test.cc @@ -117,11 +117,11 @@ void RunTestLodTensor(platform::Place place, int from_type = 0) { // serialize var to ByteBuffer framework::Variable var; auto* tensor = var.GetMutable(); - tensor->Resize(framework::make_ddim({4, 8, 4, 2})); + tensor->Resize(framework::make_ddim({512, 8, 4, 2})); framework::LoD lod; lod.push_back(framework::Vector({1, 3, 8})); tensor->set_lod(lod); - int tensor_numel = 4 * 8 * 4 * 2; + int tensor_numel = 512 * 8 * 4 * 2; platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& ctx = *pool.Get(place); tensor->mutable_data(place); @@ -142,7 +142,7 @@ void RunTestLodTensor(platform::Place place, int from_type = 0) { EXPECT_TRUE(varmsg.ParseFromString(tmp)); EXPECT_EQ(varmsg.varname(), "myvar"); EXPECT_EQ(varmsg.type(), 0); - EXPECT_EQ(varmsg.dims()[0], 4); + EXPECT_EQ(varmsg.dims()[0], 512); EXPECT_EQ(varmsg.dims()[1], 8); EXPECT_EQ(varmsg.dims()[2], 4); EXPECT_EQ(varmsg.dims()[3], 2); diff --git a/paddle/fluid/operators/detail/variable_response.cc b/paddle/fluid/operators/detail/variable_response.cc index f4a374d56d28a30201f0d482e97e1a40e7a8bf41..99602a05d023f30c2eed8df25e7534fdc9ef2ced 100644 --- a/paddle/fluid/operators/detail/variable_response.cc +++ b/paddle/fluid/operators/detail/variable_response.cc @@ -210,15 +210,15 @@ bool ParseLodData(::google::protobuf::io::CodedInputStream* input, } if (wt == WIRETYPE_LENGTH_DELIMITED) { - int length = 0; - if (!input->ReadVarintSizeAsInt(&length)) { + int num_bytes = 0; + if (!input->ReadVarintSizeAsInt(&num_bytes)) { return tag; } - - for (int i = 0; i < length; i++) { + int start_pos = input->CurrentPosition(); + while (input->CurrentPosition() - start_pos < num_bytes) { uint64_t v; if (!input->ReadVarint64(&v)) { - return false; + return tag; } lod->push_back(v); } @@ -275,8 +275,8 @@ int VariableResponse::Parse(Source* source) { break; } case sendrecv::VariableMessage::kTypeFieldNumber: { - uint64_t v; - if ((wt != WIRETYPE_VARINT) || !input.ReadVarint64(&v)) { + uint32_t v; + if ((wt != WIRETYPE_VARINT) || !input.ReadVarint32(&v)) { return tag; } @@ -284,8 +284,8 @@ int VariableResponse::Parse(Source* source) { break; } case sendrecv::VariableMessage::kDataTypeFieldNumber: { - uint64_t v = 0; - if ((wt != WIRETYPE_VARINT) || !input.ReadVarint64(&v)) { + uint32_t v = 0; + if ((wt != WIRETYPE_VARINT) || !input.ReadVarint32(&v)) { return tag; } @@ -305,11 +305,12 @@ int VariableResponse::Parse(Source* source) { // packed if (wt == WIRETYPE_LENGTH_DELIMITED) { - int length = 0; - if (!input.ReadVarintSizeAsInt(&length)) { + int num_bytes = 0; + if (!input.ReadVarintSizeAsInt(&num_bytes)) { return tag; } - for (int i = 0; i < length; i++) { + int start_pos = input.CurrentPosition(); + while (input.CurrentPosition() - start_pos < num_bytes) { uint64_t v; if (!input.ReadVarint64(&v)) { return tag; @@ -318,7 +319,6 @@ int VariableResponse::Parse(Source* source) { } break; } - return tag; } case sendrecv::VariableMessage::kLodLevelFieldNumber: { @@ -372,9 +372,9 @@ int VariableResponse::Parse(Source* source) { meta_.varname() != "", "meta info should be got first!"); - int length = 0; 
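// (Sketch) Rationale for the num_bytes loops in this file: a packed repeated
// varint field is delimited by its byte length, and varints have variable
// width, so the old `for (int i = 0; i < length; i++)` treated the byte count
// as an element count and over-read whenever any element was wider than one
// byte. Bounding the loop by consumed bytes instead, using the
// CodedInputStream calls already used above:
//
//   int num_bytes = 0;
//   if (!input->ReadVarintSizeAsInt(&num_bytes)) return tag;
//   int start_pos = input->CurrentPosition();
//   while (input->CurrentPosition() - start_pos < num_bytes) {
//     uint64_t v;
//     if (!input->ReadVarint64(&v)) return tag;
//     lod->push_back(v);
//   }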
+ int num_bytes = 0; if (wt != WIRETYPE_LENGTH_DELIMITED || - !ReadVarintSizeAsInt(&input, &length)) { + !ReadVarintSizeAsInt(&input, &num_bytes)) { return tag; } @@ -382,14 +382,14 @@ int VariableResponse::Parse(Source* source) { if (meta_.type() == sendrecv::LOD_TENSOR) { PADDLE_ENFORCE(meta_.lod_size() >= 0, "lod info should be got first!"); - if (!CopyLodTensorData(&input, *dev_ctx_, dims, length)) { + if (!CopyLodTensorData(&input, *dev_ctx_, dims, num_bytes)) { return tag; } break; } if (meta_.type() == sendrecv::SELECTED_ROWS) { - if (!CopySelectRowsTensorData(&input, *dev_ctx_, dims, length)) { + if (!CopySelectRowsTensorData(&input, *dev_ctx_, dims, num_bytes)) { return tag; } break; @@ -403,13 +403,13 @@ int VariableResponse::Parse(Source* source) { meta_.varname() != "", "meta info should be got first!"); - int length = 0; + int num_bytes = 0; if (wt != WIRETYPE_LENGTH_DELIMITED || - !ReadVarintSizeAsInt(&input, &length)) { + !ReadVarintSizeAsInt(&input, &num_bytes)) { return tag; } - if (!CopySelectRowsData(&input, *dev_ctx_, length)) { + if (!CopySelectRowsData(&input, *dev_ctx_, num_bytes)) { return tag; } break; diff --git a/paddle/fluid/operators/detection_map_op.cc b/paddle/fluid/operators/detection_map_op.cc index 38f43b6d031372948bd82c686a2d9ce5f8ecd07c..0ccf701b61349274ce0627dfeaf7cfad384215cd 100644 --- a/paddle/fluid/operators/detection_map_op.cc +++ b/paddle/fluid/operators/detection_map_op.cc @@ -78,8 +78,7 @@ class DetectionMAPOp : public framework::OperatorWithKernel { class DetectionMAPOpMaker : public framework::OpProtoAndCheckerMaker { public: - DetectionMAPOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("DetectRes", "(LoDTensor) A 2-D LoDTensor with shape [M, 6] represents the " "detections. Each row has 6 values: " diff --git a/paddle/fluid/operators/dropout_op.cc b/paddle/fluid/operators/dropout_op.cc index 4ed1b548840fabd2383632beb5f35fa6aa096443..07322e720f26213ea777be3cd22f2fead28507f0 100644 --- a/paddle/fluid/operators/dropout_op.cc +++ b/paddle/fluid/operators/dropout_op.cc @@ -37,8 +37,7 @@ class DropoutOp : public framework::OperatorWithKernel { class DropoutOpMaker : public framework::OpProtoAndCheckerMaker { public: - DropoutOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of dropout op."); AddOutput("Out", "The output of dropout op."); AddOutput("Mask", "The random sampled dropout mask.").AsIntermediate(); diff --git a/paddle/fluid/operators/edit_distance_op.cc b/paddle/fluid/operators/edit_distance_op.cc index c7f037d2df4372d0c4e3a261c0dff1fd6704d182..de25a3dab53492e38a92fbcf07ccbe43f7546950 100644 --- a/paddle/fluid/operators/edit_distance_op.cc +++ b/paddle/fluid/operators/edit_distance_op.cc @@ -49,8 +49,7 @@ class EditDistanceOp : public framework::OperatorWithKernel { class EditDistanceOpMaker : public framework::OpProtoAndCheckerMaker { public: - EditDistanceOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Hyps", "(2-D LoDTensor, 2nd dim. 
equal to 1) " "The indices for hypothesis strings."); diff --git a/paddle/fluid/operators/elementwise_add_op.cc b/paddle/fluid/operators/elementwise_add_op.cc index 4aab54f60236ecc5fa7f70e22f1553c3bfe68198..d2c20537136fc3ac9d1bece24a2238f26215c922 100644 --- a/paddle/fluid/operators/elementwise_add_op.cc +++ b/paddle/fluid/operators/elementwise_add_op.cc @@ -14,26 +14,8 @@ limitations under the License. */ #include "paddle/fluid/operators/elementwise_add_op.h" #include "paddle/fluid/operators/elementwise_op.h" - -namespace paddle { -namespace operators { -class ElementwiseAddOpMaker : public ElementwiseOpMaker { - public: - ElementwiseAddOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : ElementwiseOpMaker(proto, op_checker) { - SetComment("Add", "Out = X + Y"); - AddComment(comment_); - } -}; -} // namespace operators -} // namespace paddle - namespace ops = paddle::operators; -REGISTER_OPERATOR(elementwise_add, ops::ElementwiseOp, - ops::ElementwiseAddOpMaker, ops::ElementwiseOpInferVarType, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(elementwise_add_grad, ops::ElementwiseOpGrad); - +REGISTER_ELEMWISE_OP(elementwise_add, "Add", "Out = X + Y"); REGISTER_OP_CPU_KERNEL( elementwise_add, ops::ElementwiseAddKernel, diff --git a/paddle/fluid/operators/elementwise_div_op.cc b/paddle/fluid/operators/elementwise_div_op.cc index c7ddafcad1d1f6c14791fde665f43881d6b49836..824b1221e5a77c8799dc34820b7f0db180c2439e 100644 --- a/paddle/fluid/operators/elementwise_div_op.cc +++ b/paddle/fluid/operators/elementwise_div_op.cc @@ -14,26 +14,8 @@ limitations under the License. */ #include "paddle/fluid/operators/elementwise_div_op.h" #include "paddle/fluid/operators/elementwise_op.h" - -namespace paddle { -namespace operators { -class ElementwiseDivOpMaker : public ElementwiseOpMaker { - public: - ElementwiseDivOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : ElementwiseOpMaker(proto, op_checker) { - SetComment("Div", "Out = X / Y"); - AddComment(comment_); - } -}; - -} // namespace operators -} // namespace paddle - namespace ops = paddle::operators; -REGISTER_OPERATOR(elementwise_div, ops::ElementwiseOp, - ops::ElementwiseDivOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(elementwise_div_grad, ops::ElementwiseOpGrad); +REGISTER_ELEMWISE_OP(elementwise_div, "Div", "Out = X / Y"); REGISTER_OP_CPU_KERNEL( elementwise_div, ops::ElementwiseDivKernel, diff --git a/paddle/fluid/operators/elementwise_max_op.cc b/paddle/fluid/operators/elementwise_max_op.cc index a4fe386bb1907bf7c0099d2b1109077b21146948..411671335a19ae2283ca9db8b8f6bcbb6a6b630a 100644 --- a/paddle/fluid/operators/elementwise_max_op.cc +++ b/paddle/fluid/operators/elementwise_max_op.cc @@ -14,25 +14,8 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/elementwise_max_op.h" #include "paddle/fluid/operators/elementwise_op.h" - -namespace paddle { -namespace operators { -class ElementwiseMaxOpMaker : public ElementwiseOpMaker { - public: - ElementwiseMaxOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : ElementwiseOpMaker(proto, op_checker) { - SetComment("Max", "Out = max(X, Y)"); - AddComment(comment_); - } -}; -} // namespace operators -} // namespace paddle - namespace ops = paddle::operators; -REGISTER_OPERATOR(elementwise_max, ops::ElementwiseOp, - ops::ElementwiseMaxOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(elementwise_max_grad, ops::ElementwiseOpGrad); +REGISTER_ELEMWISE_OP(elementwise_max, "Max", "Out = max(X, Y)"); REGISTER_OP_CPU_KERNEL( elementwise_max, ops::ElementwiseMaxKernel, diff --git a/paddle/fluid/operators/elementwise_min_op.cc b/paddle/fluid/operators/elementwise_min_op.cc index 68cd6ddb4a938b2b1c33e3f89c6d1151acb27f48..816192083d2275b26e6dd9afc76f2c021a01cf73 100644 --- a/paddle/fluid/operators/elementwise_min_op.cc +++ b/paddle/fluid/operators/elementwise_min_op.cc @@ -14,25 +14,8 @@ limitations under the License. */ #include "paddle/fluid/operators/elementwise_min_op.h" #include "paddle/fluid/operators/elementwise_op.h" - -namespace paddle { -namespace operators { -class ElementwiseMinOpMaker : public ElementwiseOpMaker { - public: - ElementwiseMinOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : ElementwiseOpMaker(proto, op_checker) { - SetComment("Max", "Out = min(X, Y)"); - AddComment(comment_); - } -}; -} // namespace operators -} // namespace paddle - namespace ops = paddle::operators; -REGISTER_OPERATOR(elementwise_min, ops::ElementwiseOp, - ops::ElementwiseMinOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(elementwise_min_grad, ops::ElementwiseOpGrad); +REGISTER_ELEMWISE_OP(elementwise_min, "Min", "Out = min(X, Y)"); REGISTER_OP_CPU_KERNEL( elementwise_min, ops::ElementwiseMinKernel, diff --git a/paddle/fluid/operators/elementwise_mul_op.cc b/paddle/fluid/operators/elementwise_mul_op.cc index 2dec27136ad57ea032d5abb51799bd04ccc0b2e3..ba343909bb87b4f2efa56c0a4ff664b278e90c60 100644 --- a/paddle/fluid/operators/elementwise_mul_op.cc +++ b/paddle/fluid/operators/elementwise_mul_op.cc @@ -14,27 +14,8 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/elementwise_mul_op.h" #include "paddle/fluid/operators/elementwise_op.h" - -namespace paddle { -namespace operators { - -class ElementwiseMulOpMaker : public ElementwiseOpMaker { - public: - ElementwiseMulOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : ElementwiseOpMaker(proto, op_checker) { - SetComment("Mul", "Out = X \\odot\\ Y"); - AddComment(comment_); - } -}; - -} // namespace operators -} // namespace paddle - namespace ops = paddle::operators; -REGISTER_OPERATOR(elementwise_mul, ops::ElementwiseOp, - ops::ElementwiseMulOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(elementwise_mul_grad, ops::ElementwiseOpGrad); +REGISTER_ELEMWISE_OP(elementwise_mul, "Mul", "Out = X \\odot\\ Y"); REGISTER_OP_CPU_KERNEL( elementwise_mul, ops::ElementwiseMulKernel, diff --git a/paddle/fluid/operators/elementwise_op.h b/paddle/fluid/operators/elementwise_op.h index a33634ab2503f988a8a692682ddb238d4794a3c0..d5b57cc2524efcdee112b2ce41cdcd4697fb79e6 100644 --- a/paddle/fluid/operators/elementwise_op.h +++ b/paddle/fluid/operators/elementwise_op.h @@ -54,8 +54,7 @@ class ElementwiseOpInferVarType : public framework::VarTypeInference { class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker { public: - ElementwiseOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() final { AddInput("X", "(Tensor), The first input tensor of elementwise op."); AddInput("Y", "(Tensor), The second input tensor of elementwise op."); AddOutput("Out", "The output of elementwise op."); @@ -64,12 +63,12 @@ class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker { "for broadcasting Y onto X.") .SetDefault(-1) .EqualGreaterThan(-1); - comment_ = R"DOC( -Limited Elementwise {name} Operator. + AddComment(string::Sprintf(R"DOC( +Limited Elementwise %s Operator. The equation is: -$${equation}$$ +$$%s$$ $X$ is a tensor of any dimension and the dimensions of tensor $Y$ must be smaller than or equal to the dimensions of $X$. @@ -100,26 +99,13 @@ For example Either of the inputs $X$ and $Y$ or none can carry the LoD (Level of Details) information. However, the output only shares the LoD information with input $X$. 
-)DOC"; - AddComment(comment_); +)DOC", + GetName(), GetEquation())); } protected: - std::string comment_; - - void Replace(std::string* src, std::string from, std::string to) { - std::size_t len_from = std::strlen(from.c_str()); - std::size_t len_to = std::strlen(to.c_str()); - for (std::size_t pos = src->find(from); pos != std::string::npos; - pos = src->find(from, pos + len_to)) { - src->replace(pos, len_from, to); - } - } - - void SetComment(std::string name, std::string equation) { - Replace(&comment_, "{name}", name); - Replace(&comment_, "{equation}", equation); - } + virtual std::string GetName() const = 0; + virtual std::string GetEquation() const = 0; }; class ElementwiseOpGrad : public framework::OperatorWithKernel { @@ -152,3 +138,16 @@ class ElementwiseOpGrad : public framework::OperatorWithKernel { }; } // namespace operators } // namespace paddle + +#define REGISTER_ELEMWISE_OP(op_type, op_name, equation) \ + class __ElemwiseOp##op_type##Maker__ \ + : public ::paddle::operators::ElementwiseOpMaker { \ + protected: \ + virtual std::string GetName() const { return op_name; } \ + virtual std::string GetEquation() const { return equation; } \ + }; \ + REGISTER_OPERATOR(op_type, ::paddle::operators::ElementwiseOp, \ + __ElemwiseOp##op_type##Maker__, \ + ::paddle::operators::ElementwiseOpInferVarType, \ + ::paddle::framework::DefaultGradOpDescMaker); \ + REGISTER_OPERATOR(op_type##_grad, ::paddle::operators::ElementwiseOpGrad) diff --git a/paddle/fluid/operators/elementwise_pow_op.cc b/paddle/fluid/operators/elementwise_pow_op.cc index 60302c5e59f8ce595861405713045b05d90002e3..5fd6bde9ba0930e29f2161f1ff23ff9f5e7dc85d 100644 --- a/paddle/fluid/operators/elementwise_pow_op.cc +++ b/paddle/fluid/operators/elementwise_pow_op.cc @@ -13,17 +13,15 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/elementwise_pow_op.h" +#include #include "paddle/fluid/operators/elementwise_op.h" namespace paddle { namespace operators { class ElementwisePowOpMaker : public ElementwiseOpMaker { - public: - ElementwisePowOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : ElementwiseOpMaker(proto, op_checker) { - SetComment("Pow", "Out = X ^ Y"); - AddComment(comment_); - } + protected: + std::string GetName() const override { return "Pow"; } + std::string GetEquation() const override { return "Out = X ^ Y"; } }; } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/elementwise_sub_op.cc b/paddle/fluid/operators/elementwise_sub_op.cc index 9d0598fc39a3922fa830f18729d90a7dac6a890b..a7562b166b373ee2a8c9b6f379431d88d3e45fcb 100644 --- a/paddle/fluid/operators/elementwise_sub_op.cc +++ b/paddle/fluid/operators/elementwise_sub_op.cc @@ -14,25 +14,8 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/elementwise_sub_op.h" #include "paddle/fluid/operators/elementwise_op.h" - -namespace paddle { -namespace operators { -class ElementwiseSubOpMaker : public ElementwiseOpMaker { - public: - ElementwiseSubOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : ElementwiseOpMaker(proto, op_checker) { - SetComment("Sub", "Out = X - Y"); - AddComment(comment_); - } -}; -} // namespace operators -} // namespace paddle - namespace ops = paddle::operators; -REGISTER_OPERATOR(elementwise_sub, ops::ElementwiseOp, - ops::ElementwiseSubOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(elementwise_sub_grad, ops::ElementwiseOpGrad); +REGISTER_ELEMWISE_OP(elementwise_sub, "Sub", "Out = X - Y"); REGISTER_OP_CPU_KERNEL( elementwise_sub, ops::ElementwiseSubKernel, diff --git a/paddle/fluid/operators/expand_op.cc b/paddle/fluid/operators/expand_op.cc index 4ae91d074d3df8b910a7f5d816a22b6f1d51dff6..5ad0ec251328cc1ba580026bb47bf05316e7dc77 100644 --- a/paddle/fluid/operators/expand_op.cc +++ b/paddle/fluid/operators/expand_op.cc @@ -56,8 +56,7 @@ class ExpandOp : public framework::OperatorWithKernel { class ExpandOpMaker : public framework::OpProtoAndCheckerMaker { public: - ExpandOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor, default Tensor). A tensor with rank in [1, 6]." "X is the input to be expanded."); diff --git a/paddle/fluid/operators/fc_op.cc b/paddle/fluid/operators/fc_op.cc index 45e4d5b2b863a55ae0aa0414ff8697141fd2aa6f..8843a1c44b7004ba5d7935f75d3c99d9c30fc6c0 100644 --- a/paddle/fluid/operators/fc_op.cc +++ b/paddle/fluid/operators/fc_op.cc @@ -72,8 +72,7 @@ framework::OpKernelType FCOpGrad::GetExpectedKernelType( layout, library); } -FCOpMaker::FCOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { +void FCOpMaker::Make() { AddInput("Input", "(Tensor) The input tensor of fully connected operator. "); AddInput("W", "(Tensor), The second input tensor of fc op."); AddOutput("Out", "(Tensor) The output tensor of fully connected operator. 
"); diff --git a/paddle/fluid/operators/fc_op.h b/paddle/fluid/operators/fc_op.h index 70fa96440d344397a7427c1338afee85bde923d4..e1b780fc0c401fbf34a9db03aa31137cbc016939 100644 --- a/paddle/fluid/operators/fc_op.h +++ b/paddle/fluid/operators/fc_op.h @@ -45,7 +45,7 @@ class FCOpGrad : public framework::OperatorWithKernel { class FCOpMaker : public framework::OpProtoAndCheckerMaker { public: - FCOpMaker(OpProto* proto, OpAttrChecker* op_checker); + void Make() override; }; } // namespace operators diff --git a/paddle/fluid/operators/feed_op.cc b/paddle/fluid/operators/feed_op.cc index debacf07c360b9aa69000a0d891f04239ed08807..bcb3e63ed7dbc775c1de6c4522f0548ea48a6cf0 100644 --- a/paddle/fluid/operators/feed_op.cc +++ b/paddle/fluid/operators/feed_op.cc @@ -66,8 +66,7 @@ class FeedOp : public framework::OperatorBase { class FeedOpInfoMaker : public framework::OpProtoAndCheckerMaker { public: - FeedOpInfoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of feed op"); AddOutput("Out", "The output of feed op"); AddAttr("col", "(int) The column of feed"); diff --git a/paddle/fluid/operators/fetch_op.cc b/paddle/fluid/operators/fetch_op.cc index 18deec58137676a0b2c8d559e49d0f7a840cd5ba..1640a2a22c69a0e3ab81a2889d6105b2cf4162b7 100644 --- a/paddle/fluid/operators/fetch_op.cc +++ b/paddle/fluid/operators/fetch_op.cc @@ -66,8 +66,7 @@ class FetchOp : public framework::OperatorBase { class FetchOpInfoMaker : public framework::OpProtoAndCheckerMaker { public: - FetchOpInfoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of fetch op"); AddOutput("Out", "The output of fetch op"); AddAttr("col", "(int) The column of fetch"); diff --git a/paddle/fluid/operators/fill_constant_batch_size_like_op.cc b/paddle/fluid/operators/fill_constant_batch_size_like_op.cc index 72da80baaf9bb3286f09b7ae5fcf24326b391906..1ae78675a0cac8a72aeaef1227b631a41e4a10b2 100644 --- a/paddle/fluid/operators/fill_constant_batch_size_like_op.cc +++ b/paddle/fluid/operators/fill_constant_batch_size_like_op.cc @@ -30,9 +30,8 @@ class FillConstantBatchSizeLikeOp : public BatchSizeLikeOp { }; class FillConstantBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker { - public: - FillConstantBatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : BatchSizeLikeOpMaker(proto, op_checker) { + protected: + void Apply() override { AddAttr("dtype", "(int, default 5 (FP32)) " "Output data type") diff --git a/paddle/fluid/operators/fill_constant_op.cc b/paddle/fluid/operators/fill_constant_op.cc index 07e0a80f8d644d4d011f2821785d49ece6cecfb5..130f18dde4f979a6a9925ede9cbf745fcec14d48 100644 --- a/paddle/fluid/operators/fill_constant_op.cc +++ b/paddle/fluid/operators/fill_constant_op.cc @@ -59,8 +59,7 @@ class FillConstantOp : public framework::OperatorBase { class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker { public: - FillConstantOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddAttr("dtype", "(int, default 5 (FP32)) " "Output data type") diff --git a/paddle/fluid/operators/fill_op.cc b/paddle/fluid/operators/fill_op.cc index ee8a2fc353f86cdabd35459a9195c3aa35f63e31..925dc19061e2196a40411f415eb6e5ad59ab52ff 100644 --- a/paddle/fluid/operators/fill_op.cc +++ b/paddle/fluid/operators/fill_op.cc @@ -82,8 +82,7 @@ class FillOp : public 
framework::OperatorBase { class FillOpMaker : public framework::OpProtoAndCheckerMaker { public: - FillOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddComment(R"DOC(Fill operator Fill an tensor with `value` and `shape`. The type of the tensor is specify by diff --git a/paddle/fluid/operators/fill_zeros_like_op.cc b/paddle/fluid/operators/fill_zeros_like_op.cc index 58c814ba6413626a48310da595a13238994f5ef1..d67bec36b3248be8602da562a88aeb58f5effe39 100644 --- a/paddle/fluid/operators/fill_zeros_like_op.cc +++ b/paddle/fluid/operators/fill_zeros_like_op.cc @@ -33,8 +33,7 @@ class FillZerosLikeOp : public framework::OperatorWithKernel { class FillZerosLikeOpMaker : public framework::OpProtoAndCheckerMaker { public: - FillZerosLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of fill-zeros-like op."); AddOutput("Out", "The variable will be filled up with zeros."); AddComment(R"DOC( diff --git a/paddle/fluid/operators/ftrl_op.cc b/paddle/fluid/operators/ftrl_op.cc index cbdcce9beb3fafb0775d0b5fc39cb381ad128d0c..70ba25c213046cc934f46be067080d5fdbb42f9e 100644 --- a/paddle/fluid/operators/ftrl_op.cc +++ b/paddle/fluid/operators/ftrl_op.cc @@ -64,8 +64,7 @@ class FTRLOp : public framework::OperatorWithKernel { class FTRLOpMaker : public framework::OpProtoAndCheckerMaker { public: - FTRLOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor, default Tensor) " "Input parameter value that has to be updated."); diff --git a/paddle/fluid/operators/gather_op.cc b/paddle/fluid/operators/gather_op.cc index 4c82f5c429038504d9876ee240a705911feb0b7a..e21b57258928856a10d6e86c3e2c6e81fb241ee3 100644 --- a/paddle/fluid/operators/gather_op.cc +++ b/paddle/fluid/operators/gather_op.cc @@ -67,8 +67,7 @@ class GatherGradOp : public framework::OperatorWithKernel { class GatherOpMaker : public framework::OpProtoAndCheckerMaker { public: - GatherOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The source input of gather op"); AddInput("Index", "The index input of gather op"); AddOutput("Out", "The output of gather op"); diff --git a/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc b/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc index 53c706a83e5bfb9e93d485141314e8b652d73593..8050f61d4546f3351645f23ddcc63b2c49f17929 100644 --- a/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc +++ b/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc @@ -32,9 +32,8 @@ class GaussianRandomBatchSizeLikeOp : public BatchSizeLikeOp { }; class GaussianRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker { - public: - GaussianRandomBatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : BatchSizeLikeOpMaker(proto, op_checker) { + protected: + void Apply() override { AddAttr("mean", "(float, default 0.0) " "mean of random tensor.") diff --git a/paddle/fluid/operators/gaussian_random_op.cc b/paddle/fluid/operators/gaussian_random_op.cc index 4d197637b3f49f7e63f5b1a5cba212d1bf774f7e..815c1bb50988be49ca9996e368a59344c6583d58 100644 --- a/paddle/fluid/operators/gaussian_random_op.cc +++ b/paddle/fluid/operators/gaussian_random_op.cc @@ -70,8 +70,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel { 
class GaussianRandomOpMaker : public framework::OpProtoAndCheckerMaker { public: - GaussianRandomOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddOutput("Out", "Output matrix of gaussian random op"); AddAttr>("shape", diff --git a/paddle/fluid/operators/get_places_op.cc b/paddle/fluid/operators/get_places_op.cc index 0d7219ac5c624236b85916d5faf6810dbed2198a..eafc364a15fa17cc5107bba737b0b44e712b0bef 100644 --- a/paddle/fluid/operators/get_places_op.cc +++ b/paddle/fluid/operators/get_places_op.cc @@ -78,8 +78,7 @@ class GetPlacesOp : public framework::OperatorBase { class GetPlacesOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - GetPlacesOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddOutput("Out", "vector of Place"); AddAttr("device_count", "device count").SetDefault(0); AddAttr("device_type", "device type") diff --git a/paddle/fluid/operators/go_op.cc b/paddle/fluid/operators/go_op.cc index b8e1556c23a3b7357ed56d1b83c09622559040a4..48f9d967adc90838dc4c7a09bfaf5a5a1ac9c99b 100644 --- a/paddle/fluid/operators/go_op.cc +++ b/paddle/fluid/operators/go_op.cc @@ -89,8 +89,7 @@ class GoOp : public framework::OperatorBase { class GoOpMaker : public framework::OpProtoAndCheckerMaker { public: - GoOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(kX, "A set of variables, which are required by operators inside the " "block of Go Op.") diff --git a/paddle/fluid/operators/gru_op.cc b/paddle/fluid/operators/gru_op.cc index 0a524c914d305661745c5d85cbbee2edb57c97ba..5c746878823b3dcde2573feec00d3d9dac5ceab8 100644 --- a/paddle/fluid/operators/gru_op.cc +++ b/paddle/fluid/operators/gru_op.cc @@ -71,8 +71,7 @@ class GRUOp : public framework::OperatorWithKernel { class GRUOpMaker : public framework::OpProtoAndCheckerMaker { public: - GRUOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Input", "(LoDTensor) The first input is a LodTensor, which supports " "variable-time length input sequence. 
The underlying tensor in " diff --git a/paddle/fluid/operators/gru_unit_op.cc b/paddle/fluid/operators/gru_unit_op.cc index f8d1d44b5423dd09fe5aad11434911af6f14fe77..82a808b01e99ec33b0ca00a065fb301d3c633b19 100644 --- a/paddle/fluid/operators/gru_unit_op.cc +++ b/paddle/fluid/operators/gru_unit_op.cc @@ -71,8 +71,7 @@ class GRUUnitOp : public framework::OperatorWithKernel { class GRUUnitOpMaker : public framework::OpProtoAndCheckerMaker { public: - GRUUnitOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Input", "(Tensor) Matrix with shape [batch_size, frame_size * 3] for the " "input."); diff --git a/paddle/fluid/operators/hinge_loss_op.cc b/paddle/fluid/operators/hinge_loss_op.cc index 086b5a97dec9a3d5b8f91b802b92d64ca73bf57c..69e7fa4490b892373d85898b13b976a474a6096a 100644 --- a/paddle/fluid/operators/hinge_loss_op.cc +++ b/paddle/fluid/operators/hinge_loss_op.cc @@ -46,8 +46,7 @@ class HingeLossOp : public framework::OperatorWithKernel { template class HingeLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - HingeLossOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Logits", "The input value (Logits) of Hinge loss op." "Logits is a 2-D tensor with shape [batch_size, 1]."); diff --git a/paddle/fluid/operators/huber_loss_op.cc b/paddle/fluid/operators/huber_loss_op.cc index 74d8e0e2b76adc7a3e69649f277a8c0df6f38056..4ecd8634ff41ff4eba6b5ed1d0fc78068190dce5 100644 --- a/paddle/fluid/operators/huber_loss_op.cc +++ b/paddle/fluid/operators/huber_loss_op.cc @@ -45,8 +45,7 @@ class HuberLossOp : public framework::OperatorWithKernel { template class HuberLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - HuberLossOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input value of huber loss op." "X is a 2-D tensor with shape [batch_size, 1]."); diff --git a/paddle/fluid/operators/im2sequence_op.cc b/paddle/fluid/operators/im2sequence_op.cc index 8c120eec86601146500721bbb4249bc458190093..0669661d225c664010fce97f0a526b62988b92c5 100644 --- a/paddle/fluid/operators/im2sequence_op.cc +++ b/paddle/fluid/operators/im2sequence_op.cc @@ -54,8 +54,7 @@ class Im2SequenceOp : public framework::OperatorWithKernel { class Im2SequenceOpMaker : public framework::OpProtoAndCheckerMaker { public: - Im2SequenceOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) The input tensor has NCHW format." 
"N: batch size" diff --git a/paddle/fluid/operators/increment_op.cc b/paddle/fluid/operators/increment_op.cc index d8c97b27b328b1470bece4a6c1872b5ccc75115e..f0ffc9706689f5afe4546c3483114b38bc2b7872 100644 --- a/paddle/fluid/operators/increment_op.cc +++ b/paddle/fluid/operators/increment_op.cc @@ -47,8 +47,7 @@ class IncrementOp : public framework::OperatorWithKernel { class IncrementOpMaker : public framework::OpProtoAndCheckerMaker { public: - IncrementOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) The input tensor of increment operator"); AddOutput("Out", "(Tensor) The output tensor of increment operator."); AddAttr("step", diff --git a/paddle/fluid/operators/iou_similarity_op.cc b/paddle/fluid/operators/iou_similarity_op.cc index 4b78ec510d1fb73592ee8af9a641622f4d713f8d..007e0af7a5a9fd8c562218e641e1867388b081f1 100644 --- a/paddle/fluid/operators/iou_similarity_op.cc +++ b/paddle/fluid/operators/iou_similarity_op.cc @@ -42,8 +42,7 @@ class IOUSimilarityOp : public framework::OperatorWithKernel { class IOUSimilarityOpMaker : public framework::OpProtoAndCheckerMaker { public: - IOUSimilarityOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor, default LoDTensor) " "Box list X is a 2-D LoDTensor with shape [N, 4] holds N boxes, " diff --git a/paddle/fluid/operators/is_empty_op.cc b/paddle/fluid/operators/is_empty_op.cc index 2a7be90dab1cc23ffe5e1c296c37a4bbeacb7d8e..d3f3ad92442cafdd8d4cdc396d89721863d069c2 100644 --- a/paddle/fluid/operators/is_empty_op.cc +++ b/paddle/fluid/operators/is_empty_op.cc @@ -48,8 +48,7 @@ class IsEmptyOp : public framework::OperatorBase { class IsEmptyOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - IsEmptyOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(kInput, "(Tensor) Tensor which is to be checked."); AddOutput(kOutput, "(Tensor) a boolean Tensor that indicate empty or not."); AddComment(R"DOC( diff --git a/paddle/fluid/operators/l1_norm_op.cc b/paddle/fluid/operators/l1_norm_op.cc index 0c143b7c8aed13a202e2597632d17d8bccc8b66d..bc115090acb473ac3175999ca96c5e00c0aeaeae 100644 --- a/paddle/fluid/operators/l1_norm_op.cc +++ b/paddle/fluid/operators/l1_norm_op.cc @@ -48,8 +48,7 @@ class L1NormGradOp : public framework::OperatorWithKernel { class L1NormOpMaker : public framework::OpProtoAndCheckerMaker { public: - L1NormOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) The input of l1_norm op."); AddOutput("Out", "(Scalar) The output of l1_norm op."); AddComment(R"DOC( diff --git a/paddle/fluid/operators/label_smooth_op.cc b/paddle/fluid/operators/label_smooth_op.cc index a73c626032f3bf6e97ac5974424e76bacb9a0799..da59bd53bce010d0d6ad2ab14acaffb9cc2f99e6 100644 --- a/paddle/fluid/operators/label_smooth_op.cc +++ b/paddle/fluid/operators/label_smooth_op.cc @@ -47,8 +47,7 @@ class LabelSmoothOp : public framework::OperatorWithKernel { class LabelSmoothOpMaker : public framework::OpProtoAndCheckerMaker { public: - LabelSmoothOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor) The input labels of LabelSmooth operator. 
This " "input can be batched labels in one-hot encoding or output from " diff --git a/paddle/fluid/operators/layer_norm_op.cc b/paddle/fluid/operators/layer_norm_op.cc index de1056aef7bfa2f53f8a92b262e7d15aa7c2b75c..ab097d31e9ab5eafa788539170e7e405df697625 100644 --- a/paddle/fluid/operators/layer_norm_op.cc +++ b/paddle/fluid/operators/layer_norm_op.cc @@ -61,8 +61,7 @@ class LayerNormOp : public framework::OperatorWithKernel { class LayerNormOpMaker : public framework::OpProtoAndCheckerMaker { public: - LayerNormOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor) The input tensor."); AddInput("Scale", "(Tensor, optional) Scale is a 1-dimensional tensor of size " diff --git a/paddle/fluid/operators/linear_chain_crf_op.cc b/paddle/fluid/operators/linear_chain_crf_op.cc index 2f29e377fdada918f2c9dca8c2d94eb06278320d..e38525cd7f44de020f364ffd16e71a439048347f 100644 --- a/paddle/fluid/operators/linear_chain_crf_op.cc +++ b/paddle/fluid/operators/linear_chain_crf_op.cc @@ -19,8 +19,7 @@ namespace operators { class LinearChainCRFOpMaker : public framework::OpProtoAndCheckerMaker { public: - LinearChainCRFOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Emission", "(LoDTensor, default LoDTensor) " "A 2-D LoDTensor with shape [N x D], where N is the size of the " diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 8acbf820250957163397342c645b333f0da0801c..a29e0cd52cfccf242a6490822234045e6eb66c0f 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -343,8 +343,7 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker { public: - ListenAndServOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddInput("X", "(Tensor) Variables that server recv.").AsDuplicable(); AddComment(R"DOC( ListenAndServ operator diff --git a/paddle/fluid/operators/load_combine_op.cc b/paddle/fluid/operators/load_combine_op.cc index e5353144e91455fc71460459e6e799b54f750f71..b5522dd246f250f02d69c0ba749ae6043eb810d6 100644 --- a/paddle/fluid/operators/load_combine_op.cc +++ b/paddle/fluid/operators/load_combine_op.cc @@ -77,8 +77,7 @@ class LoadCombineOp : public framework::OperatorBase { class LoadCombineOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - LoadCombineOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddOutput( "Out", "(vector) The output LoDTensors that will be read from the input file.") diff --git a/paddle/fluid/operators/load_op.cc b/paddle/fluid/operators/load_op.cc index abf7becb2e7fc134e3a52ec4c118847c14a20b9c..93f45cff8a26201b1fbb1c44141e125a67c44037 100644 --- a/paddle/fluid/operators/load_op.cc +++ b/paddle/fluid/operators/load_op.cc @@ -73,8 +73,7 @@ class LoadOp : public framework::OperatorBase { class LoadOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - LoadOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddOutput("Out", "(Tensor) The tensor need to be loaded"); AddAttr( "load_as_fp16", diff --git a/paddle/fluid/operators/lod_array_length_op.cc 
b/paddle/fluid/operators/lod_array_length_op.cc index e6212405770093455ec89bde9dc0a092b956fc83..e4551b8ba681fe92ac5f21bb0b509f43439f6b66 100644 --- a/paddle/fluid/operators/lod_array_length_op.cc +++ b/paddle/fluid/operators/lod_array_length_op.cc @@ -40,8 +40,7 @@ class LoDArrayLengthOp : public framework::OperatorBase { class LoDArrayLengthProtoMaker : public framework::OpProtoAndCheckerMaker { public: - LoDArrayLengthProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensorArray) The input tensor array."); AddOutput("Out", "(Tensor) 1x1 CPU Tensor of length, int64_t"); AddComment(R"DOC( diff --git a/paddle/fluid/operators/lod_rank_table_op.cc b/paddle/fluid/operators/lod_rank_table_op.cc index 590b44e14f518c3c60c141c9a0dfe7f2b96f69c6..166952fe23192799443ef9c9d1f7ba5056d19290 100644 --- a/paddle/fluid/operators/lod_rank_table_op.cc +++ b/paddle/fluid/operators/lod_rank_table_op.cc @@ -38,8 +38,7 @@ class LoDRankTableOp : public framework::OperatorBase { class LoDRankTableOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - LoDRankTableOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor) input lod tensor, must contain lod information."); AddOutput("Out", "(LoDRankTable) The rank table of specific level."); diff --git a/paddle/fluid/operators/lod_reset_op.cc b/paddle/fluid/operators/lod_reset_op.cc index 92ebfc274b84f738f5bd688a9a6d9f437b6318aa..0d4e84e85083399e3803d0648dc7a10aa276d536 100644 --- a/paddle/fluid/operators/lod_reset_op.cc +++ b/paddle/fluid/operators/lod_reset_op.cc @@ -47,8 +47,7 @@ class LoDResetOp : public framework::OperatorWithKernel { class LoDResetOpMaker : public framework::OpProtoAndCheckerMaker { public: - LoDResetOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor, LoDTensor) Input variable of LoDResetOp which " "could be a Tensor or LoDTensor, where the data of output " diff --git a/paddle/fluid/operators/lod_tensor_to_array_op.cc b/paddle/fluid/operators/lod_tensor_to_array_op.cc index 543495ce4e66c0955c9ce1b0db480088069b36db..00ba5ce8ee5e4084c8af204cfc37fe80c437f0d7 100644 --- a/paddle/fluid/operators/lod_tensor_to_array_op.cc +++ b/paddle/fluid/operators/lod_tensor_to_array_op.cc @@ -105,8 +105,7 @@ class LoDTensorToArrayOp : public framework::OperatorBase { class LoDTensorToArrayOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - LoDTensorToArrayOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", ""); AddInput("RankTable", ""); AddOutput("Out", ""); diff --git a/paddle/fluid/operators/log_loss_op.cc b/paddle/fluid/operators/log_loss_op.cc index a8258a1afd70574c174abe8d5630ade5d4ac3de6..9d248e03218b83a65b9786cb317aafbe3dbb67ee 100644 --- a/paddle/fluid/operators/log_loss_op.cc +++ b/paddle/fluid/operators/log_loss_op.cc @@ -46,8 +46,7 @@ class LogLossOp : public framework::OperatorWithKernel { template class LogLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - LogLossOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Predicted", "The input value (Predicted) of Log loss op." 
"Predicted is a 2-D tensor with shape [batch_size, 1]."); diff --git a/paddle/fluid/operators/logical_op.cc b/paddle/fluid/operators/logical_op.cc index 41aa00ee8ac10e0776c066fc3c37f97b0dd40cc3..db109f5cd053d84718ac85bd4693ecece12ce172 100644 --- a/paddle/fluid/operators/logical_op.cc +++ b/paddle/fluid/operators/logical_op.cc @@ -21,8 +21,7 @@ namespace operators { template class BinaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - BinaryLogicalOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { OpComment comment; AddInput("X", string::Sprintf("(LoDTensor) Left hand operand of %s operator", @@ -45,8 +44,7 @@ Each element of Out is calculated by %s template class UnaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - UnaryLogicalOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { OpComment comment; AddInput("X", string::Sprintf("(LoDTensor) Operand of %s operator", comment.type)); diff --git a/paddle/fluid/operators/lookup_sparse_table_op.cc b/paddle/fluid/operators/lookup_sparse_table_op.cc index 66b626ed792ddec9d57fcf6c81655dffcc23ca99..d07a81968565f095cdb6425d104bc7a11bc9cfad 100644 --- a/paddle/fluid/operators/lookup_sparse_table_op.cc +++ b/paddle/fluid/operators/lookup_sparse_table_op.cc @@ -105,8 +105,7 @@ class LookupSparseTableOp : public framework::OperatorBase { class LookupSparseTableOpMaker : public framework::OpProtoAndCheckerMaker { public: - LookupSparseTableOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("W", "(SelectedRows) The input represents embedding table, " "which is a learnable parameter."); diff --git a/paddle/fluid/operators/lookup_table_op.cc b/paddle/fluid/operators/lookup_table_op.cc index 5e59bd1b178ad1803f6f70c5f3f9fd7af495ac3c..bda499432214b8841c8dfc406ee45ca0367920e7 100644 --- a/paddle/fluid/operators/lookup_table_op.cc +++ b/paddle/fluid/operators/lookup_table_op.cc @@ -58,8 +58,7 @@ class LookupTableOp : public framework::OperatorWithKernel { class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker { public: - LookupTableOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("W", "(Tensor) The input represents embedding tensors, " "which is a learnable parameter."); diff --git a/paddle/fluid/operators/lrn_op.cc b/paddle/fluid/operators/lrn_op.cc index f5c0e47fda913b4635833c31496644b60a0a8504..52b9cd7fb7019b738098a8649f23277afd40e938 100644 --- a/paddle/fluid/operators/lrn_op.cc +++ b/paddle/fluid/operators/lrn_op.cc @@ -169,8 +169,7 @@ class LRNOp : public framework::OperatorWithKernel { template class LRNOpMaker : public framework::OpProtoAndCheckerMaker { public: - LRNOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) The input of LRN operator. 
" "It must be a 4D tenor with NCHW format."); diff --git a/paddle/fluid/operators/lstm_op.cc b/paddle/fluid/operators/lstm_op.cc index 084ee1cfe602af3622ef2a3f35f2892d5540cec7..4751e3e8025e51a687f8fcfd25e603b61e762f6d 100644 --- a/paddle/fluid/operators/lstm_op.cc +++ b/paddle/fluid/operators/lstm_op.cc @@ -103,8 +103,7 @@ class LSTMOp : public framework::OperatorWithKernel { class LSTMOpMaker : public framework::OpProtoAndCheckerMaker { public: - LSTMOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Input", "(LoDTensor) the first input is a LodTensor, which support " "variable-time length input sequence. The underlying tensor in " diff --git a/paddle/fluid/operators/lstm_unit_op.cc b/paddle/fluid/operators/lstm_unit_op.cc index e1157ef6c640be17e7f48abe1ab972cf88504526..0895c58f5f58afd444000ebeac7a92e3eb7778d3 100644 --- a/paddle/fluid/operators/lstm_unit_op.cc +++ b/paddle/fluid/operators/lstm_unit_op.cc @@ -48,8 +48,7 @@ class LstmUnitOp : public framework::OperatorWithKernel { class LstmUnitOpMaker : public framework::OpProtoAndCheckerMaker { public: - LstmUnitOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Lstm unit only applies non-linear activations, please make sure" "that linear tranformation has already been applied to `X`. " diff --git a/paddle/fluid/operators/lstmp_op.cc b/paddle/fluid/operators/lstmp_op.cc index f9261323f0f50c78b3b4b66a9fa8abcdf5ba27e9..e398b51480f6fc0c6c568770b3b2a9746360744e 100644 --- a/paddle/fluid/operators/lstmp_op.cc +++ b/paddle/fluid/operators/lstmp_op.cc @@ -120,8 +120,7 @@ class LSTMPOp : public framework::OperatorWithKernel { class LSTMPOpMaker : public framework::OpProtoAndCheckerMaker { public: - LSTMPOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Input", "(LoDTensor) the input for sequence data, which supports " "variable-time length input sequence. The underlying tensor in " diff --git a/paddle/fluid/operators/margin_rank_loss_op.cc b/paddle/fluid/operators/margin_rank_loss_op.cc index 0b41a3e1ffdb32d248bb55651aba242336307e74..b643ba9d7fa61d758e871ebe7a463c22e937fa2c 100644 --- a/paddle/fluid/operators/margin_rank_loss_op.cc +++ b/paddle/fluid/operators/margin_rank_loss_op.cc @@ -42,8 +42,7 @@ class MarginRankLossOp : public framework::OperatorWithKernel { template class MarginRankLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - MarginRankLossOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X1", "(2-D tensor with shape [batch_size x 1]) The score for " "one item X1 to be ranked, from pairwise ranking model."); diff --git a/paddle/fluid/operators/math/blas.cc b/paddle/fluid/operators/math/blas.cc index 3eeb77546b97a0337b46216d837a4f4cff12c89f..6a143b3c056455595fdedc131b0c5f4ee756e1e0 100644 --- a/paddle/fluid/operators/math/blas.cc +++ b/paddle/fluid/operators/math/blas.cc @@ -13,10 +13,40 @@ // limitations under the License. #include "paddle/fluid/operators/math/blas.h" + +#include namespace paddle { namespace operators { namespace math { -// Do nothing. Blas is a header only library. 
+MatDescriptor CreateMatrixDescriptor(const framework::DDim &tensor_dim, + int num_flatten_cols, bool trans) { + PADDLE_ENFORCE_GT(tensor_dim.size(), 1); + MatDescriptor retv; + if (num_flatten_cols > 1) { + auto flatten_dim = framework::flatten_to_2d(tensor_dim, num_flatten_cols); + retv.height_ = flatten_dim[0]; + retv.width_ = flatten_dim[1]; + } else { + if (tensor_dim.size() == 2) { + retv.height_ = tensor_dim[0]; + retv.width_ = tensor_dim[1]; + } else { + auto dim_vec = framework::vectorize(tensor_dim); + retv.batch_size_ = 1; + for (size_t i = 0; i < dim_vec.size() - 2; ++i) { + retv.batch_size_ *= dim_vec[i]; + } + retv.height_ = dim_vec[dim_vec.size() - 2]; + retv.width_ = dim_vec[dim_vec.size() - 1]; + retv.stride_ = retv.height_ * retv.width_; + } + } + if (trans) { + std::swap(retv.width_, retv.height_); + } + retv.trans_ = trans; + return retv; +} } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/math/blas.h b/paddle/fluid/operators/math/blas.h index 5cd2f855d1135e6dd8343efdaa9855d2526a3520..dabde43850db770d286b13cacd32bee181328d5c 100644 --- a/paddle/fluid/operators/math/blas.h +++ b/paddle/fluid/operators/math/blas.h @@ -46,6 +46,50 @@ namespace paddle { namespace operators { namespace math { +/** + * Matrix Descriptor of a memory buffer. + * + * It is used for Blas::MatMul. The MatMul operator can be batched: if Mat A is + * [BatchSize, H, W] and Mat B is [BatchSize, H, W], it performs `batch_size` + * GEMMs. The batched GEMM could be faster, depending on the implementation of + * the blas library. The batch size could be zero. If any matrix of `matmul` + * has a batch size, the result will be a batched GEMM, too. e.g., if Mat A is + * [BatchSize, H1, W1] and Mat B is [H2, W2], the result matrix will be + * [BatchSize, H1, W2]. + * + * The boolean flag, `trans`, describes whether the memory is the transpose of + * the matrix or not. If trans is true, the last two dims of the matrix are + * transposed, and the memory layout of the matrix is [Width, Height] or + * [BatchSize, Width, Height]. + * + * The MatDescriptor is not only the dimension or shape of a matrix; it also + * contains the layout and stride of the matrix. It is clearer to have a + * dedicated structure than to reuse `DDim`. + */ +struct MatDescriptor { + int64_t height_; + int64_t width_; + int64_t stride_{0}; + int64_t batch_size_{0}; + bool trans_; +}; + +/** + * Create a Matrix Descriptor from a tensor dim, num_flatten_cols, and a + * transpose flag. + * + * @param tensor_dim: The dimension of the tensor. The rank of this dimension + * must be larger than 1. + * + * @param num_flatten_cols: Reshape a tensor to a matrix. The matrix's first + * dimension (column length) will be the product of the tensor's first + * `num_flatten_cols` dimensions. If num_flatten_cols is zero or one, the first + * N-2 dimensions form the batch_size of the descriptor. + * + * @param trans: True if the matrix is transposed.
+ */ +extern MatDescriptor CreateMatrixDescriptor(const framework::DDim& tensor_dim, + int num_flatten_cols, bool trans); + template class Blas { public: @@ -90,6 +134,11 @@ class Blas { int K, T alpha, const T* A, const T* B, T beta, T* C, int batchCount, int64_t strideA, int64_t strideB) const; + template + void MatMul(const framework::Tensor& mat_a, const MatDescriptor& dim_a, + const framework::Tensor& mat_b, const MatDescriptor& dim_b, + T alpha, framework::Tensor* mat_out, T beta) const; + private: const DeviceContext& context_; }; diff --git a/paddle/fluid/operators/math/blas_impl.h b/paddle/fluid/operators/math/blas_impl.h index 7360cc0a90da499c372c6fb3f8d40a26f9093dd8..577cbe3beb806ffcb2f1a7d7a469402be9b69224 100644 --- a/paddle/fluid/operators/math/blas_impl.h +++ b/paddle/fluid/operators/math/blas_impl.h @@ -180,6 +180,31 @@ void Blas::BatchedGEMM( #endif } +template +template +void Blas::MatMul(const framework::Tensor &mat_a, + const MatDescriptor &dim_a, + const framework::Tensor &mat_b, + const MatDescriptor &dim_b, T alpha, + framework::Tensor *mat_out, T beta) const { + PADDLE_ENFORCE_EQ(dim_a.width_, dim_b.height_); + CBLAS_TRANSPOSE transA = !dim_a.trans_ ? CblasNoTrans : CblasTrans; + CBLAS_TRANSPOSE transB = !dim_b.trans_ ? CblasNoTrans : CblasTrans; + if (dim_a.batch_size_ == 0 && dim_b.batch_size_ == 0) { + this->template GEMM(transA, transB, dim_a.height_, dim_b.width_, + dim_a.width_, alpha, mat_a.data(), + mat_b.data(), beta, mat_out->data()); + } else { + PADDLE_ENFORCE(dim_a.batch_size_ == dim_b.batch_size_ || + dim_a.batch_size_ == 0 || dim_b.batch_size_ == 0); + this->template BatchedGEMM( + transA, transB, dim_a.height_, dim_b.width_, dim_a.width_, alpha, + mat_a.data(), mat_b.data(), beta, mat_out->data(), + dim_a.batch_size_ == 0 ? dim_b.batch_size_ : dim_a.batch_size_, + dim_a.stride_, dim_b.stride_); + } +} + } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/math/matmul.h b/paddle/fluid/operators/math/matmul.h deleted file mode 100644 index 87fd38a324e007bcc939c31b6ae8e5d38c3e658c..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/math/matmul.h +++ /dev/null @@ -1,149 +0,0 @@ -/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -#include -#include -#include "paddle/fluid/operators/math/blas.h" - -namespace paddle { -namespace operators { -namespace math { - -// Implements the logic of numpy matmul: -// https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html -// -// but allowing also for a, b to be transposed -// -// Both a & b can be 1- to 3-dimensional. Higher rank tensors are not supported -// yet. 
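(Editorial aside: the removed `MatMulFunctor` below hand-rolled the numpy-matmul shape logic that the new `MatDescriptor`/`Blas::MatMul` pair above now encapsulates. The following minimal usage sketch of the new API is illustrative only and not part of this patch; the helper name `MatMulSketch`, the concrete shapes, and the CPU-only setup are assumptions, while `GetBlas`, `CreateMatrixDescriptor`, and `Blas::MatMul` are the APIs added above.)

#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/platform/device_context.h"

namespace math = paddle::operators::math;

// Hypothetical sketch: multiply a batched LHS x of shape [4, 2, 3] by a
// shared, unbatched RHS y of shape [3, 5], producing out of shape [4, 2, 5].
void MatMulSketch(const paddle::platform::CPUDeviceContext& ctx,
                  const paddle::framework::Tensor& x,
                  const paddle::framework::Tensor& y,
                  paddle::framework::Tensor* out) {
  out->Resize({4, 2, 5});
  out->mutable_data<float>(ctx.GetPlace());
  auto blas = math::GetBlas<paddle::platform::CPUDeviceContext, float>(ctx);
  // For x: batch_size_ = 4, height_ = 2, width_ = 3, stride_ = 6.
  auto dim_x = math::CreateMatrixDescriptor(x.dims(), 0, /*trans=*/false);
  // For y: batch_size_ = 0 (unbatched), height_ = 3, width_ = 5, stride_ = 0.
  auto dim_y = math::CreateMatrixDescriptor(y.dims(), 0, /*trans=*/false);
  // Only one operand is batched, so MatMul dispatches to BatchedGEMM with
  // strideB == 0, reusing y for each of the 4 batches; with two rank-2
  // operands the same call would fall through to a single GEMM.
  blas.MatMul(x, dim_x, y, dim_y, 1.0f, out, 0.0f);
}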
-template -class MatMulFunctor { - public: - void operator()(const DeviceContext& context, const framework::Tensor& a, - bool trans_a, const framework::Tensor& b, bool trans_b, - T alpha, framework::Tensor* out, T beta) { - auto dim_a = a.dims(); - auto dim_b = b.dims(); - - PADDLE_ENFORCE(a.place() == b.place() && b.place() == out->place(), - "Tensors must all be in the same place."); - PADDLE_ENFORCE_GE(dim_a.size(), 1, - "Input tensor a must be at least 1-dimensional."); - PADDLE_ENFORCE_GE(dim_b.size(), 1, - "Input tensor b must be at least 1-dimensional."); - - std::vector out_dim; - int64_t batch_count = 1; - if (dim_a.size() > 3) { - PADDLE_ENFORCE(dim_b.size() == dim_a.size(), - "The dimensions of X and Y must be the same, and both of " - "them should be %d-dimensional.", - dim_b.size()); - // The first rank-2 dimensions are accumulated on the batch_count, and the - // last two dimensions are used for matrix multiplication. - for (int j = 0; j < dim_a.size() - 2; ++j) { - PADDLE_ENFORCE_EQ(dim_b[j], dim_a[j], - "The %d-th dimension of X and Y must be the same.", - j); - out_dim.push_back(dim_a[j]); - batch_count *= dim_a[j]; - } - } - - int M = 0, N = 0, kA = 0, kB = 0, batchCountA = 0, batchCountB = 0, - strideA = 0, strideB = 0; - - switch (dim_a.size()) { - case 1: - // similar to np.matmul: - // prepend dimension 1 (no transpose) or append dimension 1 (transpose) - M = trans_a ? dim_a[0] : 1; - kA = trans_a ? 1 : dim_a[0]; - break; - case 2: - M = trans_a ? dim_a[1] : dim_a[0]; - kA = trans_a ? dim_a[0] : dim_a[1]; - break; - case 3: - batchCountA = dim_a[0]; - M = trans_a ? dim_a[2] : dim_a[1]; - kA = trans_a ? dim_a[1] : dim_a[2]; - strideA = M * kA; - break; - default: - batchCountA = batch_count; - size_t mat_s = dim_a.size() - 2; - M = trans_a ? dim_a[mat_s + 1] : dim_a[mat_s]; - kA = trans_a ? dim_a[mat_s] : dim_a[mat_s + 1]; - strideA = M * kA; - } - - switch (dim_b.size()) { - case 1: - // similar to np.matmul: - // append dimension 1 (no transpose) or prepend dimension 1 (transpose) - kB = trans_b ? 1 : dim_b[0]; - N = trans_b ? dim_b[0] : 1; - break; - case 2: - kB = trans_b ? dim_b[1] : dim_b[0]; - N = trans_b ? dim_b[0] : dim_b[1]; - break; - case 3: - batchCountB = dim_b[0]; - kB = trans_b ? dim_b[2] : dim_b[1]; - N = trans_b ? dim_b[1] : dim_b[2]; - strideB = kB * N; - break; - default: - batchCountB = batch_count; - size_t mat_s = dim_b.size() - 2; - kB = trans_b ? dim_b[mat_s + 1] : dim_b[mat_s]; - N = trans_b ? dim_b[mat_s] : dim_b[mat_s + 1]; - strideB = kB * N; - } - - PADDLE_ENFORCE_EQ( - kA, kB, - "First matrix's width must be equal with second matrix's height."); - if (batchCountA && batchCountB) { - PADDLE_ENFORCE_EQ( - batchCountA, batchCountB, - "When input tensors a and b are both batched, they must have the " - "same batch dimension."); - } - int batchCount = std::max(batchCountA, batchCountB); - - CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans; - CBLAS_TRANSPOSE transB = (trans_b == false) ? 
CblasNoTrans : CblasTrans; - - auto blas = GetBlas(context); - - if (!batchCount) { - // regular matrix multiplication - blas.GEMM(transA, transB, M, N, kA, alpha, a.data(), b.data(), beta, - out->data()); - } else { - // batched matrix multiplication - blas.BatchedGEMM(transA, transB, M, N, kA, alpha, a.data(), - b.data(), beta, out->data(), batchCount, strideA, - strideB); - } - } -}; - -} // namespace math -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/matmul_op.cc b/paddle/fluid/operators/matmul_op.cc index e5d33fbc36438f97ff5b604e4efdbfbfa91fcee4..2d05449822e6addc42c4fea5af8422ae6dcfd37d 100644 --- a/paddle/fluid/operators/matmul_op.cc +++ b/paddle/fluid/operators/matmul_op.cc @@ -12,14 +12,257 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/matmul_op.h" #include +#include #include +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/operators/math/blas.h" namespace paddle { namespace operators { +/** + * Get row matrix shape from a vector shape. If the rank of x_dim > 1, the + * original x_dim is returned. + */ +static framework::DDim RowMatrixFromVector(const framework::DDim& x_dim) { + if (x_dim.size() > 1) { + return x_dim; + } + return framework::make_ddim({1, x_dim[0]}); +} + +/** + * Get column matrix shape from a vector shape. If the rank of y_dim > 1, the + * original y_dim is returned. + */ +static framework::DDim ColumnMatrixFromVector(const framework::DDim& y_dim) { + if (y_dim.size() > 1) { + return y_dim; + } + return framework::make_ddim({y_dim[0], 1}); + } + +template +class MatMulKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto& x = + detail::Ref(context.Input("X"), "Cannot find X"); + auto& y = + detail::Ref(context.Input("Y"), "Cannot find Y"); + auto* out = context.Output("Out"); + out->mutable_data(context.GetPlace()); + + auto blas = math::GetBlas(context); + auto mat_dim_a = math::CreateMatrixDescriptor( + RowMatrixFromVector(x.dims()), 0, context.Attr("transpose_X")); + auto mat_dim_b = math::CreateMatrixDescriptor( + ColumnMatrixFromVector(y.dims()), 0, context.Attr("transpose_Y")); + blas.MatMul(x, mat_dim_a, y, mat_dim_b, T(1), out, T(0)); + } +}; + +// Reshape a rank-3 tensor from P x M x N to (P * M) x N. +// Identity op if the tensor is not of rank 3. +static framework::Tensor FoldInitDims(const framework::Tensor& input) { + auto output = input; + auto in_dims = input.dims(); + if (in_dims.size() == 3) { + output.Resize({in_dims[0] * in_dims[1], in_dims[2]}); + } + return output; +} + +// Reshape a rank-3 tensor from P x M x N to M x (P * N). +// (Warning: This requires transposing data and writes into new memory.) +// Identity op if the tensor is not of rank 3.
+template +static framework::Tensor FoldHeadAndLastDims(const DeviceContext& context, + const framework::Tensor& input) { + auto in_dims = input.dims(); + if (in_dims.size() != 3) { + return input; + } + framework::Tensor output; + output.Resize({in_dims[1], in_dims[0], in_dims[2]}); + output.mutable_data(context.GetPlace()); + std::vector axis = {1, 0, 2}; + math::Transpose trans; + trans(context, input, &output, axis); + output.Resize({in_dims[1], in_dims[0] * in_dims[2]}); + + return output; +} + +/** + * Reshape a tensor to a 3-D or 2-D tensor by its matrix descriptor. + * + * The shape would be [BatchSize, H, W] or [H, W]. + * If transposed, `H,W` will be swapped. + */ +static void ReshapeTensorIntoMatrixSequence( + framework::Tensor* x, const math::MatDescriptor& descriptor) { + int64_t h, w; + h = descriptor.height_; + w = descriptor.width_; + if (descriptor.trans_) { + std::swap(w, h); + } + if (descriptor.batch_size_) { + x->Resize({descriptor.batch_size_, h, w}); + } else { + x->Resize({h, w}); + } +} + +/** + * Reshape the x, y and out tensors to 3-D or 2-D tensors by their matrix + * descriptors for Out = matmul(x, y). + * + * This method first calculates the X and Y matrix sequences, and then + * calculates the out shape. + * + * Assume X = [BatchSize, H1, W1], Y = [BatchSize, H2, W2]. + * Then out = [BatchSize, H1, W2]. + * + * If there is no batch size in `X` and `Y`, the out will be [H1, W2]. + * If either `X` or `Y` has batch size BatchSize, the out will have that + * BatchSize. + */ +static void ReshapeXYOutIntoMatrixSequence(framework::Tensor* x, + framework::Tensor* y, + framework::Tensor* out, bool trans_x, + bool trans_y) { + auto x_dim = RowMatrixFromVector(x->dims()); + auto y_dim = ColumnMatrixFromVector(y->dims()); + auto mat_dim_x = math::CreateMatrixDescriptor(x_dim, 0, trans_x); + auto mat_dim_y = math::CreateMatrixDescriptor(y_dim, 0, trans_y); + if (mat_dim_x.batch_size_ == 0 && mat_dim_y.batch_size_ == 0) { + out->Resize({mat_dim_x.height_, mat_dim_y.width_}); + } else { + out->Resize({std::max(mat_dim_x.batch_size_, mat_dim_y.batch_size_), + mat_dim_x.height_, mat_dim_y.width_}); + } + + ReshapeTensorIntoMatrixSequence(x, mat_dim_x); + ReshapeTensorIntoMatrixSequence(y, mat_dim_y); +} + +// Using dimensional constraints on matrix multiplication, it is +// straightforward to check the following table for when X and Y +// are both matrices. +// +// transpose_X | False | True | False | True +// transpose_Y | False | False | True | True +// -----------+----------+----------+----------+----------- +// dX = | dOut Y^T | Y dOut^T | dOut Y | Y^T dOut^T +// dY = | X^T dOut | X dOut | dOut^T X | dOut^T X^T +// +// When X is a vector of size K, we treat it instead as a matrix of shape +// (1, K). Similarly, when Y is a vector of size K, we treat it instead as +// a matrix of shape (K, 1). +// +// When X and Y are both 3-dimensional tensors, the first dimension, the +// batch dimension, can be ignored and the exact same formulas apply +// as for two matrices. +// +// Finally, when, e.g., X is a 3-dimensional tensor but Y is a matrix, we end +// up with formulas like +// +// dY_{ij} = \sum_{p, m} X_{pmi} dOut_{pmj} +// +// To handle this sort of scenario, we reshape X : P x M x K, dOut: P x M x N +// to X: (P * M) x K, dOut: (P * M) x N.
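(Editorial aside: the table above can be sanity-checked against the new Blas API with a small sketch, mirroring what `MatMulGradKernel::MatMul` below does. It is illustrative only and not part of this patch; `GradCheckSketch` and the concrete shapes are assumptions. For X [2, 3] and Y [3, 4] with both transpose flags false, dOut is [2, 4]; the first column of the table gives dX = dOut Y^T of shape [2, 3] and dY = X^T dOut of shape [3, 4], matching the inputs.)

#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/platform/device_context.h"

namespace math = paddle::operators::math;

// Hypothetical check of dX = dOut * Y^T (transpose_X = transpose_Y = false).
// dout is [2, 4]; y is [3, 4] but is described with trans = true, so it is
// read as Y^T of shape [4, 3]; dx therefore comes out as [2, 3].
void GradCheckSketch(const paddle::platform::CPUDeviceContext& ctx,
                     const paddle::framework::Tensor& dout,
                     const paddle::framework::Tensor& y,
                     paddle::framework::Tensor* dx) {
  dx->Resize({2, 3});
  dx->mutable_data<float>(ctx.GetPlace());
  auto blas = math::GetBlas<paddle::platform::CPUDeviceContext, float>(ctx);
  auto dim_dout = math::CreateMatrixDescriptor(dout.dims(), 0, false);
  // trans = true swaps height_ and width_ in the descriptor ([4, 3] here).
  auto dim_y = math::CreateMatrixDescriptor(y.dims(), 0, /*trans=*/true);
  blas.MatMul(dout, dim_dout, y, dim_y, 1.0f, dx, 0.0f);
}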
+template +class MatMulGradKernel : public framework::OpKernel { + public: + void MatMul(const framework::ExecutionContext& context, + const framework::Tensor& a, bool trans_a, + const framework::Tensor& b, bool trans_b, + framework::Tensor* out) const { + out->mutable_data(context.GetPlace()); + auto blas = math::GetBlas(context); + auto mat_dim_a = math::CreateMatrixDescriptor(a.dims(), 0, trans_a); + auto mat_dim_b = math::CreateMatrixDescriptor(b.dims(), 0, trans_b); + blas.MatMul(a, mat_dim_a, b, mat_dim_b, T(1), out, T(0)); + } + + void CalcInputGrad(const framework::ExecutionContext& context, + const framework::Tensor& a, bool trans_a, + bool is_fold_init_dims_a, const framework::Tensor& b, + bool trans_b, bool is_fold_init_dims_b, + framework::Tensor* out) const { + if (out == nullptr) return; + bool need_combine = (a.dims().size() == 3 || b.dims().size() == 3) && + out->dims().size() == 2; + if (!need_combine) { + MatMul(context, a, trans_a, b, trans_b, out); + } else { + auto& ctx = context.template device_context(); + MatMul(context, is_fold_init_dims_a + ? FoldInitDims(a) + : FoldHeadAndLastDims(ctx, a), + trans_a, is_fold_init_dims_b + ? FoldInitDims(b) + : FoldHeadAndLastDims(ctx, b), + trans_b, out); + } + } + + void Compute(const framework::ExecutionContext& context) const override { + auto x = *context.Input("X"); + auto y = *context.Input("Y"); + auto dout = + *context.Input(framework::GradVarName("Out")); + auto* dx = context.Output(framework::GradVarName("X")); + auto* dy = context.Output(framework::GradVarName("Y")); + bool transpose_x = context.Attr("transpose_X"); + bool transpose_y = context.Attr("transpose_Y"); + + ReshapeXYOutIntoMatrixSequence(&x, &y, &dout, transpose_x, transpose_y); + framework::DDim dx_dims; + if (dx) { + dx_dims = dx->dims(); + if (dx_dims != x.dims()) { + dx->Resize(x.dims()); + } + } + + framework::DDim dy_dims; + if (dy) { + dy_dims = dy->dims(); + if (dy_dims != y.dims()) { + dy->Resize(y.dims()); + } + } -using framework::Tensor; + if (transpose_x && transpose_y) { + CalcInputGrad(context, y, true, true, dout, true, false, dx); + CalcInputGrad(context, dout, true, true, x, true, false, dy); + } else if (transpose_x) { + CalcInputGrad(context, y, false, false, dout, true, false, dx); + CalcInputGrad(context, x, false, false, dout, false, true, dy); + } else if (transpose_y) { + CalcInputGrad(context, dout, false, false, y, false, true, dx); + CalcInputGrad(context, dout, true, true, x, false, true, dy); + } else { + CalcInputGrad(context, dout, false, false, y, true, false, dx); + CalcInputGrad(context, x, true, true, dout, false, true, dy); + } + + if (dx) { + if (dx_dims != x.dims()) { + dx->Resize(dx_dims); + } + } + if (dy) { + if (dy_dims != y.dims()) { + dy->Resize(dy_dims); + } + } + } +}; class MatMulOp : public framework::OperatorWithKernel { public: @@ -36,121 +279,41 @@ class MatMulOp : public framework::OperatorWithKernel { auto dim_x = context->GetInputDim("X"); auto dim_y = context->GetInputDim("Y"); - bool transpose_x = context->Attrs().Get("transpose_X"); - bool transpose_y = context->Attrs().Get("transpose_Y"); - - PADDLE_ENFORCE_GE(dim_x.size(), 1, - "Input tensor X must be at least 1-dimensional."); - PADDLE_ENFORCE_GE(dim_y.size(), 1, - "Input tensor Y must be at least 1-dimensional."); - - std::vector out_dim; - int64_t batch_count = 1; - if (dim_x.size() > 3) { - PADDLE_ENFORCE_EQ( - dim_y.size(), dim_x.size(), - "The dimensions of X and Y must be the same, and both of " - "them should be %d-dimensional.", - 
dim_x.size()); - - // The first rank-2 dimensions are accumulated on the batch_count, and the - // last two dimensions are used for matrix multiplication. - for (int j = 0; j < dim_x.size() - 2; ++j) { - PADDLE_ENFORCE_EQ(dim_y[j], dim_x[j], - "The %d-th dimension of X and Y must be the same.", - j); - out_dim.push_back(dim_x[j]); - batch_count *= dim_x[j]; - } - } - int M = 0, N = 0, KX = 0, KY = 0, batchCountX = 0, batchCountY = 0; - bool remove_initial_dim = false, remove_final_dim = false; - - switch (dim_x.size()) { - case 1: - if (transpose_x) { - M = dim_x[0]; - KX = 1; - } else { - M = 1; - KX = dim_x[0]; - remove_initial_dim = true; - } - break; - case 2: - M = transpose_x ? dim_x[1] : dim_x[0]; - KX = transpose_x ? dim_x[0] : dim_x[1]; - break; - case 3: - batchCountX = dim_x[0]; - M = transpose_x ? dim_x[2] : dim_x[1]; - KX = transpose_x ? dim_x[1] : dim_x[2]; - break; - default: - batchCountX = batch_count; - size_t mat_s = dim_x.size() - 2; - M = transpose_x ? dim_x[mat_s + 1] : dim_x[mat_s]; - KX = transpose_x ? dim_x[mat_s] : dim_x[mat_s + 1]; - break; - } + auto mat_dim_x = + math::CreateMatrixDescriptor(RowMatrixFromVector(dim_x), 0, + context->Attrs().Get("transpose_X")); + auto mat_dim_y = + math::CreateMatrixDescriptor(ColumnMatrixFromVector(dim_y), 0, + context->Attrs().Get("transpose_Y")); - switch (dim_y.size()) { - case 1: - if (transpose_y) { - N = dim_y[0]; - KY = 1; - } else { - N = 1; - KY = dim_y[0]; - remove_final_dim = true; - } - break; - case 2: - KY = transpose_y ? dim_y[1] : dim_y[0]; - N = transpose_y ? dim_y[0] : dim_y[1]; - break; - case 3: - batchCountY = dim_y[0]; - KY = transpose_y ? dim_y[2] : dim_y[1]; - N = transpose_y ? dim_y[1] : dim_y[2]; - break; - default: - batchCountY = batch_count; - size_t mat_s = dim_y.size() - 2; - KY = transpose_y ? dim_y[mat_s + 1] : dim_y[mat_s]; - N = transpose_y ? 
dim_y[mat_s] : dim_y[mat_s + 1]; + PADDLE_ENFORCE_EQ(mat_dim_x.width_, mat_dim_y.height_); + PADDLE_ENFORCE(mat_dim_x.batch_size_ == mat_dim_y.batch_size_ || + mat_dim_x.batch_size_ == 0 || mat_dim_y.batch_size_ == 0); + std::vector dim_out; + if (mat_dim_x.batch_size_ != 0) { + dim_out = framework::vectorize(dim_x); + dim_out[dim_out.size() - 2] = mat_dim_x.height_; + dim_out[dim_out.size() - 1] = mat_dim_y.width_; + } else if (mat_dim_y.batch_size_ != 0) { + dim_out = framework::vectorize(dim_y); + dim_out[dim_out.size() - 2] = mat_dim_x.height_; + dim_out[dim_out.size() - 1] = mat_dim_y.width_; + } else { + dim_out = {mat_dim_x.height_, mat_dim_y.width_}; } - PADDLE_ENFORCE_EQ( - KX, KY, - "First matrix's width must be equal with second matrix's height."); - if (batchCountX && batchCountY) { - PADDLE_ENFORCE_EQ( - batchCountX, batchCountY, - "When Input(X) and Input(Y) are both three dimensional, they " - "must have the same batch dimension."); + if (dim_x.size() == 1 && dim_out[dim_out.size() - 2] == 1) { + std::swap(dim_out[dim_out.size() - 2], dim_out[dim_out.size() - 1]); + dim_out.resize(dim_out.size() - 1); } - int batchCount = std::max(batchCountX, batchCountY); - std::vector dim_out; - if (batchCount) { - if (dim_x.size() > 3) { - dim_out.insert(dim_out.begin(), out_dim.begin(), out_dim.end()); - } else { - dim_out.push_back(batchCount); - } + if (dim_y.size() == 1 && dim_out[dim_out.size() - 1] == 1) { + dim_out.resize(dim_out.size() - 1); } - if (!remove_initial_dim) { - dim_out.push_back(M); - } - if (!remove_final_dim) { - dim_out.push_back(N); - } - if (dim_out.size() == 0) { - // We don't support 0-dimensional Tensors (scalars), so instead - // treat the output as a Tensor of shape (1, ) in this case. - dim_out.push_back(1); + + if (dim_out.empty()) { + dim_out = {1}; } context->SetOutputDim("Out", framework::make_ddim(dim_out)); context->ShareLoD("X", /*->*/ "Out"); @@ -159,8 +322,7 @@ class MatMulOp : public framework::OperatorWithKernel { class MatMulOpMaker : public framework::OpProtoAndCheckerMaker { public: - MatMulOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The first input of MatMul op"); AddInput("Y", "The second input of MatMul op"); AddOutput("Out", "The output of MatMul op"); @@ -233,15 +395,40 @@ class MatMulOpGrad : public framework::OperatorWithKernel { } }; +class MatMulOpGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto* retv = new framework::OpDesc(); + retv->SetType("matmul_grad"); + retv->SetInput("X", Input("X")); + retv->SetInput("Y", Input("Y")); + retv->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + retv->SetOutput(framework::GradVarName("X"), InputGrad("X")); + retv->SetOutput(framework::GradVarName("Y"), InputGrad("Y")); + retv->SetAttrMap(Attrs()); + return std::unique_ptr(retv); + } +}; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OPERATOR(matmul, ops::MatMulOp, ops::MatMulOpMaker, - paddle::framework::DefaultGradOpDescMaker); + ops::MatMulOpGradMaker); REGISTER_OPERATOR(matmul_grad, ops::MatMulOpGrad); REGISTER_OP_CPU_KERNEL( matmul, ops::MatMulKernel); REGISTER_OP_CPU_KERNEL( matmul_grad, ops::MatMulGradKernel); + +#ifdef PADDLE_WITH_CUDA +REGISTER_OP_CUDA_KERNEL( + matmul, ops::MatMulKernel); +REGISTER_OP_CUDA_KERNEL( + matmul_grad, + 
ops::MatMulGradKernel); +#endif diff --git a/paddle/fluid/operators/matmul_op.cu.cc b/paddle/fluid/operators/matmul_op.cu.cc deleted file mode 100644 index e021bbe645399e410cde5c3ff7035d4d68c71744..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/matmul_op.cu.cc +++ /dev/null @@ -1,22 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/operators/matmul_op.h" - -namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL( - matmul, ops::MatMulKernel); -REGISTER_OP_CUDA_KERNEL( - matmul_grad, - ops::MatMulGradKernel); diff --git a/paddle/fluid/operators/matmul_op.h b/paddle/fluid/operators/matmul_op.h deleted file mode 100644 index f2e9cfdcdbf93326ae193776a7d5f6a324373603..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/matmul_op.h +++ /dev/null @@ -1,244 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -#include -#include -#include -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/operators/math/matmul.h" - -namespace paddle { -namespace operators { -namespace matmul_detail { - -using Tensor = framework::Tensor; -using DDim = framework::DDim; -using framework::make_ddim; -using framework::vectorize; - -template -class MatMulKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - const Tensor& x = *context.Input("X"); - const Tensor& y = *context.Input("Y"); - Tensor* out = context.Output("Out"); - out->mutable_data(context.GetPlace()); - bool transpose_x = context.Attr("transpose_X"); - bool transpose_y = context.Attr("transpose_Y"); - - math::MatMulFunctor()( - context.template device_context(), x, transpose_x, y, - transpose_y, T(1), out, T(0)); - } -}; - -template -inline Tensor Reshape(const Tensor& input, const DDim& dims) { - Tensor output; - output.ShareDataWith(input); - output.Resize(dims); - return output; -} - -// Reshape a rank-3 tensor from P x M x N to (P * M) x N. -// Identity op if the tensor is not of rank 3. 
-template -Tensor CombineBatchAndM(const Tensor& input) { - Tensor output; - output.ShareDataWith(input); - auto in_dims = input.dims(); - if (in_dims.size() == 3) { - std::vector out_dims = {in_dims[0] * in_dims[1], in_dims[2]}; - output.Resize(make_ddim(out_dims)); - } - return output; -} - -// Reshape a rank-3 tensor from P x M x N to M x (P * N). -// (Warning: This requires transposing data and writes into new memory.) -// Identity op if the tensor is not of rank 3. -template -Tensor CombineBatchAndN(const DeviceContext& context, const Tensor& input) { - Tensor output; - auto in_dims = input.dims(); - if (in_dims.size() == 3) { - output.Resize({in_dims[1], in_dims[0], in_dims[2]}); - output.mutable_data(context.GetPlace()); - std::vector axis = {1, 0, 2}; - math::Transpose trans; - trans(context, input, &output, axis); - std::vector out_dims = {in_dims[1], in_dims[0] * in_dims[2]}; - output.Resize({in_dims[1], in_dims[0] * in_dims[2]}); - } else { - output.ShareDataWith(input); - } - return output; -} - -// Using dimensional constraints on matrix multiplication, it is -// straight-forward to check the following table for when X and Y -// are both matrices. -// -// transpose_X | False | True | False | True -// transpose_Y | False | False | True | True -// -----------+----------+----------+----------+----------- -// dX = | dOut Y^T | Y dOut^T | dOut Y | Y^T dOut^T -// dY = | X^T dOut | X dOut | dOut^T X | dOut^T X^T -// -// When X is a vector of size K, we treat it instead as a matrix of shape -// (1, K). Similarly, when Y is a vector of size K, we treat it instead as -// a matrix of shape (K, 1). -// -// When X and Y are both 3-dimensional tensors, then the first dimension -// the batch dimension can be ignored and the exact same formulas apply -// as for two matrices. -// -// Finally, when, e.g., X is a 3-dimensional tensor but Y is a matrix, we end -// up with formulas like -// -// dY_{ij} = \sum_{p, m} X_{pmi} dOut_{pmj} -// -// To handle this sort of scenario, we reshape X : P x M x K, dOut: P x M x N -// to X: (P * M) x K, dOut: (P * M) x N. -template -class MatMulGradKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - const Tensor& x = *context.Input("X"); - const Tensor& y = *context.Input("Y"); - const Tensor& dout = *context.Input(framework::GradVarName("Out")); - Tensor* dx = context.Output(framework::GradVarName("X")); - Tensor* dy = context.Output(framework::GradVarName("Y")); - bool transpose_x = context.Attr("transpose_X"); - bool transpose_y = context.Attr("transpose_Y"); - - std::vector x_dims = vectorize(x.dims()); - std::vector y_dims = vectorize(y.dims()); - - // If X is a vector, reshape it to a matrix. - if (x_dims.size() == 1) { - x_dims.insert(x_dims.begin(), 1); - } - - // If Y is a vector, reshape it to a matrix. - if (y_dims.size() == 1) { - y_dims.push_back(1); - } - - int batch_count = 0; - // The first rank-2 dimensions are accumulated on the batch_count, and the - // last two dimensions are used for matrix multiplication. - if (x_dims.size() > 3) { - batch_count = accumulate(x_dims.begin(), x_dims.end() - 2, 1, - std::multiplies()); - } - // Fix the dOut dimensions. - int M = 0, N = 0, batchCountX = 0, batchCountY = 0; - - switch (x_dims.size()) { - case 2: - M = transpose_x ? x_dims[1] : x_dims[0]; - break; - case 3: - batchCountX = x_dims[0]; - M = transpose_x ? 
x_dims[2] : x_dims[1]; - break; - default: - batchCountX = batch_count; - size_t mat_s = x_dims.size() - 2; - M = transpose_x ? x_dims[mat_s + 1] : x_dims[mat_s]; - } - - switch (y_dims.size()) { - case 2: - N = transpose_y ? y_dims[0] : y_dims[1]; - break; - case 3: - batchCountY = y_dims[0]; - N = transpose_y ? y_dims[1] : y_dims[2]; - break; - default: - batchCountY = batch_count; - size_t mat_s = y_dims.size() - 2; - N = transpose_y ? y_dims[mat_s] : y_dims[mat_s + 1]; - } - if (batchCountX && batchCountY) { - PADDLE_ENFORCE_EQ( - batchCountX, batchCountY, - "When Input(X) and Input(Y) are both three dimensional, they " - "must have the same batch dimension."); - } - int batchCount = std::max(batchCountX, batchCountY); - std::vector dout_dims = {M, N}; - if (batchCount) { - if (x_dims.size() > 3) { - dout_dims.insert(dout_dims.begin(), x_dims.begin(), x_dims.end() - 2); - } else { - dout_dims.insert(dout_dims.begin(), batchCount); - } - } - Tensor X = Reshape(x, make_ddim(x_dims)); - Tensor Y = Reshape(y, make_ddim(y_dims)); - Tensor dOut = Reshape(dout, make_ddim(dout_dims)); - - auto& dev_ctx = context.template device_context(); - if (dx) { - dx->mutable_data(context.GetPlace()); - const Tensor& dOut_for_dX = - (x_dims.size() == 2 && y_dims.size() == 3) - ? CombineBatchAndN(dev_ctx, dOut) - : dOut; - if (x_dims.size() == 2 && y_dims.size() == 3) { - Y = transpose_y ? CombineBatchAndM(Y) - : CombineBatchAndN(dev_ctx, Y); - } - if (transpose_x) { - math::MatMulFunctor()( - dev_ctx, Y, transpose_y, dOut_for_dX, transpose_x, T(1), dx, T(0)); - } else { - math::MatMulFunctor()( - dev_ctx, dOut_for_dX, transpose_x, Y, !transpose_y, T(1), dx, T(0)); - } - } - - if (dy) { - dy->mutable_data(context.GetPlace()); - const Tensor& dOut_for_dY = (y_dims.size() == 2 && x_dims.size() == 3) - ? CombineBatchAndM(dOut) - : dOut; - if (y_dims.size() == 2 && x_dims.size() == 3) { - X = transpose_x ? 
CombineBatchAndN(dev_ctx, X) - : CombineBatchAndM(X); - dOut = CombineBatchAndM(dOut); - } - if (transpose_y) { - math::MatMulFunctor()( - dev_ctx, dOut_for_dY, transpose_y, X, transpose_x, T(1), dy, T(0)); - } else { - math::MatMulFunctor()( - dev_ctx, X, !transpose_x, dOut_for_dY, transpose_y, T(1), dy, T(0)); - } - } - } -}; -} // namespace matmul_detail - -using matmul_detail::MatMulKernel; -using matmul_detail::MatMulGradKernel; - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/max_sequence_len_op.cc b/paddle/fluid/operators/max_sequence_len_op.cc index 4cd7c89b48a2442ee7a5074abbf0f3dd9ea3bcb4..8e508b68eeab69a4595904dcc3ea0a541d9ab6e6 100644 --- a/paddle/fluid/operators/max_sequence_len_op.cc +++ b/paddle/fluid/operators/max_sequence_len_op.cc @@ -41,8 +41,7 @@ class MaxSeqenceLenOp : public framework::OperatorBase { class MaxSeqenceLenOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - MaxSeqenceLenOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("RankTable", "The lod_rank_table."); AddOutput("Out", "The max sequence length."); AddComment( diff --git a/paddle/fluid/operators/maxout_op.cc b/paddle/fluid/operators/maxout_op.cc index e2bcba5a5e15d4d5f10ae4ae64b5262f750137ab..058115cb624627d81b31d0903f7d615d19708c77 100644 --- a/paddle/fluid/operators/maxout_op.cc +++ b/paddle/fluid/operators/maxout_op.cc @@ -22,8 +22,7 @@ using framework::Tensor; class MaxOutOpMaker : public framework::OpProtoAndCheckerMaker { public: - MaxOutOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "X", "(Tensor) The input tensor of maxout operator. 
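(Editorial aside on the matmul gradient table removed above: a minimal, self-contained sketch of how the two MatMulFunctor call sites pick operands and transpose flags. The function names are this editor's hypothetical stand-ins, not PaddlePaddle APIs; each returned product matches the corresponding column of the table.)

#include <string>

// Hypothetical sketch only; it mirrors the dispatch in the removed
// MatMulGradKernel. "A^T" means the transpose flag passed for that
// operand is true.
std::string GradXFormula(bool transpose_x, bool transpose_y) {
  if (transpose_x) {
    // Kernel call: MatMulFunctor(Y, transpose_y, dOut, /*trans=*/true)
    return transpose_y ? "Y^T * dOut^T" : "Y * dOut^T";
  }
  // Kernel call: MatMulFunctor(dOut, /*trans=*/false, Y, !transpose_y)
  return transpose_y ? "dOut * Y" : "dOut * Y^T";
}

std::string GradYFormula(bool transpose_x, bool transpose_y) {
  if (transpose_y) {
    // Kernel call: MatMulFunctor(dOut, /*trans=*/true, X, transpose_x)
    return transpose_x ? "dOut^T * X^T" : "dOut^T * X";
  }
  // Kernel call: MatMulFunctor(X, !transpose_x, dOut, /*trans=*/false)
  return transpose_x ? "X * dOut" : "X^T * dOut";
}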
" diff --git a/paddle/fluid/operators/mean_op.cc b/paddle/fluid/operators/mean_op.cc index a134796bfcaa9dea2483ace9f5045e257916daba..74477eb439dc202c3f5f17fdf3e1647bc5c23512 100644 --- a/paddle/fluid/operators/mean_op.cc +++ b/paddle/fluid/operators/mean_op.cc @@ -32,8 +32,7 @@ class MeanOp : public framework::OperatorWithKernel { class MeanOpMaker : public framework::OpProtoAndCheckerMaker { public: - MeanOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of mean op"); AddOutput("Out", "The output of mean op"); AddComment(R"DOC( diff --git a/paddle/fluid/operators/merge_lod_tensor_op.cc b/paddle/fluid/operators/merge_lod_tensor_op.cc index 4ebf20cbba69bee09dfddb8e928ddc95665e4731..a16861b3b77fc980ab932b9d88859b38ec36108b 100644 --- a/paddle/fluid/operators/merge_lod_tensor_op.cc +++ b/paddle/fluid/operators/merge_lod_tensor_op.cc @@ -121,8 +121,7 @@ class MergeLoDTensorOp : public framework::OperatorBase { class MergeLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - MergeLoDTensorOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input LoDTensor, contains complete lod information to " "construct the output"); diff --git a/paddle/fluid/operators/mine_hard_examples_op.cc b/paddle/fluid/operators/mine_hard_examples_op.cc index 277901cff493445e1e85e92e22ea0ada0e1cba43..d4a09bae3a98e4518f9885c1e9182f7033a0d262 100644 --- a/paddle/fluid/operators/mine_hard_examples_op.cc +++ b/paddle/fluid/operators/mine_hard_examples_op.cc @@ -253,8 +253,7 @@ class MineHardExamplesOp : public framework::OperatorWithKernel { class MineHardExamplesOpMaker : public framework::OpProtoAndCheckerMaker { public: - MineHardExamplesOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "ClsLoss", "(Tensor, default Tensor), The classification loss with shape " diff --git a/paddle/fluid/operators/minus_op.cc b/paddle/fluid/operators/minus_op.cc index a302b24560e680076d62d02b422c6410467deb1d..34571a38a14795a98ac8454cec606077727b5ffa 100644 --- a/paddle/fluid/operators/minus_op.cc +++ b/paddle/fluid/operators/minus_op.cc @@ -48,8 +48,7 @@ class MinusOp : public framework::OperatorWithKernel { class MinusOpMaker : public framework::OpProtoAndCheckerMaker { public: - MinusOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The left tensor of minus operator."); AddInput("Y", "The right tensor of minus operator."); AddOutput("Out", "The output tensor of minus operator."); diff --git a/paddle/fluid/operators/modified_huber_loss_op.cc b/paddle/fluid/operators/modified_huber_loss_op.cc index 3a0fc74584391d0441105a8ac7d7ac292e10fb8d..35db4c1ad1f6c6481eca397e99fc8c1f0bc7164c 100644 --- a/paddle/fluid/operators/modified_huber_loss_op.cc +++ b/paddle/fluid/operators/modified_huber_loss_op.cc @@ -39,8 +39,7 @@ class ModifiedHuberLossOp : public framework::OperatorWithKernel { class ModifiedHuberLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - ModifiedHuberLossOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input tensor of modified huber loss op. 
" "X is 2-D tensor with shape [batch_size, 1]."); diff --git a/paddle/fluid/operators/momentum_op.cc b/paddle/fluid/operators/momentum_op.cc index f13ec53905aa3d5b55b865c3514f36211c06a549..dcd73e3c3e40f80e07b73944d1f0cc57fea010d3 100644 --- a/paddle/fluid/operators/momentum_op.cc +++ b/paddle/fluid/operators/momentum_op.cc @@ -62,8 +62,7 @@ class MomentumOp : public framework::OperatorWithKernel { class MomentumOpMaker : public framework::OpProtoAndCheckerMaker { public: - MomentumOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor, default Tensor) " "Input parameter that has to be updated"); diff --git a/paddle/fluid/operators/mul_op.cc b/paddle/fluid/operators/mul_op.cc index 6903cf83b41a54b54382fac2cf58f7bfe192b55f..a43739463c85b38e1dba04c6ec1bfcf4b6cbfa63 100644 --- a/paddle/fluid/operators/mul_op.cc +++ b/paddle/fluid/operators/mul_op.cc @@ -96,8 +96,7 @@ class MulOp : public framework::OperatorWithKernel { class MulOpMaker : public framework::OpProtoAndCheckerMaker { public: - MulOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor), The first input tensor of mul op."); AddInput("Y", "(Tensor), The second input tensor of mul op."); AddOutput("Out", "(Tensor), The output tensor of mul op."); diff --git a/paddle/fluid/operators/multiclass_nms_op.cc b/paddle/fluid/operators/multiclass_nms_op.cc index a12b975326519c776c9f4a1d9f2894b4028c2440..60b93efdce810f8552374449fe5a6fc79b1a92c1 100644 --- a/paddle/fluid/operators/multiclass_nms_op.cc +++ b/paddle/fluid/operators/multiclass_nms_op.cc @@ -309,8 +309,7 @@ class MultiClassNMSKernel : public framework::OpKernel { class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker { public: - MultiClassNMSOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("BBoxes", "(Tensor) A 3-D Tensor with shape [N, M, 4] represents the " "predicted locations of M bounding bboxes, N is the batch size. 
" diff --git a/paddle/fluid/operators/multiplex_op.cc b/paddle/fluid/operators/multiplex_op.cc index b698c1bf8a05e053db07db34712a13c8074ee4d0..a4363fd25d57edb5c2509904a1f55634832613be 100644 --- a/paddle/fluid/operators/multiplex_op.cc +++ b/paddle/fluid/operators/multiplex_op.cc @@ -61,8 +61,7 @@ class MultiplexOp : public framework::OperatorWithKernel { class MultiplexOpMaker : public framework::OpProtoAndCheckerMaker { public: - MultiplexOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Ids", "The index tensor of multiplex operator."); AddInput("X", "The candidate tensors of multiplex operator.") .AsDuplicable(); diff --git a/paddle/fluid/operators/nccl_op.cc b/paddle/fluid/operators/nccl_op.cc index 5e4ed886b10bd48bf991ce84a9099611cf5d1d26..0018139cb06fe0573565c920849843e674df6f4c 100644 --- a/paddle/fluid/operators/nccl_op.cc +++ b/paddle/fluid/operators/nccl_op.cc @@ -76,8 +76,7 @@ class NCCLInitOpShapeInference : public framework::InferShapeBase { class NCCLInitOpMaker : public framework::OpProtoAndCheckerMaker { public: - NCCLInitOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(kParallelScopes, "The working place of parallel do."); AddOutput("Communicator", "Create Communicator for communicating between gpus"); @@ -118,8 +117,7 @@ class NCCLAllReduceOp : public framework::OperatorWithKernel { // AllReduceOp class NCCLAllReduceOpMaker : public framework::OpProtoAndCheckerMaker { public: - NCCLAllReduceOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of AllReduce op"); AddInput("Communicator", "Communicator for communicating between gpus"); AddOutput("Out", "The output of AllReduce op"); @@ -165,8 +163,7 @@ class NCCLReduceOp : public framework::OperatorWithKernel { // ReduceOp class NCCLReduceOpMaker : public framework::OpProtoAndCheckerMaker { public: - NCCLReduceOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of Reduce op"); AddInput("Communicator", "Communicator for communicating between gpus"); AddOutput("Out", "The output of Reduce op"); @@ -214,8 +211,7 @@ class NCCLBcastOp : public framework::OperatorWithKernel { // BcastOp class NCCLBcastOpMaker : public framework::OpProtoAndCheckerMaker { public: - NCCLBcastOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of BcastSend op"); AddInput("Communicator", "Communicator for communicating between gpus"); AddOutput("Out", "The output of Bcast"); diff --git a/paddle/fluid/operators/nce_op.cc b/paddle/fluid/operators/nce_op.cc index 192bdf8ea553f3a82066f8562458d286ee15a6ee..06092e680a1efbef379ccf40fdf476769f820429 100644 --- a/paddle/fluid/operators/nce_op.cc +++ b/paddle/fluid/operators/nce_op.cc @@ -75,8 +75,7 @@ class NCEOp : public framework::OperatorWithKernel { class NCEOpMaker : public framework::OpProtoAndCheckerMaker { public: - NCEOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Input", "(Tensor) A tensor of shape [batch_size, dim]."); AddInput( "Label", diff --git a/paddle/fluid/operators/norm_op.cc b/paddle/fluid/operators/norm_op.cc index 
30a991224fa184257a8e59af5e6a27a0b0a4da86..cdbc975c02214721ceae3a338741101ef32d7ee9 100644 --- a/paddle/fluid/operators/norm_op.cc +++ b/paddle/fluid/operators/norm_op.cc @@ -19,8 +19,7 @@ namespace operators { template class NormOpMaker : public framework::OpProtoAndCheckerMaker { public: - NormOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "X", "(Tensor) The input tensor of norm operator. " diff --git a/paddle/fluid/operators/one_hot_op.cc b/paddle/fluid/operators/one_hot_op.cc index 1d42dfdd765166c9596abc08ce8abd534453bc63..4fcb1d69935175c3f643db7a4da04db34492f8fb 100644 --- a/paddle/fluid/operators/one_hot_op.cc +++ b/paddle/fluid/operators/one_hot_op.cc @@ -46,8 +46,7 @@ class OneHotOp : public framework::OperatorWithKernel { class OneHotOpMaker : public framework::OpProtoAndCheckerMaker { public: - OneHotOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor, LoDTensor) Input variable with rank at least 2. " "The last dimension of X should be 1. Each value of X is an index " diff --git a/paddle/fluid/operators/pad_op.cc b/paddle/fluid/operators/pad_op.cc index d2a0106f80144e3550d73ea22f8e012426eb01ae..d4b631a6f5bf9332f4ed1d1a4bda529fbb6ada0a 100644 --- a/paddle/fluid/operators/pad_op.cc +++ b/paddle/fluid/operators/pad_op.cc @@ -48,8 +48,7 @@ class PadOp : public framework::OperatorWithKernel { class PadOpMaker : public framework::OpProtoAndCheckerMaker { public: - PadOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of pad op. " "The input should be a k-D tensor(k > 0 and k < 7)"); diff --git a/paddle/fluid/operators/parallel_do_op.cc b/paddle/fluid/operators/parallel_do_op.cc index ae34fe2184b43cc104c14672dec30efd3b0e9f3b..1012640d5e2052e4f347ad458cea9072a004f334 100644 --- a/paddle/fluid/operators/parallel_do_op.cc +++ b/paddle/fluid/operators/parallel_do_op.cc @@ -196,8 +196,7 @@ class ParallelDoOp : public framework::OperatorBase { class ParallelDoOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - ParallelDoOpProtoMaker(OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(kInputs, "").AsDuplicable(); AddInput(kParameters, "").AsDuplicable(); AddInput(kPlaces, ""); diff --git a/paddle/fluid/operators/pool_op.cc b/paddle/fluid/operators/pool_op.cc index f2de075e0d82fc5bd0ac41b481ac80314f3857a3..f4fb2b132fe8d59cb50f5a1f7359240ac50445fe 100644 --- a/paddle/fluid/operators/pool_op.cc +++ b/paddle/fluid/operators/pool_op.cc @@ -135,8 +135,7 @@ framework::OpKernelType PoolOpGrad::GetExpectedKernelType( library_); } -Pool2dOpMaker::Pool2dOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { +void Pool2dOpMaker::Make() { AddInput( "X", "(Tensor) The input tensor of pooling operator. " @@ -229,8 +228,7 @@ Example: )DOC"); } -Pool3dOpMaker::Pool3dOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { +void Pool3dOpMaker::Make() { AddInput("X", "(Tensor) The input tensor of pooling operator. 
" "The format of input tensor is NCDHW, where N is batch size, C is " diff --git a/paddle/fluid/operators/pool_op.h b/paddle/fluid/operators/pool_op.h index a48127ea6983d3d4ea12ec4925f30af233002ef2..a63963ca926bb94ff99e5cfe6dbcb2b15075bcb8 100644 --- a/paddle/fluid/operators/pool_op.h +++ b/paddle/fluid/operators/pool_op.h @@ -50,12 +50,12 @@ class PoolOpGrad : public framework::OperatorWithKernel { class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker { public: - Pool2dOpMaker(OpProto* proto, OpAttrChecker* op_checker); + void Make() override; }; class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker { public: - Pool3dOpMaker(OpProto* proto, OpAttrChecker* op_checker); + void Make() override; }; template diff --git a/paddle/fluid/operators/pool_with_index_op.cc b/paddle/fluid/operators/pool_with_index_op.cc index 848cd61b23c2389d3fe11f585b256d55c1ff177f..873706593e4c856f0079738654a9e7e59a1c0cd8 100644 --- a/paddle/fluid/operators/pool_with_index_op.cc +++ b/paddle/fluid/operators/pool_with_index_op.cc @@ -100,8 +100,7 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel { class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { public: - MaxPool2dWithIndexOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "X", "(Tensor) The input tensor of pooling operator. " @@ -177,8 +176,7 @@ Example: class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { public: - MaxPool3dWithIndexOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) The input tensor of pooling operator. " "The format of input tensor is NCDHW, where N is batch size, C is " diff --git a/paddle/fluid/operators/positive_negative_pair_op.cc b/paddle/fluid/operators/positive_negative_pair_op.cc index d237da25a00de13057e009b6705d3241b8b26539..4d865b7f17b050ac6f04addc9949f3f65da06ded 100644 --- a/paddle/fluid/operators/positive_negative_pair_op.cc +++ b/paddle/fluid/operators/positive_negative_pair_op.cc @@ -95,8 +95,7 @@ class PositiveNegativePairOp : public framework::OperatorWithKernel { class PositiveNegativePairOpMaker : public framework::OpProtoAndCheckerMaker { public: - PositiveNegativePairOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Score", "(Tensor, float) Model Score on an item (with " "respect to QueryID). It's a 2-D tensor with shape [batch_size, " diff --git a/paddle/fluid/operators/precision_recall_op.cc b/paddle/fluid/operators/precision_recall_op.cc index c34b0d072bdb2f5b97dd4615ff9338d98f2bfbe5..e7ce16f33fb5052ffb41fc05bd1538e2f0dc35be 100644 --- a/paddle/fluid/operators/precision_recall_op.cc +++ b/paddle/fluid/operators/precision_recall_op.cc @@ -90,8 +90,7 @@ class PrecisionRecallOp : public framework::OperatorWithKernel { class PrecisionRecallOpMaker : public framework::OpProtoAndCheckerMaker { public: - PrecisionRecallOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("MaxProbs", "(Tensor, default Tensor) A 2-D tensor with shape N x 1, " "where N is the batch size. 
Each row contains the max probability " diff --git a/paddle/fluid/operators/prefetch_op.cc b/paddle/fluid/operators/prefetch_op.cc index f9ae01ab5d2972d2a74b36ae6035985d1d874bb6..4cfea958e8e50156c90af8806414b043e15f8a9c 100644 --- a/paddle/fluid/operators/prefetch_op.cc +++ b/paddle/fluid/operators/prefetch_op.cc @@ -64,8 +64,7 @@ class PrefetchOp : public framework::OperatorBase { class PrefetchOpMaker : public framework::OpProtoAndCheckerMaker { public: - PrefetchOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddInput("X", "(LoDTensor) Input Id variables to be sent").AsDuplicable(); AddOutput("RPCClient", "(RPCClient) The RPC client object which will be" diff --git a/paddle/fluid/operators/prelu_op.cc b/paddle/fluid/operators/prelu_op.cc index a066b3e06e5eca2661827425b5b2d0059d5bcc3c..db040509bc08c3f6ad031c5b97c93574e31337e0 100644 --- a/paddle/fluid/operators/prelu_op.cc +++ b/paddle/fluid/operators/prelu_op.cc @@ -38,8 +38,7 @@ class PReluOp : public framework::OperatorWithKernel { class PReluOpMaker : public framework::OpProtoAndCheckerMaker { public: - PReluOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input tensor of prelu operator."); AddInput("Alpha", "The alpha weight of prelu operator."); AddOutput("Out", "The output tensor of prelu operator."); diff --git a/paddle/fluid/operators/print_op.cc b/paddle/fluid/operators/print_op.cc index fafc7e54d7a44d6bb2dadf67135537dc16430e76..db7634918a5179a61304315ecd08350d23fb4642 100644 --- a/paddle/fluid/operators/print_op.cc +++ b/paddle/fluid/operators/print_op.cc @@ -209,8 +209,7 @@ class TensorPrintOp : public framework::OperatorBase { class PrintOpProtoAndCheckMaker : public framework::OpProtoAndCheckerMaker { public: - PrintOpProtoAndCheckMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("In", "Input tensor to be displayed."); AddAttr("first_n", "Only log `first_n` number of times."); AddAttr("message", "A string message to print as a prefix."); diff --git a/paddle/fluid/operators/prior_box_op.cc b/paddle/fluid/operators/prior_box_op.cc index 058b13eeb872aaa77a88da37db64a6d59fbdd1cf..a0b069da0dda59c769723211533df8c33511fe3f 100644 --- a/paddle/fluid/operators/prior_box_op.cc +++ b/paddle/fluid/operators/prior_box_op.cc @@ -79,8 +79,7 @@ class PriorBoxOp : public framework::OperatorWithKernel { class PriorBoxOpMaker : public framework::OpProtoAndCheckerMaker { public: - PriorBoxOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Input", "(Tensor, default Tensor), " "the input feature data of PriorBoxOp, The layout is NCHW."); diff --git a/paddle/fluid/operators/proximal_adagrad_op.cc b/paddle/fluid/operators/proximal_adagrad_op.cc index e057244c1e974edea1b9bbc76c0585c295495299..8d8075d76111928ec9855eb0b70fe6dbd90a979b 100644 --- a/paddle/fluid/operators/proximal_adagrad_op.cc +++ b/paddle/fluid/operators/proximal_adagrad_op.cc @@ -66,8 +66,7 @@ class ProximalAdagradOp : public framework::OperatorWithKernel { class ProximalAdagradOpMaker : public framework::OpProtoAndCheckerMaker { public: - ProximalAdagradOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor, default Tensor) " "Input parameter that has to be updated."); 
diff --git a/paddle/fluid/operators/proximal_gd_op.cc b/paddle/fluid/operators/proximal_gd_op.cc index ed1472631870e5aee6b0e8b8f80bb5e6c84a3851..baf9cbcba2ed89f62afc9816e0ab9e0f112e6008 100644 --- a/paddle/fluid/operators/proximal_gd_op.cc +++ b/paddle/fluid/operators/proximal_gd_op.cc @@ -54,8 +54,7 @@ class ProximalGDOp : public framework::OperatorWithKernel { class ProximalGDOpMaker : public framework::OpProtoAndCheckerMaker { public: - ProximalGDOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor, default Tensor) " "Input parameter value that has to be updated."); diff --git a/paddle/fluid/operators/rank_loss_op.cc b/paddle/fluid/operators/rank_loss_op.cc index eb9ff8de3e4b37ef0bbf7477c1bb62856bdb6310..313cf01541dd88a0f4f8bf54fe4436984c2cbcf8 100644 --- a/paddle/fluid/operators/rank_loss_op.cc +++ b/paddle/fluid/operators/rank_loss_op.cc @@ -46,8 +46,7 @@ class RankLossOp : public framework::OperatorWithKernel { class RankLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - RankLossOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Label", "(2-D Tensor with shape [batch_size x 1]) " "The label indicating A ranked higher than B or not."); diff --git a/paddle/fluid/operators/read_op.cc b/paddle/fluid/operators/read_op.cc index bf02b9958927580608b95d6b8ecfddc7231a02d4..72a27d43584d55cd0859c63577ae85ff0f5fdfa8 100644 --- a/paddle/fluid/operators/read_op.cc +++ b/paddle/fluid/operators/read_op.cc @@ -79,8 +79,7 @@ class ReadOp : public framework::OperatorBase { class ReadOpMaker : public framework::OpProtoAndCheckerMaker { public: - ReadOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(op_proto, op_checker) { + void Make() override { AddInput("Reader", "(ReaderHolder) The executed reader."); AddOutput("Out", "(LoDTensor) The output data.").AsDuplicable(); AddComment(R"DOC( diff --git a/paddle/fluid/operators/reader/create_batch_reader_op.cc b/paddle/fluid/operators/reader/create_batch_reader_op.cc index 04c5872bef4600e30ba572a025cc5f0a5e9839ca..4cc7cbc6e89b0712faf9ad9c51480bce00da15f5 100644 --- a/paddle/fluid/operators/reader/create_batch_reader_op.cc +++ b/paddle/fluid/operators/reader/create_batch_reader_op.cc @@ -52,9 +52,8 @@ class CreateBatchReaderOp : public framework::OperatorBase { }; class CreateBatchReaderOpMaker : public DecoratedReaderMakerBase { - public: - CreateBatchReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) - : DecoratedReaderMakerBase(op_proto, op_checker) { + protected: + void Apply() override { AddAttr("batch_size", "How many instances the batch reader yields each time.") .GreaterThan(0); diff --git a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc index e5efac461512a9a1869318d6547233589ca45a77..bc830a2b72e657f79f4c94e24428d38ff2b7c42e 100644 --- a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc +++ b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc @@ -113,14 +113,13 @@ class CreateDoubleBufferReaderOp : public framework::OperatorBase { }; class CreateDoubleBufferReaderOpMaker : public DecoratedReaderMakerBase { - public: - CreateDoubleBufferReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) - : DecoratedReaderMakerBase(op_proto, op_checker) { + protected: + void Apply() override { AddComment(R"DOC( 
CreateDoubleBufferReader Operator A double buffer reader takes another reader as its 'underlying reader'. - It launches another thread to execute the 'underlying reader' asynchronously, + It launches another thread to execute the 'underlying reader' asynchronously, which prevents the reading process from blocking subsequent training. )DOC"); std::unordered_set enum_range; diff --git a/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc b/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc index 0573345ba502b6a9af35710840d5acf7634f332f..249b0b7c6dbc8b8104bce95562e6e9b2a28c77f8 100644 --- a/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc +++ b/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc @@ -65,20 +65,19 @@ class CreateMultiPassReaderOp : public framework::OperatorBase { }; class CreateMultiPassReaderOpMaker : public DecoratedReaderMakerBase { - public: - CreateMultiPassReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) - : DecoratedReaderMakerBase(op_proto, op_checker) { + protected: + void Apply() override { AddAttr("pass_num", "The number of passes to run.").GreaterThan(0); AddComment(R"DOC( CreateMultiPassReader Operator - This operator creates a multi-pass reader. A multi-pass reader - is used to yield data for several pass training continuously. + This operator creates a multi-pass reader. A multi-pass reader + is used to yield data continuously for several training passes. It takes the number of passes to run as one of its attributes - ('pass_num'), and maintains a pass counter to record how many - passes it has completed. When the underlying reader reaches the - EOF, the multi-pass reader checks whether it has completed training - of the given number of pass. If not, the underlying reader will + ('pass_num'), and maintains a pass counter to record how many + passes it has completed. When the underlying reader reaches the + EOF, the multi-pass reader checks whether it has completed training + of the given number of passes. If not, the underlying reader will be re-initialized and start a new pass automatically.
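(Editorial aside: the pass-counting behavior this DOC string describes can be sketched as a small decorator. The interface below is illustrative only; the real ReaderBase API differs.)

#include <memory>
#include <utility>
#include <vector>

// Illustrative reader interface, assumed for this sketch.
struct ReaderSketch {
  virtual ~ReaderSketch() = default;
  virtual bool ReadNext(std::vector<float>* out) = 0;  // false on EOF
  virtual void ReInit() = 0;
};

class MultiPassReaderSketch : public ReaderSketch {
 public:
  MultiPassReaderSketch(std::unique_ptr<ReaderSketch> underlying, int pass_num)
      : reader_(std::move(underlying)), pass_num_(pass_num) {}

  bool ReadNext(std::vector<float>* out) override {
    if (reader_->ReadNext(out)) return true;
    // Underlying reader hit EOF: start a new pass unless pass_num is reached.
    if (++pass_count_ >= pass_num_) return false;
    reader_->ReInit();
    return reader_->ReadNext(out);
  }

  void ReInit() override {
    pass_count_ = 0;
    reader_->ReInit();
  }

 private:
  std::unique_ptr<ReaderSketch> reader_;
  int pass_num_;
  int pass_count_ = 0;
};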
)DOC"); } diff --git a/paddle/fluid/operators/reader/create_random_data_generator_op.cc b/paddle/fluid/operators/reader/create_random_data_generator_op.cc index d1cb8e47da70cab784858caea7e791151fc104dd..55bb9739e0239d31f63c3d8703bcf1d18bf459dc 100644 --- a/paddle/fluid/operators/reader/create_random_data_generator_op.cc +++ b/paddle/fluid/operators/reader/create_random_data_generator_op.cc @@ -84,9 +84,8 @@ class CreateRandomDataGeneratorOp : public framework::OperatorBase { }; class CreateRandomDataGeneratorOpMaker : public FileReaderMakerBase { - public: - CreateRandomDataGeneratorOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) - : FileReaderMakerBase(op_proto, op_checker) { + protected: + void Apply() override { AddAttr("min", "The lower bound of reader's uniform distribution."); AddAttr("max", "The upper bound of reader's uniform distribution."); AddComment(R"DOC( diff --git a/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc b/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc index 2ae29725561769ebe6428002c9983246b8eec724..282ec3f36b98e7aa62d71fb04f72721a5464e21c 100644 --- a/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc +++ b/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc @@ -76,9 +76,8 @@ class CreateRecordIOReaderOp : public framework::OperatorBase { }; class CreateRecordIOReaderOpMaker : public FileReaderMakerBase { - public: - CreateRecordIOReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) - : FileReaderMakerBase(op_proto, op_checker) { + protected: + void Apply() override { AddAttr("filename", "The filename of record io reader"); AddComment(R"DOC( CreateRecordIOReader Operator diff --git a/paddle/fluid/operators/reader/create_shuffle_reader_op.cc b/paddle/fluid/operators/reader/create_shuffle_reader_op.cc index 13825d65913be95f4f444bd9d5271a036ec8b1e2..fd233be945932eee9f9a3c0c578a43d5b7cc83aa 100644 --- a/paddle/fluid/operators/reader/create_shuffle_reader_op.cc +++ b/paddle/fluid/operators/reader/create_shuffle_reader_op.cc @@ -92,9 +92,8 @@ class CreateShuffleReaderOp : public framework::OperatorBase { }; class CreateShuffleReaderOpMaker : public DecoratedReaderMakerBase { - public: - CreateShuffleReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) - : DecoratedReaderMakerBase(op_proto, op_checker) { + protected: + void Apply() override { AddAttr("buffer_size", "The shuffle buffer size.").GreaterThan(0); AddComment(R"DOC( CreateShuffleReader Operator diff --git a/paddle/fluid/operators/reader/create_threaded_reader_op.cc b/paddle/fluid/operators/reader/create_threaded_reader_op.cc index 1cb9bd36455a2287b8ba4fb4ca14a4c5338da098..1db70f3e9699dba604569c36dc35025dfe2c94fe 100644 --- a/paddle/fluid/operators/reader/create_threaded_reader_op.cc +++ b/paddle/fluid/operators/reader/create_threaded_reader_op.cc @@ -53,17 +53,16 @@ class CreateThreadedReaderOp : public framework::OperatorBase { }; class CreateThreadedReaderOpMaker : public DecoratedReaderMakerBase { - public: - CreateThreadedReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) - : DecoratedReaderMakerBase(op_proto, op_checker) { + protected: + void Apply() override { AddComment(R"DOC( CreateThreadedReader Operator - This operator creates a threaded reader. A threaded reader's - 'ReadNext()' can be invoked by several threads at the same - time. - When the attribute 'safe_mode' is true, the threaded reader's - 'ReInit()' is disabled to avoid unexpected bugs in multi-thread + This operator creates a threaded reader. 
A threaded reader's + 'ReadNext()' can be invoked by several threads at the same + time. + When the attribute 'safe_mode' is true, the threaded reader's + 'ReInit()' is disabled to avoid unexpected bugs in multi-thread environment. )DOC"); } diff --git a/paddle/fluid/operators/reader/open_files_op.cc b/paddle/fluid/operators/reader/open_files_op.cc index 91ad7d56583446ee4686e74187de166f387125df..8c0dac65dd691954b112bfa61622d399b2b9c3e5 100644 --- a/paddle/fluid/operators/reader/open_files_op.cc +++ b/paddle/fluid/operators/reader/open_files_op.cc @@ -185,9 +185,8 @@ class OpenFilesOp : public framework::OperatorBase { }; class OpenFilesOpMaker : public FileReaderMakerBase { - public: - OpenFilesOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) - : FileReaderMakerBase(op_proto, op_checker) { + protected: + void Apply() override { AddAttr>("file_names", "Files to be read."); AddAttr("thread_num", "The maximal concurrent prefetch thread number.") .GreaterThan(0); @@ -196,7 +195,7 @@ class OpenFilesOpMaker : public FileReaderMakerBase { AddComment(R"DOC( OpenFiles Operator - An OpenFilesOp creates a MultiFileReader, which is able to + An OpenFilesOp creates a MultiFileReader, which is able to read data multi-threaded from multiple files. )DOC"); } diff --git a/paddle/fluid/operators/reader/reader_op_registry.cc b/paddle/fluid/operators/reader/reader_op_registry.cc index 3ff4536819b128d9c593b97f4942a0292a3b6b36..11f1ddebc48134158315ea70a2d2b9e07f2e2469 100644 --- a/paddle/fluid/operators/reader/reader_op_registry.cc +++ b/paddle/fluid/operators/reader/reader_op_registry.cc @@ -53,10 +53,7 @@ std::unique_ptr CreateReaderByFileName( return std::unique_ptr(reader); } -FileReaderMakerBase::FileReaderMakerBase( - framework::OpProtoAndCheckerMaker::OpProto* op_proto, - framework::OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(op_proto, op_checker) { +void FileReaderMakerBase::Make() { AddOutput("Out", "(ReaderHolder) The created random reader.").AsDuplicable(); AddAttr>("shape_concat", "The concat of all data's shapes."); AddAttr>( @@ -68,6 +65,7 @@ FileReaderMakerBase::FileReaderMakerBase( "It means the reader will generate two data each time," "whose shapes are [2,3,4] and [5,6] respectively."); AddAttr>("lod_levels", "The LoD levels of each data."); + Apply(); } void FileReaderInferShape::operator()(framework::InferShapeContext* ctx) const { @@ -127,13 +125,11 @@ void DecoratedReaderInferVarType::operator()( out_reader->SetDataTypes(in_reader->GetDataTypes()); } -DecoratedReaderMakerBase::DecoratedReaderMakerBase( - framework::OpProtoAndCheckerMaker::OpProto* op_proto, - framework::OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(op_proto, op_checker) { +void DecoratedReaderMakerBase::Make() { AddInput("UnderlyingReader", "(ReaderHolder) The underlying reader for creating a batch reader."); AddOutput("Out", "(ReaderHolder) The created batch reader."); + Apply(); } } // namespace reader diff --git a/paddle/fluid/operators/reader/reader_op_registry.h b/paddle/fluid/operators/reader/reader_op_registry.h index ec25f55ef5c3bb691b1213328b996c080656bb7b..244bf15f068a47efc29ee54492cdbdeb10025020 100644 --- a/paddle/fluid/operators/reader/reader_op_registry.h +++ b/paddle/fluid/operators/reader/reader_op_registry.h @@ -47,7 +47,10 @@ extern std::vector RestoreShapes( class FileReaderMakerBase : public framework::OpProtoAndCheckerMaker { public: - FileReaderMakerBase(OpProto* op_proto, OpAttrChecker* op_checker); + void Make() final; + + protected: + virtual void Apply() = 0; }; class 
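(Editorial aside: the reader-maker refactor above is a textbook template method. `Make()` becomes `final`, registers the attributes shared by every file reader, and defers the op-specific part to a protected pure-virtual `Apply()`. A self-contained sketch with simplified stand-in names:)

#include <iostream>

// Standalone sketch of the Make()/Apply() split introduced above
// (simplified; no OpProto plumbing, and Make() is 'final' only in the
// real base class).
class FileReaderMakerSketch {
 public:
  virtual ~FileReaderMakerSketch() = default;
  void Make() {  // shared registration happens first
    std::cout << "common attrs: shape_concat, ranks, lod_levels\n";
    Apply();     // then the derived maker adds its own attributes
  }

 protected:
  virtual void Apply() = 0;
};

class RandomDataGeneratorMakerSketch : public FileReaderMakerSketch {
 protected:
  void Apply() override {
    std::cout << "specific attrs: min, max\n";
  }
};

int main() {
  RandomDataGeneratorMakerSketch maker;
  maker.Make();  // prints the common attrs, then min/max
}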
FileReaderInferShape : public framework::InferShapeBase { @@ -76,7 +79,10 @@ class DecoratedReaderInferVarType : public framework::VarTypeInference { class DecoratedReaderMakerBase : public framework::OpProtoAndCheckerMaker { public: - DecoratedReaderMakerBase(OpProto* op_proto, OpAttrChecker* op_checker); + void Make() final; + + protected: + virtual void Apply() = 0; }; } // namespace reader diff --git a/paddle/fluid/operators/recurrent_op.cc b/paddle/fluid/operators/recurrent_op.cc index 72c2905872c528a7ed05820744f4031799ad9e46..9c1cee7022a9b9a98f026f7602f0f7badc44a49b 100644 --- a/paddle/fluid/operators/recurrent_op.cc +++ b/paddle/fluid/operators/recurrent_op.cc @@ -508,8 +508,7 @@ class RecurrentGradOp : public RecurrentBase { class RecurrentOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - RecurrentOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(kInputs, "rnn inputs").AsDuplicable(); AddInput(kInitialStates, "rnn initial states").AsDuplicable(); AddInput(kParameters, diff --git a/paddle/fluid/operators/recv_op.cc b/paddle/fluid/operators/recv_op.cc index a4dcf704a63ae3bad6567ddb042ea23513bccff7..7148bd0e363a71b58581a6c3c5f245d98d5b9d02 100644 --- a/paddle/fluid/operators/recv_op.cc +++ b/paddle/fluid/operators/recv_op.cc @@ -53,8 +53,7 @@ class RecvOp : public framework::OperatorBase { class RecvOpMaker : public framework::OpProtoAndCheckerMaker { public: - RecvOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddOutput("Out", "(Tensor) Variables to get from server.").AsDuplicable(); AddComment(R"DOC( Recv operator diff --git a/paddle/fluid/operators/reduce_op.cc b/paddle/fluid/operators/reduce_op.cc index 093db966472cf100b2f1e4159ce20399cee1f481..eb8c21179db690e20db29c21892fd6258dd75579 100644 --- a/paddle/fluid/operators/reduce_op.cc +++ b/paddle/fluid/operators/reduce_op.cc @@ -90,8 +90,7 @@ class ReduceGradOp : public framework::OperatorWithKernel { class ReduceOpMaker : public framework::OpProtoAndCheckerMaker { public: - ReduceOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() final { AddInput("X", "(Tensor) The input tensor. Tensors with rank at most 6 are " "supported."); @@ -111,78 +110,20 @@ class ReduceOpMaker : public framework::OpProtoAndCheckerMaker { "(bool, default false) " "If true, output a scalar reduced along all dimensions.") .SetDefault(false); - comment_ = R"DOC( -{ReduceOp} Operator. + AddComment(string::Sprintf(R"DOC( +%s Operator. -This operator computes the {reduce} of input tensor along the given dimension. +This operator computes the %s of input tensor along the given dimension. The result tensor has 1 fewer dimension than the input unless keep_dim is true. If reduce_all is true, just reduce along all dimensions and output a scalar. 
-)DOC"; - AddComment(comment_); +)DOC", + GetOpType(), GetName())); } protected: - std::string comment_; - - void Replace(std::string *src, std::string from, std::string to) { - std::size_t len_from = std::strlen(from.c_str()); - std::size_t len_to = std::strlen(to.c_str()); - for (std::size_t pos = src->find(from); pos != std::string::npos; - pos = src->find(from, pos + len_to)) { - src->replace(pos, len_from, to); - } - } - - void SetComment(std::string name, std::string op) { - Replace(&comment_, "{ReduceOp}", name); - Replace(&comment_, "{reduce}", op); - } -}; - -class ReduceSumOpMaker : public ReduceOpMaker { - public: - ReduceSumOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : ReduceOpMaker(proto, op_checker) { - SetComment("ReduceSum", "sum"); - AddComment(comment_); - } -}; - -class ReduceMeanOpMaker : public ReduceOpMaker { - public: - ReduceMeanOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : ReduceOpMaker(proto, op_checker) { - SetComment("ReduceMean", "mean"); - AddComment(comment_); - } -}; - -class ReduceMaxOpMaker : public ReduceOpMaker { - public: - ReduceMaxOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : ReduceOpMaker(proto, op_checker) { - SetComment("ReduceMax", "max"); - AddComment(comment_); - } -}; - -class ReduceMinOpMaker : public ReduceOpMaker { - public: - ReduceMinOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : ReduceOpMaker(proto, op_checker) { - SetComment("ReduceMin", "min"); - AddComment(comment_); - } -}; - -class ReduceProdOpMaker : public ReduceOpMaker { - public: - ReduceProdOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : ReduceOpMaker(proto, op_checker) { - SetComment("ReduceProd", "production"); - AddComment(comment_); - } + virtual std::string GetName() const = 0; + virtual std::string GetOpType() const = 0; }; } // namespace operators @@ -190,25 +131,21 @@ class ReduceProdOpMaker : public ReduceOpMaker { namespace ops = paddle::operators; -REGISTER_OPERATOR(reduce_sum, ops::ReduceOp, ops::ReduceSumOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(reduce_sum_grad, ops::ReduceGradOp); - -REGISTER_OPERATOR(reduce_mean, ops::ReduceOp, ops::ReduceMeanOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(reduce_mean_grad, ops::ReduceGradOp); - -REGISTER_OPERATOR(reduce_max, ops::ReduceOp, ops::ReduceMaxOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(reduce_max_grad, ops::ReduceGradOp); - -REGISTER_OPERATOR(reduce_min, ops::ReduceOp, ops::ReduceMinOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(reduce_min_grad, ops::ReduceGradOp); - -REGISTER_OPERATOR(reduce_prod, ops::ReduceOp, ops::ReduceProdOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(reduce_prod_grad, ops::ReduceGradOp); +#define REGISTER_REDUCE_OP(op_name) \ + class __##op_name##Maker__ : public ops::ReduceOpMaker { \ + protected: \ + virtual std::string GetName() const { return #op_name; } \ + virtual std::string GetOpType() const { return "Reduce " #op_name; } \ + }; \ + REGISTER_OPERATOR(reduce_##op_name, ops::ReduceOp, __##op_name##Maker__, \ + paddle::framework::DefaultGradOpDescMaker); \ + REGISTER_OPERATOR(reduce_##op_name##_grad, ops::ReduceGradOp) + +REGISTER_REDUCE_OP(sum); +REGISTER_REDUCE_OP(mean); +REGISTER_REDUCE_OP(max); +REGISTER_REDUCE_OP(min); +REGISTER_REDUCE_OP(prod); #define REGISTER_REDUCE_CPU_KERNEL(reduce_type, functor, grad_functor) \ REGISTER_OP_CPU_KERNEL(reduce_type, \ diff --git 
a/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc b/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc index 5c3e1f5678df0270c837ed407d1e6cc662276880..e4f4fe358e0e8cd2080525227f14a3d40f3c1411 100644 --- a/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc +++ b/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc @@ -23,9 +23,7 @@ namespace operators { class ReorderLoDTensorByRankTableOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - ReorderLoDTensorByRankTableOpProtoMaker(OpProto *proto, - OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor), the input lod tensor to be reordered according to " "Input(RankTable)."); diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index 5e5ccc3ded95d57dfed37c1ac9c7eae61d36b8c0..7f743f577fbcdaf6f62e01031e25ef09a842c2e9 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -22,8 +22,7 @@ namespace operators { class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker { public: - ReshapeOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor). The input tensor of reshape operator."); AddInput("Shape", "(Tensor, optional). If provided, reshape according to " diff --git a/paddle/fluid/operators/rmsprop_op.cc b/paddle/fluid/operators/rmsprop_op.cc index a8855b3ccd1686c75953e762ce42cc27b26202e6..919ebe48ca38040274bd2052b95ef96eccff4db6 100644 --- a/paddle/fluid/operators/rmsprop_op.cc +++ b/paddle/fluid/operators/rmsprop_op.cc @@ -63,8 +63,7 @@ class RmspropOp : public framework::OperatorWithKernel { class RmspropOpMaker : public framework::OpProtoAndCheckerMaker { public: - RmspropOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor, default Tensor) " "Input parameter value that has to be updated."); diff --git a/paddle/fluid/operators/rnn_memory_helper_op.cc b/paddle/fluid/operators/rnn_memory_helper_op.cc index 70f205d887ef710aeed02905713200ce32988987..23e5fc1112d0b1e634d0ab288721cbba57b3ffe5 100644 --- a/paddle/fluid/operators/rnn_memory_helper_op.cc +++ b/paddle/fluid/operators/rnn_memory_helper_op.cc @@ -59,8 +59,7 @@ class RNNMemoryHelperOpShapeInference : public framework::InferShapeBase { class RNNMemoryHelperOpInfoMaker : public framework::OpProtoAndCheckerMaker { public: - RNNMemoryHelperOpInfoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", ""); AddOutput("Out", ""); AddAttr("dtype", @@ -117,8 +116,7 @@ class RNNMemoryHelperGradOp : public framework::OperatorBase { class RNNMemoryHelperGradOpInfoMaker : public framework::OpProtoAndCheckerMaker { public: - RNNMemoryHelperGradOpInfoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(framework::GradVarName("Out"), ""); AddInput("X", ""); AddInput("Out", ""); diff --git a/paddle/fluid/operators/roi_pool_op.cc b/paddle/fluid/operators/roi_pool_op.cc index 397e49ef20ac45515a852f466d693f358ef5461b..293abb0ea4f1ac03c3889ce2937ef8fa0845db73 100644 --- a/paddle/fluid/operators/roi_pool_op.cc +++ b/paddle/fluid/operators/roi_pool_op.cc @@ -98,8 +98,7 @@ class ROIPoolGradOp : public framework::OperatorWithKernel { class ROIPoolOpMaker : public 
framework::OpProtoAndCheckerMaker { public: - ROIPoolOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor), " "the input of ROIPoolOp. " diff --git a/paddle/fluid/operators/row_conv_op.cc b/paddle/fluid/operators/row_conv_op.cc index 23f720da0b68cd2fd4c9b51182bf82f72078a906..20f140f962c3aac364a1239a663d5f340bbeb6b2 100644 --- a/paddle/fluid/operators/row_conv_op.cc +++ b/paddle/fluid/operators/row_conv_op.cc @@ -76,8 +76,7 @@ class RowConvGradOp : public framework::OperatorWithKernel { class RowConvOpMaker : public framework::OpProtoAndCheckerMaker { public: - RowConvOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor), the input(X) is a LodTensor, which supports " "variable time-length input sequences. The underlying tensor " diff --git a/paddle/fluid/operators/save_combine_op.cc b/paddle/fluid/operators/save_combine_op.cc index 94703393bfa53124d16e34ae4373773eece5f11f..cfee9207083b46f7c27354f22e82a7d3c38a027c 100644 --- a/paddle/fluid/operators/save_combine_op.cc +++ b/paddle/fluid/operators/save_combine_op.cc @@ -18,6 +18,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/data_type_transform.h" #include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" @@ -69,6 +70,7 @@ class SaveCombineOp : public framework::OperatorBase { const platform::Place &place) const override { auto filename = Attr("file_path"); auto overwrite = Attr("overwrite"); + auto save_as_fp16 = Attr("save_as_fp16"); bool is_present = FileExists(filename); if (is_present && !overwrite) { @@ -100,8 +102,24 @@ class SaveCombineOp : public framework::OperatorBase { inp_var_names[i]); auto &tensor = var->Get(); - // Serialize tensor - framework::SerializeToStream(fout, tensor, dev_ctx); + // Serialize tensors one by one + + // Check types to see if a fp16 transformation is required + auto in_dtype = framework::ToDataType(tensor.type()); + auto out_dtype = + save_as_fp16 ? framework::proto::VarType::FP16 : in_dtype; + + if (in_dtype != out_dtype) { + auto in_kernel_type = framework::OpKernelType(in_dtype, place); + auto out_kernel_type = framework::OpKernelType(out_dtype, place); + framework::LoDTensor out; + // copy LoD info to the new tensor + out.set_lod(tensor.lod()); + framework::TransDataType(in_kernel_type, out_kernel_type, tensor, &out); + framework::SerializeToStream(fout, out, dev_ctx); + } else { + framework::SerializeToStream(fout, tensor, dev_ctx); + } } fout.close(); } @@ -109,8 +127,7 @@ class SaveCombineOp : public framework::OperatorBase { class SaveCombineOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - SaveCombineOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "X", "(vector) Input LoDTensors that need to be saved together in a file.") @@ -125,6 +142,12 @@ to a file on disk. "(boolean, default true)" "Overwrite the output file if it exists.") .SetDefault(true); + AddAttr("save_as_fp16", + "(boolean, default false)" + "If true, the tensor will be converted to float16 data " + "type and then saved. 
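(Editorial aside: the `save_as_fp16` branch added to SaveCombineOp above only converts when the source and target data types actually differ, and it copies the LoD before converting. Condensed into one helper for reference; the helper name and simplified signature are this editor's, not part of the patch, and the body assumes the Fluid headers included by save_combine_op.cc.)

// Condensed restatement of the save path added above.
void SerializeMaybeAsFP16(std::ofstream& fout,
                          const framework::LoDTensor& tensor,
                          bool save_as_fp16, const platform::Place& place,
                          const platform::DeviceContext& dev_ctx) {
  auto in_dtype = framework::ToDataType(tensor.type());
  auto out_dtype =
      save_as_fp16 ? framework::proto::VarType::FP16 : in_dtype;
  if (in_dtype != out_dtype) {
    framework::LoDTensor out;
    out.set_lod(tensor.lod());  // keep LoD info across the dtype change
    framework::TransDataType(framework::OpKernelType(in_dtype, place),
                             framework::OpKernelType(out_dtype, place),
                             tensor, &out);
    framework::SerializeToStream(fout, out, dev_ctx);
  } else {
    framework::SerializeToStream(fout, tensor, dev_ctx);  // no conversion
  }
}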
Otherwise, the tensor will be " + "directly saved without data type conversion.") + .SetDefault(false); AddAttr( "file_path", "(string)" diff --git a/paddle/fluid/operators/save_load_combine_op_test.cc b/paddle/fluid/operators/save_load_combine_op_test.cc index 2773c32a0a10269e28c24e12527711e3c5b8f869..47618c51d98eb9f58988f82c0aee0083565d81a6 100644 --- a/paddle/fluid/operators/save_load_combine_op_test.cc +++ b/paddle/fluid/operators/save_load_combine_op_test.cc @@ -17,15 +17,17 @@ limitations under the License. */ #include #include "gtest/gtest.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/float16.h" USE_NO_KERNEL_OP(save_combine); USE_NO_KERNEL_OP(load_combine); -int* CreateForSaveCombineOp(int x, int y, const std::vector& lod_info, - std::string var_name, - const paddle::platform::CPUPlace& place, - paddle::framework::Scope* scope, - paddle::framework::LoD* expect_lod) { +template +T* CreateForSaveCombineOp(int x, int y, const std::vector& lod_info, + std::string var_name, + const paddle::platform::CPUPlace& place, + paddle::framework::Scope* scope, + paddle::framework::LoD* expect_lod) { auto var = scope->Var(var_name); auto tensor = var->GetMutable(); tensor->Resize({x, y}); @@ -34,9 +36,10 @@ int* CreateForSaveCombineOp(int x, int y, const std::vector& lod_info, (*expect_lod)[0].push_back(lod_info[i]); } tensor->set_lod(*expect_lod); - int* expect = tensor->mutable_data(place); + T* expect = tensor->mutable_data(place); for (int64_t i = 0; i < tensor->numel(); ++i) { - expect[i] = static_cast(i); + expect[i] = static_cast( + static_cast(i)); // For FP16, we intend to do float(float16(i)) } return expect; } @@ -48,18 +51,20 @@ paddle::framework::LoDTensor* GeneratePlaceholderBeforeLoad( return target; } -int* GetValuesAfterLoadCombineOp(paddle::framework::LoDTensor* target, - const paddle::framework::Scope& scope, - paddle::framework::LoD* actual_lod) { - int* actual = target->data(); +template +T* GetValuesAfterLoadCombineOp(paddle::framework::LoDTensor* target, + const paddle::framework::Scope& scope, + paddle::framework::LoD* actual_lod) { + T* actual = target->data(); *actual_lod = target->lod(); return actual; } -void CheckValues(int* expect, int* actual, paddle::framework::LoD expect_lod, - paddle::framework::LoD actual_lod, const int& numel) { - for (int64_t i = 0; i < numel; ++i) { - EXPECT_EQ(expect[i], actual[i]); +template +void CheckValues(T* expect, U* actual, const paddle::framework::LoD& expect_lod, + const paddle::framework::LoD& actual_lod, const int& numel) { + for (int i = 0; i < numel; ++i) { + EXPECT_EQ(expect[i], static_cast(actual[i])); } EXPECT_EQ(expect_lod.size(), actual_lod.size()); for (size_t i = 0; i < expect_lod.size(); ++i) { @@ -78,26 +83,26 @@ TEST(SaveLoadCombineOp, CPU) { std::vector lod1 = {0, 1, 2, 3, 10}; int numel1 = 100; paddle::framework::LoD expect_lod1; - int* expect1 = CreateForSaveCombineOp(10, 10, lod1, "test_var1", place, - &scope, &expect_lod1); + int* expect1 = CreateForSaveCombineOp(10, 10, lod1, "test_var1", + place, &scope, &expect_lod1); std::vector lod2 = {0, 2, 5, 10}; int numel2 = 200; paddle::framework::LoD expect_lod2; - int* expect2 = CreateForSaveCombineOp(10, 20, lod2, "test_var2", place, - &scope, &expect_lod2); + int* expect2 = CreateForSaveCombineOp(10, 20, lod2, "test_var2", + place, &scope, &expect_lod2); std::vector lod3 = {0, 2, 3, 20}; int numel3 = 4000; paddle::framework::LoD expect_lod3; - int* expect3 = CreateForSaveCombineOp(20, 200, lod3, "test_var3", place, - &scope, 
&expect_lod3); + int* expect3 = CreateForSaveCombineOp(20, 200, lod3, "test_var3", + place, &scope, &expect_lod3); std::vector lod4 = {0, 1, 20}; int numel4 = 1000; paddle::framework::LoD expect_lod4; - int* expect4 = CreateForSaveCombineOp(20, 50, lod4, "test_var4", place, - &scope, &expect_lod4); + int* expect4 = CreateForSaveCombineOp(20, 50, lod4, "test_var4", + place, &scope, &expect_lod4); // Set attributes std::string filename = "check_tensor.ls"; @@ -123,15 +128,92 @@ TEST(SaveLoadCombineOp, CPU) { load_combine_op->Run(scope, place); paddle::framework::LoD actual_lod1, actual_lod2, actual_lod3, actual_lod4; - int* actual1 = GetValuesAfterLoadCombineOp(target1, scope, &actual_lod1); - int* actual2 = GetValuesAfterLoadCombineOp(target2, scope, &actual_lod2); - int* actual3 = GetValuesAfterLoadCombineOp(target3, scope, &actual_lod3); - int* actual4 = GetValuesAfterLoadCombineOp(target4, scope, &actual_lod4); - - CheckValues(expect1, actual1, expect_lod1, actual_lod1, numel1); - CheckValues(expect2, actual2, expect_lod2, actual_lod2, numel2); - CheckValues(expect3, actual3, expect_lod3, actual_lod3, numel3); - CheckValues(expect4, actual4, expect_lod4, actual_lod4, numel4); + int* actual1 = GetValuesAfterLoadCombineOp(target1, scope, &actual_lod1); + int* actual2 = GetValuesAfterLoadCombineOp(target2, scope, &actual_lod2); + int* actual3 = GetValuesAfterLoadCombineOp(target3, scope, &actual_lod3); + int* actual4 = GetValuesAfterLoadCombineOp(target4, scope, &actual_lod4); + + CheckValues(expect1, actual1, expect_lod1, actual_lod1, numel1); + CheckValues(expect2, actual2, expect_lod2, actual_lod2, numel2); + CheckValues(expect3, actual3, expect_lod3, actual_lod3, numel3); + CheckValues(expect4, actual4, expect_lod4, actual_lod4, numel4); +} + +// FP16 version of SaveLoadCombineOp Test +TEST(SaveLoadCombineFP16Op, CPU) { + paddle::framework::Scope scope; + paddle::platform::CPUPlace place; + + std::vector lod1 = {0, 1, 2, 3, 10}; + int numel1 = 100; + paddle::framework::LoD expect_lod1; + float* expect1 = CreateForSaveCombineOp( + 10, 10, lod1, "test_var1", place, &scope, &expect_lod1); + + std::vector lod2 = {0, 2, 5, 10}; + int numel2 = 200; + paddle::framework::LoD expect_lod2; + float* expect2 = CreateForSaveCombineOp( + 10, 20, lod2, "test_var2", place, &scope, &expect_lod2); + + std::vector lod3 = {0, 20}; + int numel3 = 4000; + paddle::framework::LoD expect_lod3; + float* expect3 = CreateForSaveCombineOp( + 20, 200, lod3, "test_var3", place, &scope, &expect_lod3); + + std::vector lod4 = {0, 1, 20}; + int numel4 = 1000; + paddle::framework::LoD expect_lod4; + float* expect4 = CreateForSaveCombineOp( + 20, 50, lod4, "test_var4", place, &scope, &expect_lod4); + + // Set attributes + std::string filename = "check_tensor_fp16.ls"; + paddle::framework::AttributeMap attrs; + attrs.insert({"file_path", std::string(filename)}); + attrs.insert({"save_as_fp16", true}); + + // Run the save_combine_op + auto save_combine_op = paddle::framework::OpRegistry::CreateOp( + "save_combine", + {{"X", {"test_var1", "test_var2", "test_var3", "test_var4"}}}, {}, attrs); + save_combine_op->Run(scope, place); + + // Set up output vars + auto target1 = GeneratePlaceholderBeforeLoad("out_var1", &scope); + auto target2 = GeneratePlaceholderBeforeLoad("out_var2", &scope); + auto target3 = GeneratePlaceholderBeforeLoad("out_var3", &scope); + auto target4 = GeneratePlaceholderBeforeLoad("out_var4", &scope); + + // Run the load_combine_op + auto load_combine_op = paddle::framework::OpRegistry::CreateOp( + 
"load_combine", {}, + {{"Out", {"out_var1", "out_var2", "out_var3", "out_var4"}}}, attrs); + load_combine_op->Run(scope, place); + + paddle::framework::LoD actual_lod1, actual_lod2, actual_lod3, actual_lod4; + paddle::platform::float16* actual1 = + GetValuesAfterLoadCombineOp(target1, scope, + &actual_lod1); + paddle::platform::float16* actual2 = + GetValuesAfterLoadCombineOp(target2, scope, + &actual_lod2); + paddle::platform::float16* actual3 = + GetValuesAfterLoadCombineOp(target3, scope, + &actual_lod3); + paddle::platform::float16* actual4 = + GetValuesAfterLoadCombineOp(target4, scope, + &actual_lod4); + + CheckValues(expect1, actual1, expect_lod1, + actual_lod1, numel1); + CheckValues(expect2, actual2, expect_lod2, + actual_lod2, numel2); + CheckValues(expect3, actual3, expect_lod3, + actual_lod3, numel3); + CheckValues(expect4, actual4, expect_lod4, + actual_lod4, numel4); } // Test with original SaveLoadTest @@ -141,7 +223,7 @@ TEST(SaveLoadTestWithCombineOp, CPU) { auto var = scope.Var("test_var"); auto tensor = var->GetMutable(); - tensor->Resize({3, 10}); + tensor->Resize({3, 4000}); paddle::framework::LoD expect_lod; expect_lod.resize(1); expect_lod[0].push_back(0); diff --git a/paddle/fluid/operators/save_load_op_test.cc b/paddle/fluid/operators/save_load_op_test.cc index 8d5c17490c08075723a1180d9b16e6000fc5a779..c4fcc61af4b75e6dc7d5c31e20c5fff358637af5 100644 --- a/paddle/fluid/operators/save_load_op_test.cc +++ b/paddle/fluid/operators/save_load_op_test.cc @@ -70,7 +70,14 @@ TEST(SaveFP16Op, CPU) { auto var = scope.Var("test_var"); auto tensor = var->GetMutable(); tensor->Resize({3, 10}); + paddle::framework::LoD expect_lod; + expect_lod.resize(1); + expect_lod[0].push_back(0); + expect_lod[0].push_back(1); + expect_lod[0].push_back(2); + expect_lod[0].push_back(3); + tensor->set_lod(expect_lod); float* expect = tensor->mutable_data(place); for (int64_t i = 0; i < tensor->numel(); ++i) { expect[i] = static_cast(paddle::platform::float16(i)); @@ -93,6 +100,13 @@ TEST(SaveFP16Op, CPU) { for (int64_t i = 0; i < tensor->numel(); ++i) { EXPECT_EQ(expect[i], static_cast(actual[i])); } + auto& actual_lod = target->lod(); + EXPECT_EQ(expect_lod.size(), actual_lod.size()); + for (size_t i = 0; i < expect_lod.size(); ++i) { + for (size_t j = 0; j < expect_lod[i].size(); ++j) { + EXPECT_EQ(expect_lod[i][j], actual_lod[i][j]); + } + } } TEST(LoadFP16Op, CPU) { diff --git a/paddle/fluid/operators/save_op.cc b/paddle/fluid/operators/save_op.cc index dcc1b9ec204e9e273b8fd2b12f2423fc989ba502..e6d27e2dedd7668b93bd8ddc330a897d1c6fa732 100644 --- a/paddle/fluid/operators/save_op.cc +++ b/paddle/fluid/operators/save_op.cc @@ -117,8 +117,7 @@ class SaveOp : public framework::OperatorBase { class SaveOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - SaveOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor ) Input tensor to be saved"); AddComment(R"DOC( Save operator diff --git a/paddle/fluid/operators/scale_op.cc b/paddle/fluid/operators/scale_op.cc index 7dcf33c989c3bcd905da8017ee36ec8ce8032911..4687e21e7155fc7309fb28c881c0d47152df9ad5 100644 --- a/paddle/fluid/operators/scale_op.cc +++ b/paddle/fluid/operators/scale_op.cc @@ -37,8 +37,7 @@ class ScaleOp : public framework::OperatorWithKernel { class ScaleOpMaker : public framework::OpProtoAndCheckerMaker { public: - ScaleOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void 
diff --git a/paddle/fluid/operators/scale_op.cc b/paddle/fluid/operators/scale_op.cc
index 7dcf33c989c3bcd905da8017ee36ec8ce8032911..4687e21e7155fc7309fb28c881c0d47152df9ad5 100644
--- a/paddle/fluid/operators/scale_op.cc
+++ b/paddle/fluid/operators/scale_op.cc
@@ -37,8 +37,7 @@ class ScaleOp : public framework::OperatorWithKernel {

 class ScaleOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  ScaleOpMaker(OpProto *proto, OpAttrChecker *op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
    AddInput("X", "(Tensor) Input tensor of scale operator.");
    AddOutput("Out", "(Tensor) Output tensor of scale operator.");
    AddComment(R"DOC(
diff --git a/paddle/fluid/operators/scatter_op.cc b/paddle/fluid/operators/scatter_op.cc
index 95b12455ea4996f00bab8a353ccd425b2c37aed1..bf5e0d864495ce3a651a31c9d5a7664fe9eb2396 100644
--- a/paddle/fluid/operators/scatter_op.cc
+++ b/paddle/fluid/operators/scatter_op.cc
@@ -78,8 +78,7 @@ class ScatterGradOp : public framework::OperatorWithKernel {

 class ScatterOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  ScatterOpMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
    AddInput("X", "The source input of scatter op");
    AddInput("Ids", "The index input of scatter op where X will be updated");
    AddInput("Updates", "The updated value of updates op");
diff --git a/paddle/fluid/operators/select_op.cc b/paddle/fluid/operators/select_op.cc
index 876d8acf0d880a7ef806514014d297f98e04c53d..e71841d4d1815d50cd9800910c9db34e121beffc 100644
--- a/paddle/fluid/operators/select_op.cc
+++ b/paddle/fluid/operators/select_op.cc
@@ -380,8 +380,7 @@ class SelectOp : public framework::OperatorBase {

 class SelectOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  SelectOpMaker(OpProto *proto, OpAttrChecker *op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
    AddInput(kX,
             "A set of variables, which are required by operators inside the "
             "cases of Select Op")
diff --git a/paddle/fluid/operators/send_barrier_op.cc b/paddle/fluid/operators/send_barrier_op.cc
index 12b844daaa33162b86b7daffa2e4c49785701662..1ce0907f3a9473e37f53bf7b2d42cddcb629dfa6 100644
--- a/paddle/fluid/operators/send_barrier_op.cc
+++ b/paddle/fluid/operators/send_barrier_op.cc
@@ -57,8 +57,7 @@ class SendBarrierOp : public framework::OperatorBase {

 class SendBarrierOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  SendBarrierOpMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
    AddOutput("RPCClient",
              "(RPCClient) The RPC client object which is"
              "initialized at most once.");
diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc
index e4386b640a298cd216bb60104653f20c4a96e7dc..95bb1f3c695297e6d8134a647925310207118a9b 100644
--- a/paddle/fluid/operators/send_op.cc
+++ b/paddle/fluid/operators/send_op.cc
@@ -92,8 +92,7 @@ class SendOp : public framework::OperatorBase {

 class SendOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  SendOpMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
    AddInput("X", "(Tensor) Input tensor to be sent").AsDuplicable();
    AddOutput("Out", "(Tensor) Output tensor to be received from server")
        .AsDuplicable();
diff --git a/paddle/fluid/operators/send_vars_op.cc b/paddle/fluid/operators/send_vars_op.cc
index 56b3713d6af28d0787e114a672a503e86cbd85fd..f11e84c176ae97dff0fda560ce3ebe2ab72c7bcc 100644
--- a/paddle/fluid/operators/send_vars_op.cc
+++ b/paddle/fluid/operators/send_vars_op.cc
@@ -66,8 +66,7 @@ class SendVarsOp : public framework::OperatorBase {

 class SendVarsOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  SendVarsOpMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
    AddInput("X", "(Tensor, SelectedRows) Input variables to be sent")
        .AsDuplicable();
    AddOutput("RPCClient",
diff --git 
a/paddle/fluid/operators/sequence_concat_op.cc b/paddle/fluid/operators/sequence_concat_op.cc index 3c21903e3a08dcfb55c6c07370a117d0ad633e69..077b9a5f7d935a39706ef3c2b710522bf1b713ed 100644 --- a/paddle/fluid/operators/sequence_concat_op.cc +++ b/paddle/fluid/operators/sequence_concat_op.cc @@ -43,8 +43,7 @@ class SequenceConcatOp : public framework::OperatorWithKernel { class SequenceConcatOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceConcatOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LodTensorArray) Input is a vector of LoDTensor, " "each of which is a variable-length sequence or nested sequence.") diff --git a/paddle/fluid/operators/sequence_conv_op.cc b/paddle/fluid/operators/sequence_conv_op.cc index 94f4b49b0018fdbff6e67c3c081aa5706ccb2e66..ec6cb24350ae276724aae339590d40be1e9ea400 100644 --- a/paddle/fluid/operators/sequence_conv_op.cc +++ b/paddle/fluid/operators/sequence_conv_op.cc @@ -102,8 +102,7 @@ class SequenceConvGradOp : public framework::OperatorWithKernel { class SequenceConvOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceConvOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "X", "(LoDTensor) the input(X) is a LodTensor, which supports " diff --git a/paddle/fluid/operators/sequence_erase_op.cc b/paddle/fluid/operators/sequence_erase_op.cc index 73c0e89512972cda002bd902ee0c78b4b77d8502..1c86486157a02c3b78ed61e840fd8e452b9cb452 100644 --- a/paddle/fluid/operators/sequence_erase_op.cc +++ b/paddle/fluid/operators/sequence_erase_op.cc @@ -37,8 +37,7 @@ class SequenceEraseOp : public framework::OperatorWithKernel { class SequenceEraseOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceEraseOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(2-D LoDTensor with the 2nd dim. 
equal to 1) " "Input LoDTensor of SequenceEraseOp."); diff --git a/paddle/fluid/operators/sequence_expand_op.cc b/paddle/fluid/operators/sequence_expand_op.cc index 84a35d7172a567a3f6505559fa45a32290288533..944c7f85e5f43679e1875fcce813382be2ba5526 100644 --- a/paddle/fluid/operators/sequence_expand_op.cc +++ b/paddle/fluid/operators/sequence_expand_op.cc @@ -94,8 +94,7 @@ class SequenceExpandOp : public framework::OperatorWithKernel { class SequenceExpandOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceExpandOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor, default LoDTensor) A 2-D LoDTensor whose lod " "level is at most 1."); diff --git a/paddle/fluid/operators/sequence_pool_op.cc b/paddle/fluid/operators/sequence_pool_op.cc index 933c8c26239d49221819a583f999389ed6fb6cb6..5c6fd13d42e43e3502a1cab85a56e019420c708d 100644 --- a/paddle/fluid/operators/sequence_pool_op.cc +++ b/paddle/fluid/operators/sequence_pool_op.cc @@ -38,8 +38,7 @@ class SequencePoolOp : public framework::OperatorWithKernel { class SequencePoolOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequencePoolOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor) The variable-length input of SequencePoolOp"); AddOutput("Out", "(Tensor) The output of SequencePoolOp does not contain LoD " diff --git a/paddle/fluid/operators/sequence_reshape_op.cc b/paddle/fluid/operators/sequence_reshape_op.cc index a2999650b8903f9d819a8e8011421349e098b219..ef5e6f3210234d59298fcf04c812390643c693d0 100644 --- a/paddle/fluid/operators/sequence_reshape_op.cc +++ b/paddle/fluid/operators/sequence_reshape_op.cc @@ -42,8 +42,7 @@ class SequenceReshapeOp : public framework::OperatorWithKernel { class SequenceReshapeOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceReshapeOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor, default LoDTensor) A 2-D LoDTensor with shape " "being [N, M]."); diff --git a/paddle/fluid/operators/sequence_slice_op.cc b/paddle/fluid/operators/sequence_slice_op.cc index 7cd620af07fa9b5f8fcee3c0f88207ef2800c4a1..df9243dc04c584d70dfa6ca78d5fac8423796466 100644 --- a/paddle/fluid/operators/sequence_slice_op.cc +++ b/paddle/fluid/operators/sequence_slice_op.cc @@ -79,8 +79,7 @@ class SequenceSliceGradOp : public framework::OperatorWithKernel { class SequenceSliceOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceSliceOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor), " "the input of SequenceSliceOp."); diff --git a/paddle/fluid/operators/sequence_softmax_op.cc b/paddle/fluid/operators/sequence_softmax_op.cc index a0d47c12ba606eb62bbbea4d5ea793ce915e8100..c44f8206eb5079fef969e3e527552512eebd0f1a 100644 --- a/paddle/fluid/operators/sequence_softmax_op.cc +++ b/paddle/fluid/operators/sequence_softmax_op.cc @@ -57,8 +57,7 @@ class SequenceSoftmaxOp : public framework::OperatorWithKernel { class SequenceSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceSoftmaxOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor) 1-D or 2-D input LoDTensor with the 
2-nd dimension " "of length 1."); diff --git a/paddle/fluid/operators/sgd_op.cc b/paddle/fluid/operators/sgd_op.cc index bd04c60ffa5c1e5eb8d2051ce495ab6c685b14b5..7a2bdeac09d61603f437ff10d58d0542bb3c3689 100644 --- a/paddle/fluid/operators/sgd_op.cc +++ b/paddle/fluid/operators/sgd_op.cc @@ -68,8 +68,7 @@ class SGDOpInferVarType : public framework::VarTypeInference { class SGDOpMaker : public framework::OpProtoAndCheckerMaker { public: - SGDOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor or SelectedRows) Input parameter"); AddInput("LearningRate", "(Tensor) Learning rate of SGD"); AddInput("Grad", "(Tensor or SelectedRows) Input gradient"); diff --git a/paddle/fluid/operators/shrink_rnn_memory_op.cc b/paddle/fluid/operators/shrink_rnn_memory_op.cc index a1871a8e7fb27d351f9d333966baa63c6f32ae01..8146c5f56104b7dec86b1c4491ed10fc2e94b58b 100644 --- a/paddle/fluid/operators/shrink_rnn_memory_op.cc +++ b/paddle/fluid/operators/shrink_rnn_memory_op.cc @@ -69,8 +69,7 @@ class ShrinkRNNMemoryOp : public ArrayOp { class ShrinkRNNMemoryOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - ShrinkRNNMemoryOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor) The RNN step memory to be shrinked."); AddInput("RankTable", "(LoDRankTable) The lod_rank_table of dynamic RNN."); AddInput("I", diff --git a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc index 5db77d0493fc0abaa0a696cb559c3ca0534d4101..135e2a6f7f877c9ef159a4542b834d5627649e81 100644 --- a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc +++ b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc @@ -86,9 +86,7 @@ class SigmoidCrossEntropyWithLogitsGradOp class SigmoidCrossEntropyWithLogitsOpMaker : public framework::OpProtoAndCheckerMaker { public: - SigmoidCrossEntropyWithLogitsOpMaker(OpProto* proto, - OpAttrChecker* op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor, default Tensor), a 2-D tensor with shape N x D, " "where N is the batch size and D is the number of classes. 
" diff --git a/paddle/fluid/operators/sign_op.cc b/paddle/fluid/operators/sign_op.cc index 8f8b7abd03212c12ca351e551621e63b4c7148c2..f3985dcc027f974e0213a73ea9a21e268d77615f 100644 --- a/paddle/fluid/operators/sign_op.cc +++ b/paddle/fluid/operators/sign_op.cc @@ -34,8 +34,7 @@ class SignOp : public framework::OperatorWithKernel { template class SignOpMaker : public framework::OpProtoAndCheckerMaker { public: - SignOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) Input tensor of sign operator."); AddOutput("Out", "(Tensor) Output tensor of sign operator."); AddComment(R"DOC( diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cc b/paddle/fluid/operators/smooth_l1_loss_op.cc index 322581fdef27b12a06704abc9c3b8772adf002f2..c44c5f164b2d84616e9a85813e0ee5219b41df28 100644 --- a/paddle/fluid/operators/smooth_l1_loss_op.cc +++ b/paddle/fluid/operators/smooth_l1_loss_op.cc @@ -46,8 +46,7 @@ class SmoothL1LossOp : public framework::OperatorWithKernel { class SmoothL1LossOpMaker : public framework::OpProtoAndCheckerMaker { public: - SmoothL1LossOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor, default Tensor) A tensor with rank at least 2. " "The input value of smooth l1 loss op with shape " diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc index aa7b192e327704c02a26c86cc208ebe8a5cd7ba5..cc256aa627bdda0609f496cab93a2dec7d95f348 100644 --- a/paddle/fluid/operators/softmax_op.cc +++ b/paddle/fluid/operators/softmax_op.cc @@ -77,8 +77,7 @@ class SoftmaxOp : public framework::OperatorWithKernel { class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { public: - SoftmaxOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input tensor of softmax. " "2-D with shape [batch_size, input_feature_dimensions]."); diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc index 857e5733573497b56520daa7860f4feb4e01cda7..53cb716a979229c99fcbdc12f1aeab4e21b320f3 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc @@ -20,8 +20,7 @@ namespace operators { class SoftmaxWithCrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker { public: - SoftmaxWithCrossEntropyOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Logits", "(Tensor, default: Tensor), The unscaled log probabilities " "which is a 2-D tensor with shape [N x K]. 
N is the batch_size, " diff --git a/paddle/fluid/operators/split_byref_op.cc b/paddle/fluid/operators/split_byref_op.cc index 7413ce3e9ce60ed733bb4d27e9ec205e5f0a7e1b..bc998e1abbd7131a7497288cc9d66315a6fedc85 100644 --- a/paddle/fluid/operators/split_byref_op.cc +++ b/paddle/fluid/operators/split_byref_op.cc @@ -64,8 +64,7 @@ class SplitByrefOp : public framework::OperatorWithKernel { class SplitByrefOpMaker : public framework::OpProtoAndCheckerMaker { public: - SplitByrefOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) Input tensor of the split operator."); AddOutput("Out", "(Tensor) Output tensors of the split operator.") .AsDuplicable(); diff --git a/paddle/fluid/operators/split_ids_op.cc b/paddle/fluid/operators/split_ids_op.cc index a53cbc8ac5199061dafdc7f4cf560b9e4fc577ab..c867c46873ae7ddbdbda280351e4ab28235bcc08 100644 --- a/paddle/fluid/operators/split_ids_op.cc +++ b/paddle/fluid/operators/split_ids_op.cc @@ -19,8 +19,7 @@ namespace operators { class SplitIdsOpMaker : public framework::OpProtoAndCheckerMaker { public: - SplitIdsOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Ids", "(LoDTensor) the input ids with shape{batch_num, 1}"); AddOutput("Out", "(LoDTensor) The outputs of the input Ids.") .AsDuplicable(); diff --git a/paddle/fluid/operators/split_lod_tensor_op.cc b/paddle/fluid/operators/split_lod_tensor_op.cc index 3222cce239988b170501f2b99e9f1253036b7fbc..767449cde981e5925b7144ff1038560c67651f3e 100644 --- a/paddle/fluid/operators/split_lod_tensor_op.cc +++ b/paddle/fluid/operators/split_lod_tensor_op.cc @@ -125,8 +125,7 @@ class SplitLoDTensorOp : public framework::OperatorBase { class SplitLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - SplitLoDTensorOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input LoDTensor"); AddInput("Mask", "A bool column vector which mask the input"); AddOutput("OutTrue", "True branch of input LoDTensor"); diff --git a/paddle/fluid/operators/split_op.cc b/paddle/fluid/operators/split_op.cc index a4398df36bcc2d3b8bbe8949f27f5d6508861d95..5e2b2a994534c2fb1e053c067b36651d358b9da8 100644 --- a/paddle/fluid/operators/split_op.cc +++ b/paddle/fluid/operators/split_op.cc @@ -70,8 +70,7 @@ class SplitOp : public framework::OperatorWithKernel { class SplitOpMaker : public framework::OpProtoAndCheckerMaker { public: - SplitOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) Input tensor of the split operator."); AddOutput("Out", "(Tensor) Output tensors of the split operator.") .AsDuplicable(); diff --git a/paddle/fluid/operators/split_selected_rows_op.cc b/paddle/fluid/operators/split_selected_rows_op.cc index e1ce3d0c1bf11e9a623e4e9adc8f08f5069f4d94..76615a9405d7a8e3fa9dba8d01a956209e02ae8f 100644 --- a/paddle/fluid/operators/split_selected_rows_op.cc +++ b/paddle/fluid/operators/split_selected_rows_op.cc @@ -19,8 +19,7 @@ namespace operators { class SplitSelectedRowsOpMaker : public framework::OpProtoAndCheckerMaker { public: - SplitSelectedRowsOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input SelectedRows."); AddOutput("Out", "The outputs of the 
input SelectedRows.").AsDuplicable(); AddAttr>("height_sections", diff --git a/paddle/fluid/operators/spp_op.cc b/paddle/fluid/operators/spp_op.cc index 1cada95501a76da27081d533b451ce7f6a384a49..a2a96b72f09df86790ad1f90ead9189ff9bd581c 100644 --- a/paddle/fluid/operators/spp_op.cc +++ b/paddle/fluid/operators/spp_op.cc @@ -20,8 +20,7 @@ namespace operators { class SppOpMaker : public framework::OpProtoAndCheckerMaker { public: - SppOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "X", "(Tensor) The input tensor of spp operator. " diff --git a/paddle/fluid/operators/squared_l2_distance_op.cc b/paddle/fluid/operators/squared_l2_distance_op.cc index c32f575b541d6a6441cc1b6e999496eacef421a5..42532a294b2ef9ffdb240fac8596278047daf7fe 100644 --- a/paddle/fluid/operators/squared_l2_distance_op.cc +++ b/paddle/fluid/operators/squared_l2_distance_op.cc @@ -56,8 +56,7 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel { class SquaredL2DistanceOpMaker : public framework::OpProtoAndCheckerMaker { public: - SquaredL2DistanceOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) Input of SquaredL2DistanceOp."); AddInput("Y", "(Tensor) Target of SquaredL2DistanceOp."); AddOutput("sub_result", diff --git a/paddle/fluid/operators/squared_l2_norm_op.cc b/paddle/fluid/operators/squared_l2_norm_op.cc index 4ce51259da3530367d91b5da34f06fbe5d969fce..7bd82e0ce4add6d4434e1defaee43da178a6f309 100644 --- a/paddle/fluid/operators/squared_l2_norm_op.cc +++ b/paddle/fluid/operators/squared_l2_norm_op.cc @@ -48,8 +48,7 @@ class SquaredL2NormGradOp : public framework::OperatorWithKernel { class SquaredL2NormOpMaker : public framework::OpProtoAndCheckerMaker { public: - SquaredL2NormOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) The input of squared_l2_norm op."); AddOutput("Out", "(Scalar) The output of squared_l2_norm op."); AddComment(R"DOC( diff --git a/paddle/fluid/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc index 108f26fafe7af76eaa613d77ed77748ee43ea234..bcc5e22d4a77349e7cde9a43b83f23d4c867d994 100644 --- a/paddle/fluid/operators/sum_op.cc +++ b/paddle/fluid/operators/sum_op.cc @@ -112,8 +112,7 @@ class SumOp : public framework::OperatorWithKernel { class SumOpMaker : public framework::OpProtoAndCheckerMaker { public: - SumOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(vector) The input tensors of sum operator.") .AsDuplicable(); AddOutput("Out", "(Tensor) The output tensor of sum operator."); diff --git a/paddle/fluid/operators/target_assign_op.cc b/paddle/fluid/operators/target_assign_op.cc index 33ff967e5e8f5afbaa62ba39ce596687ae0a71cd..9fce216e880f2af3b665202f4c4d2777995db003 100644 --- a/paddle/fluid/operators/target_assign_op.cc +++ b/paddle/fluid/operators/target_assign_op.cc @@ -65,8 +65,7 @@ class TargetAssignOp : public framework::OperatorWithKernel { class TargetAssignOpMaker : public framework::OpProtoAndCheckerMaker { public: - TargetAssignOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor), This input is a 3D LoDTensor with shape [M, P, K]. 
" "Some elements in X will be assigned to Out based on the " diff --git a/paddle/fluid/operators/tensor_array_read_write_op.cc b/paddle/fluid/operators/tensor_array_read_write_op.cc index 2636812c42985536e7ca3475c03bbd8d1638ece6..c703d11eeccf8418250f00c801f47418ee9c85ae 100644 --- a/paddle/fluid/operators/tensor_array_read_write_op.cc +++ b/paddle/fluid/operators/tensor_array_read_write_op.cc @@ -57,8 +57,7 @@ class WriteToArrayOp : public ArrayOp { class WriteToArrayOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - WriteToArrayOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor) the tensor will be written to tensor array"); AddInput( "I", @@ -148,8 +147,7 @@ class ReadFromArrayOp : public ArrayOp { class ReadFromArrayProtoMaker : public framework::OpProtoAndCheckerMaker { public: - ReadFromArrayProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(TensorArray) the array will be read from."); AddInput("I", "(Tensor) the subscript index in tensor array. The number of " diff --git a/paddle/fluid/operators/top_k_op.cc b/paddle/fluid/operators/top_k_op.cc index 942a5de3f90f20eabe691924a570b61509eccf76..c17d1afc309c65035063348d4934ea1783b018ed 100644 --- a/paddle/fluid/operators/top_k_op.cc +++ b/paddle/fluid/operators/top_k_op.cc @@ -48,8 +48,7 @@ class TopkOp : public framework::OperatorWithKernel { class TopkOpMaker : public framework::OpProtoAndCheckerMaker { public: - TopkOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) The input of Topk op"); AddOutput("Out", "(Tensor) The output tensor of Topk op"); AddOutput("Indices", "(Tensor) The indices of Topk elements of input"); diff --git a/paddle/fluid/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc index 3555cb68cab97c0cf983f1173c3b4ca9307e4f7d..60556a564c25c08612447ebd47a4b432b8a12d29 100644 --- a/paddle/fluid/operators/transpose_op.cc +++ b/paddle/fluid/operators/transpose_op.cc @@ -56,8 +56,7 @@ class TransposeOp : public framework::OperatorWithKernel { class TransposeOpMaker : public framework::OpProtoAndCheckerMaker { public: - TransposeOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "X", "(Tensor) The input tensor, tensors with rank up to 6 are supported."); diff --git a/paddle/fluid/operators/uniform_random_batch_size_like_op.cc b/paddle/fluid/operators/uniform_random_batch_size_like_op.cc index 00f00bb403db5e40939a1502b2219fb4d36d58e5..78fee77df8151221459b0afa0d6789bfe82cfda5 100644 --- a/paddle/fluid/operators/uniform_random_batch_size_like_op.cc +++ b/paddle/fluid/operators/uniform_random_batch_size_like_op.cc @@ -32,9 +32,8 @@ class UniformRandomBatchSizeLikeOp : public BatchSizeLikeOp { }; class UniformRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker { - public: - UniformRandomBatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : BatchSizeLikeOpMaker(proto, op_checker) { + protected: + void Apply() override { AddComment(R"DOC( Uniform random operator diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc index 3b5cf68dd4f28d23e507058337fe55de9b88d3cd..137ea91caedabc3167146d91b063dbe9e2e2b931 100644 --- a/paddle/fluid/operators/uniform_random_op.cc +++ 
b/paddle/fluid/operators/uniform_random_op.cc @@ -85,8 +85,7 @@ class UniformRandomOp : public framework::OperatorWithKernel { class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker { public: - UniformRandomOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddOutput("Out", "(Tensor) The output tensor of uniform random op"); AddComment(R"DOC( Uniform random operator. diff --git a/paddle/fluid/operators/unpool_op.cc b/paddle/fluid/operators/unpool_op.cc index b3cd87efa21115565b32659cb35fee4b5bed2d4f..1d441b43b14ea194152095874645f8133c423efd 100644 --- a/paddle/fluid/operators/unpool_op.cc +++ b/paddle/fluid/operators/unpool_op.cc @@ -20,8 +20,7 @@ namespace operators { class Unpool2dOpMaker : public framework::OpProtoAndCheckerMaker { public: - Unpool2dOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "X", "(Tensor) The input tensor of unpool operator. " diff --git a/paddle/fluid/operators/warpctc_op.cc b/paddle/fluid/operators/warpctc_op.cc index 6835a5dd6286ece20c4ce6f3e951ed4b0057012c..e06c8c962f45a4e91b7efed7431571f0fc6870a3 100644 --- a/paddle/fluid/operators/warpctc_op.cc +++ b/paddle/fluid/operators/warpctc_op.cc @@ -53,8 +53,7 @@ class WarpCTCOp : public framework::OperatorWithKernel { class WarpCTCOpMaker : public framework::OpProtoAndCheckerMaker { public: - WarpCTCOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Logits", "(LodTensor, default: LoDTensor), the unscaled " "probabilities of variable-length sequences, which is a 2-D " diff --git a/paddle/fluid/operators/while_op.cc b/paddle/fluid/operators/while_op.cc index 710cc9fc2e716da2e4fd067562a34d312e48b1a1..175c3ac5d79f24e47d21417df8e3eaeb4d5b2335 100644 --- a/paddle/fluid/operators/while_op.cc +++ b/paddle/fluid/operators/while_op.cc @@ -68,8 +68,7 @@ class WhileOp : public framework::OperatorBase { class WhileOpMaker : public framework::OpProtoAndCheckerMaker { public: - WhileOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(kX, "A set of variables, which are required by operators inside the " "block of While Op.") diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index c9b49adef7061d2cfa504258cfc589346c27e192..5bef232cd8fc44ded89ac56a790c8db0955b390a 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -473,6 +473,7 @@ EOF } function main() { + set -e local CMD=$1 init case $CMD in diff --git a/paddle/scripts/paddle_docker_build.sh b/paddle/scripts/paddle_docker_build.sh index 0bf8c7989245e7b0119c41960f792a97d1555b9c..ac32bf0292dcc77e56bda2e390fd5d4cac062e61 100755 --- a/paddle/scripts/paddle_docker_build.sh +++ b/paddle/scripts/paddle_docker_build.sh @@ -59,7 +59,7 @@ EOL if [ ! 
-d "${HOME}/.ccache" ]; then
    mkdir ${HOME}/.ccache
 fi
-set -x
+set -ex
 ${DOCKER_CMD} run -it \
    --name $CONTAINER_ID \
    ${DOCKER_ENV} \
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index d7eda619c34a20fa09a30afdcf90047d66a05cbf..28e54f5492e7b04a1406e319cecf977d4a55725e 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -160,6 +160,7 @@ class Variable(object):
                 persistable=None,
                 error_clip=None,
                 stop_gradient=False,
+                 is_data=False,
                 **kwargs):
        self.block = block
        self.error_clip = error_clip
@@ -238,6 +239,7 @@ class Variable(object):
        self.block.vars[name] = self
        self.op = None
        self.stop_gradient = stop_gradient
+        self.is_data = is_data

    def __str__(self):
        return self.to_string(True)
@@ -475,7 +477,7 @@ class Operator(object):
                if isinstance(attrs[attr_name], Block):
                    self.desc.set_block_attr(attr_name, attrs[attr_name].desc)
                elif isinstance(attrs[attr_name], core.BlockDesc) or \
-                   isinstance(attrs[attr_name], core.ProgramDesc):
+                        isinstance(attrs[attr_name], core.ProgramDesc):
                    self.desc.set_serialized_attr(
                        attr_name, attrs[attr_name].serialize_to_string())
                else:
@@ -978,7 +980,8 @@ class Block(object):
                shape=var.shape,
                dtype=var.dtype,
                type=var.type,
-                persistable=True)
+                persistable=True,
+                is_data=var.is_data)
        else:
            ret_var = self.create_var(
                name=var.name,
@@ -986,7 +989,8 @@ class Block(object):
                dtype=var.dtype,
                type=var.type,
                lod_level=var.lod_level,
-                persistable=True)
+                persistable=True,
+                is_data=var.is_data)
        return ret_var


@@ -1051,6 +1055,7 @@ class Program(object):
            p.sync_with_cpp()

        p.copy_param_info_from(self)
+        p.copy_data_info_from(self)
        return p

    def prune(self, targets):
@@ -1172,6 +1177,26 @@ class Program(object):
                             "program, with represent the same topology")
        self.global_block().copy_param_info_from(other.global_block())

+    def copy_data_info_from(self, other):
+        """
+        Copy the information of data variables from another program.
+
+        Args:
+            other(Program): Other program
+
+        Returns:
+            None
+        """
+        if not isinstance(other, Program):
+            raise TypeError("copy_data_info_from should be invoked with "
+                            "Program")
+
+        if len(self.blocks) != len(other.blocks):
+            raise ValueError("copy_data_info_from should be invoked with two "
+                             "programs that represent the same topology")
+        for var in other.global_block().vars.itervalues():
+            if var.is_data:
+                self.global_block().var(var.name).is_data = True
+
    def list_vars(self):
        for each_block in self.blocks:
            for each_var in each_block.vars.itervalues():
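The framework.py hunks above thread a new is_data flag from Variable through Block.clone_variable and Program.clone, and copy_data_info_from re-marks the feed variables on a cloned program. A self-contained sketch of that propagation (plain objects and dicts stand in for Program internals; none of these names are Paddle API):

class VarSketch(object):
    def __init__(self, name, is_data=False):
        self.name, self.is_data = name, is_data


def copy_data_info(dst_vars, src_vars):
    # Mirror of copy_data_info_from: re-mark the data variables on the clone.
    for name, var in src_vars.items():
        if var.is_data and name in dst_vars:
            dst_vars[name].is_data = True


src = {"img": VarSketch("img", is_data=True), "w": VarSketch("w")}
dst = {"img": VarSketch("img"), "w": VarSketch("w")}  # a fresh clone
copy_data_info(dst, src)
assert dst["img"].is_data and not dst["w"].is_data

Without this re-marking, a cloned test program could no longer reconstruct its feed list, which is exactly what the trainer.py changes further below rely on.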
diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py
index 0a6befd1485a1f79d63873c47a9fd74ab4214f57..4d6ee3c51b7cccdaa3303b5a4cd8e7219b753ccb 100644
--- a/python/paddle/fluid/layers/io.py
+++ b/python/paddle/fluid/layers/io.py
@@ -78,8 +78,8 @@ def data(name,
        dtype=dtype,
        type=type,
        stop_gradient=stop_gradient,
-        lod_level=lod_level)
-    data_var.is_data = True
+        lod_level=lod_level,
+        is_data=True)
    return data_var


diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/fluid/layers/layer_function_generator.py
index 35b01a79914b3427836d4abd51aa2e2eb471d517..295d1b7190ec39bcc6efdf72aebede14a99807aa 100644
--- a/python/paddle/fluid/layers/layer_function_generator.py
+++ b/python/paddle/fluid/layers/layer_function_generator.py
@@ -113,7 +113,7 @@ def generate_layer_fn(op_type):
    if len(not_intermediate_outputs) != 1:
        raise ValueError("Only one non intermediate output operator can be",
-                         "automatically generated.")
+                         "automatically generated. {0}".format(op_type))

    if not_intermediate_outputs[0].duplicable:
        raise ValueError(
diff --git a/python/paddle/fluid/tests/book/high-level-api/word2vec/no_test_word2vec_new_api.py b/python/paddle/fluid/tests/book/high-level-api/word2vec/no_test_word2vec_new_api.py
index 35e163dc9df5a35ee5774b6b157366c4eabcb0f7..93f7757a66ea9b217c7831c7263936ece0aa8f18 100644
--- a/python/paddle/fluid/tests/book/high-level-api/word2vec/no_test_word2vec_new_api.py
+++ b/python/paddle/fluid/tests/book/high-level-api/word2vec/no_test_word2vec_new_api.py
@@ -80,8 +80,11 @@ def inference_program(is_sparse):


 def train_program(is_sparse):
-    next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64')
+    # 'next_word' must be declared after the call to inference_program, or the
+    # feed order of the train program would be [next_word, firstw, secondw,
+    # thirdw, forthw], which is not the intended order.
    predict_word = inference_program(is_sparse)
+    next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64')
    cost = fluid.layers.cross_entropy(input=predict_word, label=next_word)
    avg_cost = fluid.layers.mean(cost)
    return avg_cost
@@ -90,14 +93,17 @@ def train_program(is_sparse):
 def train(use_cuda, is_sparse, save_path):
    train_reader = paddle.batch(
        paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE)
+    test_reader = paddle.batch(
+        paddle.dataset.imikolov.test(word_dict, N), BATCH_SIZE)

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

    def event_handler(event):
-        print type(event)
+        # print type(event)
        if isinstance(event, fluid.EndEpochEvent):
-            avg_cost = trainer.test(reader=paddle.dataset.imikolov.test(
-                word_dict, N))
+            outs = trainer.test(reader=test_reader)
+            avg_cost = outs[0]
+            print("loss= ", avg_cost)

            if avg_cost < 5.0:
                trainer.save_params(save_path)
diff --git a/python/paddle/fluid/tests/unittests/test_matmul_op.py b/python/paddle/fluid/tests/unittests/test_matmul_op.py
index 44ac4683891ffd3141a126740f4fddb47550e183..cae2c8fa87d9857de8f26cf4962d9370eca66243 100644
--- a/python/paddle/fluid/tests/unittests/test_matmul_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matmul_op.py
@@ -111,21 +111,24 @@ class Generator(object):


 # Generate test cases for all possibilities
-for dim_X in [1, 2, 3]:
-    for dim_Y in [1, 2, 3]:
-        for transpose_X in [False, True]:
-            for transpose_Y in [False, True]:
-                test_name = (
-                    'TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format(
-                        dim_X, dim_Y, transpose_X, transpose_Y))
-                shape_X, shape_Y = generate_compatible_shapes(
-                    dim_X, dim_Y, transpose_X, transpose_Y)
-                globals()[test_name] = type(test_name, (Generator, OpTest), {
-                    'shape_X': shape_X,
-                    'shape_Y': shape_Y,
-                    'transpose_X': transpose_X,
-                    'transpose_Y': transpose_Y,
-                })
+def inject_test(dim_x, dim_y, trans_x, trans_y):
+    test_name = ('TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format(
+        dim_x, dim_y, trans_x, trans_y))
+    shape_x, shape_y = generate_compatible_shapes(dim_x, dim_y, trans_x,
+                                                  trans_y)
+    globals()[test_name] = type(test_name, (Generator, OpTest), {
+        'shape_X': shape_x,
+        'shape_Y': shape_y,
+        'transpose_X': trans_x,
+        'transpose_Y': trans_y,
+    })
+
+
+for dim_X in (1, 2, 3):
+    for dim_Y in (1, 2, 3):
+        for transpose_x in (False, True):
+            for transpose_y in (False, True):
+                inject_test(dim_X, dim_Y, transpose_x, transpose_y)


 # Test case n-dim
@@ -149,7 +152,7 @@ def generate_compatible_shapes(dim, transpose_X, transpose_Y):
    return shape_X, shape_Y


-# Test case n-dim
+# # Test case n-dim
 for dim in [4]:
    for transpose_X in [False, True]:
        for transpose_Y in [False, True]:
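The test_matmul_op.py refactor above moves the class construction into inject_test, which builds one named TestCase per parameter combination with type() and registers it in globals() so the unittest runner can discover it. A minimal self-contained sketch of the same pattern:

import unittest


def _inject_case(a, b):
    # Build a test method that closes over the parameters, then register a
    # distinctly named TestCase class so the runner finds each combination.
    def test_commutes(self):
        self.assertEqual(a + b, b + a)

    name = 'TestAddCommutes_{}_{}'.format(a, b)
    globals()[name] = type(name, (unittest.TestCase,),
                           {'test_commutes': test_commutes})


for a in (1, 2):
    for b in (3, 4):
        _inject_case(a, b)

if __name__ == '__main__':
    unittest.main()

Hoisting the body into a function also gives each generated class its own closure over the parameters, which is why the refactored version reads more safely than the deeply nested loop it replaces.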
diff --git a/python/paddle/fluid/trainer.py b/python/paddle/fluid/trainer.py
index d44cb16bfb1545fc840d1a38155ec407afd4473d..30b58b465ef2a7945ed87ce69397a050fc337623 100644
--- a/python/paddle/fluid/trainer.py
+++ b/python/paddle/fluid/trainer.py
@@ -75,11 +75,15 @@ class Trainer(object):
        self.train_program = framework.Program()

        with framework.program_guard(self.train_program, self.startup_program):
-            loss = program_func()
+            program_func_outs = program_func()
+            self.test_outputs = program_func_outs if isinstance(
+                program_func_outs, list) else [program_func_outs]
+            self.test_program = self.train_program.clone()
            if not isinstance(optimizer, opt_module.Optimizer):
                raise TypeError(
                    "The optimizer should be an instance of Optimizer")
-
+            # The first element of program_func_outs is the loss.
+            loss = self.test_outputs[0]
            optimize_ops, params_grads = optimizer.minimize(loss)

        self.place = Trainer._check_and_get_place(place)
@@ -168,8 +172,17 @@ class Trainer(object):
            self._train_by_executor(num_epochs, event_handler, reader,
                                    feed_order)

-    def test(self, reader):
-        pass
+    def test(self, reader, feed_order=None):
+        """
+        Test the model on the given test data.
+
+        Args:
+            reader: The reader that yields test data.
+            feed_order: The feed order of the reader. If None, the order in
+                which the feed variables are defined in the program is used.
+        """
+
+        return self._test_by_executor(reader, feed_order, self.test_outputs)

    def save_params(self, param_path):
        # reference: save_persistables in io.py
@@ -225,22 +238,10 @@ class Trainer(object):
        """
        with self._prog_and_scope_guard():
-            exe = executor.Executor(self.place)
-            if feed_order is None:
-                feed_var_list = [
-                    var
-                    for var in self.train_program.global_block(
-                    ).vars.itervalues()
-                    if hasattr(var, 'is_data') and var.is_data
-                ]
-            else:
-                feed_var_list = [
-                    self.train_program.global_block().var(var_name)
-                    for var_name in feed_order
-                ]
-
+            feed_var_list = build_feed_var_list(self.train_program, feed_order)
            feeder = data_feeder.DataFeeder(
                feed_list=feed_var_list, place=self.place)
+            exe = executor.Executor(self.place)
            for epoch_id in range(num_epochs):
                event_handler(BeginEpochEvent(epoch_id))
                for step_id, data in enumerate(reader()):
@@ -248,3 +249,48 @@ class Trainer(object):
                    exe.run(feed=feeder.feed(data), fetch_list=[])
                    event_handler(EndStepEvent(epoch_id, step_id))
                event_handler(EndEpochEvent(epoch_id))
+
+    def _test_by_executor(self, reader, feed_order, fetch_list):
+        with executor.scope_guard(self.scope):
+            feed_var_list = build_feed_var_list(self.test_program, feed_order)
+            feeder = data_feeder.DataFeeder(
+                feed_list=feed_var_list, place=self.place)
+            exe = executor.Executor(self.place)
+            accumulated = len(fetch_list) * [0]
+            count = 0
+            for data in reader():
+                outs = exe.run(program=self.test_program,
+                               feed=feeder.feed(data),
+                               fetch_list=fetch_list)
+                accumulated = [x[0] + x[1][0] for x in zip(accumulated, outs)]
+                count += 1
+
+            return [x / count for x in accumulated]
+
+
+def build_feed_var_list(program, feed_order):
+    if not isinstance(program, framework.Program):
+        raise TypeError("The 'program' should be an object of Program")
+
+    if feed_order is None:
+        feed_var_list = [
+            var for var in program.global_block().vars.itervalues()
+            if var.is_data
+        ]
+    elif isinstance(feed_order, list):
+        feed_var_list = [
+            program.global_block().var(var_name) for var_name in feed_order
+        ]
+    else:
+        if not isinstance(feed_order, dict):
+            raise TypeError(
+                "The 'feed_order' should be either None, list or dict.")
+        if not sorted(feed_order.values()) == range(len(feed_order)):
+            raise ValueError(
+                "The values of 'feed_order' should be a permutation of "
+                "[0, len(feed_order))")
+        sorted_pair_list = sorted(feed_order.items(), key=lambda item: item[1])
+        feed_var_list = [
+            program.global_block().var(pair[0]) for pair in sorted_pair_list
+        ]
+    return feed_var_list
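Trainer.test above runs self.test_program batch by batch and averages each fetched metric; _test_by_executor assumes every fetched value is an array whose first element is that batch's metric. A self-contained sketch of just the accumulation arithmetic:

def average_outs(batches):
    # `batches` is a list of per-batch fetch results; each result is a list of
    # arrays, and element [0] of each array is the batch value of one metric.
    accumulated = len(batches[0]) * [0]
    count = 0
    for outs in batches:
        accumulated = [acc + out[0] for acc, out in zip(accumulated, outs)]
        count += 1
    return [acc / float(count) for acc in accumulated]


# Two batches, one fetched metric (e.g. the loss):
print(average_outs([[[4.0]], [[2.0]]]))  # [3.0]

Note the mean weights every batch equally regardless of batch size, so a short final batch counts as much as a full one; that matches what _test_by_executor computes.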
diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py
index 640ac9f085e6dc83bb04faafadf4846089ad3e29..b45cb987d896bd189531e97eb62bddbbee16069d 100644
--- a/python/paddle/fluid/transpiler/distribute_transpiler.py
+++ b/python/paddle/fluid/transpiler/distribute_transpiler.py
@@ -18,7 +18,9 @@ import math

 import distributed_splitter as splitter
 from .. import core
-from ..framework import Program, default_main_program, Variable, Parameter
+from ..framework import Program, default_main_program, \
+    default_startup_program, \
+    Variable, Parameter, grad_var_name

 LOOKUP_TABLE_TYPE = "lookup_table"
 LOOKUP_TABLE_GRAD_TYPE = "lookup_table_grad"
@@ -153,43 +155,43 @@ class DistributeTranspiler:
                  split_method=splitter.round_robin,
                  sync_mode=True):
        """
-        Transpile the program to distributed data-parallelism programs.
-        The main_program will be transformed to use a remote parameter server
-        to do parameter optimization. And the optimization graph will be put
-        into a parameter server program.
-
-        Use different methods to split trainable variables to different
-        parameter servers.
-
-        Steps to transpile trainer:
-        1. split variable to multiple blocks, aligned by product(dim[1:]) (width).
-        2. rename splited grad variables to add trainer_id suffix ".trainer_%d".
-        3. modify trainer program add split_op to each grad variable.
-        4. append send_op to send splited variables to server and fetch
-            params(splited blocks or origin param) from server.
-        5. append concat_op to merge splited blocks to update local weights.
-
-        Steps to transpile pserver:
-        1. create new program for parameter server.
-        2. create params and grad variables that assigned to current server instance.
-        3. create a sub-block in the server side program
-        4. append ops that should run on current server instance.
-        5. add listen_and_serv op
-
-        :param trainer_id: one unique id for each trainer in a job.
-        :type trainer_id: int
-        :param program: program to transpile, default is default_main_program
-        :type program: Program
-        :param pservers: parameter server endpoints like "m1:6174,m2:6174"
-        :type pservers: string
-        :param trainers: total number of workers/trainers in the job
-        :type trainers: int
-        :param split_method: A function to determin how to split variables
-            to different servers equally.
-        :type split_method: function
-        :type sync_mode: boolean default True
-        :param sync_mode: if sync_mode is set True, it means that dist transpiler
-            will transpile the program into sync_mode pserver and trainer program.
+        Transpile the program to distributed data-parallelism programs.
+        The main_program will be transformed to use a remote parameter server
+        to do parameter optimization. And the optimization graph will be put
+        into a parameter server program.
+
+        Use different methods to split trainable variables across the
+        parameter servers.
+
+        Steps to transpile trainer:
+        1. split variable to multiple blocks, aligned by product(dim[1:]) (width).
+        2. rename split grad variables to add the trainer_id suffix ".trainer_%d".
+        3. modify the trainer program to add a split_op for each grad variable.
+        4. append send_op to send split variables to the server and fetch
+            params (split blocks or the original param) from the server.
+        5. append concat_op to merge split blocks to update local weights.
+
+        Steps to transpile pserver:
+        1. create a new program for the parameter server.
+        2. create params and grad variables that are assigned to the current server instance.
+        3. create a sub-block in the server side program.
+        4. append ops that should run on the current server instance.
+        5. add listen_and_serv op.
+
+        :param trainer_id: one unique id for each trainer in a job.
+        :type trainer_id: int
+        :param program: program to transpile, default is default_main_program
+        :type program: Program
+        :param pservers: parameter server endpoints like "m1:6174,m2:6174"
+        :type pservers: string
+        :param trainers: total number of workers/trainers in the job
+        :type trainers: int
+        :param split_method: a function that determines how to split variables
+            evenly across the servers.
+        :type split_method: function
+        :type sync_mode: boolean default True
+        :param sync_mode: if sync_mode is True, the transpiler will produce
+            sync_mode pserver and trainer programs.
        """
        assert (callable(split_method))
        if program is None:
@@ -244,7 +246,7 @@ class DistributeTranspiler:
        ]
        grad_list = [
            grad for grad in grad_list
-            if grad.name != framework.grad_var_name(self.table_name)
+            if grad.name != grad_var_name(self.table_name)
        ]
        self.table_param_grad = [
            param_grad for param_grad in params_grads
@@ -494,7 +496,7 @@ class DistributeTranspiler:
        were split to several blocks.
        """
        s_prog = Program()
-        orig_s_prog = framework.default_startup_program()
+        orig_s_prog = default_startup_program()
        params = self.param_grad_ep_mapping[endpoint]["params"]

        def _get_splited_name_and_shape(varname):
@@ -619,7 +621,7 @@ class DistributeTranspiler:
        # 2. add split_ids_op and send_vars_op to send gradient to pservers
        # there should only be one table_name
        all_ops = program.global_block().ops
-        table_grad_name = framework.grad_var_name(self.table_name)
+        table_grad_name = grad_var_name(self.table_name)
        for op in all_ops:
            if table_grad_name in op.output_arg_names:
                op_index = list(all_ops).index(op)
@@ -692,7 +694,7 @@ class DistributeTranspiler:
                persistable=True)
            grad_var = _clone_var(
                pserver_program.global_block(),
-                self.origin_program.global_block().vars[framework.grad_var_name(
+                self.origin_program.global_block().vars[grad_var_name(
                    self.table_name)],
                persistable=False)
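For orientation, a usage sketch of the transpile API documented above. The endpoints and trainer count are placeholders, and the get_trainer_program/get_pserver_program retrieval calls are assumed from the Fluid API of this period rather than shown in this patch:

import paddle.fluid as fluid

t = fluid.DistributeTranspiler()
t.transpile(
    trainer_id=0,  # unique per trainer in the job
    pservers="192.168.0.1:6174,192.168.0.2:6174",
    trainers=2,
    sync_mode=True)

# Each role then fetches its own program to run:
trainer_prog = t.get_trainer_program()
pserver_prog = t.get_pserver_program("192.168.0.1:6174")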