diff --git a/cmake/configure.cmake b/cmake/configure.cmake index 51c3b918cc4ef4cf6c8052ccc14028a872309fcf..926a7b1d69c7f8f9bb2e2e1de084c6cd42cfc674 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -49,11 +49,11 @@ if(NOT WITH_GOLANG) endif(NOT WITH_GOLANG) if(NOT WITH_GPU) - add_definitions(-DPADDLE_ONLY_CPU) add_definitions(-DHPPL_STUB_FUNC) list(APPEND CMAKE_CXX_SOURCE_FILE_EXTENSIONS cu) else() + add_definitions(-DPADDLE_WITH_GPU) FIND_PACKAGE(CUDA REQUIRED) if(${CUDA_VERSION_MAJOR} VERSION_LESS 7) diff --git a/doc/design/register_grad_op.md b/doc/design/register_grad_op.md index 12b04fb2713d8bb862c08ddb538ddf860bb2983c..3cf8a59446d244bb3a388b87b14273d9096c839a 100644 --- a/doc/design/register_grad_op.md +++ b/doc/design/register_grad_op.md @@ -33,22 +33,45 @@ The mapping relationship between an operator and its gradient operators is a fun ```cpp // (OpDesc) --> vector<OpDesc> -using GradOpDescMaker = std::function<std::vector<OpDesc>(const OpDesc&)>; +using GradOpDescMaker = std::function<std::vector<std::unique_ptr<OpDescBind>>(const OpDescBind&)>; ``` -The function take a `OpDesc` of the forward operator and return one or many gradient operator descriptions. +The function takes an `OpDescBind` of the forward operator and returns one or many gradient operator descriptions. `OpDescBind` is a C++ wrapper for the protobuf message `OpDesc`, used to manipulate `OpDesc` quickly. The `GradOpDescMaker` will be registered in `OpInfo`, to replace the `grad_op_type_` field. The `OpInfo` should be ```cpp struct OpInfo { - GradOpDescMaker grad_op_maker_; + std::function<std::vector<std::unique_ptr<OpDescBind>>(const OpDescBind&)> grad_op_maker_; ... }; ``` The `grad_op_maker_` is `nullptr` if the operator does not have associated gradient operators. +We propose a base class called `GradOpDescMakerBase` to let operator developers generate `Gradient Operators` easily. The public interface of that class is + +```cpp +class GradOpDescMakerBase { +public: + GradOpDescMakerBase(const OpDescBind&); + virtual std::vector<std::unique_ptr<OpDescBind>> operator()() const = 0; +}; +``` + +We can convert `GradOpDescMakerBase` to `std::function<std::vector<std::unique_ptr<OpDescBind>>(const OpDescBind&)>` by + +```cpp +using GradOpMaker = ...; +std::function<std::vector<std::unique_ptr<OpDescBind>>(const OpDescBind&)> func; +func = [] (const OpDescBind& fwd_op) { + GradOpMaker maker(fwd_op); + return maker(); +}; +``` + +We can write many helper functions since `GradOpDescMakerBase` is a class now. The basic helper functions get the variables of `Input`, `Output`, `InputGradient` and `OutputGradient` in the forward operator. + We should change the registration macros at the same time. In the current solution, there is no difference between forward operators and backward operators, so `REGISTER_OP` just registers one operator. If `REGISTER_OPERATOR` contains `OpProtoAndCheckerMaker` and `GradOpDescMaker`, we just list them in the same macro. It can be done by a macro that takes `__VA_ARGS__`. The user interface should be
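a sketch like the following (the original snippet did not survive in this copy, so the `minus` operator and maker names below are illustrative, not the PR's verbatim example):

```cpp
// A GradOpDescMaker can be a plain function returning gradient op descriptions,
vector<OpDesc> MinusOpGradMaker(OpDesc) {...}
// listed in one macro together with the operator class and its proto maker:
REGISTER_OPERATOR(minus, MinusOp, MinusOpProtoAndCheckerMaker, MinusOpGradMaker);
// Developers can still implement and register a gradient operator manually:
REGISTER_OPERATOR(minus_grad, MinusGradOp);
```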
diff --git a/paddle/api/Util.cpp b/paddle/api/Util.cpp index d369df5d4e04b4a8d822db0e72a8051150868ce6..7446d892fdc272fc3d91b38d75873270080dcbed 100644 --- a/paddle/api/Util.cpp +++ b/paddle/api/Util.cpp @@ -47,7 +47,7 @@ bool isUsingGpu() { return FLAGS_use_gpu; } void setUseGpu(bool useGpu) { FLAGS_use_gpu = useGpu; } bool isGpuVersion() { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU return false; #else return true; diff --git a/paddle/capi/Matrix.cpp b/paddle/capi/Matrix.cpp index d898ebe2612d749ca261d35139d1cd45bd355eef..5b3737a759ce4d7d85feceda854556d765aba2d8 100644 --- a/paddle/capi/Matrix.cpp +++ b/paddle/capi/Matrix.cpp @@ -46,7 +46,7 @@ paddle_error paddle_matrix_set_row(paddle_matrix mat, if (rowID >= ptr->mat->getHeight()) return kPD_OUT_OF_RANGE; paddle::real* buf = ptr->mat->getRowBuf(rowID); size_t width = ptr->mat->getWidth(); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU hl_memcpy(buf, rowArray, sizeof(paddle::real) * width); #else std::copy(rowArray, rowArray + width, buf); diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 0ec18de5b8a0e7cebdb91c30d2b45596b02dfa51..c0188c0e55a6db04a86d80c216fd162ad5259593 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -141,9 +141,26 @@ static std::unique_ptr<OperatorBase> BackwardRecursive( net->ops_[op_offset]->Rename(name, dup_outputs.back()); } // collect all the offset to append `add` op for each alias - insert_position.push_back( - {dup_op.back(), OpRegistry::CreateOp("add", {{"X", {dup_outputs}}}, - {{"Out", {name}}}, {})}); + // + // one variable is shared between multiple operators, so we insert `sum` + // operators one by one; each accumulates the previous partial sum with + // the next duplicated output. for (size_t output_idx = 0; output_idx < dup_outputs.size() - 1; + ++output_idx) { + auto insert_add_x = dup_outputs[output_idx]; + auto insert_add_y = dup_outputs[output_idx + 1]; + auto insert_add_out = name + "@SHARED@" + std::to_string(output_idx); + // the last inserted op writes the accumulated sum to the final output + if (output_idx == dup_outputs.size() - 2) { + insert_add_out = name; + } + // later ops take the previous partial sum as their first operand + if (output_idx != 0) { + insert_add_x = name + "@SHARED@" + std::to_string(output_idx - 1); + } + insert_position.push_back( + {dup_op.back(), + OpRegistry::CreateOp("sum", {{"X", {insert_add_x, insert_add_y}}}, + {{"Out", {insert_add_out}}}, {})}); + } } // make sure the inserted `add` ops follow the BFS order. @@ -182,7 +199,8 @@ static std::unique_ptr<OperatorBase> BackwardRecursive( // process recurrent gradient op as a special operator. if (forwardOp.Type() == "recurrent") { - // NOTE clean up cycle call somewhere (RNN's stepnet constains itself), or + // NOTE clean up cycle call somewhere (RNN's stepnet contains itself), + // or // this will result in infinite loop.
const auto& rnnop = *static_cast<const operators::RecurrentOp*>(&forwardOp); diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 6932f5b989a3e21ebc44ec4fec9f5223f2547d7a..a36e7bde8c61c6a8bfc6eefd46ce5e09907476ef 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -133,15 +133,18 @@ class FillZeroOpMaker : public OpProtoAndCheckerMaker { } }; -class AddOpMaker : public OpProtoAndCheckerMaker { +class SumOpMaker : public framework::OpProtoAndCheckerMaker { public: - AddOpMaker(OpProto *proto, OpAttrChecker *op_checker) + SumOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "x").AsDuplicable(); - AddOutput("Out", "out"); + AddInput("X", "the input tensors of sum operator.") + .AsDuplicable() + .NotInGradient(); + AddOutput("Out", "the output tensor of sum operator.").NotInGradient(); AddComment(""); } }; + } // namespace framework } // namespace paddle @@ -154,7 +157,7 @@ REGISTER_OP(mul, f::NOP, f::MulOpMaker, mul_grad, f::NOP); REGISTER_OP(sigmoid, f::NOP, f::SigmoidOpMaker, sigmoid_grad, f::NOP); REGISTER_OP_WITHOUT_GRADIENT(nograd, f::NOP, f::NoGradOpMaker); REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, f::NOP, f::FillZeroOpMaker); -REGISTER_OP(add, f::NOP, f::AddOpMaker, add_grad, f::NOP); +REGISTER_OP(sum, f::NOP, f::SumOpMaker, sum_grad, f::NOP); REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker); REGISTER_OP(many_output_op, f::NOP, f::ManyOutputOpMaker, many_output_op_grad, f::NOP); @@ -283,7 +286,7 @@ TEST(Backward, net_shared_weight) { ASSERT_TRUE(bwd->IsNetOp()); auto bwd_net = static_cast<ops::NetOp*>(bwd.get()); ASSERT_EQ(3UL, bwd_net->ops_.size()); - ASSERT_EQ("add", bwd_net->ops_[2]->Type()); + ASSERT_EQ("sum", bwd_net->ops_[2]->Type()); } TEST(Backward, op_register_grad_not_for_network) { diff --git a/paddle/framework/details/op_registry.h b/paddle/framework/details/op_registry.h index d2516ccc1eec21682276c2fddf049984453404d4..daa474e8c5a223589018720da29a5c3363b5934d 100644 --- a/paddle/framework/details/op_registry.h +++ b/paddle/framework/details/op_registry.h @@ -14,6 +14,7 @@ #pragma once +#include "paddle/framework/grad_op_desc_maker.h" #include "paddle/framework/op_info.h" #include "paddle/framework/op_proto_maker.h" #include "paddle/framework/operator.h" @@ -96,7 +97,10 @@ struct OpInfoFiller { template <typename T> struct OpInfoFiller<T, kGradOpDescMaker> { void operator()(const char* op_type, OpInfo* info) const { - info->grad_op_maker_ = new T(); + info->grad_op_maker_ = [](const OpDescBind& fwd_op) { + T maker(fwd_op); + return maker(); + }; } }; } // namespace details diff --git a/paddle/framework/grad_op_desc_maker.h b/paddle/framework/grad_op_desc_maker.h new file mode 100644 index 0000000000000000000000000000000000000000..e9ae6e22060850fe229998d3b651d08a5ca2033a --- /dev/null +++ b/paddle/framework/grad_op_desc_maker.h @@ -0,0 +1,124 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
*/ + +#pragma once +#include "paddle/framework/op_desc.h" +#include "paddle/framework/operator.h" + +namespace paddle { +namespace framework { + +class GradOpDescMakerBase { + public: + explicit GradOpDescMakerBase(const OpDescBind& fwd_op) : fwd_op_(fwd_op) {} + + virtual ~GradOpDescMakerBase() = default; + virtual std::vector<std::unique_ptr<OpDescBind>> operator()() const = 0; + + protected: + static std::vector<std::string> ToGradNames( + const std::vector<std::string>& var_names) { + std::vector<std::string> ret_val; + ret_val.reserve(var_names.size()); + std::transform(var_names.begin(), var_names.end(), + std::back_inserter(ret_val), GradVarName); + return ret_val; + } + + std::vector<std::string> InputGrad(const std::string& name) const { + return ToGradNames(fwd_op_.Input(name)); + } + + std::vector<std::string> OutputGrad(const std::string& name) const { + return ToGradNames(fwd_op_.Output(name)); + } + + std::vector<std::string> InputNames() const { + return this->fwd_op_.InputNames(); + } + + std::vector<std::string> OutputNames() const { + return this->fwd_op_.OutputNames(); + } + + std::vector<std::string> Input(const std::string& name) const { + return fwd_op_.Input(name); + } + + std::vector<std::string> Output(const std::string& name) const { + return fwd_op_.Output(name); + } + + const std::unordered_map<std::string, Attribute>& Attrs() const { + return fwd_op_.GetAttrMap(); + } + + const Attribute& GetAttr(const std::string& name) const { + auto& map = fwd_op_.GetAttrMap(); + auto it = map.find(name); + PADDLE_ENFORCE(it != map.end(), "Cannot find attribute %s", name); + return it->second; + } + + std::string ForwardOpType() const { return this->fwd_op_.Type(); } + + private: + const OpDescBind& fwd_op_; +}; + +class SingleGradOpDescMaker : public GradOpDescMakerBase { + public: + using GradOpDescMakerBase::GradOpDescMakerBase; + + std::vector<std::unique_ptr<OpDescBind>> operator()() const { + std::vector<std::unique_ptr<OpDescBind>> retv; + retv.emplace_back(this->Apply()); + return retv; + } + + protected: + virtual std::unique_ptr<OpDescBind> Apply() const = 0; +}; + +class DefaultGradOpDescMaker : public SingleGradOpDescMaker { + public: + using SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + virtual std::unique_ptr<OpDescBind> Apply() const { + auto* grad = new OpDescBind(); + grad->SetType(this->GradOpType()); + + for (auto& input_param : this->InputNames()) { + grad->SetInput(input_param, this->Input(input_param)); + grad->SetOutput(GradVarName(input_param), this->InputGrad(input_param)); + } + + for (auto& output_param : this->OutputNames()) { + grad->SetInput(output_param, this->Output(output_param)); + grad->SetInput(GradVarName(output_param), this->OutputGrad(output_param)); + } + + grad->SetAttrMap(this->Attrs()); + + return std::unique_ptr<OpDescBind>(grad); + } + + virtual std::string GradOpType() const { + return this->ForwardOpType() + "_grad"; + } +}; + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index 49786a4a6635f1b39356dbf9633c4e7da443f04e..b12c95b6b7b728ca902c63ae96bfecb2f653e20b 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -15,7 +15,7 @@ #pragma once #include <memory> -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/system/cuda/experimental/pinned_allocator.h> @@ -29,7 +29,7 @@ namespace paddle { namespace framework { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU template <typename T> using Vector = std::vector<T>; #else diff --git a/paddle/framework/lod_tensor.md b/paddle/framework/lod_tensor.md index 07bbdf9416c432052b3222757a61ac4bfd70fe14..d147f1c4257eec14664301edab8d1fe2f128d2b0 100644 --- a/paddle/framework/lod_tensor.md +++ b/paddle/framework/lod_tensor.md @@ -1,147 +1,175 @@ # Design Doc: LoD
(Level-of-Detail) Tensor -PaddlePaddle's RNN doesn't require that all instances have the same length. To do so, we introduce an extension to Tensor, namely, LoD Tensor. +Like other deep learning systems, PaddlePaddle supports training models from sequence data. Also, like other systems, PaddlePaddle represents a mini-batch of sequences as a Tensor. What is different is that PaddlePaddle doesn't require all sequences in a mini-batch to be of the same length. Thus there is no need for padding zeros. -## Challenge of Variable-length Inputs +| | TensorFlow | PaddlePaddle | +|-----------------------|------------|--------------| +| RNN | Support | Support | +| recursive RNN | Support | Support | +| padding zeros | Must | No need | +| blob data type | Tensor | LoDTensor | -People usually represent a mini-batch by a Tensor. For example, a mini-batch of 10 images, each of size 32x32, is a 10x32x32 Tensor. So a transformation, T, of all images can be a matrix multiplication of the 10xOx32-dimensional tensor T and the 10x32x32 Tensor. +PaddlePaddle achieves this flexibility by passing a new data type, *LoD Tensor*, between operators: a Tensor attached with a segmentation index known as *LoD*. The LoD index not only segments a tensor, but also recursively segments sub-sequences. This document presents the design of LoD and LoDTensor. -Another example is that each mini-batch contains 32 sentences, where each word is a D-dimensional one-hot vector. If all sentences have the same length L, we can represent this mini-batch by a 32xLxD tensor. However, in most cases, sentences have variable lengths, and we will need an index data structure to record these variable lengths. -## LoD as a Solution +## The Challenge: Variable-length Sequences -### Mini-Batch of variable-length sentences +Most deep learning systems represent a mini-batch as a Tensor. For example, a mini-batch of 10 images, each of size 32x32, is a 10x32x32 Tensor. Another example is that each mini-batch contains N sentences, where each word is a D-dimensional one-hot vector. Suppose that all sentences have the same length L; then we can represent this mini-batch by an NxLxD tensor. -Let's imagine a mini-batch of 3 variable lengths sentences, containing 3, 1, and 2 words respectively. We can represent it by a (3+1+2)xD tensor plus some index information: +Both examples show that the elements of sequences are usually of the same size. In the first example, all images are 32x32, and in the second one, all words are D-dimensional vectors. It doesn't make sense to allow variable-sized images, as that would require transformations like convolution to handle variable-sized Tensors. + +The real challenge is that in most cases, sentences have variable lengths, and we will need an index data structure to segment the tensor into sequences. Also, sequences might consist of sub-sequences. + + +## A Solution: The LoD Index + +To understand our solution, it is best to look at some examples. + +### A Mini-Batch of Sentences + +Let's imagine a mini-batch of 3 variable-length sentences composed of 3, 1, and 2 words, respectively. We can represent the mini-batch by a (3+1+2)xD tensor plus some index information: ``` - 3 3 1 2 ||| | || ``` -Each `|` represents a D-dimensional word vectors. The number 3 on top indicate 3 sentences, and numbers 3, 1, and 2 on the second level represent the number of words in each sentence. +where each `|` represents a D-dimensional word vector. The numbers, 3, 1, and 2, form a 1-level LoD.
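To make the index concrete, here is a minimal sketch (assuming the plain nested-vector index type that the LoD Index section below formalizes; the variable names are illustrative):

```cpp
#include <cstddef>
#include <vector>

// 1-level LoD for the mini-batch above: one vector of sentence lengths.
std::vector<std::vector<size_t>> lod = {{3, 1, 2}};
// The underlying data is a (3+1+2) x D tensor that stores the word
// vectors of all three sentences back-to-back.
```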
+ +### Recursive Sequences + +Let's check another example of a 2-level LoD Tensor. Consider a mini-batch of three articles with 3, 1, and 2 sentences, where each sentence consists of a variable number of words: + +``` +3 1 2 +3 2 4 1 2 3 +||| || |||| | || ||| +``` -### Mini-Batch of variable-length videos +### A Mini-Batch of Videos -This approach generalizes to the case where elements are not words, but higher dimensional objects, like images. Suppose that a mini-batch contains videos of the same frame size 640x480. If a mini-batch contains 3 videos of 3, 1, and 2 frames respectively. The underlying tensor is of size (3+1+2)x640x480. The index information illustrates as: +LoD tensors generalize to the case where elements are higher dimensional objects, like images. Suppose that a mini-batch contains videos of the same frame size 640x480. Here is a mini-batch of 3 videos with 3, 1, and 2 frames, respectively. ``` - 3 3 1 2 口口口 口 口口 ``` -where each `口` represents an image. +The underlying tensor is of size (3+1+2)x640x480, and each `口` represents a 640x480 image. -### Mini-Batch of fixed-size images +### A Mini-Batch of Images -Let's get back to a typical example, image classification, where each mini-batch has M fixed-sized images. The LoD Tensor representation is +In traditional cases like a mini-batch with N fixed-sized images, the LoD Tensor representation is as follows: ``` - M 1 1 1 1 1 口口口口 ... 口 ``` -The many 1's on the second level seem duplicated. For this particular case of 2 levels and the second level always have length 1, we can ignore the LoD index. - -### Design and summarization +In this case, we don't lose any information by ignoring the many 1's in the index and simply considering this LoD Tensor as a usual Tensor: -In summary, as long as that the essential elements (words or images) have the same size, we can represent mini-batches by a LoD Tensor: +``` +口口口口 ... 口 +``` -- The underlying tensor has size LxD1xD2x..., where D1xD2... is the size of the essential elements, and -- The first dimension size L has an additonal property -- a LoD index as a nested vector: +### Model Parameters - ```c++ - typedef std::vector<std::vector<int>> LoD; - ``` +A model parameter is just a usual Tensor, which, just like the above example, is a **0-level LoD Tensor**. -- The LoD index is not necessary when there are only two levels and all elements of the second level have length 1. -## Slicing of LoD Tensor +## The LoD Tensor -Consider that we have a network with three levels of RNN: the top level one handles articles, the second level one handles sentences, and the basic level one handles words. This network requires that mini-batches represented by 3 level LoD Tensor, for example, +Let us revisit the above example of the 2-level LoD Tensor ``` - 3 3 1 2 3 2 4 1 2 3 ||| || |||| | || ||| ``` -To allow each level of RNN to handle its input, we define **the slicing of a LoD Tensor is defined as getting the j-th sequence on level i, or the <i,j>-slice** +It is indeed a tree, where leaves are elementary sequences identified by **branches**. + +For example, the third sentence in the above example is identified by branch <0,2>, where 0 indicates the first article with length 3, and 2 indicates the third sentence in this article with length 4.
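Written out with the same nested-vector index (a sketch; the `LoD` typedef is formalized in the next section), the 2-level example and branch <0,2> look like this:

```cpp
#include <cstddef>
#include <vector>
using LoD = std::vector<std::vector<size_t>>;

// Level 0: sentences per article; level 1: words per sentence.
const LoD lengths = {{3, 1, 2}, {3, 2, 4, 1, 2, 3}};
// Branch <0,2>: within article 0 (lengths[0][0] == 3 sentences), the
// sentence at level-1 position 2 has lengths[1][2] == 4 words.
```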
+ +### The LoD Index -We can save the LoD index in the above example -For example, the <2,1>-slice of above slice is ``` -2 -|| +3 1 2 +3 2 4 1 2 3 ``` -and the <1,2>-slice of above example is in a not-full 2D matrix: ```c++ typedef std::vector<std::vector<size_t> > LoD; ``` -2 -2 3 -|| ||| where -Let's go on slicing this slice. Its <1,1>-slice is - `LoD.size()` is the number of levels, or the maximum length of branches, - `LoD[i][j]` is the length of the j-th segment at the i-th level. ``` -1 -1 -| ## The Offset Representation ``` To quickly access elementary sequences, we adopt an offset representation -- instead of saving the lengths, we save the beginning and ending elements of sequences. -### The Slicing Algorithm In the above example, we accumulate the length of elementary sequences: -The algorithm, with over-simplified data structure, is defined as ``` -3 2 4 1 2 3 ``` -```c++ -typedef std::vector<std::vector<int>> LoD; into offsets -struct LoDTensor { - LoD lod_; - float* tensor_; -}; ``` 0 3 5 9 10 12 15 = = = = = = 3 2+3 4+5 1+9 2+10 3+12 -LoDTensor Slice(const LoDTensor& lodt, int level, int sequence); ``` so we know that the first sentence is from word 0 to word 3, and the second sentence from word 3 to word 5. -Let us revisit the example above Similarly, the lengths in the top level LoD ``` - 3 -3 1 2 -3 2 4 1 2 3 -||| || |||| | || ||| ``` 3 1 2 -Suppose that we want to retrieve the <1,2>-slice are transformed into offsets of elements/words as follows: ``` -2 -2 3 -|| ||| ``` 0 9 10 15 = = = 3+2+4 1+9 2+3+10 ``` -we will need to find out the starting position of this slice by summing over all leaf nodes in `LoD` to the left of the slice, i.e., 3 + 2 + 4 + 1 = 10. so we can tell that the first article is from word 0 to word 9, and the second article is from word 9 to word 10. The complete offset representation is as follows: -To avoid the traversal of the LoD tree at slicing time, we can do it at the construction time -- instead of saving the lengths of the next level in the LoD tree, we can save the starting offset of the next level. ``` 0 9 10 15 0 3 5 9 10 12 15 ||| || |||| | || ||| ```
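The conversion from lengths to these word offsets can be sketched as follows (an illustration of the rule above, not PaddlePaddle's actual implementation):

```cpp
#include <cstddef>
#include <vector>
using LoD = std::vector<std::vector<size_t>>;

// Convert a length-based LoD into the offset representation above: the
// bottom level stores prefix sums of word counts, and each upper level
// picks the word offset reached after its cumulative sub-segment count.
LoD LengthsToOffsets(const LoD& lengths) {
  LoD offsets(lengths.size());
  auto& bottom = offsets.back();
  bottom.push_back(0);
  for (size_t len : lengths.back()) bottom.push_back(bottom.back() + len);
  for (int i = static_cast<int>(lengths.size()) - 2; i >= 0; --i) {
    const auto& below = offsets[i + 1];
    size_t seg = 0;
    offsets[i].push_back(below[seg]);
    for (size_t len : lengths[i]) {
      seg += len;
      offsets[i].push_back(below[seg]);
    }
  }
  return offsets;
}
// LengthsToOffsets({{3, 1, 2}, {3, 2, 4, 1, 2, 3}})
//   == {{0, 9, 10, 15}, {0, 3, 5, 9, 10, 12, 15}}
```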
## Slicing of LoD Tensors When we use the above 2-level LoD Tensor as the input to a nested-RNN, we need to retrieve certain sequences. Here we define the sequence identified by branch <i,j,...> as the **<i,j,...>-slice**. -For example, above LoD Tensor can be transformed into ``` - 0 -0 9 10 -0 3 5 9 10 12 -||| || |||| | || ||| ``` -We don't really need the 0 on top, so the LoD Tensor could be +For example, the <2>-slice of the above example is ``` -0 9 10 -0 3 5 9 10 12 -||| || |||| | || ||| +10 15 +10 12 15 + || ||| ``` +and the <2,0>-slice of the above slice is + +``` +10 12 + || +```
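A top-level slice can be computed directly on the offset representation; the sketch below (an illustration, not the framework's API) reproduces the <2>-slice above:

```cpp
#include <cstddef>
#include <vector>
using LoD = std::vector<std::vector<size_t>>;

// Take the <j>-slice at the top level of an offset-based LoD. The tensor
// data would be sliced with the same [begin, end) word range.
LoD TopLevelSlice(const LoD& offsets, size_t j) {
  size_t begin = offsets[0][j], end = offsets[0][j + 1];
  LoD out{{begin, end}};
  for (size_t lvl = 1; lvl < offsets.size(); ++lvl) {
    std::vector<size_t> seg;
    for (size_t off : offsets[lvl])
      if (off >= begin && off <= end) seg.push_back(off);
    out.push_back(seg);
  }
  return out;
}
// TopLevelSlice({{0, 9, 10, 15}, {0, 3, 5, 9, 10, 12, 15}}, 2)
//   == {{10, 15}, {10, 12, 15}}, i.e. the <2>-slice shown above.
```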
diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index 33a064890cc62a4e3f04c79bc000951e76fd477c..852f0f1eb8ea09b336d656afea4f5b146c8d45de 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -31,15 +31,6 @@ const std::vector<std::string> &OpDescBind::Input( return it->second; } -std::vector<std::string> OpDescBind::InputNames() const { - std::vector<std::string> retv; - retv.reserve(this->inputs_.size()); - for (auto &ipt : this->inputs_) { - retv.push_back(ipt.first); - } - return retv; -} - void OpDescBind::SetInput(const std::string &param_name, const std::vector<std::string> &args) { need_update_ = true; @@ -54,15 +45,6 @@ const std::vector<std::string> &OpDescBind::Output( return it->second; } -std::vector<std::string> OpDescBind::OutputNames() const { - std::vector<std::string> retv; - retv.reserve(this->outputs_.size()); - for (auto &ipt : this->outputs_) { - retv.push_back(ipt.first); - } - return retv; -} - void OpDescBind::SetOutput(const std::string &param_name, const std::vector<std::string> &args) { need_update_ = true; diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index 0af416971556c812db238caeb7959a0a488c6e1b..397393f796ed11312578859495529f4aedd17e78 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -35,15 +35,11 @@ class OpDescBind { const std::vector<std::string> &Input(const std::string &name) const; - std::vector<std::string> InputNames() const; - void SetInput(const std::string &param_name, const std::vector<std::string> &args); const std::vector<std::string> &Output(const std::string &name) const; - std::vector<std::string> OutputNames() const; - void SetOutput(const std::string &param_name, const std::vector<std::string> &args); @@ -61,9 +57,6 @@ class OpDescBind { void SetBlockAttr(const std::string &name, BlockDescBind &block); - // Only be used in C++ - void SetAttrMap(const AttributeMap &attr_map); - Attribute GetAttr(const std::string &name) const; int GetBlockAttr(const std::string &name) const; @@ -71,7 +64,23 @@ // Only be used in C++ const AttributeMap &GetAttrMap() const; + // Only be used in C++ + void SetAttrMap(const AttributeMap &attr_map); + + std::vector<std::string> InputNames() const { return MapKeys(inputs_); } + std::vector<std::string> OutputNames() const { return MapKeys(outputs_); } + private: + template <typename MapType> + static std::vector<std::string> MapKeys(const MapType &map) { + std::vector<std::string> ret_val; + ret_val.reserve(map.size()); + std::transform( + map.begin(), map.end(), std::back_inserter(ret_val), + [](const typename MapType::value_type &pair) { return pair.first; }); + return ret_val; + } + void Sync(); OpDesc op_desc_; diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h index 9672e540c8dd7bc3ee7993200e606320087d9ba7..8b7882485ff82860b884d3a8bc3d5543e2426898 100644 --- a/paddle/framework/op_info.h +++ b/paddle/framework/op_info.h @@ -25,16 +25,10 @@ namespace paddle { namespace framework { -class GradOpDescMakerBase { - public: - virtual ~GradOpDescMakerBase() = default; - virtual std::vector<OpDescBind> operator()(const OpDescBind&) const = 0; -}; - struct OpInfo { OpCreator creator_; std::string grad_op_type_; - GradOpDescMakerBase* grad_op_maker_{nullptr}; + GradOpMakerFN grad_op_maker_; OpProto* proto_{nullptr}; OpAttrChecker* checker_{nullptr}; diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 4ee2c7d27561c3855059c42e1604f353c65bfa41..aca6579f36e6f840c8a9f759cd2baa391c19fddb 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -211,7 +211,7 @@ class OpKernelRegistrar : public Registrar { // TODO(fengjiayi): The following macros // seems ugly, do we have better method? -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU #define USE_OP_KERNEL(op_type) USE_OP_DEVICE_KERNEL(op_type, CPU) #else #define USE_OP_KERNEL(op_type) \ diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 1012a30b0a6b28e79b92007244bb6e0139cf39f9..21c1c6f9e688a34ddfb98f515059409568c06988 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -25,7 +25,7 @@ Eigen::DefaultDevice& ExecutionContext::GetEigenDevice< return *device_context_.GetEigenDevice<platform::CPUPlace>(); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU template <> Eigen::GpuDevice& ExecutionContext::GetEigenDevice<platform::GPUPlace, Eigen::GpuDevice>() const { diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index a5405f9c31543b5733f9db923c2a6f8b968cfc2d..1cde1f74b83fce2e1d90370a9dad792a43043ffd 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -65,7 +65,7 @@ inline T* Tensor::mutable_data(platform::Place place) { holder_.reset(new PlaceholderImpl<T, platform::CPUPlace>( boost::get<platform::CPUPlace>(place), size)); } else if (platform::is_gpu_place(place)) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU PADDLE_THROW("'GPUPlace' is not supported in CPU only device."); } #else @@ -103,7 +103,7 @@ inline void Tensor::CopyFrom(const Tensor& src, memory::Copy(boost::get<platform::CPUPlace>(dst_place), dst_ptr, boost::get<platform::CPUPlace>(src_place), src_ptr, size); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU else if (platform::is_gpu_place(src_place) && platform::is_cpu_place(dst_place)) { memory::Copy(boost::get<platform::CPUPlace>(dst_place), dst_ptr, diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc index e2ec738de35c90c6a06c9a46b062d4cce55f5eda..86c6945ab588a2bbbc5bd38c98a041e95ce17917 100644 --- a/paddle/framework/tensor_test.cc +++ b/paddle/framework/tensor_test.cc @@ -74,7 +74,7 @@ TEST(Tensor, MutableData) { EXPECT_EQ(p1, p2); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU { Tensor src_tensor; float* p1 = nullptr; @@ -126,7 +126,7 @@ TEST(Tensor, ShareDataWith) { ASSERT_EQ(src_tensor.data(), dst_tensor.data()); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU { Tensor src_tensor; Tensor dst_tensor; @@ -163,7 +163,7 @@ TEST(Tensor, Slice) { EXPECT_EQ(src_data_address + 3 * 4 * 1 * sizeof(int), slice_data_address); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU { Tensor src_tensor; src_tensor.mutable_data(make_ddim({6, 9}), GPUPlace()); @@ -218,7 +218,7 @@ TEST(Tensor, CopyFrom) { EXPECT_EQ(dst_ptr[i], slice_ptr[i]); } } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU { Tensor src_tensor; Tensor gpu_tensor; diff --git a/paddle/framework/type_defs.h b/paddle/framework/type_defs.h index dec5066f1e649a27c9f47fc5f93faa1498b37de7..a5b94722136eca7ac0a22eb7ef113532330428ab 100644 --- a/paddle/framework/type_defs.h +++ b/paddle/framework/type_defs.h @@ -20,6 +20,7 @@ namespace paddle { namespace framework { class OperatorBase; +class OpDescBind; using VariableNameMap = std::map<std::string, std::vector<std::string>>; // The order should be as same as framework.proto @@ -34,5 +35,8 @@ using OpCreator = std::function; +using GradOpMakerFN = + std::function<std::vector<std::unique_ptr<OpDescBind>>(const OpDescBind&)>; + } // namespace framework } // namespace paddle diff --git a/paddle/function/BlockExpandOp.cpp b/paddle/function/BlockExpandOp.cpp index
a89b6bba45843d81264819cad6ba053f28314f6b..ad78f5f5844a69597a74acba47f4fcdd8a39aa18 100644 --- a/paddle/function/BlockExpandOp.cpp +++ b/paddle/function/BlockExpandOp.cpp @@ -194,7 +194,7 @@ public: REGISTER_TYPED_FUNC(BlockExpand, CPU, BlockExpandForward); REGISTER_TYPED_FUNC(BlockExpandGrad, CPU, BlockExpandBackward); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(BlockExpand, GPU, BlockExpandForward); REGISTER_TYPED_FUNC(BlockExpandGrad, GPU, BlockExpandBackward); #endif diff --git a/paddle/function/ContextProjectionOp.cpp b/paddle/function/ContextProjectionOp.cpp index b87750b74247bd0eb822340bc5a85d41b86ecec2..ab18c39df8a74603cc42c911d56e1d6f3fe37402 100644 --- a/paddle/function/ContextProjectionOp.cpp +++ b/paddle/function/ContextProjectionOp.cpp @@ -395,7 +395,7 @@ REGISTER_TYPED_FUNC(ContextProjectionForward, REGISTER_TYPED_FUNC(ContextProjectionBackward, CPU, ContextProjectionBackwardFunc); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(ContextProjectionForward, GPU, ContextProjectionForwardFunc); diff --git a/paddle/function/CosSimOp.cpp b/paddle/function/CosSimOp.cpp index 7ece7b2dfedaf460741c97b5a700eb632d85cabc..4418f144d3a9c88a3c575ce43def119f9ffd82bd 100644 --- a/paddle/function/CosSimOp.cpp +++ b/paddle/function/CosSimOp.cpp @@ -233,7 +233,7 @@ private: REGISTER_TYPED_FUNC(CosSimForward, CPU, CosSimForwardFunc); REGISTER_TYPED_FUNC(CosSimBackward, CPU, CosSimBackwardFunc); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(CosSimForward, GPU, CosSimForwardFunc); REGISTER_TYPED_FUNC(CosSimBackward, GPU, CosSimBackwardFunc); #endif diff --git a/paddle/function/CropOp.cpp b/paddle/function/CropOp.cpp index f12ee43e3d72f9ac776eaff93914228850694dd2..39504cc2c19868639568c48d13c751133c62a17f 100644 --- a/paddle/function/CropOp.cpp +++ b/paddle/function/CropOp.cpp @@ -169,7 +169,7 @@ private: REGISTER_TYPED_FUNC(Crop, CPU, CropFunc); REGISTER_TYPED_FUNC(CropGrad, CPU, CropGradFunc); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(Crop, GPU, CropFunc); REGISTER_TYPED_FUNC(CropGrad, GPU, CropGradFunc); #endif diff --git a/paddle/function/CrossMapNormalOp.cpp b/paddle/function/CrossMapNormalOp.cpp index ef878bfbba961bdd3d5212e19fb83bb1e285e47f..1cf0918bedf28350d9f42f3294c2bf89ee1341ba 100644 --- a/paddle/function/CrossMapNormalOp.cpp +++ b/paddle/function/CrossMapNormalOp.cpp @@ -336,7 +336,7 @@ private: REGISTER_TYPED_FUNC(CrossMapNormal, CPU, CrossMapNormalFunc); REGISTER_TYPED_FUNC(CrossMapNormalGrad, CPU, CrossMapNormalGradFunc); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(CrossMapNormal, GPU, CrossMapNormalFunc); REGISTER_TYPED_FUNC(CrossMapNormalGrad, GPU, CrossMapNormalGradFunc); #endif diff --git a/paddle/function/DepthwiseConvOp.cpp b/paddle/function/DepthwiseConvOp.cpp index 2f3112fe657cd381891dc53c7179e7520911e8c9..7656ab3d0aca8f54e7a5f9bb3d74ddba295664ad 100644 --- a/paddle/function/DepthwiseConvOp.cpp +++ b/paddle/function/DepthwiseConvOp.cpp @@ -292,7 +292,7 @@ REGISTER_TYPED_FUNC(DepthwiseConvGradInput, REGISTER_TYPED_FUNC(DepthwiseConvGradFilter, CPU, DepthwiseConvGradFilterFunction); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(DepthwiseConv, GPU, DepthwiseConvFunction); REGISTER_TYPED_FUNC(DepthwiseConvGradInput, GPU, diff --git a/paddle/function/DepthwiseConvOpTest.cpp b/paddle/function/DepthwiseConvOpTest.cpp index d8e8c889d5c23bf9b2b5fd0b0393395883188fd8..39033ecb2b5c6916ab6f500a6f1f1fba7c0fb4e2 100644 --- 
a/paddle/function/DepthwiseConvOpTest.cpp +++ b/paddle/function/DepthwiseConvOpTest.cpp @@ -17,7 +17,7 @@ limitations under the License. */ namespace paddle { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(DepthwiseConv, Forward) { DepthwiseConvolution( "GemmConv-CPU", "DepthwiseConv-GPU", forward); diff --git a/paddle/function/GemmConvOp.cpp b/paddle/function/GemmConvOp.cpp index f8cf4ebea8d724f0291b981647622b63e3d84495..68e08c1480a1c976f2abb9b0d23c5aa63bd78f71 100644 --- a/paddle/function/GemmConvOp.cpp +++ b/paddle/function/GemmConvOp.cpp @@ -340,7 +340,7 @@ public: REGISTER_TYPED_FUNC(GemmConv, CPU, GemmConvFunction); REGISTER_TYPED_FUNC(GemmConvGradInput, CPU, GemmConvGradInputFunction); REGISTER_TYPED_FUNC(GemmConvGradFilter, CPU, GemmConvGradFilterFunction); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(GemmConv, GPU, GemmConvFunction); REGISTER_TYPED_FUNC(GemmConvGradInput, GPU, GemmConvGradInputFunction); REGISTER_TYPED_FUNC(GemmConvGradFilter, GPU, GemmConvGradFilterFunction); diff --git a/paddle/function/GemmConvOpTest.cpp b/paddle/function/GemmConvOpTest.cpp index 5283d79a5a53d979ae4e134f7e46b7ee106e9c44..bd1cf3c6a4c3f3fa67f74dd36a6100602d23d6f6 100644 --- a/paddle/function/GemmConvOpTest.cpp +++ b/paddle/function/GemmConvOpTest.cpp @@ -24,7 +24,7 @@ TEST(GemmConv, NaiveConv) { "NaiveConv-CPU", "GemmConv-CPU", forward); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(GemmConv, Forward) { Convolution( "GemmConv-CPU", "GemmConv-GPU", forward); diff --git a/paddle/function/Im2ColTest.cpp b/paddle/function/Im2ColTest.cpp index acc88a553abe7ac58b629aba9b850df58cee7f81..55325e94b58dd587474751051d29ad1eb2fc34e3 100644 --- a/paddle/function/Im2ColTest.cpp +++ b/paddle/function/Im2ColTest.cpp @@ -116,7 +116,7 @@ void TestIm2ColFunctor() { TEST(Im2ColFunctor, CPU) { TestIm2ColFunctor(); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(Im2ColFunctor, GPU) { TestIm2ColFunctor(); } diff --git a/paddle/function/MulOp.cpp b/paddle/function/MulOp.cpp index 25e41edad54bec0f76a3de4799fab14241407272..655026320c19e3e2c81421d46b28e569deadae1a 100644 --- a/paddle/function/MulOp.cpp +++ b/paddle/function/MulOp.cpp @@ -341,7 +341,7 @@ private: }; REGISTER_TYPED_FUNC(MulOp, CPU, MulFunc); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(MulOp, GPU, MulFunc); #endif } // namespace paddle diff --git a/paddle/function/PadOp.cpp b/paddle/function/PadOp.cpp index adba7c92ece505eecc74edce6b393cf27fa10ccc..24c9bf4e72a2a04f1ba6ad5fbedc2880a663990b 100644 --- a/paddle/function/PadOp.cpp +++ b/paddle/function/PadOp.cpp @@ -207,7 +207,7 @@ private: REGISTER_TYPED_FUNC(Pad, CPU, PadFunc); REGISTER_TYPED_FUNC(PadGrad, CPU, PadGradFunc); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(Pad, GPU, PadFunc); REGISTER_TYPED_FUNC(PadGrad, GPU, PadGradFunc); #endif diff --git a/paddle/function/RowConvOp.cpp b/paddle/function/RowConvOp.cpp index b6501e8f4db7fd33891cd80e07a6f36dd0b34532..09e702f71a5b1d72b715597f18cadf6b6f2b73a7 100644 --- a/paddle/function/RowConvOp.cpp +++ b/paddle/function/RowConvOp.cpp @@ -217,7 +217,7 @@ public: REGISTER_TYPED_FUNC(RowConv, CPU, RowConvFunc); REGISTER_TYPED_FUNC(RowConvGrad, CPU, RowConvGradFunc); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(RowConv, GPU, RowConvFunc); REGISTER_TYPED_FUNC(RowConvGrad, GPU, RowConvGradFunc); #endif diff --git a/paddle/function/SwitchOp.cpp b/paddle/function/SwitchOp.cpp index 
01e252a8dc0cd5fa1e964efa01d04cf282b3dfe7..db839b5b76ffce8880bd71e94585c4731cb7a94c 100644 --- a/paddle/function/SwitchOp.cpp +++ b/paddle/function/SwitchOp.cpp @@ -132,7 +132,7 @@ public: REGISTER_TYPED_FUNC(NCHW2NHWC, CPU, NCHW2NHWCFunc); REGISTER_TYPED_FUNC(NHWC2NCHW, CPU, NHWC2NCHWFunc); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(NCHW2NHWC, GPU, NCHW2NHWCFunc); REGISTER_TYPED_FUNC(NHWC2NCHW, GPU, NHWC2NCHWFunc); #endif diff --git a/paddle/gserver/layers/BatchNormBaseLayer.cpp b/paddle/gserver/layers/BatchNormBaseLayer.cpp index f7a80e23e1bd49549bec57b360587adc6b423794..55f52816ab496a1d62e5e2778babab12da17a4ce 100644 --- a/paddle/gserver/layers/BatchNormBaseLayer.cpp +++ b/paddle/gserver/layers/BatchNormBaseLayer.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include "BatchNormalizationLayer.h" #include "Layer.h" #include "paddle/utils/Stat.h" -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU #include "CudnnBatchNormLayer.h" #endif diff --git a/paddle/gserver/layers/BatchNormalizationLayer.cpp b/paddle/gserver/layers/BatchNormalizationLayer.cpp index 412762d38475422be98ffeb87ffcfb028c3e035f..33cf24431d70e541854858d077b2438587ea93f5 100644 --- a/paddle/gserver/layers/BatchNormalizationLayer.cpp +++ b/paddle/gserver/layers/BatchNormalizationLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/utils/Stat.h" -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU #include "hl_batch_transpose.h" #endif #include "BatchNormalizationLayer.h" @@ -90,7 +90,7 @@ void BatchNormalizationLayer::expandMat(const MatrixPtr& in, MatrixPtr& out) { size_t batchSize = in->getHeight(); CHECK_EQ(out->getHeight(), batchSize * imgPixels_); if (useGpu_) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU LOG(FATAL) << "paddle is compiled only for cpu"; #else batchTranspose( @@ -127,7 +127,7 @@ void BatchNormalizationLayer::shrinkMat(const MatrixPtr& in, MatrixPtr& out) { } CHECK_EQ(in->getHeight(), static_cast(batchSize * imgPixels_)); if (useGpu_) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU LOG(FATAL) << "paddle is compiled only for cpu"; #else batchTranspose( diff --git a/paddle/gserver/layers/PoolLayer.cpp b/paddle/gserver/layers/PoolLayer.cpp index 96d5c54accc047b685502a178de2d290f3158731..43ab4e4d4741cc938e92f948d48eee853ebe808b 100644 --- a/paddle/gserver/layers/PoolLayer.cpp +++ b/paddle/gserver/layers/PoolLayer.cpp @@ -15,7 +15,7 @@ limitations under the License. 
*/ #include "PoolLayer.h" #include "PoolProjectionLayer.h" #include "paddle/utils/Logging.h" -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU #include "CudnnPoolLayer.h" #endif namespace paddle { @@ -53,7 +53,7 @@ Layer* PoolLayer::create(const LayerConfig& config) { const std::string& pool = config.inputs(0).pool_conf().pool_type(); if (pool == "max-projection" || pool == "avg-projection") { return new PoolProjectionLayer(config); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU } else if (CudnnPoolLayer::typeCheck(pool)) { return new CudnnPoolLayer(config); #endif diff --git a/paddle/gserver/tests/LayerGradUtil.cpp b/paddle/gserver/tests/LayerGradUtil.cpp index a38880e14cdfcef05461dae567d198e5400c6bb1..59df057a807e5d8bc0ab13b8f8961e3374517045 100644 --- a/paddle/gserver/tests/LayerGradUtil.cpp +++ b/paddle/gserver/tests/LayerGradUtil.cpp @@ -674,7 +674,7 @@ void testLayerGradKernel(TestConfig testConf, bool useGpu, bool useWeight, float epsilon) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU if (useGpu) return; #endif FLAGS_use_gpu = useGpu; diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/gserver/tests/test_BatchNorm.cpp index 659eefa31bdb1f2433d03a59d5bf4782c71bdecf..c1c85f8fac43af6a7f260c10a18499e61ca7f17a 100644 --- a/paddle/gserver/tests/test_BatchNorm.cpp +++ b/paddle/gserver/tests/test_BatchNorm.cpp @@ -119,7 +119,7 @@ TEST(Layer, batchNorm) { CHECK_EQ(static_cast(convLayer->getOutputValue()->getWidth()), 576); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU void batchNormInference(int n, int c, int h, int w) { MatrixPtr input = std::make_shared(n, c * h * w); MatrixPtr cudnnOut = std::make_shared(n, c * h * w); diff --git a/paddle/gserver/tests/test_ConvUnify.cpp b/paddle/gserver/tests/test_ConvUnify.cpp index e7325e0cc3b7195b5fec77c878e3e087cfc643e0..16556469cbf854a8e61321efe4e93e86f5641952 100644 --- a/paddle/gserver/tests/test_ConvUnify.cpp +++ b/paddle/gserver/tests/test_ConvUnify.cpp @@ -117,7 +117,7 @@ MatrixPtr doOneConvTest(size_t imgSize, } TEST(Layer, convParaUnified) { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU MatrixPtr input, resultCpu, resultGpu; /// TEST1 for conv /// diff --git a/paddle/gserver/tests/test_DetectionOutput.cpp b/paddle/gserver/tests/test_DetectionOutput.cpp index af43dc51fad35c834635b543b1a016f6d717de1e..1a83f48fae17410b0014cf6b258b4aac29a86242 100644 --- a/paddle/gserver/tests/test_DetectionOutput.cpp +++ b/paddle/gserver/tests/test_DetectionOutput.cpp @@ -150,7 +150,7 @@ TEST(Layer, detectionOutputLayerFwd) { useGpu, result2); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU // GPU case 1. 
useGpu = true; inputLoc = Matrix::create(1, 16, false, useGpu); diff --git a/paddle/gserver/tests/test_Evaluator.cpp b/paddle/gserver/tests/test_Evaluator.cpp index 93996392d221d531f65caf465decbffdbc2d0384..42bb5705726cab26f3b5c126cdf08a75930241bf 100644 --- a/paddle/gserver/tests/test_Evaluator.cpp +++ b/paddle/gserver/tests/test_Evaluator.cpp @@ -51,7 +51,7 @@ void testEvaluator(TestConfig testConf, string testEvaluatorName, size_t batchSize, bool useGpu) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU if (useGpu) return; #endif FLAGS_use_gpu = useGpu; diff --git a/paddle/gserver/tests/test_KmaxSeqScore.cpp b/paddle/gserver/tests/test_KmaxSeqScore.cpp index 308abe6816428bc0f98ec32e892622fa4a23b1ae..1594de850210b631594e88bd7326b310a9c72eaf 100644 --- a/paddle/gserver/tests/test_KmaxSeqScore.cpp +++ b/paddle/gserver/tests/test_KmaxSeqScore.cpp @@ -97,7 +97,7 @@ TEST(Layer, kmaxSeqScoreLayer) { Matrix::create(subSeqStartPosition.back(), 1, false, false); std::vector mode = {false}; -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU mode.push_back(true); #endif diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 090bde7b203652e3ffb1662b8f5b8937885d2608..e887dee5f98c6ac5af3171ce43c048d92322186c 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU #include #endif #include @@ -258,7 +258,7 @@ void testProjectionConv(size_t groups, bool isDeconv) { true); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(Projection, conv) { /// test ConvProjection testProjectionConv(1, false); @@ -422,7 +422,7 @@ TEST(Layer, depthwiseConvLayer) { // 'depthwise_conv' is a sepecial case of 'exconv' whose // groups size equals to the input channels size. 
testDepthwiseConvLayer("exconv", /* useGpu= */ false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testDepthwiseConvLayer("exconv", /* useGpu= */ true); #endif } @@ -480,7 +480,7 @@ void testConvLayer(const string& type, bool trans, bool useGpu) { TEST(Layer, convLayer) { testConvLayer("exconv", /* trans= */ false, /* useGpu= */ false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testConvLayer("exconv", /* trans= */ false, /* useGpu= */ true); testConvLayer("cudnn_conv", /* trans= */ false, /* useGpu= */ true); #endif @@ -525,7 +525,7 @@ TEST(Layer, convTransLayer) { for (auto useGpu : {false, true}) { testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ useGpu); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testConvTransLayer("cudnn_convt", /* trans= */ false, /* useGpu= */ true); #endif } @@ -638,7 +638,7 @@ TEST(Layer, SelectiveFullyConnectedLayer) { /* trans= */ false, /* useGup= */ false, false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testLayerGrad(config, "selective_fc", 100, @@ -1210,7 +1210,7 @@ void testPoolLayer(const string& poolType, bool trans, bool useGpu) { testLayerGrad(config, "pool", 100, trans, useGpu); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU void testPoolLayer2(const string& poolType, bool trans, bool useGpu) { TestConfig config; config.inputDefs.push_back({INPUT_DATA, "layer_0", 3200, 0}); @@ -1236,7 +1236,7 @@ TEST(Layer, PoolLayer) { testPoolLayer("avg-projection", /* trans= */ false, /* useGpu= */ false); testPoolLayer("max-projection", /* trans= */ false, /* useGpu= */ false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testPoolLayer("avg-projection", /* trans= */ false, /* useGpu= */ true); testPoolLayer("max-projection", /* trans= */ false, /* useGpu= */ true); testPoolLayer("cudnn-max-pool", /* trans= */ false, /* useGpu= */ true); @@ -1309,7 +1309,7 @@ void testPool3DLayer(const string& poolType, bool trans, bool useGpu) { TEST(Layer, Pool3DLayer) { testPool3DLayer("avg", /* trans= */ false, /* useGpu= */ false); testPool3DLayer("max", /* trans= */ false, /* useGpu= */ false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testPool3DLayer("avg", /* trans= */ false, /* useGpu= */ true); testPool3DLayer("max", /* trans= */ false, /* useGpu= */ true); #endif @@ -1695,7 +1695,7 @@ void testBatchNormLayer(const string& type, bool trans, bool useGpu) { TEST(Layer, BatchNormalizationLayer) { testBatchNormLayer("batch_norm", false, false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testBatchNormLayer("batch_norm", false, true); if (hl_get_cudnn_lib_version() >= int(4000)) { testBatchNormLayer("cudnn_batch_norm", false, true); @@ -1744,7 +1744,7 @@ void testBatchNorm3DLayer(const string& type, bool trans, bool useGpu) { TEST(Layer, testBatchNorm3DLayer) { testBatchNorm3DLayer("batch_norm", false, false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testBatchNorm3DLayer("batch_norm", false, true); if (hl_get_cudnn_lib_version() >= int(4000)) { testBatchNorm3DLayer("cudnn_batch_norm", false, true); @@ -2262,7 +2262,7 @@ void test3DConvLayer(const string& type, bool trans, bool useGpu) { TEST(Layer, test3DConvLayer) { test3DConvLayer("conv3d", /* trans= */ false, /* useGpu= */ false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU test3DConvLayer("conv3d", /* trans= */ false, /* useGpu= */ true); #endif } @@ -2339,7 +2339,7 @@ void test3DDeConvLayer(const string& type, bool trans, bool useGpu) { TEST(Layer, test3DDeConvLayer) { test3DDeConvLayer("deconv3d", /* trans= */ false, /* useGpu= */ false); -#ifndef 
PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU test3DDeConvLayer("deconv3d", /* trans= */ false, /* useGpu= */ true); #endif } diff --git a/paddle/gserver/tests/test_NetworkCompare.cpp b/paddle/gserver/tests/test_NetworkCompare.cpp index d36f72360f8ebd2033fb3e8c0e1b30911abba362..e322fef9a468c60a2f7dd4842207cf1cac44935e 100644 --- a/paddle/gserver/tests/test_NetworkCompare.cpp +++ b/paddle/gserver/tests/test_NetworkCompare.cpp @@ -243,7 +243,7 @@ TEST(Compare, concat_slice) { compareNetwork(config_file_a, config_file_b); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(Compare, img_pool) { std::string config_file_a = "./gserver/tests/img_pool_a.conf"; std::string config_file_b = "./gserver/tests/img_pool_b.conf"; diff --git a/paddle/gserver/tests/test_PriorBox.cpp b/paddle/gserver/tests/test_PriorBox.cpp index ae0e3bc3d24c54eb84c7b5f5053e629607ef4310..cbc0fff7b895d0b34b0af42e881aa68a1754eeea 100644 --- a/paddle/gserver/tests/test_PriorBox.cpp +++ b/paddle/gserver/tests/test_PriorBox.cpp @@ -151,7 +151,7 @@ TEST(Layer, priorBoxLayerFwd) { useGpu, result); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU // reset the input parameters variance[1] = 0.1; variance[3] = 0.2; diff --git a/paddle/gserver/tests/test_ProtoDataProvider.cpp b/paddle/gserver/tests/test_ProtoDataProvider.cpp index e11bf402c27898b8fdbd3fceeb8aeff8906352db..988dbc2513d7d64f647036ed5a8b890143d0e606 100644 --- a/paddle/gserver/tests/test_ProtoDataProvider.cpp +++ b/paddle/gserver/tests/test_ProtoDataProvider.cpp @@ -485,7 +485,7 @@ TEST(ProtoDataProvider, test) { // Currently in async mode, useGpu is not supported continue; } -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU if (useGpu) { continue; } @@ -525,7 +525,7 @@ TEST(ProtoDataProvider, constant_slots) { for (int numConstantSlots : {1, 2}) { for (int useGpu : numTwoArray) { for (int dataCompression : numTwoArray) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU if (useGpu) { continue; } @@ -708,7 +708,7 @@ TEST(ProtoSequenceDataProvider, test) { // Currently in async mode, useGpu is not supported continue; } -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU if (useGpu) { continue; } diff --git a/paddle/gserver/tests/test_PyDataProvider.cpp b/paddle/gserver/tests/test_PyDataProvider.cpp index db883543c306c1938eb9da188ce20ed768018efb..f6522febf8cae8be03e98564758a93eca73de9d8 100644 --- a/paddle/gserver/tests/test_PyDataProvider.cpp +++ b/paddle/gserver/tests/test_PyDataProvider.cpp @@ -37,7 +37,7 @@ TEST(PyDataProvider, py_fill_slots) { config.clear_files(); std::string dataFile = "gserver/tests/pyDataProvider/pyDataProviderList"; config.set_files(dataFile); -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU bool useGpu = false; #else bool useGpu = true; @@ -71,7 +71,7 @@ TEST(PyDataProvider, py_fill_nest_slots) { std::string dataFile = "gserver/tests/pyDataProvider/pyDataProviderList"; config.set_files(dataFile); EXPECT_EQ(config.IsInitialized(), true); -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU bool useGpu = false; #else bool useGpu = true; diff --git a/paddle/gserver/tests/test_SelectiveFCLayer.cpp b/paddle/gserver/tests/test_SelectiveFCLayer.cpp index ab23d00a2cb6077147f5b89664a8e2437b4cd63b..b25d32fb2c045758abf1b65e78adb68b705ee3e9 100644 --- a/paddle/gserver/tests/test_SelectiveFCLayer.cpp +++ b/paddle/gserver/tests/test_SelectiveFCLayer.cpp @@ -321,7 +321,7 @@ TEST(Layer, SelectiveFcLayer_train_dense_mul) { "filelist=gserver/tests/SelectiveFcTest/dense_mul_list"; for (auto useGpu : {false, true}) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU if (useGpu) 
{ break; } @@ -388,7 +388,7 @@ void testSelectiveFcLayerTrainSparseMul(const LayerConfig& config, outMatSelfc->getWidth(), outMatSelfc->getElementCnt())); cpuOutMatSelfc->copyFrom(*outMatSelfc, HPPL_STREAM_DEFAULT); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU if (useGpu) { hl_stream_synchronize(HPPL_STREAM_DEFAULT); } @@ -418,7 +418,7 @@ void testSelectiveFcLayerTrainSparseMul(const LayerConfig& config, MatrixPtr cpuOutMatFc( new CpuMatrix(outMatFc->getHeight(), outMatFc->getWidth())); cpuOutMatFc->copyFrom(*outMatFc, HPPL_STREAM_DEFAULT); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU if (useGpu) { hl_stream_synchronize(HPPL_STREAM_DEFAULT); } @@ -443,7 +443,7 @@ TEST(Layer, SelectiveFcLayer_train_sparse_mul) { selLayerConfig.set_size(fcLayerWidth); testSelectiveFcLayerTrainSparseMul(selLayerConfig, false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testSelectiveFcLayerTrainSparseMul(selLayerConfig, true); #endif } diff --git a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp b/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp index e1d4ae16176433b898ba88dd60550e44b4fe37be..f28149081b11d52651747f8fccc6e9f13d672d87 100644 --- a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp +++ b/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp @@ -195,7 +195,7 @@ TEST(Layer, SeqSliceLayer) { vector> ends; std::vector mode = {false}; -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU mode.push_back(true); #endif genSeqInfo(seqStartPos, subSeqStartPos); diff --git a/paddle/gserver/tests/test_WarpCTCLayer.cpp b/paddle/gserver/tests/test_WarpCTCLayer.cpp index 55427e2f12fd7b77c6eea1f65b3229e6fd29d71d..ae5b64257fa06df696e0def76c67e21722f9bc97 100644 --- a/paddle/gserver/tests/test_WarpCTCLayer.cpp +++ b/paddle/gserver/tests/test_WarpCTCLayer.cpp @@ -199,7 +199,7 @@ TEST(Layer, WarpCTCLayer) { for (auto batchSize : {1, 10, 32}) { for (auto normByTimes : {false, true}) { for (auto useGpu : {false, true}) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU if (useGpu) continue; #endif LOG(INFO) << "layerSize=" << layerSize << " batchSize=" << batchSize diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 0023b4d0f5da500f380ecb836b7c54e050b13d67..de02f9c0d57f591052f9c28ac3f9afe418fd5df4 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -670,7 +670,7 @@ void GpuMatrix::leftMul(Matrix& a, real scaleAB, real scaleT) { } void GpuMatrix::selectRows(Matrix& table, IVector& ids) { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU CHECK(dynamic_cast(&table)); CHECK(table.useGpu()); CHECK(ids.useGpu()); @@ -694,7 +694,7 @@ void GpuMatrix::selectRows(Matrix& table, IVector& ids) { } void GpuMatrix::addToRows(Matrix& table, IVector& ids) { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU CHECK(dynamic_cast(&table)); CHECK(table.useGpu()); CHECK(ids.useGpu()); @@ -741,7 +741,7 @@ void GpuMatrix::rowMax(Matrix& max) { } void GpuMatrix::rowMax(IVector& maxIds, Matrix& maxVal) { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU CHECK(maxIds.useGpu() && maxVal.useGpu()) << "Matrix type are not equal"; size_t numSamples = getHeight(); size_t beam = maxVal.getWidth(); diff --git a/paddle/math/SparseMatrix.cpp b/paddle/math/SparseMatrix.cpp index 6370c77386688a334fa0de8b4e2b272882e9e2b0..1f31082ae8ea840ed2eeef33ac97791a1ca4e966 100644 --- a/paddle/math/SparseMatrix.cpp +++ b/paddle/math/SparseMatrix.cpp @@ -836,7 +836,7 @@ void GpuSparseMatrix::zeroMem() { } void GpuSparseMatrix::rowMax(IVector& maxIds, Matrix& maxVal) { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU CHECK(maxIds.useGpu() && 
             maxVal.useGpu()) << "Matrix type are not equal";
   size_t numSamples = getHeight();
   size_t beam = maxVal.getWidth();
diff --git a/paddle/math/Vector.cpp b/paddle/math/Vector.cpp
index eb87ee9bb7936d27c0c32a1a4b35ff49871c0a10..54e57b255d3cf5e71618c15322e37e037c20210c 100644
--- a/paddle/math/Vector.cpp
+++ b/paddle/math/Vector.cpp
@@ -172,7 +172,7 @@ void GpuVectorT::isEqualTo(const VectorT& b, const T& value) {
 template
 void GpuVectorT::selectFrom(const VectorT& src, const VectorT& ids) {
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   hl_vector_select_from(this->getData(),
                         this->getSize(),
                         src.getData(),
@@ -850,7 +850,7 @@ CpuGpuVectorT::CpuGpuVectorT(CpuGpuVectorT& src,
                              size_t size)
     : sync_(nullptr) {
   CHECK_LE(offset + size, static_cast(src.getSize()));
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   SyncedFlag* flag = src.getSync();
   if (*flag == DATA_AT_CPU) {
     src.copyToGpu();  // will set synchronous data between CPU and GPU
@@ -861,7 +861,7 @@ CpuGpuVectorT::CpuGpuVectorT(CpuGpuVectorT& src,
   auto cMemHandle = (src.getVector(false))->getMemoryHandle();
   cpuVectorT_ = std::make_shared>(
       size, std::dynamic_pointer_cast(cMemHandle), offset);
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   auto gMemHandle = (src.getVector(true))->getMemoryHandle();
   gpuVectorT_ = std::make_shared>(
       size, std::dynamic_pointer_cast(gMemHandle), offset);
diff --git a/paddle/math/tests/test_Allocator.cpp b/paddle/math/tests/test_Allocator.cpp
index 1ca70ea84c867b83013625eaee141f5b75fad4ae..cf2f66aea153bab22220e4e5a8b580778cddd058 100644
--- a/paddle/math/tests/test_Allocator.cpp
+++ b/paddle/math/tests/test_Allocator.cpp
@@ -68,7 +68,7 @@ void testPoolAllocator() {
 
 TEST(Allocator, Pool) {
   testPoolAllocator();
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   testPoolAllocator();
 #endif
 }
@@ -92,7 +92,7 @@ TEST(MemoryHandle, Cpu) {
   EXPECT_EQ(ptr1, ptr2);
 }
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 TEST(MemoryHandle, Gpu) {
   int numGpu = hl_get_device_count();
diff --git a/paddle/math/tests/test_BaseMatrix.cpp b/paddle/math/tests/test_BaseMatrix.cpp
index 22ce39701fca7b650fc03794cb0701e0987d2dae..730759f3dbfd4c43938c41f2bc30f3b703b3de77 100644
--- a/paddle/math/tests/test_BaseMatrix.cpp
+++ b/paddle/math/tests/test_BaseMatrix.cpp
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 /**
  * This test file use autotest::AutoCompare and cmpWithoutArg to compares the
  * implementation of CPU and GPU member function in
diff --git a/paddle/math/tests/test_CpuGpuVector.cpp b/paddle/math/tests/test_CpuGpuVector.cpp
index 58bc43a38ba9465a832fcd0652e6309c403577e3..ccb4a902b0baf9e279857c760f0396433d187966 100644
--- a/paddle/math/tests/test_CpuGpuVector.cpp
+++ b/paddle/math/tests/test_CpuGpuVector.cpp
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 #include
 #include "paddle/math/Vector.h"
diff --git a/paddle/math/tests/test_ExecViaCpu.cpp b/paddle/math/tests/test_ExecViaCpu.cpp
index 04c856453d2ec4ad764e37ae430e3e30ac0dea0b..2d439cd0600433989bb0150ad227c8e32741cfb0 100644
--- a/paddle/math/tests/test_ExecViaCpu.cpp
+++ b/paddle/math/tests/test_ExecViaCpu.cpp
@@ -94,7 +94,7 @@ void testWrapper(F&& f) {
   }
 }
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 TEST(ExecViaCpu, test1) {
   testWrapper(f);
   testWrapper(&f);
diff --git a/paddle/math/tests/test_GpuProfiler.cpp b/paddle/math/tests/test_GpuProfiler.cpp
index e6b5dba446b5a0022ade76b188895c4e0e2a22b4..6dab187e3e0a9275c317775a15260d313e480415 100644
--- a/paddle/math/tests/test_GpuProfiler.cpp
+++ b/paddle/math/tests/test_GpuProfiler.cpp
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 #include
 #include "paddle/math/Matrix.h"
diff --git a/paddle/math/tests/test_Matrix.cpp b/paddle/math/tests/test_Matrix.cpp
index 1c21da5b76e95603258a5006d0c57b00126e65b9..7a145eae6a2e7bd221b0f33a1430bfd020d86931 100644
--- a/paddle/math/tests/test_Matrix.cpp
+++ b/paddle/math/tests/test_Matrix.cpp
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 /**
  * This test file use autotest::AutoCompare and cmpWithArg to compares the
  * implementation of CPU and GPU member function in Matrix.cpp.
diff --git a/paddle/math/tests/test_SparseMatrix.cpp b/paddle/math/tests/test_SparseMatrix.cpp
index c0572dfdbf738a4dfad04811b3a3e1b65487ff6d..8151dde106c9c824f785c0bcd5812bb84f2c75f8 100644
--- a/paddle/math/tests/test_SparseMatrix.cpp
+++ b/paddle/math/tests/test_SparseMatrix.cpp
@@ -47,7 +47,7 @@ struct MatrixPara {
   SparseFormat format;
 };
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 void test_sparse_matrix_mul(MatrixPara paraA,
                             MatrixPara paraB,
                             MatrixPara paraC) {
@@ -452,7 +452,7 @@ TEST(Matrix, SparseMatrixCSRFormatTrimFrom) {
   matB->trimFrom(*mat);
   checkSMatrixEqual2(matA, matB);
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   GpuSparseMatrixPtr matC = std::make_shared(
       height, trimedWidth, height, FLOAT_VALUE, SPARSE_CSR, true);
   matC->trimFrom(*mat);
@@ -546,7 +546,7 @@ TEST(Matrix, SparseMatrixCSCFormatTrimFrom) {
   matB->trimFrom(*mat);
   checkSMatrixEqual2(matA, matB);
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   GpuSparseMatrixPtr matC = std::make_shared(
       height, trimedWidth, height, FLOAT_VALUE, SPARSE_CSC, true);
   matC->trimFrom(*mat);
diff --git a/paddle/math/tests/test_Tensor.cu b/paddle/math/tests/test_Tensor.cu
index 31b693afa8bd50f77a8efb67769e6215dd755bd3..d03698dee25fdd6dd49f2a3fdb5c605333440f49 100644
--- a/paddle/math/tests/test_Tensor.cu
+++ b/paddle/math/tests/test_Tensor.cu
@@ -270,7 +270,7 @@ TEST(Unary, BaseOp) {
   TestUnaryVectorT testCpuIVector(
       testUnaryBaseOpInt);
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   TestUnaryMatrix testGpuMatrix(testUnaryBaseOp);
   TestUnaryVectorT testGpuVector(testUnaryBaseOp);
   TestUnaryVectorT testGpuIVector(
@@ -317,7 +317,7 @@ void testUnayrMathOp(Tensor& A1, Tensor& A2) {
 TEST(Unary, MathOp) {
   TestUnaryMatrix testCpu(testUnayrMathOp);
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   TestUnaryMatrix testGpu(testUnayrMathOp);
 #endif
 }
@@ -374,7 +374,7 @@ void testUnayrCompareOp(Tensor& A1, Tensor& A2) {
 TEST(Unary, CompareOp) {
   TestUnaryMatrix testCpu(testUnayrCompareOp);
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   TestUnaryMatrix testGpu(testUnayrCompareOp);
 #endif
 }
@@ -536,7 +536,7 @@ void testBinaryBaseOp(Tensor& A1, Tensor& A2, Tensor& B) {
 TEST(Binary, BaseOp) {
   TestBinaryMatrix testCpu(testBinaryBaseOp);
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   TestBinaryMatrix testGpu(testBinaryBaseOp);
 #endif
 }
@@ -710,7 +710,7 @@ void testBinaryMathOp(Tensor& A1, Tensor& A2, Tensor& B) {
 TEST(Binary, MathOp) {
   TestBinaryMatrix testCpu(testBinaryMathOp);
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   TestBinaryMatrix testGpu(testBinaryMathOp);
 #endif
 }
@@ -810,7 +810,7 @@ void testBinaryCompareOp(Tensor& A1, Tensor& A2, Tensor& B) {
 TEST(Binary, CompareOp) {
   TestBinaryMatrix testCpu(testBinaryCompareOp);
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   TestBinaryMatrix testGpu(testBinaryCompareOp);
 #endif
 }
@@ -955,7 +955,7 @@ void testTernaryBaseOp(Tensor& A1, Tensor& A2, Tensor& B, Tensor& C) {
 TEST(Ternary, BaseOp) {
   TestTernaryMatrix testCpu(testTernaryBaseOp);
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   TestTernaryMatrix testGpu(testTernaryBaseOp);
 #endif
 }
@@ -1058,7 +1058,7 @@ void testTernaryCompareOp(Tensor& A1, Tensor& A2, Tensor& B, Tensor& C) {
 TEST(Ternary, CompareOp) {
   TestTernaryMatrix testCpu(testTernaryCompareOp);
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   TestTernaryMatrix testGpu(testTernaryCompareOp);
 #endif
 }
@@ -1086,7 +1086,7 @@ void testQuaternaryAdd(
 TEST(Quaternary, BaseOp) {
   TestQuaternaryMatrix testCpu(testQuaternaryAdd);
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   TestQuaternaryMatrix testGpu(testQuaternaryAdd);
 #endif
 }
@@ -1156,7 +1156,7 @@ void testQuaternaryCompareOp(
 TEST(Quaternary, CompareOp) {
   TestQuaternaryMatrix testCpu(testQuaternaryCompareOp);
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   TestQuaternaryMatrix testGpu(testQuaternaryCompareOp);
 #endif
 }
diff --git a/paddle/math/tests/test_TrainingAlgorithm.cpp b/paddle/math/tests/test_TrainingAlgorithm.cpp
index 4a88844b43ef40af988d2b391d2bef4568dea9b7..36ac0240076db80dd50a46cfeef0445e355762ca 100644
--- a/paddle/math/tests/test_TrainingAlgorithm.cpp
+++ b/paddle/math/tests/test_TrainingAlgorithm.cpp
@@ -91,7 +91,7 @@ int VectorCheckErr(const VectorPtr& vector1, const VectorPtr& vector2) {
 typedef std::function testMatrixFunc;
 
 void testCase(testMatrixFunc matrixFunc) {
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   for (auto useGpu : {false, true}) {
 #else
   for (auto useGpu : {false}) {
diff --git a/paddle/math/tests/test_batchTranspose.cpp b/paddle/math/tests/test_batchTranspose.cpp
index 4eb9837909ffaaf0f483ab65ece7a0b29fd49319..0189e534eb2c001298ee31f9e1b22e0b5d5b4ef4 100644
--- a/paddle/math/tests/test_batchTranspose.cpp
+++ b/paddle/math/tests/test_batchTranspose.cpp
@@ -17,7 +17,7 @@ limitations under the License. */
 
 using namespace paddle;  // NOLINT
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 TEST(MatrixBatchTransTest, test_batch_matrix_transpose) {
   const int nx = 100;
   const int ny = 50;
diff --git a/paddle/math/tests/test_lazyAssign.cu b/paddle/math/tests/test_lazyAssign.cu
index 92afab4ff7f5ff4acc219c5ac783733340c5726a..04f23cff55db45c39049538545430bc5996cce5d 100644
--- a/paddle/math/tests/test_lazyAssign.cu
+++ b/paddle/math/tests/test_lazyAssign.cu
@@ -72,7 +72,7 @@ void testLazyAssign(int height, int width) {
 
 TEST(lazyAssign, CPU) { testMatrixCase(testLazyAssign); }
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 TEST(lazyAssign, GPU) { testMatrixCase(testLazyAssign); }
 #endif
 
@@ -142,6 +142,6 @@ void testSgdUpdate(int height, int width) {
 
 TEST(sgdUpdate, CPU) { testMatrixCase(testSgdUpdate); }
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 TEST(sgdUpdate, GPU) { testMatrixCase(testSgdUpdate); }
 #endif
diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp
index 061fb22e3fd744d9d9895fd1008089e4a6ce6a0f..7735877ac89af2832df199c18bf42494b375212f 100644
--- a/paddle/math/tests/test_matrixCompare.cpp
+++ b/paddle/math/tests/test_matrixCompare.cpp
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 /// This unittest checks GpuMatrix/CpuMatrix get same result, so disable when
 /// only cpu version.
diff --git a/paddle/math/tests/test_perturbation.cpp b/paddle/math/tests/test_perturbation.cpp
index 60ebae015381a3901c14d0cd4c1225e54ac5726f..dff18136ae54877671e2f80f7265021870efa6ec 100644
--- a/paddle/math/tests/test_perturbation.cpp
+++ b/paddle/math/tests/test_perturbation.cpp
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 #include
 #include
diff --git a/paddle/math/tests/test_sparseMatrixCompare.cpp b/paddle/math/tests/test_sparseMatrixCompare.cpp
index a9185a4b24b13ca0287b0f67375c4599e8b9ac78..e39cc0a2f64572a4594ee8f22ecf4f838c691696 100644
--- a/paddle/math/tests/test_sparseMatrixCompare.cpp
+++ b/paddle/math/tests/test_sparseMatrixCompare.cpp
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 /// This unittest checks GpuSparseMatrix/CpuSparseMatrix get same result,
 // so disable when
 /// only cpu version.
diff --git a/paddle/memory/detail/buddy_allocator.cc b/paddle/memory/detail/buddy_allocator.cc
index bb44970109c05d239e6b92d90b2079b752fa0104..ed0c3374ff6645725c907947c43733b6a3d14eef 100644
--- a/paddle/memory/detail/buddy_allocator.cc
+++ b/paddle/memory/detail/buddy_allocator.cc
@@ -175,7 +175,7 @@ void* BuddyAllocator::SystemAlloc(size_t size) {
 }
 
 BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool() {
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   if (system_allocator_->UseGpu()) {
     if ((total_used_ + total_free_) == 0) {
      // Compute the maximum allocation size for the first allocation.
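Every guard flip in this patch follows one mechanical rule: `#ifndef PADDLE_ONLY_CPU` becomes `#ifdef PADDLE_WITH_GPU`, and `#ifdef PADDLE_ONLY_CPU` becomes `#ifndef PADDLE_WITH_GPU`, so GPU-only code is now gated on a positive feature macro that CMake defines only when `WITH_GPU` is ON. A minimal sketch of the resulting convention (the `SystemAllocSketch` function below is illustrative, not part of the patch):

```cpp
#include <cstdlib>

// PADDLE_WITH_GPU is defined by cmake/configure.cmake only when WITH_GPU=ON,
// so the GPU branch compiles away entirely in a CPU-only build.
void* SystemAllocSketch(std::size_t size, bool use_gpu) {
#ifdef PADDLE_WITH_GPU
  if (use_gpu) {
    // A real allocator would route to the CUDA allocator here.
  }
#endif
  return std::malloc(size);  // CPU path, always available
}
```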
diff --git a/paddle/memory/detail/system_allocator.cc b/paddle/memory/detail/system_allocator.cc
index a270bd59581520859d43cddd2fc0cfa72080f46d..64f8182b5cd9eb3e5b9fef5d0aaec700f6e3e17f 100644
--- a/paddle/memory/detail/system_allocator.cc
+++ b/paddle/memory/detail/system_allocator.cc
@@ -62,7 +62,7 @@ void CPUAllocator::Free(void* p, size_t size, size_t index) {
 
 bool CPUAllocator::UseGpu() const { return false; }
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 
 void* GPUAllocator::Alloc(size_t& index, size_t size) {
   // CUDA documentation doesn't explain if cudaMalloc returns nullptr
diff --git a/paddle/memory/detail/system_allocator.h b/paddle/memory/detail/system_allocator.h
index 82ba322e057575c460b1d51d719c9b0fa459273e..6b1f40347b983b0a73d76919e5e6e8aa91f2794e 100644
--- a/paddle/memory/detail/system_allocator.h
+++ b/paddle/memory/detail/system_allocator.h
@@ -40,7 +40,7 @@ class CPUAllocator : public SystemAllocator {
   virtual bool UseGpu() const;
 };
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 class GPUAllocator : public SystemAllocator {
  public:
   virtual void* Alloc(size_t& index, size_t size);
diff --git a/paddle/memory/detail/system_allocator_test.cc b/paddle/memory/detail/system_allocator_test.cc
index ba44e06ddb68e92e4086a8006b868557b0c89b50..57d5443d5098c78cb7a59670a94accec56e841bf 100644
--- a/paddle/memory/detail/system_allocator_test.cc
+++ b/paddle/memory/detail/system_allocator_test.cc
@@ -56,7 +56,7 @@ TEST(CPUAllocator, LockMem) {
   TestAllocator(a, 0);
 }
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 TEST(GPUAllocator, Alloc) {
   paddle::memory::detail::GPUAllocator a;
   TestAllocator(a, 2048);
diff --git a/paddle/memory/memcpy.cc b/paddle/memory/memcpy.cc
index c96a697a7e022684688b31c05da43e52812100d8..184d0f8fa72c436fbfa2c83e18a273a83504522e 100644
--- a/paddle/memory/memcpy.cc
+++ b/paddle/memory/memcpy.cc
@@ -26,7 +26,7 @@ void Copy(platform::CPUPlace, void* dst,
   std::memcpy(dst, src, num);
 }
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 template <>
 void Copy(platform::CPUPlace dst_place,
           void* dst,
diff --git a/paddle/memory/memcpy.h b/paddle/memory/memcpy.h
index 2b9c0eada6e8406fc81baec7f331a8dd5b8b0ec1..7142831d434a3e786620caea47b8715997288b9d 100644
--- a/paddle/memory/memcpy.h
+++ b/paddle/memory/memcpy.h
@@ -33,7 +33,7 @@ namespace memory {
 template
 void Copy(DstPlace, void* dst, SrcPlace, const void* src, size_t num);
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 
 /**
  * \brief Copy memory from one place to another place.
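`memory::Copy` keeps a place-agnostic front end: the CPU-to-CPU overload is always compiled, while the overloads that touch `GPUPlace` exist only under `PADDLE_WITH_GPU`. A hedged usage sketch of the always-available overload (standalone helper, not taken from the patch):

```cpp
#include <vector>

#include "paddle/memory/memcpy.h"
#include "paddle/platform/place.h"

// Copy 16 floats host-to-host; overload resolution picks the CPU
// specialization because both places are CPUPlace.
void CopyHostBuffer() {
  paddle::platform::CPUPlace cpu;
  std::vector<float> src(16, 1.0f), dst(16, 0.0f);
  paddle::memory::Copy(cpu, dst.data(), cpu, src.data(),
                       sizeof(float) * src.size());
}
```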
diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc
index 29bc26f9d3bca0e30896657431f9a9bb1dac0d1d..6d5a74dafe642f2959928c77b2bbd1239b721847 100644
--- a/paddle/memory/memory.cc
+++ b/paddle/memory/memory.cc
@@ -62,7 +62,7 @@ size_t Used(platform::CPUPlace place) {
   return GetCPUBuddyAllocator()->Used();
 }
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 
 BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) {
   using BuddyAllocVec = std::vector;
diff --git a/paddle/memory/memory_test.cc b/paddle/memory/memory_test.cc
index 53cc63a098d0802479e3a371717adb7596c249ed..7a617f04dc5447d4f1770b500a92e88c58355a6b 100644
--- a/paddle/memory/memory_test.cc
+++ b/paddle/memory/memory_test.cc
@@ -80,7 +80,7 @@ TEST(BuddyAllocator, CPUMultAlloc) {
   }
 }
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 
 size_t align(size_t size, paddle::platform::GPUPlace place) {
   size += sizeof(paddle::memory::detail::Metadata);
diff --git a/paddle/operators/cond_op.cc b/paddle/operators/cond_op.cc
index db20b69f3fab1a5a5bfcd445536437dc4b428559..2737104a205cbc1e18ce4a3a45592a416d38a874 100644
--- a/paddle/operators/cond_op.cc
+++ b/paddle/operators/cond_op.cc
@@ -126,8 +126,7 @@ void CondOp::PrepareDataForSubnet(
       dim[0] = index_tensors[i].dims()[0];
       tensor_child->mutable_data(dim, platform::CPUPlace());
 
-      Gather(dev_ctx.GetPlace(), tensor_parent, &index_tensors[i],
-             tensor_child);
+      CPUGather(dev_ctx, *tensor_parent, index_tensors[i], tensor_child);
     }
   }
 
@@ -188,7 +187,7 @@ void CondOp::MergeDataFromSubnet(const framework::Scope& scope,
     Variable* var_child = sub_scopes[i]->FindVar(output);
     PADDLE_ENFORCE_NOT_NULL(var_child);
     auto* tensor_child = &var_child->Get();
-    ScatterUpdate(dev_ctx.GetPlace(), tensor_child, &index_tensors[i],
+    ScatterAssign(dev_ctx, *tensor_child, index_tensors[i],
                   tensor_parent);
   }
 }
diff --git a/paddle/operators/detail/strided_memcpy.h b/paddle/operators/detail/strided_memcpy.h
index b165224b37fb091c094a823179256c3dd40a37c9..9f05a2632266a11f23d34162ee6fdc2260ec7cc3 100644
--- a/paddle/operators/detail/strided_memcpy.h
+++ b/paddle/operators/detail/strided_memcpy.h
@@ -34,7 +34,7 @@ struct StridedMemcpyFunctor {
       auto& cpu_place = boost::get(place);
       memory::Copy(cpu_place, dst, cpu_place, src, sizeof(T) * dst_dim.head);
     } else {
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
       auto& gpu_place = boost::get(place);
       auto& cuda_ctx =
           reinterpret_cast(dev_ctx);
diff --git a/paddle/operators/gather.cu.h b/paddle/operators/gather.cu.h
new file mode 100644
index 0000000000000000000000000000000000000000..8d04ecd284226c7b4c6cdd5531915fee2d94ce61
--- /dev/null
+++ b/paddle/operators/gather.cu.h
@@ -0,0 +1,79 @@
+/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#pragma once
+#include "paddle/framework/tensor.h"
+#include "paddle/platform/place.h"
+
+namespace paddle {
+namespace operators {
+
+using framework::Tensor;
+using platform::Place;
+
+#define CUDA_1D_KERNEL_LOOP(i, n)                              \
+  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
+       i += blockDim.x * gridDim.x)
+
+template
+__global__ void GatherCUDAKernel(const T* params, const int* indices,
+                                 T* output, size_t index_size,
+                                 size_t slice_size) {
+  CUDA_1D_KERNEL_LOOP(i, index_size * slice_size) {
+    int indices_i = i / slice_size;
+    int slice_i = i - indices_i * slice_size;  // offset inside the slice
+    int gather_i = indices[indices_i];
+    int params_i = gather_i * slice_size + slice_i;
+    *(output + i) = *(params + params_i);
+  }
+}
+
+/**
+ * A thin wrapper on gpu tensor
+ * Return a new tensor from source tensor, gathered according to index
+ * input[src]: type-T source Tensor
+ * input[index]: type-int index Tensor (1-D)
+ * return: output tensor
+ */
+template
+void GPUGather(const platform::DeviceContext& ctx, const Tensor& src,
+               const Tensor& index, Tensor* output) {
+  // PADDLE_ENFORCE(platform::is_gpu_place(place));
+  // check index of shape 1-D
+  PADDLE_ENFORCE(index.dims().size() == 1);
+  int index_size = index.dims()[0];
+
+  auto src_dims = src.dims();
+  framework::DDim output_dims(src_dims);
+  output_dims[0] = index_size;
+
+  // slice size
+  int slice_size = 1;
+  for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i];
+
+  const T* p_src = src.data();
+  const int* p_index = index.data();
+  T* p_output = output->data();
+
+  int block = 512;
+  int n = slice_size * index_size;
+  int grid = (n + block - 1) / block;
+
+  GatherCUDAKernel<<<
+      grid, block, 0,
+      reinterpret_cast(ctx).stream()>>>(
+      p_src, p_index, p_output, index_size, slice_size);
+}
+
+}  // namespace operators
+}  // namespace paddle
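`GPUGather` launches one thread per output element, and the grid-stride `CUDA_1D_KERNEL_LOOP` lets a fixed-size grid cover any `index_size * slice_size`. The indexing scheme, restated as a plain-C++ reference under the same assumptions (row-major `src`, 1-D `int` index; this reference function is illustrative, not part of the patch):

```cpp
// For flattened output offset i:
//   indices_i = i / slice_size  -> which index entry (output row)
//   slice_i   = i % slice_size  -> offset inside the copied row slice
// so output[i] = params[indices[indices_i] * slice_size + slice_i].
void GatherReference(const float* params, const int* indices, float* output,
                     int index_size, int slice_size) {
  for (int i = 0; i < index_size * slice_size; ++i) {
    int indices_i = i / slice_size;
    int slice_i = i - indices_i * slice_size;
    output[i] = params[indices[indices_i] * slice_size + slice_i];
  }
}
```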
diff --git a/paddle/operators/gather.h b/paddle/operators/gather.h
index 92fb51ec17709bc6f8abb2f516a9240fb5dc3a77..052db49cb3c2594eca8b9a5e3716689480089703 100644
--- a/paddle/operators/gather.h
+++ b/paddle/operators/gather.h
@@ -24,49 +24,40 @@ limitations under the License.
 */
 
 namespace paddle {
 namespace operators {
 
-// Implementation of CPU copy
-template
-void CPUGather(const T* src, const int* indices, const int slice_size,
-               const int index_size, T* output) {
-  const size_t slice_bytes = slice_size * sizeof(T);
-
-  for (int i = 0; i < index_size; ++i) {
-    int index_ = indices[i];
-    memcpy(output + i * slice_size, src + index_ * slice_size, slice_bytes);
-  }
-}
-
-// Implementation of GPU copy:
-template
-void GPUGather(const T* src, const int* index, const int slice_size,
-               const int index_size, T* output);
+using framework::Tensor;
 
 /**
+ * A thin wrapper for gathering on cpu tensor
  * Return a new tensor from source tensor, gathered according to index
  * input[src]: type-T source Tensor
  * input[index]: type-int index Tensor (1-D)
  * return: output tensor
  */
 template
-void Gather(const platform::Place& place, const paddle::framework::Tensor* src,
-            const paddle::framework::Tensor* index,
-            paddle::framework::Tensor* output) {
+void CPUGather(const platform::DeviceContext& ctx, const Tensor& src,
+               const Tensor& index, Tensor* output) {
+  PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()));
   // check index of shape 1-D
-  PADDLE_ENFORCE(index->dims().size() == 1);
-  int index_size = index->dims()[0];
+  PADDLE_ENFORCE(index.dims().size() == 1);
+  int index_size = index.dims()[0];
 
-  auto src_dims = src->dims();
+  auto src_dims = src.dims();
   framework::DDim output_dims(src_dims);
   output_dims[0] = index_size;
 
+  const T* p_src = src.data();
+  const int* p_index = index.data();
+  T* p_output = output->data();
+
   // slice size
   int slice_size = 1;
   for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i];
 
-  // Gathering
-  if (platform::is_cpu_place(place)) {
-    CPUGather(src->data(), index->data(), slice_size, index_size,
-              output->data());
+  const size_t slice_bytes = slice_size * sizeof(T);
+
+  for (int i = 0; i < index_size; ++i) {
+    int index_ = p_index[i];
+    memcpy(p_output + i * slice_size, p_src + index_ * slice_size,
+           slice_bytes);
   }
 }
diff --git a/paddle/operators/gather_op.cc b/paddle/operators/gather_op.cc
index da22bd0c52c27d7decd10e2e2b34fa38d0620da8..fe305337cbebd7c679ae1b8ee8aa2740472ee109 100644
--- a/paddle/operators/gather_op.cc
+++ b/paddle/operators/gather_op.cc
@@ -31,6 +31,8 @@ class GatherOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of GatherOp should not be null.");
 
+    auto index_dims = ctx->GetInputDim("Index");
+    PADDLE_ENFORCE(index_dims.size() == 1);
     int batch_size = ctx->GetInputDim("Index")[0];
     PADDLE_ENFORCE_GE(batch_size, 0, "Batch size must be >0");
     framework::DDim output_dims(ctx->GetInputDim("X"));
@@ -79,8 +81,5 @@ Out = X[Index]
 namespace ops = paddle::operators;
 REGISTER_OP(gather, ops::GatherOp, ops::GatherOpMaker, gather_grad,
             ops::GatherGradOp);
-REGISTER_OP_CPU_KERNEL(gather,
-                       ops::GatherOpKernel);
-REGISTER_OP_CPU_KERNEL(
-    gather_grad,
-    ops::GatherGradientOpKernel);
+REGISTER_OP_CPU_KERNEL(gather, ops::GatherOpKernel);
+REGISTER_OP_CPU_KERNEL(gather_grad, ops::GatherGradientOpKernel);
diff --git a/paddle/operators/gather_op.cu b/paddle/operators/gather_op.cu
new file mode 100644
index 0000000000000000000000000000000000000000..92219d6a433e6db0bb9886ed8670cbafaa843ff8
--- /dev/null
+++ b/paddle/operators/gather_op.cu
@@ -0,0 +1,64 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#include "gather.cu.h"
+#include "paddle/framework/eigen.h"
+#include "paddle/operators/gather_op.h"
+#include "scatter.cu.h"
+
+namespace paddle {
+namespace operators {
+
+template
+class GatherOpCUDAKernel : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext &ctx) const override {
+    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
+                   "This kernel only runs on GPU device.");
+    auto *x = ctx.Input("X");
+    auto *index = ctx.Input("Index");
+    auto *output = ctx.Output("Out");
+
+    output->mutable_data(ctx.GetPlace());
+
+    GPUGather(ctx.device_context(), *x, *index, output);
+  }
+};
+
+template
+class GatherGradOpCUDAKernel : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext &ctx) const override {
+    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
+                   "This kernel only runs on GPU device.");
+    auto *Index = ctx.Input("Index");
+    auto *dX = ctx.Output(framework::GradVarName("X"));
+    auto *dO = ctx.Input(framework::GradVarName("Out"));
+    auto *x = ctx.Input("X");
+
+    dX->mutable_data(ctx.GetPlace());
+    auto dxt = framework::EigenVector::Flatten(*dX);
+    auto place = ctx.GetEigenDevice();
+    dxt.device(place) = dxt.constant(static_cast(0));
+
+    GPUScatterAssign(ctx.device_context(), *dO, *Index, dX);
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OP_GPU_KERNEL(gather, ops::GatherOpCUDAKernel);
+REGISTER_OP_GPU_KERNEL(gather_grad, ops::GatherGradOpCUDAKernel);
diff --git a/paddle/operators/gather_op.h b/paddle/operators/gather_op.h
index 073e566e8f6962d62cc1b738672843421dcb4ee5..8276ed0d3d8b676aafab45fae70942e78b72b8e6 100644
--- a/paddle/operators/gather_op.h
+++ b/paddle/operators/gather_op.h
@@ -23,29 +23,40 @@ namespace operators {
 
 using Tensor = framework::Tensor;
 
-template
+template
 class GatherOpKernel : public framework::OpKernel {
  public:
  void Compute(const framework::ExecutionContext &ctx) const override {
-    auto *X = ctx.Input("X");
-    auto *Index = ctx.Input("Index");
-    auto *Y = ctx.Output("Out");
+    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
+                   "This kernel only runs on CPU.");
+
+    auto *x = ctx.Input("X");
+    auto *index = ctx.Input("Index");
+    auto *output = ctx.Output("Out");
+
+    output->mutable_data(ctx.GetPlace());
 
-    Y->mutable_data(ctx.GetPlace());
-    Gather(ctx.GetPlace(), X, Index, Y);
+    CPUGather(ctx.device_context(), *x, *index, output);
  }
 };
 
-template
+template
 class GatherGradientOpKernel : public framework::OpKernel {
  public:
  void Compute(const framework::ExecutionContext &ctx) const override {
+    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
+                   "This kernel only runs on CPU.");
+
     auto *Index = ctx.Input("Index");
     auto *dX = ctx.Output(framework::GradVarName("X"));
     auto *dO = ctx.Input(framework::GradVarName("Out"));
 
     dX->mutable_data(ctx.GetPlace());
-    ScatterUpdate(ctx.GetPlace(), dO, Index, dX);
+    auto dxt = framework::EigenVector::Flatten(*dX);
+    auto place = ctx.GetEigenDevice();
+    dxt.device(place) = dxt.constant(static_cast(0));
+
+    ScatterAssign(ctx.device_context(), *dO, *Index, dX);
  }
 };
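The CPU gradient kernel above zero-fills `dX` through Eigen and then scatter-assigns `dO` back through `Index`; rows of `X` that were never gathered keep a zero gradient. A reference version of that backward rule (hypothetical helper, CPU only, assuming unique indices as `ScatterAssign` requires):

```cpp
#include <algorithm>

// d(Out)/d(X) for Out = X[Index]: row i of dO flows back to row index[i]
// of dX. With duplicate indices a true gradient would accumulate, but
// assignment matches ScatterAssign's unique-index assumption.
void GatherGradReference(const float* d_out, const int* index, float* d_x,
                         int index_size, int x_rows, int slice_size) {
  std::fill(d_x, d_x + x_rows * slice_size, 0.0f);  // dxt = 0
  for (int i = 0; i < index_size; ++i) {
    std::copy(d_out + i * slice_size, d_out + (i + 1) * slice_size,
              d_x + index[i] * slice_size);
  }
}
```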
diff --git a/paddle/operators/gather_test.cc b/paddle/operators/gather_test.cc
index 0ae1e99452973feb6d085dd6ef51e2afca988f59..cbd86b87961ee24aa889e208de5ac38e03a33135 100644
--- a/paddle/operators/gather_test.cc
+++ b/paddle/operators/gather_test.cc
@@ -41,7 +41,9 @@ TEST(Gather, GatherData) {
   int* p_output = output->mutable_data(make_ddim({2, 4}), CPUPlace());
 
-  Gather(CPUPlace(), src, index, output);
+  auto* cpu_place = new paddle::platform::CPUPlace();
+  paddle::platform::CPUDeviceContext ctx(*cpu_place);
+  CPUGather(ctx, *src, *index, output);
 
   for (int i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], i + 4);
   for (int i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], i - 4);
diff --git a/paddle/operators/math/im2col_test.cc b/paddle/operators/math/im2col_test.cc
index f0b8c885918afe7f80edc465c6d9be7c11ac066f..3d040ca2b546ff66a4e8c35c3871fb6b1e92b210 100644
--- a/paddle/operators/math/im2col_test.cc
+++ b/paddle/operators/math/im2col_test.cc
@@ -71,7 +71,7 @@ void testIm2col() {
     context =
         new paddle::platform::CPUDeviceContext(paddle::platform::CPUPlace());
   } else {
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
     context =
         new paddle::platform::CUDADeviceContext(paddle::platform::GPUPlace());
 #else
@@ -116,7 +116,7 @@ void testIm2col() {
 
 TEST(math, im2col) {
   testIm2col();
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   testIm2col();
 #endif
 }
diff --git a/paddle/operators/math/math_function_test.cc b/paddle/operators/math/math_function_test.cc
index 22468a0c4a4b0aca343fe766c8c9d63393a338eb..2252268620ffc68bb1604596379a68416083918f 100644
--- a/paddle/operators/math/math_function_test.cc
+++ b/paddle/operators/math/math_function_test.cc
@@ -1,7 +1,7 @@
 #include "paddle/operators/math/math_function.h"
 #include "gtest/gtest.h"
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 TEST(math_function, notrans_mul_trans) {
   paddle::framework::Tensor input1;
   paddle::framework::Tensor input1_gpu;
diff --git a/paddle/operators/scatter.cu.h b/paddle/operators/scatter.cu.h
new file mode 100644
index 0000000000000000000000000000000000000000..d95436be4f25b9df4aaef57ddb249ecf944f0666
--- /dev/null
+++ b/paddle/operators/scatter.cu.h
@@ -0,0 +1,80 @@
+/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#pragma once
+#include "paddle/framework/tensor.h"
+#include "paddle/platform/place.h"
+
+namespace paddle {
+namespace operators {
+
+using Tensor = framework::Tensor;
+
+#define CUDA_1D_KERNEL_LOOP(i, n)                              \
+  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
+       i += blockDim.x * gridDim.x)
+
+template
+__global__ void ScatterCUDAKernel(const T* params, const int* indices,
+                                  T* output, size_t index_size,
+                                  size_t slice_size) {
+  CUDA_1D_KERNEL_LOOP(i, index_size * slice_size) {
+    int indices_i = i / slice_size;
+    int slice_i = i - indices_i * slice_size;  // offset inside the slice
+    int scatter_i = indices[indices_i];
+    int out_i = scatter_i * slice_size + slice_i;
+    *(output + out_i) = *(params + i);
+  }
+}
+
+/**
+ * A thin wrapper on gpu tensor
+ * Return a new updated tensor from source tensor, scatter-assigned according
+ * to index
+ * input[src]: type-T source Tensor
+ * input[index]: type-int index Tensor (1-D)
+ * return: output tensor
+ */
+template
+void GPUScatterAssign(const platform::DeviceContext& ctx, const Tensor& src,
+                      const Tensor& index, Tensor* output) {
+  // PADDLE_ENFORCE(platform::is_gpu_place(place));
+  // check index of shape 1-D
+  PADDLE_ENFORCE(index.dims().size() == 1);
+  int index_size = index.dims()[0];
+
+  auto src_dims = src.dims();
+  framework::DDim output_dims(src_dims);
+  output_dims[0] = index_size;
+
+  // slice size
+  int slice_size = 1;
+  for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i];
+
+  const T* p_src = src.data();
+  const int* p_index = index.data();
+  T* p_output = output->data();
+
+  int block = 512;
+  int n = slice_size * index_size;
+  int grid = (n + block - 1) / block;
+
+  ScatterCUDAKernel<<<
+      grid, block, 0,
+      reinterpret_cast(ctx).stream()>>>(
+      p_src, p_index, p_output, index_size, slice_size);
+}
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/operators/scatter.h b/paddle/operators/scatter.h
index fec1046a9748637ac41aa35143a8c2cf5528913f..c1fb844ebd2ff7ca7dbdb8e8ac3c1fff4c0c6607 100644
--- a/paddle/operators/scatter.h
+++ b/paddle/operators/scatter.h
@@ -24,63 +24,42 @@ namespace paddle {
 namespace operators {
 
 using Tensor = framework::Tensor;
 
-template
-using EigenVector = framework::EigenVector;
-
-// Implementation of CPU copy
-template
-void CPUScatterUpdate(const paddle::framework::Tensor* src, const int* index,
-                      const size_t index_size,
-                      paddle::framework::Tensor* output) {
-  paddle::framework::DDim output_dims = output->dims();
-
-  for (size_t i = 0; i < index_size; ++i) {
-    int index_ = index[i];
-
-    paddle::framework::Tensor src_ = *src;
-    paddle::framework::Tensor output_ = *output;
-    if (index_size > 1) src_ = src->Slice(i, i + 1);
-    if (output_dims[0] > 1) output_ = output->Slice(index_, index_ + 1);
-
-    auto X = EigenVector::Flatten(src_);
-    auto Y = EigenVector::Flatten(output_);
-
-    Y = X + Y;
-  }
-}
-
-// Implementation of GPU scatter:
-template
-void GPUScatterUpdate(const T* src, const int* index, const int slice_size,
-                      const int index_size, T* output);
 
 /**
  * Return a updated tensor from source tensor, scattered according to index:
- *     dst[i] += src[index[i]]
+ *     dst[index[i]] = src[i]
  * input[src]: type-T source Tensor
  * input[index]: type-int index Tensor (1-D)
 * return: output tensor
 */
 template
-void ScatterUpdate(const platform::Place& place,
-                   const paddle::framework::Tensor* src,
-                   const paddle::framework::Tensor* index,
-                   paddle::framework::Tensor* output) {
+void ScatterAssign(const platform::DeviceContext& ctx, const Tensor& src,
+                   const Tensor& index, Tensor* output) {
+  PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()));
   // check index of shape 1-D
-  PADDLE_ENFORCE(index->dims().size() == 1);
-  int index_size = index->dims()[0];
+  PADDLE_ENFORCE(index.dims().size() == 1);
+  int index_size = index.dims()[0];
 
-  auto src_dims = src->dims();
+  auto src_dims = src.dims();
   auto dst_dims = output->dims();
 
+  const T* p_src = src.data();
+  const int* p_index = index.data();
+  T* p_output = output->data();
+
   // check src shape and dst shape should match
   for (int i = 1; i < src_dims.size(); i++)
     PADDLE_ENFORCE(src_dims[i] == dst_dims[i]);
 
-  if (platform::is_cpu_place(place)) {
-    CPUScatterUpdate(src, index->data(), index_size, output);
-  } else {
+  // slice size
+  size_t slice_size = 1;
+  for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i];
+
+  const size_t slice_bytes = slice_size * sizeof(T);
+
+  for (int i = 0; i < index_size; ++i) {
+    int index_ = p_index[i];
+    memcpy(p_output + index_ * slice_size, p_src + i * slice_size,
+           slice_bytes);
   }
 }
diff --git a/paddle/operators/scatter_op.cc b/paddle/operators/scatter_op.cc
index cadd8841b6ab3a3674054240265eb6d4b474db1e..d15ba151539987c133ac57e102df53551483c6dd 100644
--- a/paddle/operators/scatter_op.cc
+++ b/paddle/operators/scatter_op.cc
@@ -97,8 +97,5 @@ Out[Index] = Ref[Index] + Updates
 namespace ops = paddle::operators;
 REGISTER_OP(scatter, ops::ScatterOp, ops::ScatterOpMaker, scatter_grad,
             ops::ScatterGradOp);
-REGISTER_OP_CPU_KERNEL(scatter,
-                       ops::ScatterOpKernel);
-REGISTER_OP_CPU_KERNEL(
-    scatter_grad,
-    ops::ScatterGradientOpKernel);
+REGISTER_OP_CPU_KERNEL(scatter, ops::ScatterOpKernel);
+REGISTER_OP_CPU_KERNEL(scatter_grad, ops::ScatterGradientOpKernel);
diff --git a/paddle/operators/scatter_op.cu b/paddle/operators/scatter_op.cu
new file mode 100644
index 0000000000000000000000000000000000000000..06f4d759447b6dcd28b50576dfc246fc466d9336
--- /dev/null
+++ b/paddle/operators/scatter_op.cu
@@ -0,0 +1,63 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#include "gather.cu.h"
+#include "paddle/operators/gather_op.h"
+#include "scatter.cu.h"
+
+namespace paddle {
+namespace operators {
+
+template
+class ScatterOpCUDAKernel : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext &ctx) const override {
+    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
+                   "This kernel only runs on GPU device.");
+    auto *Ref = ctx.Input("Ref");
+    auto *Index = ctx.Input("Index");
+    auto *Updates = ctx.Input("Updates");
+    auto *Out = ctx.Output("Out");
+
+    Out->ShareDataWith(*Ref);
+
+    GPUScatterAssign(ctx.device_context(), *Updates, *Index, Out);
+  }
+};
+
+template
+class ScatterGradOpCUDAKernel : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext &ctx) const override {
+    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
+                   "This kernel only runs on GPU device.");
+    auto *dRef = ctx.Output(framework::GradVarName("Ref"));
+    auto *dUpdates = ctx.Output(framework::GradVarName("Updates"));
+    auto *Index = ctx.Input("Index");
+    auto *dOut = ctx.Input(framework::GradVarName("Out"));
+
+    // In place gradient: dRef = dO
+    dRef->ShareDataWith(*dOut);
+    dUpdates->mutable_data(ctx.GetPlace());
+    // Gradient by Gather: dUpdates = dO[Index]
+    GPUGather(ctx.device_context(), *dOut, *Index, dUpdates);
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OP_GPU_KERNEL(scatter, ops::ScatterOpCUDAKernel);
+REGISTER_OP_GPU_KERNEL(scatter_grad, ops::ScatterGradOpCUDAKernel);
diff --git a/paddle/operators/scatter_op.h b/paddle/operators/scatter_op.h
index a8eb54399a932913de208e1ddc90a6ff0dfaa452..6101219006414e4865f676e3ca5d2a88949ad17a 100644
--- a/paddle/operators/scatter_op.h
+++ b/paddle/operators/scatter_op.h
@@ -23,10 +23,12 @@ namespace operators {
 
 using Tensor = framework::Tensor;
 
-template
+template
 class ScatterOpKernel : public framework::OpKernel {
  public:
  void Compute(const framework::ExecutionContext &ctx) const override {
+    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
+                   "This kernel only runs on CPU.");
     auto *Ref = ctx.Input("Ref");
     auto *Index = ctx.Input("Index");
     auto *Updates = ctx.Input("Updates");
@@ -35,14 +37,16 @@ class ScatterOpKernel : public framework::OpKernel {
     // In place output: Out = Ref, Out[Index] += Updates
     Out->ShareDataWith(*Ref);
     // Apply ScatterUpdate: Out[index] += Updates[:]
-    ScatterUpdate(ctx.GetPlace(), Updates, Index, Out);
+    ScatterAssign(ctx.device_context(), *Updates, *Index, Out);
  }
 };
 
-template
+template
 class ScatterGradientOpKernel : public framework::OpKernel {
  public:
  void Compute(const framework::ExecutionContext &ctx) const override {
+    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
+                   "This kernel only runs on CPU.");
     auto *dRef = ctx.Output(framework::GradVarName("Ref"));
     auto *dUpdates = ctx.Output(framework::GradVarName("Updates"));
     auto *Index = ctx.Input("Index");
@@ -52,7 +56,7 @@ class ScatterGradientOpKernel : public framework::OpKernel {
     dRef->ShareDataWith(*dOut);
     dUpdates->mutable_data(ctx.GetPlace());
     // Gradient by Gather: dUpdates += dO[Index]
-    Gather(ctx.GetPlace(), dOut, Index, dUpdates);
+    CPUGather(ctx.device_context(), *dOut, *Index, dUpdates);
  }
 };
diff --git a/paddle/operators/scatter_test.cc b/paddle/operators/scatter_test.cc
index 26fdaff1460a297fa638181641991f732533fe52..00dbdacbfef7af826790472acc6caa285c259e0e 100644
--- a/paddle/operators/scatter_test.cc
+++ b/paddle/operators/scatter_test.cc
@@ -40,7 +40,9 @@ TEST(scatter, ScatterUpdate) {
   float* p_output = output->mutable_data(make_ddim({4, 4}), CPUPlace());
 
-  ScatterUpdate(CPUPlace(), src, index, output);
+  auto* cpu_place = new paddle::platform::CPUPlace();
+  paddle::platform::CPUDeviceContext ctx(*cpu_place);
+  ScatterAssign(ctx, *src, *index, output);
 
   for (size_t i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], float(0));
   for (size_t i = 0; i < 4; ++i) EXPECT_EQ(output->data()[i], float(0));
diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc
index 8f9eae4186ad848fcecd74b4ab22711f8bb99e2a..31d491f130e36289f7dd3dc18710d35d5ba6ab34 100644
--- a/paddle/operators/sgd_op.cc
+++ b/paddle/operators/sgd_op.cc
@@ -23,19 +23,22 @@ class SGDOp : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContextBase *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("param"),
-                   "Input(param) of SGDOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("grad"),
-                   "Input(grad) of SGDOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("learning_rate"),
-                   "Input(learning_rate) of SGDOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("param_out"),
-                   "Output(param_out) of SGDOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Param"),
+                   "Input(Param) of SGDOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Grad"),
+                   "Input(Grad) of SGDOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("LearningRate"),
+                   "Input(LearningRate) of SGDOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("ParamOut"),
+                   "Output(ParamOut) of SGDOp should not be null.");
 
-    auto param_dim = ctx->GetInputDim("param");
-    PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("grad"),
+    auto lr_dims = ctx->GetInputDim("LearningRate");
+    PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
+                      "Learning rate should have 1 element");
+    auto param_dim = ctx->GetInputDim("Param");
+    PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"),
                       "Two input of SGD Op's dimension must be same.");
-    ctx->SetOutputDim("param_out", param_dim);
+    ctx->SetOutputDim("ParamOut", param_dim);
   }
 };
 
@@ -43,10 +46,10 @@ class SGDOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   SGDOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("param", "input parameter");
-    AddInput("learning_rate", "learning rate of sgd");
-    AddInput("grad", "input gradient");
-    AddOutput("param_out", "output parameter");
+    AddInput("Param", "Input parameter");
+    AddInput("LearningRate", "Learning rate of SGD");
+    AddInput("Grad", "Input gradient");
+    AddOutput("ParamOut", "Output parameter");
     AddComment(R"DOC(
 
 Simplest sgd algorithm.
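`LearningRate` is now a real input, a 1-element tensor rather than a raw float, which is why `InferShape` checks `product(lr_dims) == 1` and the kernel reads `data()[0]`. The update the kernel computes, restated as a plain reference (illustrative only, not the patch's Eigen implementation):

```cpp
// ParamOut = Param - LearningRate[0] * Grad; lr arrives as a 1-element
// tensor so it can be fed and scheduled like any other variable.
void SgdReference(const float* param, const float* grad, const float* lr,
                  float* param_out, int n) {
  for (int i = 0; i < n; ++i) {
    param_out[i] = param[i] - lr[0] * grad[i];
  }
}
```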
diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h
index 977d201ced31c498c2ab41cf6d412756cabb3aee..d72d333a9a03df17b34356090ca420cbe75fa58f 100644
--- a/paddle/operators/sgd_op.h
+++ b/paddle/operators/sgd_op.h
@@ -28,10 +28,10 @@ template
 class SGDOpKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    auto param = ctx.Input("param");
-    auto grad = ctx.Input("grad");
-    auto param_out = ctx.Output("param_out");
-    float lr = *ctx.Input("learning_rate");
+    auto param = ctx.Input("Param");
+    auto grad = ctx.Input("Grad");
+    auto param_out = ctx.Output("ParamOut");
+    float lr = ctx.Input("LearningRate")->data()[0];
 
     param_out->mutable_data(ctx.GetPlace());
 
diff --git a/paddle/operators/strided_memcpy_test.cc b/paddle/operators/strided_memcpy_test.cc
index 05882a88738cfc9cc23480efe0afe504008377ca..e0dd7b19f14f04cb1ef6f3e9b06fcb20d6aebc6b 100644
--- a/paddle/operators/strided_memcpy_test.cc
+++ b/paddle/operators/strided_memcpy_test.cc
@@ -72,7 +72,7 @@ TEST(StridedMemcpy, CPUConcat) {
   }
 }
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 TEST(StridedMemcpy, GPUCrop) {
   // clang-format off
   int src[] = {
diff --git a/paddle/platform/device_context.cc b/paddle/platform/device_context.cc
index 36af1ac677f6bb3e5b6392ff0de678afe7e47950..8dcc357a16a057416b7e86a9de8ee5fee8cfcfc3 100644
--- a/paddle/platform/device_context.cc
+++ b/paddle/platform/device_context.cc
@@ -35,7 +35,7 @@ Eigen::DefaultDevice* CPUDeviceContext::eigen_device() const {
 
 Place CPUDeviceContext::GetPlace() const { return CPUPlace(); }
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 
 template <>
 Eigen::GpuDevice*
diff --git a/paddle/platform/device_context.h b/paddle/platform/device_context.h
index d805d2ab085f76e119edf1c6f2acb9715883d755..c1c4c7f7600b7e404c5b8db42172d87dc7cf7342 100644
--- a/paddle/platform/device_context.h
+++ b/paddle/platform/device_context.h
@@ -14,7 +14,7 @@ limitations under the License. */
 #include "paddle/platform/enforce.h"
 #include "paddle/platform/place.h"
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 #include "paddle/platform/dynload/cublas.h"
 #include "paddle/platform/dynload/cudnn.h"
 #include "paddle/platform/gpu_info.h"
@@ -61,7 +61,7 @@ class CPUDeviceContext : public DeviceContext {
   std::unique_ptr eigen_device_;
 };
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 template <>
 struct EigenDeviceConverter {
   using EigenDeviceType = Eigen::GpuDevice;
diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h
index 52bd23039b82fc0290edb640f5d5851ba9abee61..f9fe521d50729fdb20d8be45a89b81fd8f0f4564 100644
--- a/paddle/platform/enforce.h
+++ b/paddle/platform/enforce.h
@@ -29,7 +29,7 @@ limitations under the License. */
 #include   // for __cxa_demangle
 #endif
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 
 #include "paddle/platform/dynload/cublas.h"
 #include "paddle/platform/dynload/cudnn.h"
 
@@ -113,7 +113,7 @@ inline typename std::enable_if::type throw_on_error(
   }
 }
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 
 template
 inline typename std::enable_if::type throw_on_error(
diff --git a/paddle/platform/gpu_info.h b/paddle/platform/gpu_info.h
index f0c825bd9b0bc41396b8fdb95f0b4337cbe3db02..ac884386dde1f9e6ec3433bd46705f78cfd6b1e6 100644
--- a/paddle/platform/gpu_info.h
+++ b/paddle/platform/gpu_info.h
@@ -14,7 +14,7 @@ limitations under the License. */
 
 #pragma once
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 
 #include
 #include
diff --git a/paddle/platform/variant.h b/paddle/platform/variant.h
index 16ee00efe7a9b0406f8459e19a55e1e1b9ca7419..8145799dfdc6d88959c65904621e4820f5e06c9b 100644
--- a/paddle/platform/variant.h
+++ b/paddle/platform/variant.h
@@ -16,7 +16,7 @@
 
 #include
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 
 // Because boost's variadic templates has bug on nvcc, boost will disable
 // variadic template support when GPU enabled on nvcc.
diff --git a/paddle/pserver/test/SocketTest.cpp b/paddle/pserver/test/SocketTest.cpp
index 6f6c9e596cfb7a2547d5b6c5de69381eb9c29132..96724530f5e1adb3135d9931d4b258d0ccf1c30d 100644
--- a/paddle/pserver/test/SocketTest.cpp
+++ b/paddle/pserver/test/SocketTest.cpp
@@ -215,7 +215,7 @@ int main(int argc, char** argv) {
 
   uint64_t dataSize = FLAGS_dim * sizeof(real);
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   GpuVector gpuParam(FLAGS_dim);
   GpuVector gpuGrad(FLAGS_dim);
 #else
diff --git a/paddle/pserver/test/test_ProtoServer.cpp b/paddle/pserver/test/test_ProtoServer.cpp
index 04236fda2fb62b928b5c06ff38acfd3eb7217b08..74ab1f2f77ec503c23ac81b9e2aaf60d21342b0b 100644
--- a/paddle/pserver/test/test_ProtoServer.cpp
+++ b/paddle/pserver/test/test_ProtoServer.cpp
@@ -99,7 +99,7 @@ TEST(ProtoServer, regular) {
 }
 
 TEST(ProtoServer, extended) {
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   ProtoClient* client;
   if (FLAGS_rdma_tcp == "rdma")
     client = new ProtoClient(FLAGS_server_addr, FLAGS_port, F_RDMA);
diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index d480427f593ccba20ef6555809a524654d0e54b8..761d82fc4d0c5bbc8ebc8a88e1a4d6745fb564bd 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -34,7 +34,7 @@ static size_t UniqueIntegerGenerator() {
 }
 
 bool IsCompileGPU() {
-#ifdef PADDLE_ONLY_CPU
+#ifndef PADDLE_WITH_GPU
   return false;
 #else
   return true;
@@ -78,7 +78,7 @@ PYBIND11_PLUGIN(core) {
       .def("set", PyCPUTensorSetFromArray)
       .def("set", PyCPUTensorSetFromArray)
       .def("set", PyCPUTensorSetFromArray)
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
       .def("set", PyCUDATensorSetFromArray)
      .def("set", PyCUDATensorSetFromArray)
      .def("set", PyCUDATensorSetFromArray)
@@ -96,7 +96,7 @@ PYBIND11_PLUGIN(core) {
      .def(
          "__init__",
          [](LoDTensor &instance, const std::vector> &lod) {
-#ifdef PADDLE_ONLY_CPU
+#ifndef PADDLE_WITH_GPU
            new (&instance) LoDTensor(lod);
 #else
            LoD new_lod;
@@ -107,7 +107,7 @@ PYBIND11_PLUGIN(core) {
          })
      .def("set_lod",
           [](LoDTensor &self, const std::vector> &lod) {
-#ifdef PADDLE_ONLY_CPU
+#ifndef PADDLE_WITH_GPU
             self.set_lod(lod);
 #else
             LoD new_lod;
@@ -117,7 +117,7 @@ PYBIND11_PLUGIN(core) {
 #endif
          })
      .def("lod", [](LoDTensor &self) -> std::vector> {
-#ifdef PADDLE_ONLY_CPU
+#ifndef PADDLE_WITH_GPU
        return self.lod();
 #else
        auto lod = self.lod();
@@ -203,7 +203,7 @@ All parameter, weight, gradient are variables in Paddle.
      .def_static("create",
                  [](paddle::platform::GPUPlace& place)
                      -> paddle::platform::DeviceContext* {
-#ifdef PADDLE_ONLY_CPU
+#ifndef PADDLE_WITH_GPU
                    PADDLE_THROW("GPUPlace is not supported in CPU device.");
 #else
                    return new paddle::platform::CUDADeviceContext(place);
diff --git a/paddle/pybind/tensor_py.h b/paddle/pybind/tensor_py.h
index 3e3e6bc0312974fab50e17d428c7dea9ca547d1e..62e85fa54fba7ddbd017f3b12f7abdfae8a7c48c 100644
--- a/paddle/pybind/tensor_py.h
+++ b/paddle/pybind/tensor_py.h
@@ -106,7 +106,7 @@ void PyCPUTensorSetFromArray(
   std::memcpy(dst, array.data(), sizeof(T) * array.size());
 }
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 template
 void PyCUDATensorSetFromArray(
     framework::Tensor &self,
diff --git a/paddle/trainer/MergeModel.cpp b/paddle/trainer/MergeModel.cpp
index 91d89b61a32259b8bbe70fda2579f87ec6b9af00..a37d53bc724777a537b7706892ca84fd354c3b33 100644
--- a/paddle/trainer/MergeModel.cpp
+++ b/paddle/trainer/MergeModel.cpp
@@ -29,7 +29,7 @@ int main(int argc, char** argv) {
   initMain(argc, argv);
   initPython(argc, argv);
   string confFile = TrainerConfigHelper::getConfigNameFromPath(FLAGS_model_dir);
-#ifdef PADDLE_ONLY_CPU
+#ifndef PADDLE_WITH_GPU
   FLAGS_use_gpu = false;
 #endif
   auto config = std::make_shared(confFile);
diff --git a/paddle/trainer/tests/test_Compare.cpp b/paddle/trainer/tests/test_Compare.cpp
index e855a8fe2e09aa0f16a73f3e7bcc2f32921092f8..b5d29da45af2a40962d6a2800243a980bd29d28d 100644
--- a/paddle/trainer/tests/test_Compare.cpp
+++ b/paddle/trainer/tests/test_Compare.cpp
@@ -146,7 +146,7 @@ void compareGradient(comData& comDataCpu, comData& comDataGpu) {
 }
 
 int main(int argc, char** argv) {
-#ifdef PADDLE_ONLY_CPU
+#ifndef PADDLE_WITH_GPU
   exit(0);
 #endif
   paddle::initMain(argc, argv);
diff --git a/paddle/trainer/tests/test_CompareSparse.cpp b/paddle/trainer/tests/test_CompareSparse.cpp
index 813275518e411d6e963e23df634541f771096e0f..4da9ce20fb3f91b4763c3e9f651f93364d84233e 100644
--- a/paddle/trainer/tests/test_CompareSparse.cpp
+++ b/paddle/trainer/tests/test_CompareSparse.cpp
@@ -174,7 +174,7 @@ TEST(compareSparse, multiGradientMachine) {
   FLAGS_local = local;
   FLAGS_ports_num_for_sparse = 5;
   for (bool useGpu : {false, true}) {
-#ifdef PADDLE_ONLY_CPU
+#ifndef PADDLE_WITH_GPU
     if (useGpu) continue;
 #endif
     FLAGS_parallel_nn = useGpu;
@@ -198,7 +198,7 @@ TEST(compareSparse, NeuralNetwork) {
   FLAGS_local = local;
   FLAGS_ports_num_for_sparse = 5;
   for (bool useGpu : {false, true}) {
-#ifdef PADDLE_ONLY_CPU
+#ifndef PADDLE_WITH_GPU
     if (useGpu) continue;
 #endif
     FLAGS_parallel_nn = useGpu;
diff --git a/paddle/trainer/tests/test_Trainer.cpp b/paddle/trainer/tests/test_Trainer.cpp
index 264bc46ebcd0aa17fd605e537fcb2c316ef31162..f69e1aafeeebbdfc847585dfe071a44689a5b019 100644
--- a/paddle/trainer/tests/test_Trainer.cpp
+++ b/paddle/trainer/tests/test_Trainer.cpp
@@ -51,7 +51,7 @@ void checkGradientTest(const string& configFile,
 
 TEST(checkGradient, cpu) { checkGradientTest(configFile1, false, false); }
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 TEST(checkGradient, gpu) { checkGradientTest(configFile1, true, false); }
 
 TEST(checkGradient, multiGpu) {
@@ -97,7 +97,7 @@ TEST(checkGradient, hsigmoid) { checkGradientTest(configFile2, false, false); }
 
 TEST(checkGradient, chunk) {
   checkGradientTest(configFile3, false, false);
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
   checkGradientTest(configFile3, true, true);
 #endif
 }
diff --git a/paddle/trainer/tests/test_TrainerOnePass.cpp b/paddle/trainer/tests/test_TrainerOnePass.cpp
index 00ba61377aeff17d82e03f7560c0d71b3570d14f..4c4d124fa9d95bc3def0700b97e30cd2558a009c 100644
--- a/paddle/trainer/tests/test_TrainerOnePass.cpp
+++ b/paddle/trainer/tests/test_TrainerOnePass.cpp
@@ -79,7 +79,7 @@ void trainerOnePassTest(const string& configFile,
 // 1. test trainer (cpu, gpu).
 TEST(trainerOnePass, cpu) { trainerOnePassTest(configFile1, false, false); }
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 TEST(trainerOnePass, gpu) { trainerOnePassTest(configFile1, true, false); }
 
 TEST(trainerOnePass, gpu2) { trainerOnePassTest(configFile1, true, false, 2); }
@@ -94,7 +94,7 @@ TEST(trainerOnePass, parallel) {
 #endif
 
 // 2. test average_window.
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 TEST(average_window, gpu) {
   trainerOnePassTest(configFile1, true, false, 4, 0.01);
 }
@@ -266,7 +266,7 @@ TEST(checkRemoteUpdater, cpuTrainerOldUpdater) {
   checkRemoteParameterUpdaterTest(configFile1, false, false, 1, true);
 }
 
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
 TEST(checkRemoteUpdater, gpuTrainer) {
   checkRemoteParameterUpdaterTest(configFile1, true, false);
 }
diff --git a/paddle/trainer/tests/test_recurrent_machine_generation.cpp b/paddle/trainer/tests/test_recurrent_machine_generation.cpp
index 1322e77178a4f5674f41943f886a17be8337bd75..74b4fed7edd62ff24282f614b3886ee4cafea630 100644
--- a/paddle/trainer/tests/test_recurrent_machine_generation.cpp
+++ b/paddle/trainer/tests/test_recurrent_machine_generation.cpp
@@ -113,7 +113,7 @@ void testGeneration(const string& configFile,
 #ifndef PADDLE_TYPE_DOUBLE
 
 TEST(RecurrentGradientMachine, test_generation) {
-#ifdef PADDLE_ONLY_CPU
+#ifndef PADDLE_WITH_GPU
   const auto useGpuConfs = {false};
 #else
   const auto useGpuConfs = {true, false};
diff --git a/paddle/utils/Flags.cpp b/paddle/utils/Flags.cpp
index ab1c181c62cdbee8cc5f804ec9aaf63ac5464ad6..32155ded3574dae2ff4fc5a3ff327927435ef296 100644
--- a/paddle/utils/Flags.cpp
+++ b/paddle/utils/Flags.cpp
@@ -14,7 +14,7 @@ limitations under the License. */
 
 #include "Flags.h"
 
-#ifdef PADDLE_ONLY_CPU
+#ifndef PADDLE_WITH_GPU
 DEFINE_bool(use_gpu, false, "Only support CPU training");
 #else
 DEFINE_bool(use_gpu, true, "Whether to use GPU for training");
diff --git a/paddle/utils/Util.h b/paddle/utils/Util.h
index 22ce2534d3468ded36221810aa61c15b37f13f3d..904d0f506150d8279945b5942de27d1736b7c992 100644
--- a/paddle/utils/Util.h
+++ b/paddle/utils/Util.h
@@ -218,7 +218,7 @@ protected:
 * *d2* is peer device to enable direct access to by the d1 device.
 */
inline void enablePeerAccess(int d1, int d2) {
-#ifndef PADDLE_ONLY_CPU
+#ifdef PADDLE_WITH_GPU
  if (hl_device_can_access_peer(d1, d2)) {
    SetDevice dev(d1);
    hl_device_enable_peer_access(d2);
diff --git a/paddle/utils/Version.h b/paddle/utils/Version.h
index f53d6420bbbdf66f8f355af95c6b11c30a3bfab9..611fda83d98fd22f81aff5c5d588841ad490eff7 100644
--- a/paddle/utils/Version.h
+++ b/paddle/utils/Version.h
@@ -48,7 +48,7 @@ void printVersion(std::ostream& os);
 * @return return true if paddle compiled with GPU
 */
constexpr bool isWithGpu() {
-#ifdef PADDLE_ONLY_CPU
+#ifndef PADDLE_WITH_GPU
  return false;
 #else
  return true;
diff --git a/python/paddle/v2/framework/tests/test_scatter_op.py b/python/paddle/v2/framework/tests/test_scatter_op.py
index 33c73c52631a09ea0fefdeb9467991ae9c04321c..1032269d5dfb02e3518b9ef2820d5d0dcc8a51a0 100644
--- a/python/paddle/v2/framework/tests/test_scatter_op.py
+++ b/python/paddle/v2/framework/tests/test_scatter_op.py
@@ -10,7 +10,7 @@ class TestScatterOp(OpTest):
         index_np = np.array([1, 2]).astype("int32")
         updates_np = np.random.random((2, 3)).astype("float32")
         output_np = np.copy(ref_np)
-        output_np[index_np] += updates_np
+        output_np[index_np] = updates_np
         self.inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np}
         self.outputs = {'Out': output_np}
 
@@ -18,7 +18,7 @@ class TestScatterOp(OpTest):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['Updates', 'Ref'], 'Out', in_place=True)
+        self.check_grad(['Updates'], 'Out', in_place=True)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/v2/framework/tests/test_sgd_op.py b/python/paddle/v2/framework/tests/test_sgd_op.py
index f1125f4edb5248abb2a0128a7a8b8b3647ed3317..2dd881e5e107249277a91bd8e3a72567269e1cd4 100644
--- a/python/paddle/v2/framework/tests/test_sgd_op.py
+++ b/python/paddle/v2/framework/tests/test_sgd_op.py
@@ -8,10 +8,10 @@ class TestSGDOp(OpTest):
         self.op_type = "sgd"
         w = np.random.random((102, 105)).astype("float32")
         g = np.random.random((102, 105)).astype("float32")
-        lr = 0.1
+        lr = np.array([0.1]).astype("float32")
 
-        self.inputs = {'param': w, 'grad': g, 'learning_rate': lr}
-        self.outputs = {'param_out': w - lr * g}
+        self.inputs = {'Param': w, 'Grad': g, 'LearningRate': lr}
+        self.outputs = {'ParamOut': w - lr * g}
 
     def test_check_output(self):
         self.check_output()