diff --git a/.travis.yml b/.travis.yml index 8c8c6699d3d9abddd65a3a224c2bceedc7d88348..b4b83fcdbc84ce0fb0c91c816ebc3c964acfa590 100644 --- a/.travis.yml +++ b/.travis.yml @@ -37,8 +37,8 @@ before_install: - if [[ "$JOB" == "check_style" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi # Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python # protobuf version. - - pip install numpy wheel 'protobuf==3.1' sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit requests==2.9.2 LinkChecker - - pip install rarfile nltk==3.2.2 scipy==0.19.0 recordio matplotlib Pillow + - pip install -r $TRAVIS_BUILD_DIR/python/requirements.txt + - pip install wheel sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit LinkChecker - curl https://glide.sh/get | bash - eval "$(GIMME_GO_VERSION=1.8.3 gimme)" - go get -u github.com/alecthomas/gometalinter diff --git a/doc/getstarted/build_and_install/docker_install_cn.rst b/doc/getstarted/build_and_install/docker_install_cn.rst index 02b96bb413156786db6dc77696c5640b97c10aa4..84e33177740ca1652efc09c8081c2519b4366906 100644 --- a/doc/getstarted/build_and_install/docker_install_cn.rst +++ b/doc/getstarted/build_and_install/docker_install_cn.rst @@ -74,13 +74,13 @@ PaddlePaddle发布新版本的时候都会发布对应版本的生产镜像以 .. code-block:: bash - docker run -it --rm paddlepaddle/paddle:0.10.0-dev /bin/bash + docker run -it --rm -v $(pwd):/paddle paddlepaddle/paddle:0.10.0-dev /bin/bash 或者,可以以后台进程方式运行容器: .. code-block:: bash - docker run -d -p 2202:22 -p 8888:8888 paddledev/paddle:0.10.0-dev + docker run -d -p 2202:22 -p 8888:8888 -v $(pwd):/paddle paddlepaddle/paddle:0.10.0-dev /usr/sbin/sshd -D 然后用密码 :code:`root` SSH进入容器: diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 9142ecb02a741d74569566e9b040e974675c7873..03985260241689a099ae9ebc136bd04831a44167 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -7,7 +7,7 @@ cc_library(tensor SRCS tensor.cc DEPS ddim place paddle_memory device_context) cc_test(tensor_test SRCS tensor_test.cc DEPS tensor) cc_test(eigen_test SRCS eigen_test.cc DEPS tensor) -cc_library(lod_tensor SRCS lod_tensor.cc details/lod_tensor.cc DEPS ddim place tensor) +cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor) cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor) cc_test(variable_test SRCS variable_test.cc) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index d7cb1787065f72463f6cb05416612c7787788bb4..ebe52d5f284a8d271b666483001544a805d598ac 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -39,9 +39,9 @@ class RowWiseAddOpMaker : public OpProtoAndCheckerMaker { public: RowWiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "Input X of Add").IgnoreGradient(); - AddInput("b", "Bias of Add").IgnoreGradient(); - AddOutput("Out", "Out of Add").IgnoreGradient(); + AddInput("X", "Input X of Add").AsNoGradient(); + AddInput("b", "Bias of Add").AsNoGradient(); + AddOutput("Out", "Out of Add").AsNoGradient(); AddComment("Add Op"); } }; @@ -112,8 +112,8 @@ class FcOpMaker : public OpProtoAndCheckerMaker { AddInput("X", "x"); AddInput("W", "w"); AddInput("b", "b"); - AddOutput("mul_result", "").SetTemporary(); - AddOutput("add_result", "").SetTemporary(); + AddOutput("mul_result", "").AsIntermediate(); + AddOutput("add_result", 
"").AsIntermediate(); AddOutput("Out", ""); AddComment(""); } @@ -144,7 +144,7 @@ class AddOpMaker : public OpProtoAndCheckerMaker { public: AddOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "x").SetMultiple(); + AddInput("X", "x").AsDuplicable(); AddOutput("Y", "y"); AddComment(""); } @@ -393,18 +393,20 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { auto bwd_net = static_cast(backward.get()); ASSERT_EQ(bwd_net->ops_.size(), 3UL); auto &grad_fc = *bwd_net->ops_[0]; - EXPECT_EQ(grad_fc.inputs_["all"].size(), + + const char *all = paddle::operators::NetOp::kAll; + EXPECT_EQ(grad_fc.inputs_[all].size(), 2UL /* external input number */ + 1UL /* external output number*/ + 1UL /* number of gradient of external output*/ + 2U /* internal variable number*/); - EXPECT_EQ(grad_fc.outputs_["all"].size(), + EXPECT_EQ(grad_fc.outputs_[all].size(), 2UL /* input number of mul*/ + 2UL /* input number of rowwise_add */ + 1UL /* input number of sigmod */); - EXPECT_EQ(bwd_net->ops_[1]->inputs_["all"].size(), 0UL); - EXPECT_EQ(bwd_net->ops_[1]->outputs_["all"].size(), 0UL); - EXPECT_EQ(bwd_net->ops_[2]->inputs_["all"].size(), 0UL); - EXPECT_EQ(bwd_net->ops_[2]->outputs_["all"].size(), 0UL); + EXPECT_EQ(bwd_net->ops_[1]->inputs_[all].size(), 0UL); + EXPECT_EQ(bwd_net->ops_[1]->outputs_[all].size(), 0UL); + EXPECT_EQ(bwd_net->ops_[2]->inputs_[all].size(), 0UL); + EXPECT_EQ(bwd_net->ops_[2]->outputs_[all].size(), 0UL); } diff --git a/paddle/framework/ddim.cc b/paddle/framework/ddim.cc index 0b76a4fdb701ea7435d0da865e928b8da15335c9..cfd3e8dfdec0e92620aef5cd246b4622b779ce19 100644 --- a/paddle/framework/ddim.cc +++ b/paddle/framework/ddim.cc @@ -283,12 +283,5 @@ std::ostream& operator<<(std::ostream& os, const DDim& ddim) { DDim::DDim(std::initializer_list init_list) { *this = make_ddim(init_list); } - -std::string DDim::DebugString() const { - std::ostringstream ss; - ss << *this; - return ss.str(); -} - } // namespace framework } // namespace paddle diff --git a/paddle/framework/ddim.h b/paddle/framework/ddim.h index 1627bcb269258bf8e3d9db86c716ea8c4b7406eb..95f294b62737be5c3eac39303148ac35da29fe7d 100644 --- a/paddle/framework/ddim.h +++ b/paddle/framework/ddim.h @@ -72,8 +72,6 @@ struct DDim { DDim operator*(DDim d) const; ssize_t size() const; - - std::string DebugString() const; }; /** diff --git a/paddle/framework/details/lod_tensor.cc b/paddle/framework/details/lod_tensor.cc deleted file mode 100644 index 9ad3979e5b511517f75d2d43004f97ee1576953b..0000000000000000000000000000000000000000 --- a/paddle/framework/details/lod_tensor.cc +++ /dev/null @@ -1,62 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ - -#include "paddle/framework/lod_tensor.h" - -#include - -namespace paddle { -namespace framework { -namespace details { - -using LOD = LODTensor::LOD; - -std::shared_ptr SliceLOD(const LOD &lod, size_t level_begin, - size_t level_end) { - auto new_lod = std::make_shared(); - new_lod->reserve(level_end - level_begin); - for (size_t i = level_begin; i < level_end; i++) { - new_lod->emplace_back(lod[i]); - } - return new_lod; -} - -std::shared_ptr SliceLOD(const LOD &lod, size_t level, size_t elem_begin, - size_t elem_end, bool tensor_shared) { - // slice the lod. - auto new_lod = std::make_shared(); - new_lod->reserve(lod.size() - level); - auto start = lod.at(level)[elem_begin]; - auto end = lod.at(level)[elem_end]; - - for (auto it = lod.begin() + level; it != lod.end(); it++) { - auto it_begin = std::find(it->begin(), it->end(), start); - auto it_end = std::find(it_begin, it->end(), end); - PADDLE_ENFORCE(it_begin != it->end(), "error in parsing lod info"); - PADDLE_ENFORCE(it_end != it->end(), "error in parsing lod info"); - new_lod->emplace_back(it_begin, it_end + 1); - if (!tensor_shared) { - // reset offset if tensor is copyed and sliced. - std::transform(new_lod->back().begin(), new_lod->back().end(), - new_lod->back().begin(), - [start](int v) { return v - start; }); - PADDLE_ENFORCE(new_lod->back().front() == 0, "error in slice LOD"); - } - } - return new_lod; -} - -} // namespace details -} // namespace framework -} // namespace paddle diff --git a/paddle/framework/details/lod_tensor.h b/paddle/framework/details/lod_tensor.h deleted file mode 100644 index 9a6a6cd2ea41f02db991bdc0a2b917433dafed99..0000000000000000000000000000000000000000 --- a/paddle/framework/details/lod_tensor.h +++ /dev/null @@ -1,46 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ - -#pragma once - -#include - -namespace paddle { -namespace framework { -namespace details { - -/* - * Slice levels from LOD. - * - * @lod: LOD to slice. - * @level_begin: level to begin slice. - * @level_end: level to end slice. - */ -std::shared_ptr SliceLOD(const LODTensor::LOD &lod, - size_t level_begin, size_t level_end); - -/* - * Slice elements from a level of LOD. - * - * @lod: LOD to slice. - * @level: which level to slice. - * @elem_begin: element's index to begin slice. - * @elem_end: element's index to end slice. - */ -std::shared_ptr SliceLOD(const LODTensor::LOD &lod, - size_t level, size_t elem_begin, - size_t elem_end, bool tensor_shared); -} // namespace details -} // namespace framework -} // namespace paddle diff --git a/paddle/framework/grad_op_builder.cc b/paddle/framework/grad_op_builder.cc index c2855d3a589a8a0aea43e59acd671925b5506c8e..21bc30d1fbdae31548547bccf39e78fe16eedfaa 100644 --- a/paddle/framework/grad_op_builder.cc +++ b/paddle/framework/grad_op_builder.cc @@ -18,9 +18,6 @@ permissions and limitations under the License. 
*/ namespace paddle { namespace framework { - -class OpRegistry; - enum class OpArgType { IN, OUT }; static void TransOpArg(const OperatorBase* src_op, diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc index a351e86c5d10e8471285cf29e269b67d2d51dcaa..ebaf84545fce0d281d8821861264cddc8854893d 100644 --- a/paddle/framework/grad_op_builder_test.cc +++ b/paddle/framework/grad_op_builder_test.cc @@ -21,10 +21,10 @@ class MutiInOutOpMaker : public OpProtoAndCheckerMaker { MutiInOutOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("In1", "a single input"); - AddInput("In2_mult", "a multiple input").SetMultiple(); + AddInput("In2_mult", "a multiple input").AsDuplicable(); AddInput("In3", "another single input"); AddOutput("Out1", "a single output"); - AddOutput("Out2_mult", "a multiple output").SetMultiple(); + AddOutput("Out2_mult", "a multiple output").AsDuplicable(); AddComment("test op with multiple inputs and outputs"); } }; @@ -34,10 +34,10 @@ class IOIgnoredOpMaker : public OpProtoAndCheckerMaker { IOIgnoredOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("In1", "a single input"); - AddInput("In2_mult", "a multiple input").SetMultiple().IgnoreGradient(); - AddInput("In3_mult", "another multiple input").SetMultiple(); - AddOutput("Out1_mult", "a multiple output").SetMultiple(); - AddOutput("Out2", "a single output").IgnoreGradient(); + AddInput("In2_mult", "a multiple input").AsDuplicable().AsNoGradient(); + AddInput("In3_mult", "another multiple input").AsDuplicable(); + AddOutput("Out1_mult", "a multiple output").AsDuplicable(); + AddOutput("Out2", "a single output").AsNoGradient(); AddComment("op with inputs and outputs ignored in gradient calculating"); } }; diff --git a/paddle/framework/lod_tensor.cc b/paddle/framework/lod_tensor.cc index 70045dbf7afd0935e4df852b2f0e3ecd163a9316..2b178907747b3911292b070b65160a24c120b726 100644 --- a/paddle/framework/lod_tensor.cc +++ b/paddle/framework/lod_tensor.cc @@ -19,32 +19,59 @@ namespace paddle { namespace framework { -LODTensor LODTensor::SliceShared(size_t level_begin, size_t level_end) const { - PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced."); - auto new_lod = details::SliceLOD(*lod_start_pos_, level_begin, level_end); - // slice levels just need to update LOD info, each level will contains the - // whole tensor_, so no need to modify tensor_. - return LODTensor(tensor_, new_lod); +LODTensor::LOD LODTensor::LOD::SliceLevels(size_t level_begin, + size_t level_end) const { + LOD new_lod; + new_lod.reserve(level_end - level_begin); + for (size_t i = level_begin; i < level_end; i++) { + new_lod.emplace_back(at(i)); + } + return new_lod; } -LODTensor LODTensor::SliceShared(size_t level, size_t elem_begin, - size_t elem_end) const { - PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced."); - PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level, - NumLevels()); - PADDLE_ENFORCE(elem_begin < NumElements(level), - "element begin [%d] out of range [%d]", elem_begin, - NumElements(level)); - PADDLE_ENFORCE(elem_end < NumElements(level) + 1, - "element end [%d] out of range [%d]", elem_end, - NumElements(level)); - - auto new_lod = details::SliceLOD(*lod_start_pos_, level, elem_begin, elem_end, - true /*tensor_shared*/); - - // slice elements just need to update LOD info, because offsets are not - // changed, so the original tensor_ can be reused. 
- return LODTensor(tensor_, new_lod); +LODTensor::LOD LODTensor::LOD::SliceInLevel(size_t level, size_t elem_begin, + size_t elem_end) const { + // slice the lod. + LOD new_lod; + new_lod.reserve(size() - level); + auto start = this->at(level)[elem_begin]; + auto end = this->at(level)[elem_end]; + + for (auto it = this->begin() + level; it != this->end(); it++) { + auto it_begin = std::find(it->begin(), it->end(), start); + auto it_end = std::find(it_begin, it->end(), end); + PADDLE_ENFORCE(it_begin != it->end(), "error in parsing lod info"); + PADDLE_ENFORCE(it_end != it->end(), "error in parsing lod info"); + new_lod.emplace_back(it_begin, it_end + 1); + // reset offset if tensor is copyed and sliced. + std::transform(new_lod.back().begin(), new_lod.back().end(), + new_lod.back().begin(), + [start](int v) { return v - start; }); + PADDLE_ENFORCE_EQ(new_lod.back().front(), 0, "error in slice LOD"); + } + PADDLE_ENFORCE_LE(new_lod.size(), this->size()); + return new_lod; +} + +bool operator==(const LODTensor::LOD& a, const LODTensor::LOD& b) { + if (a.size() != b.size()) { + return false; + } + + for (size_t i = 0; i < a.size(); i++) { + const auto& a_level = a[i]; + const auto& b_level = b[i]; + if (a_level.size() != b_level.size()) { + return false; + } + for (size_t j = 0; j < a_level.size(); j++) { + if (a_level[j] != b_level[j]) { + return false; + } + } + } + + return true; } } // namespace framework diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index 4933479b109694312e99595dc8ad6db70259efa6..9e27aec38d336db8a4f0adbed098d299aa741356 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -15,7 +15,7 @@ #pragma once #include -#if (!PADDLE_ONLY_CPU) +#if !defined(PADDLE_ONLY_CPU) #include #include #endif @@ -31,30 +31,29 @@ namespace framework { * LODTensor (Level of details Tensor) * see https://en.wikipedia.org/wiki/Level_of_details for reference. */ -class LODTensor { +class LODTensor : public Tensor { public: // Level save offsets of each unit. #ifdef PADDLE_ONLY_CPU - using Level = std::vector; + template + using Vector = std::vector; #else - using Level = thrust::device_vector; + template + using Vector = thrust::host_vector; #endif - // LOD stores offsets of each level of units, the largest units level first, + // LoD stores offsets of each level of units, the largest units level first, // then the smaller units level. Each Level stores the offsets of units in // Tesor. - typedef std::vector LOD; + class LOD : public std::vector> { + public: + LOD SliceLevels(size_t level_begin, size_t level_end) const; + LOD SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) const; + }; LODTensor() {} - LODTensor(const std::shared_ptr &tensor, - const std::shared_ptr &lod) { - Reset(tensor, lod); - } + explicit LODTensor(const LOD &lod) : lod_(lod) {} - void Reset(const std::shared_ptr &tensor, - const std::shared_ptr &lod) { - tensor_ = tensor; - lod_start_pos_ = lod; - } + virtual Tensor *Clone() const { return new LODTensor(lod_); } /* * Get a element from LOD. @@ -65,16 +64,14 @@ class LODTensor { PADDLE_ENFORCE(elem < NumElements(level), "element begin [%d] out of range [%d]", elem, NumElements(level)); - return (*lod_start_pos_)[level][elem]; + return (lod_)[level][elem]; } /* * Number of LODTensor's levels, each level has units of data, for example, * in the sentence's view, article, paragraph, sentence are 3 levels. */ - size_t NumLevels() const { - return lod_start_pos_ ? 
lod_start_pos_->size() : 0UL; - } + size_t NumLevels() const { return lod_.size(); } /* * Number of elements in a level. */ @@ -82,64 +79,71 @@ class LODTensor { PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level, NumLevels()); // the last offset is the end of last element - return lod_start_pos_->at(level).size() - 1; + return lod_[level].size() - 1; } - /* - * Slice of levels[level_begin:level_end], with tensor copied. - */ - template - LODTensor SliceCopied(size_t level_begin, size_t level_end, - const platform::Place &dst_place) const; - /* * Slice of levels[level_begin:level_end], with tensor shared. */ - LODTensor SliceShared(size_t level_begin, size_t level_end) const; - - /* - * Slice of elements of a level, [elem_begin: elem_end], with tensor copied. - * @note: low performance in slice lod_start_pos_. - */ template - LODTensor SliceCopied(size_t level, size_t elem_begin, size_t elem_end, - const platform::Place &dst_place) const; + LODTensor SliceLevels(size_t level_begin, size_t level_end) const; /* * Slice of elements of a level, [elem_begin: elem_end], with tensor shared. - * @note: low performance in slice lod_start_pos_. - */ - LODTensor SliceShared(size_t level, size_t elem_begin, size_t elem_end) const; - - /* - * Copy other's lod_start_pos_, to share LOD info. - * @note: the LOD info should not be changed. + * @note: low performance in slice lod_. */ - void ShareLOD(const LODTensor &other) { - lod_start_pos_ = other.lod_start_pos_; - } + template + LODTensor SliceInLevel(size_t level, size_t elem_begin, + size_t elem_end) const; /* - * Copy other's lod_start_pos_'s content, free to mutate. + * Copy other's lod_'s content, free to mutate. */ - void CopyLOD(const LODTensor &other) { - lod_start_pos_ = std::make_shared(*other.lod_start_pos_); - } + void CopyLOD(const LODTensor &other) { lod_ = other.lod_; } /* * Determine whether LODTensor has a valid LOD info. */ - bool HasLOD() const { return bool(lod_start_pos_); } - LOD *lod() const { return lod_start_pos_.get(); } + const LOD &lod() const { return lod_; } + LOD *mutable_lod() { return &lod_; } - std::shared_ptr &tensor() { return tensor_; } - Tensor *raw_tensor() { return tensor_.get(); } + virtual ~LODTensor() {} private: - std::shared_ptr lod_start_pos_; - std::shared_ptr tensor_; + LOD lod_; }; +bool operator==(const LODTensor::LOD &a, const LODTensor::LOD &b); + +template +LODTensor LODTensor::SliceLevels(size_t level_begin, size_t level_end) const { + auto new_lod = lod_.SliceLevels(level_begin, level_end); + // slice levels just need to update LOD info, each level will contains the + // whole tensor_, so no need to modify tensor_. + LODTensor new_tensor(new_lod); + new_tensor.ShareDataWith(*this); + return new_tensor; +} + +template +LODTensor LODTensor::SliceInLevel(size_t level, size_t elem_begin, + size_t elem_end) const { + PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level, + NumLevels()); + PADDLE_ENFORCE(elem_begin < NumElements(level), + "element begin [%d] out of range [%d]", elem_begin, + NumElements(level)); + PADDLE_ENFORCE(elem_end < NumElements(level) + 1, + "element end [%d] out of range [%d]", elem_end, + NumElements(level)); + + auto new_lod = lod_.SliceInLevel(level, elem_begin, elem_end); + + // slice elements just need to update LOD info, because offsets are not + // changed, so the original tensor_ can be reused. 
+ LODTensor new_tensor(new_lod); + new_tensor.ShareDataWith(*this); + return new_tensor; +} + } // namespace framework } // namespace paddle - -#include "paddle/framework/lod_tensor_impl.h" diff --git a/paddle/framework/lod_tensor_impl.h b/paddle/framework/lod_tensor_impl.h deleted file mode 100644 index 0eb6469aea3ae25f035751da985b5bebb489d961..0000000000000000000000000000000000000000 --- a/paddle/framework/lod_tensor_impl.h +++ /dev/null @@ -1,60 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ - -#pragma once - -#include "paddle/framework/details/lod_tensor.h" - -namespace paddle { -namespace framework { - -template -LODTensor LODTensor::SliceCopied(size_t level_begin, size_t level_end, - const platform::Place &dst_place) const { - PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced."); - auto new_lod = details::SliceLOD(*lod_start_pos_, level_begin, level_end); - auto new_tensor = std::make_shared(); - new_tensor->CopyFrom(*tensor_, dst_place); - - return LODTensor(new_tensor, new_lod); -} - -template -LODTensor LODTensor::SliceCopied(size_t level, size_t elem_begin, - size_t elem_end, - const platform::Place &dst_place) const { - PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced."); - PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level, - NumLevels()); - PADDLE_ENFORCE(elem_begin < NumElements(level), - "element begin [%d] out of range [%d]", elem_begin, - NumElements(level)); - PADDLE_ENFORCE(elem_end < NumElements(level) + 1, - "element end [%d] out of range [%d]", elem_end, - NumElements(level)); - - auto new_lod = details::SliceLOD(*lod_start_pos_, level, elem_begin, elem_end, - false /*tensor_shared*/); - - auto start_idx = new_lod->front().front(); - auto end_idx = new_lod->front().back() - 1 /*the next element's start*/; - auto sliced_tensor = tensor_->Slice(start_idx, end_idx); - auto new_tensor = std::make_shared(); - new_tensor->CopyFrom(sliced_tensor, dst_place); - - return LODTensor(new_tensor, new_lod); -} - -} // namespace framework -} // namespace paddle diff --git a/paddle/framework/lod_tensor_test.cc b/paddle/framework/lod_tensor_test.cc index 511716375e81e8fd89b071c940ee97327c268b8b..2881136ced6ef957a192e303e529b9b2867b3dda 100644 --- a/paddle/framework/lod_tensor_test.cc +++ b/paddle/framework/lod_tensor_test.cc @@ -15,6 +15,7 @@ #include #include +#include #include namespace paddle { @@ -29,22 +30,28 @@ class LODTensorTester : public ::testing::Test { // 0 10 20 // 0 5 10 15 20 // 0 2 5 7 10 12 15 20 - auto lod = std::make_shared(); - lod->push_back(std::vector{0, 10, 20}); - lod->push_back(std::vector{0, 5, 10, 15, 20}); - lod->push_back(std::vector{0, 2, 5, 7, 10, 12, 15, 17, 20}); + LODTensor::LOD lod; + lod.push_back(std::vector{0, 10, 20}); + lod.push_back(std::vector{0, 5, 10, 15, 20}); + lod.push_back(std::vector{0, 2, 5, 7, 10, 12, 15, 17, 20}); - auto tensor = std::make_shared(); - tensor->Resize({20 /*batch size*/, 128 /*dim*/}); + ASSERT_EQ(lod.size(), 3UL); 
+ + tensor.Resize({20 /*batch size*/, 128 /*dim*/}); // malloc memory - tensor->mutable_data(place); + tensor.mutable_data(place); + + lod_tensor.reset(new LODTensor(lod)); + lod_tensor->Resize({20 /*batch size*/, 128 /*dim*/}); - lod_tensor->Reset(tensor, lod); + lod_tensor->ShareDataWith(tensor); + // lod_tensor->ShareDataWith(tensor); } protected: std::unique_ptr lod_tensor; platform::CPUPlace place; + Tensor tensor; }; TEST_F(LODTensorTester, NumLevels) { ASSERT_EQ(lod_tensor->NumLevels(), 3UL); } @@ -55,110 +62,54 @@ TEST_F(LODTensorTester, NumElements) { ASSERT_EQ(lod_tensor->NumElements(2), 8UL); } -TEST_F(LODTensorTester, SliceShared_Level) { - // slice 1 level - for (size_t level = 0; level < 3UL; ++level) { - auto new_lod_tensor = lod_tensor->SliceShared(level, level + 1); - ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL); - ASSERT_EQ(new_lod_tensor.NumElements(0UL), lod_tensor->NumElements(level)); - ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor()); - } - // slice 2 level - for (size_t level = 0; level < 2UL; ++level) { - auto new_lod_tensor = lod_tensor->SliceShared(level, level + 2); - ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); - ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor->NumElements(level)); - ASSERT_EQ(new_lod_tensor.NumElements(1), - lod_tensor->NumElements(level + 1)); - ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor()); - } -} - -TEST_F(LODTensorTester, SliceCopied_Level) { +TEST_F(LODTensorTester, SliceLevels) { // slice 1 level for (size_t level = 0; level < 3UL; ++level) { - auto new_lod_tensor = - lod_tensor->SliceCopied(level, level + 1, place); + auto new_lod_tensor = lod_tensor->SliceLevels(level, level + 1); ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL); ASSERT_EQ(new_lod_tensor.NumElements(0UL), lod_tensor->NumElements(level)); - // ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor()); - // TODO(superjom) add tensor comparation here. + // ASSERT_EQ(new_lod_tensor, *lod_tensor); } // slice 2 level for (size_t level = 0; level < 2UL; ++level) { - auto new_lod_tensor = - lod_tensor->SliceCopied(level, level + 2, place); + auto new_lod_tensor = lod_tensor->SliceLevels(level, level + 2); ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor->NumElements(level)); ASSERT_EQ(new_lod_tensor.NumElements(1), lod_tensor->NumElements(level + 1)); - // ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor()); - // TODO(superjom) add tensor comparation here. 
+ ASSERT_EQ(new_lod_tensor.data(), lod_tensor->data()); } } -TEST_F(LODTensorTester, SliceShared_Element) { - size_t level = 0; - auto new_lod_tensor = lod_tensor->SliceShared(level, 0, 2); - ASSERT_EQ(new_lod_tensor.NumLevels(), 3UL); - ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL); - ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL); - ASSERT_EQ(new_lod_tensor.NumElements(2), 8UL); - ASSERT_EQ(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor()); - - level = 1; - new_lod_tensor = lod_tensor->SliceShared(level, 0, 2); - ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); - ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL); - ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL); - ASSERT_EQ(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor()); -} - -TEST_F(LODTensorTester, SliceCopied_Element) { +TEST_F(LODTensorTester, SliceInLevel) { size_t level = 0; - auto new_lod_tensor = lod_tensor->SliceCopied(level, 0, 2, place); - ASSERT_EQ(new_lod_tensor.NumLevels(), 3UL); - ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL); - ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL); - ASSERT_EQ(new_lod_tensor.NumElements(2), 8UL); - ASSERT_NE(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor()); + auto new_lod_tensor = lod_tensor->SliceInLevel(level, 0, 2); + EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL); + EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL); + EXPECT_EQ(new_lod_tensor.NumElements(1), 4UL); + EXPECT_EQ(new_lod_tensor.NumElements(2), 8UL); + ASSERT_EQ(new_lod_tensor.data(), lod_tensor->data()); level = 1; - new_lod_tensor = lod_tensor->SliceCopied(level, 0, 2, place); + new_lod_tensor = lod_tensor->SliceInLevel(level, 0, 2); ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL); ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL); - ASSERT_NE(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor()); - - level = 1; - // LOD is - // 0 5 10 - // 0 2 5 7 10 - new_lod_tensor = lod_tensor->SliceCopied(level, 1, 3, place); - ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); - ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL); - ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL); - - ASSERT_EQ(new_lod_tensor.lod_element(0, 0), 0UL); - ASSERT_EQ(new_lod_tensor.lod_element(0, 1), 5UL); - ASSERT_EQ(new_lod_tensor.lod_element(1, 0), 0UL); - ASSERT_EQ(new_lod_tensor.lod_element(1, 1), 2UL); - ASSERT_EQ(new_lod_tensor.lod_element(1, 2), 5UL); - ASSERT_EQ(new_lod_tensor.lod_element(1, 3), 7UL); - - // TODO(superjom) compare the content of these tensors + ASSERT_EQ(new_lod_tensor.data(), lod_tensor->data()); } TEST_F(LODTensorTester, ShareLOD) { LODTensor new_lod_tensor; - new_lod_tensor.ShareLOD(*lod_tensor); + new_lod_tensor.CopyLOD(*lod_tensor); ASSERT_EQ(new_lod_tensor.lod(), lod_tensor->lod()); } TEST_F(LODTensorTester, CopyLOD) { LODTensor new_lod_tensor; new_lod_tensor.CopyLOD(*lod_tensor); - ASSERT_NE(new_lod_tensor.lod(), lod_tensor->lod()); + bool equals = std::equal(lod_tensor->lod().begin(), lod_tensor->lod().end(), + new_lod_tensor.lod().begin()); + ASSERT_TRUE(equals); } } // namespace framework diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 0fbda936c640c91385e289d6728fd2d5cdf23363..3b793628aa6fdb08544ba90274736c9d29262a8b 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -47,17 +47,20 @@ class OpProtoAndCheckerMaker { struct VariableBuilder { OpProto::Var* var_; - VariableBuilder& SetMultiple() { + VariableBuilder& AsDuplicable() { var_->set_duplicable(true); return *this; } - VariableBuilder& SetTemporary() { + VariableBuilder& 
AsIntermediate() { var_->set_intermediate(true); return *this; } - VariableBuilder& IgnoreGradient() { + // TODO(FengJiayi, yuyang18): `AsNoGradient` is a very bad name, because it + // means that input/output is not needed when calculate gradient. It does + // not mean no gradient when backward. It should be changed soon. + VariableBuilder& AsNoGradient() { var_->set_no_gradient(true); return *this; } @@ -166,25 +169,22 @@ class OpRegistry { return std::shared_ptr(op); } - static std::shared_ptr CreateOp(const OpDesc& op_desc) { - VarNameMap inputs; - for (auto& input : op_desc.inputs()) { - auto& var_names = inputs[input.parameter()]; - auto& var_names_in_proto = input.arguments(); - var_names.reserve(static_cast(var_names_in_proto.size())); - std::copy(var_names_in_proto.begin(), var_names_in_proto.end(), - std::back_inserter(var_names)); - } - - VarNameMap outputs; - for (auto& output : op_desc.outputs()) { - auto& var_names = outputs[output.parameter()]; - auto& var_names_in_proto = output.arguments(); + static VarNameMap ConvertOpDescVarsToVarNameMap( + const google::protobuf::RepeatedPtrField& op_desc_vars) { + VarNameMap ret_val; + for (auto& var : op_desc_vars) { + auto& var_names = ret_val[var.parameter()]; + auto& var_names_in_proto = var.arguments(); var_names.reserve(static_cast(var_names_in_proto.size())); std::copy(var_names_in_proto.begin(), var_names_in_proto.end(), std::back_inserter(var_names)); } + return ret_val; + } + static std::shared_ptr CreateOp(const OpDesc& op_desc) { + VarNameMap inputs = ConvertOpDescVarsToVarNameMap(op_desc.inputs()); + VarNameMap outputs = ConvertOpDescVarsToVarNameMap(op_desc.outputs()); AttributeMap attrs; for (auto& attr : op_desc.attrs()) { attrs[attr.name()] = GetAttrValue(attr); diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc index 42361c718bc1a5ed29d31f175216697643f08ef7..0b8f8289490135b8976c38fa3fb3c2995c50416f 100644 --- a/paddle/framework/op_registry_test.cc +++ b/paddle/framework/op_registry_test.cc @@ -38,8 +38,8 @@ class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { public: MyTestOpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("input", "input of cosine op").SetMultiple(); - AddOutput("output", "output of cosine op").SetTemporary(); + AddInput("input", "input of cosine op").AsDuplicable(); + AddOutput("output", "output of cosine op").AsIntermediate(); auto my_checker = [](int i) { PADDLE_ENFORCE(i % 2 == 0, "'test_attr' must be even!"); }; @@ -51,6 +51,15 @@ class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { } // namespace framework } // namespace paddle +static void BuildVar(const std::string& param_name, + std::initializer_list arguments, + paddle::framework::OpDesc::Var* var) { + var->set_parameter(param_name); + for (auto& arg_name : arguments) { + var->add_arguments(arg_name); + } +} + REGISTER_OP(cos_sim, paddle::framework::CosineOp, paddle::framework::CosineOpProtoAndCheckerMaker); REGISTER_OP(my_test_op, paddle::framework::MyTestOp, @@ -59,13 +68,8 @@ REGISTER_OP(my_test_op, paddle::framework::MyTestOp, TEST(OpRegistry, CreateOp) { paddle::framework::OpDesc op_desc; op_desc.set_type("cos_sim"); - auto input = op_desc.add_inputs(); - input->set_parameter("input"); - *input->mutable_arguments()->Add() = "aa"; - - auto output = op_desc.add_outputs(); - output->set_parameter("output"); - *output->mutable_arguments()->Add() = "bb"; + BuildVar("input", {"aa"}, 
op_desc.add_inputs()); + BuildVar("output", {"bb"}, op_desc.add_outputs()); float scale = 3.3; auto attr = op_desc.mutable_attrs()->Add(); @@ -85,13 +89,8 @@ TEST(OpRegistry, CreateOp) { TEST(OpRegistry, IllegalAttr) { paddle::framework::OpDesc op_desc; op_desc.set_type("cos_sim"); - auto input = op_desc.add_inputs(); - input->set_parameter("input"); - *input->mutable_arguments()->Add() = "aa"; - - auto output = op_desc.add_outputs(); - output->set_parameter("output"); - *output->mutable_arguments()->Add() = "bb"; + BuildVar("input", {"aa"}, op_desc.add_inputs()); + BuildVar("output", {"bb"}, op_desc.add_outputs()); auto attr = op_desc.mutable_attrs()->Add(); attr->set_name("scale"); @@ -115,13 +114,8 @@ TEST(OpRegistry, IllegalAttr) { TEST(OpRegistry, DefaultValue) { paddle::framework::OpDesc op_desc; op_desc.set_type("cos_sim"); - auto input = op_desc.add_inputs(); - input->set_parameter("input"); - *input->mutable_arguments()->Add() = "aa"; - - auto output = op_desc.add_outputs(); - output->set_parameter("output"); - *output->mutable_arguments()->Add() = "bb"; + BuildVar("input", {"aa"}, op_desc.add_inputs()); + BuildVar("output", {"bb"}, op_desc.add_outputs()); ASSERT_TRUE(op_desc.IsInitialized()); @@ -136,13 +130,8 @@ TEST(OpRegistry, DefaultValue) { TEST(OpRegistry, CustomChecker) { paddle::framework::OpDesc op_desc; op_desc.set_type("my_test_op"); - auto input = op_desc.add_inputs(); - input->set_parameter("input"); - *input->mutable_arguments()->Add() = "ii"; - - auto output = op_desc.add_outputs(); - output->set_parameter("output"); - *output->mutable_arguments()->Add() = "oo"; + BuildVar("input", {"ii"}, op_desc.add_inputs()); + BuildVar("output", {"oo"}, op_desc.add_outputs()); // attr 'test_attr' is not set bool caught = false; diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 59593cb6bd7027364cf154efa6eff3a8af9815b2..13442a72b9d77a4858b5d91dd7690e089ec7ed49 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -42,33 +42,35 @@ std::unordered_map& OpProtos() { } const std::string& OperatorBase::Input(const std::string& name) const { - auto it = inputs_.find(name); - PADDLE_ENFORCE(it != inputs_.end(), "Op %s does not have input %s", type_, - name); - PADDLE_ENFORCE_EQ(it->second.size(), 1UL, + auto& ins = Inputs(name); + PADDLE_ENFORCE_EQ(ins.size(), 1UL, "Op %s input %s should contain only one variable", type_, name); - return it->second[0]; + return ins[0]; } const std::vector& OperatorBase::Inputs( const std::string& name) const { - return inputs_.at(name); + auto it = inputs_.find(name); + PADDLE_ENFORCE(it != inputs_.end(), "Op %s do not have input %s", type_, + name); + return it->second; } const std::string& OperatorBase::Output(const std::string& name) const { - auto it = outputs_.find(name); - PADDLE_ENFORCE(it != outputs_.end(), "Op %s does not have output %s", type_, - name); - PADDLE_ENFORCE_EQ(it->second.size(), 1UL, - "Op %s input %s should contain only one variable", type_, + auto& outs = Outputs(name); + PADDLE_ENFORCE_EQ(outs.size(), 1UL, + "Op %s output %s should contain only one variable", type_, name); - return it->second[0]; + return outs[0]; } const std::vector& OperatorBase::Outputs( const std::string& name) const { - return outputs_.at(name); + auto it = outputs_.find(name); + PADDLE_ENFORCE(it != outputs_.end(), "Op %s does not have output %s", type_, + name); + return it->second; } std::string OperatorBase::DebugString() const { @@ -136,5 +138,35 @@ OperatorBase::OperatorBase(const std::string& 
type, } } } + +std::vector OperatorBase::OutputVars(bool has_intermediate) const { + std::vector ret_val; + if (has_intermediate) { + // push all outputs into ret_val + for (auto& o : outputs_) { + ret_val.reserve(ret_val.size() + o.second.size()); + ret_val.insert(ret_val.end(), o.second.begin(), o.second.end()); + } + return ret_val; + } + auto it = OpProtos().find(type_); + PADDLE_ENFORCE( + it != OpProtos().end(), + "Operator %s not registered, cannot figure out intermediate outputs", + type_); + + // get all OpProto::Var for outputs + for (auto& o : it->second.outputs()) { + // ignore all intermediate output + if (o.intermediate()) continue; + auto out = outputs_.find(o.name()); + if (out != outputs_.end()) { + ret_val.reserve(ret_val.size() + out->second.size()); + ret_val.insert(ret_val.end(), out->second.begin(), out->second.end()); + } + } + return ret_val; +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 292847f1f0f5343ece3381522d7e52e0fece097c..161b6c5c50467ef54848f6bf848dfff61f4800b6 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -110,34 +110,7 @@ class OperatorBase { //! TODO add a vector_view to prevent memory copy. const std::vector& Outputs(const std::string& name) const; - virtual std::vector OutputVars(bool has_intermediate) const { - std::vector ret_val; - if (has_intermediate) { - // push all outputs into ret_val - for (auto& o : outputs_) { - ret_val.reserve(ret_val.size() + o.second.size()); - ret_val.insert(ret_val.end(), o.second.begin(), o.second.end()); - } - return ret_val; - } - auto it = OpProtos().find(type_); - PADDLE_ENFORCE( - it != OpProtos().end(), - "Operator %s not registered, cannot figure out intermediate outputs", - type_); - - // get all OpProto::Var for outputs - for (auto& o : it->second.outputs()) { - // ignore all intermediate output - if (o.intermediate()) continue; - auto out = outputs_.find(o.name()); - if (out != outputs_.end()) { - ret_val.reserve(ret_val.size() + out->second.size()); - ret_val.insert(ret_val.end(), out->second.begin(), out->second.end()); - } - } - return ret_val; - } + virtual std::vector OutputVars(bool has_intermediate) const; std::string Type() const { return type_; } const AttributeMap& Attrs() const { return attrs_; } @@ -162,11 +135,11 @@ class InferShapeContext { : op_(op), scope_(scope) {} size_t InputSize(const std::string& name) const { - return op_.inputs_.at(name).size(); + return op_.Inputs(name).size(); } size_t OutputSize(const std::string& name) const { - return op_.outputs_.at(name).size(); + return op_.Outputs(name).size(); } const Variable* InputVar(const std::string& name) const { diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc index 6a6ee10f213b22f7a7914b678fd09a4e509e6148..6804841587730d51d9cfad30a9de81401d36695b 100644 --- a/paddle/framework/operator_test.cc +++ b/paddle/framework/operator_test.cc @@ -56,19 +56,24 @@ class OpeWithoutKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker { } // namespace framework } // namespace paddle +static void BuildVar(const std::string& param_name, + std::initializer_list arguments, + paddle::framework::OpDesc::Var* var) { + var->set_parameter(param_name); + for (auto& arg_name : arguments) { + *var->mutable_arguments()->Add() = arg_name; + } +} + REGISTER_OP(test_operator, paddle::framework::OpWithoutKernelTest, paddle::framework::OpeWithoutKernelTestProtoAndCheckerMaker); TEST(OperatorBase, all) { 
paddle::framework::OpDesc op_desc; op_desc.set_type("test_operator"); - auto* ipt = op_desc.mutable_inputs()->Add(); - *ipt->mutable_arguments()->Add() = "IN1"; - ipt->set_parameter("input"); + BuildVar("input", {"IN1"}, op_desc.add_inputs()); + BuildVar("output", {"OUT1"}, op_desc.add_outputs()); - auto* output = op_desc.mutable_outputs()->Add(); - *output->mutable_arguments()->Add() = "OUT1"; - output->set_parameter("output"); auto attr = op_desc.mutable_attrs()->Add(); attr->set_name("scale"); attr->set_type(paddle::framework::AttrType::FLOAT); @@ -129,9 +134,9 @@ class OpKernelTestMultiInputsProtoAndCheckerMaker OpKernelTestMultiInputsProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("xs", "inputs of test op").SetMultiple(); + AddInput("xs", "inputs of test op").AsDuplicable(); AddInput("k", "input of test op"); - AddOutput("ys", "outputs of test op").SetMultiple(); + AddOutput("ys", "outputs of test op").AsDuplicable(); AddAttr("scale", "scale of cosine op") .SetDefault(1.0) .LargerThan(0.0); @@ -188,13 +193,8 @@ REGISTER_OP_CPU_KERNEL(op_with_kernel, TEST(OpKernel, all) { paddle::framework::OpDesc op_desc; op_desc.set_type("op_with_kernel"); - auto* ipt = op_desc.mutable_inputs()->Add(); - *ipt->mutable_arguments()->Add() = "IN1"; - ipt->set_parameter("x"); - - auto* output = op_desc.mutable_outputs()->Add(); - *output->mutable_arguments()->Add() = "OUT1"; - output->set_parameter("y"); + BuildVar("x", {"IN1"}, op_desc.add_inputs()); + BuildVar("y", {"OUT1"}, op_desc.add_outputs()); auto attr = op_desc.mutable_attrs()->Add(); attr->set_name("scale"); @@ -221,18 +221,9 @@ TEST(OpKernel, multi_inputs) { OpDesc op_desc; op_desc.set_type("op_multi_inputs_with_kernel"); - auto x = op_desc.mutable_inputs()->Add(); - x->set_parameter("xs"); - *x->mutable_arguments()->Add() = "x0"; - *x->mutable_arguments()->Add() = "x1"; - *x->mutable_arguments()->Add() = "x2"; - auto k = op_desc.mutable_inputs()->Add(); - k->set_parameter("k"); - *k->mutable_arguments()->Add() = "k0"; - auto y = op_desc.mutable_outputs()->Add(); - y->set_parameter("ys"); - *y->mutable_arguments()->Add() = "y0"; - *y->mutable_arguments()->Add() = "y1"; + BuildVar("xs", {"x0", "x1", "x2"}, op_desc.add_inputs()); + BuildVar("k", {"k0"}, op_desc.add_inputs()); + BuildVar("ys", {"y0", "y1"}, op_desc.add_outputs()); auto attr = op_desc.mutable_attrs()->Add(); attr->set_name("scale"); diff --git a/paddle/gserver/tests/LayerGradUtil.cpp b/paddle/gserver/tests/LayerGradUtil.cpp index fd9cfa1dc7a9028cb2c5c98baca98ffb2a837bac..a38880e14cdfcef05461dae567d198e5400c6bb1 100644 --- a/paddle/gserver/tests/LayerGradUtil.cpp +++ b/paddle/gserver/tests/LayerGradUtil.cpp @@ -388,14 +388,23 @@ void initDataLayer(TestConfig testConf, data.grad->zeroMem(); break; case INPUT_SELF_DEFINE_DATA: { - size_t height = testConf.inputDefs[i].selfDefinedData->getHeight(); - size_t width = testConf.inputDefs[i].selfDefinedData->getWidth(); - CHECK_GT(static_cast(height), 0); - CHECK_GT(static_cast(width), 0); - data.value = Matrix::create(height, width, false, useGpu); - data.grad = Matrix::create(height, width, false, useGpu); - data.value->copyFrom(*testConf.inputDefs[i].selfDefinedData); - data.grad->zeroMem(); + if (testConf.inputDefs[i].ids.size()) { + data.ids = IVector::create(testConf.inputDefs[i].ids.size(), useGpu); + data.ids->copyFrom(testConf.inputDefs[i].ids.data(), + testConf.inputDefs[i].ids.size()); + } else if (testConf.inputDefs[i].selfDefinedData) { + size_t height = 
testConf.inputDefs[i].selfDefinedData->getHeight(); + size_t width = testConf.inputDefs[i].selfDefinedData->getWidth(); + CHECK_GT(static_cast(height), 0); + CHECK_GT(static_cast(width), 0); + data.value = Matrix::create(height, width, false, useGpu); + data.grad = Matrix::create(height, width, false, useGpu); + data.value->copyFrom(*testConf.inputDefs[i].selfDefinedData); + data.grad->zeroMem(); + } else { + LOG(FATAL) << "No self-defined data are given."; + return; + } const std::vector& labelSeqStartPositions = testConf.inputDefs[i].labelSeqStartPositions; diff --git a/paddle/gserver/tests/LayerGradUtil.h b/paddle/gserver/tests/LayerGradUtil.h index 5debedf5ef6a3262578ca01b335e664f9a334d35..88e831f78bd165f63806df6c081d84411be51502 100644 --- a/paddle/gserver/tests/LayerGradUtil.h +++ b/paddle/gserver/tests/LayerGradUtil.h @@ -68,6 +68,7 @@ struct InputDef { std::vector labelInitValue; std::vector labelSeqStartPositions; std::vector labelSubSeqStartPositions; + std::vector ids; MatrixPtr selfDefinedData; InputDef(InputType type, string nameIn, size_t dimIn, size_t sizeIn) { @@ -95,6 +96,23 @@ struct InputDef { isStatic = false; } + InputDef(InputType type, + string nameIn, + const std::vector& ids, + const std::vector& selfDefinedSeqStartPos = {}, + const std::vector& selfDefinedSubSeqStartPos = {}) + : labelSeqStartPositions(selfDefinedSeqStartPos), + labelSubSeqStartPositions(selfDefinedSubSeqStartPos), + ids(ids) { + selfDefinedData = nullptr; + inputType = type; + name = nameIn; + dim = 0; + sparse = {""}; + paraSize = 0; + isStatic = false; + } + InputDef(InputType type, string nameIn, size_t dimIn, diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc index 8e3f0111662296cf9c50bf081edd974c6a8b3488..35e7212dde210a50285272cfd94118fa34fb7cd9 100644 --- a/paddle/operators/mean_op.cc +++ b/paddle/operators/mean_op.cc @@ -34,7 +34,7 @@ class MeanOpMaker : public framework::OpProtoAndCheckerMaker { MeanOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of mean op"); - AddOutput("Out", "The output of mean op").IgnoreGradient(); + AddOutput("Out", "The output of mean op").AsNoGradient(); AddComment("Mean Operator"); } }; diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index bb30ae6894fd9161c85f8071711e6e2d102fe4a9..5ddee75581824996fd312f8ddf13007759fd9a67 100644 --- a/paddle/operators/recurrent_op.cc +++ b/paddle/operators/recurrent_op.cc @@ -155,13 +155,13 @@ class RecurrentAlgorithmProtoAndCheckerMaker // inputs and outputs stored in proto AddInput(name.inlinks, "the inputs that need to be segmented for each step.") - .SetMultiple(); + .AsDuplicable(); AddInput(name.boot_memories, "variables to initialize memories.") - .SetMultiple(); + .AsDuplicable(); AddInput(name.step_net, "network shared by all steps."); AddOutput(name.outlinks, "the outputs that need to concated for all steps.") - .SetMultiple(); + .AsDuplicable(); AddOutput(name.step_scopes, "step scopes"); // Attributes stored in AttributeMap diff --git a/paddle/operators/recurrent_op_test.cc b/paddle/operators/recurrent_op_test.cc index 40c212d6b765e4d48fbba00e82837257219caeb1..2f6eff0720847fdfa6443d2fc233e92dac2d0fab 100644 --- a/paddle/operators/recurrent_op_test.cc +++ b/paddle/operators/recurrent_op_test.cc @@ -26,8 +26,6 @@ namespace paddle { namespace operators { using namespace paddle::framework; -// using framework::make_ddim; -// using framework::DDim; class 
RecurrentGradientAlgorithmTest : public ::testing::Test { protected: diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt index b76c05dc8142af40d9872b42cc51b3c317e095be..96fad9b42e04a88fdcbda093683b57451b2a3e41 100644 --- a/python/paddle/v2/framework/tests/CMakeLists.txt +++ b/python/paddle/v2/framework/tests/CMakeLists.txt @@ -24,3 +24,4 @@ py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py) py_test(test_operator SRCS test_operator.py) # py_test(test_gaussian_random_op SRCS test_gaussian_random_op.py) py_test(test_uniform_random_op SRCS test_uniform_random_op.py) +py_test(test_recurrent_op SRCS test_recurrent_op.py) diff --git a/python/paddle/v2/framework/tests/test_add_two_op.py b/python/paddle/v2/framework/tests/test_add_two_op.py index 019784a8b4d6138d8f8a45f308513c376cd41ebb..0def484eddb88604398ee10390d3f28058714a57 100644 --- a/python/paddle/v2/framework/tests/test_add_two_op.py +++ b/python/paddle/v2/framework/tests/test_add_two_op.py @@ -19,13 +19,5 @@ class TestAddOp(unittest.TestCase): self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']} -#class TestAddGradOp(unittest.TestCase): -# def test_add_grad(self): -# op = Operator('add_two', X="X", Y="Y", Out="Out") -# backward_op = core.Operator.backward(op, set()) -# self.assertEqual(backward_op.type(), "add_two_grad") -# expected = '''Op(add_two_grad), inputs:(X, Y, Out, Out@GRAD), outputs:(X@GRAD, Y@GRAD).''' -# self.assertEqual(expected, str(backward_op)) - if __name__ == '__main__': unittest.main() diff --git a/python/paddle/v2/framework/tests/test_recurrent_op.py b/python/paddle/v2/framework/tests/test_recurrent_op.py index 5c77c477b347f4713e4af2a8cb462b243d7a779c..0db66cc4e181fde10f161a323ea749fd84a5f963 100644 --- a/python/paddle/v2/framework/tests/test_recurrent_op.py +++ b/python/paddle/v2/framework/tests/test_recurrent_op.py @@ -2,19 +2,74 @@ import logging import paddle.v2.framework.core as core import unittest import numpy as np -import paddle.v2.framework.create_op_creation_methods as creation +from paddle.v2.framework.op import Operator -ops = creation.op_creations +def py_sigmoid(x): + return 1. / (1. 
+ np.exp(-x)) -def create_tensor(scope, name, shape): + +class PySimpleRNN(object): + ''' + A simple implementation of RNN based on numpy, to futhur test RecurrentOp's alogorithm + ''' + + def __init__(self, input_dim=30, batch_size=50, weight_dim=15, sent_len=11): + self.x = np.random.normal(size=(sent_len, batch_size, input_dim)) + self.W = np.random.normal(size=(input_dim, input_dim)) + self.U = np.random.normal(size=(input_dim, input_dim)) + self.h_boot = np.random.normal(size=(batch_size, input_dim)) + + # memories + self.mems = [ + np.zeros(shape=(batch_size, input_dim)) for i in range(sent_len) + ] + + def forward(self): + xs = self.segment_inputs() + for step_id in range(self.x.shape[0]): + self.step(step_id, xs[step_id]) + return self.concat_outputs() + + def segment_inputs(self): + return [self.x[i] for i in range(self.x.shape[0])] + + def concat_outputs(self): + return np.array(self.mems) + + def step(self, step_id, x): + ''' + run a step + ''' + mem = self.mems[step_id] + if step_id > 0: + pre_mem = self.mems[step_id - 1] + else: + pre_mem = self.h_boot + xW = np.matmul(x, self.W) + hU = np.matmul(mem, self.U) + + sum = xW + hU + self.mems[step_id] = py_sigmoid(sum) + + +class PySimpleRNNTest(unittest.TestCase): + def setUp(self): + self.rnn = PySimpleRNN() + + def test_forward(self): + output = self.rnn.forward() + print 'output', output + + +def create_tensor(scope, name, shape, np_data): tensor = scope.new_var(name).get_tensor() tensor.set_dims(shape) - tensor.set(np.random.random(shape), core.CPUPlace()) + tensor.set(np_data, core.CPUPlace()) return tensor -class TestRNN(unittest.TestCase): +class TestRecurrentOp(unittest.TestCase): ''' Test RNNOp @@ -28,7 +83,7 @@ class TestRNN(unittest.TestCase): memories: - h outputs: - - h + - h ''' input_dim = 30 @@ -36,33 +91,45 @@ class TestRNN(unittest.TestCase): weight_dim = 15 sent_len = 11 - def init(self): + def setUp(self): + self.py_rnn = PySimpleRNN(self.input_dim, self.batch_size, + self.weight_dim, self.sent_len) + def forward(self): self.scope = core.Scope() - self.create_global_variables() self.create_step_net() rnn_op = self.create_rnn_op() ctx = core.DeviceContext.create(core.CPUPlace()) - print 'infer_shape' rnn_op.infer_shape(self.scope) - rnn_op.run(self.scope, ctx) + return np.array(self.scope.find_var("h").get_tensor()) def create_global_variables(self): # create inlink + x_np_data = self.py_rnn.x create_tensor(self.scope, "x", - [self.sent_len, self.batch_size, self.input_dim]) - create_tensor(self.scope, "W", [self.input_dim, self.input_dim]) - create_tensor(self.scope, "U", [self.input_dim, self.input_dim]) - create_tensor(self.scope, "h_boot", [self.batch_size, self.input_dim]) + [self.sent_len, self.batch_size, self.input_dim], + x_np_data) + W_np_data = self.py_rnn.W + create_tensor(self.scope, "W", [self.input_dim, self.input_dim], + W_np_data) + + U_np_data = self.py_rnn.U + create_tensor(self.scope, "U", [self.input_dim, self.input_dim], + U_np_data) + + h_boot_np_data = self.py_rnn.h_boot + create_tensor(self.scope, "h_boot", [self.batch_size, self.input_dim], + h_boot_np_data) self.scope.new_var("step_scopes") self.scope.new_var("h@alias") self.scope.new_var("h") def create_rnn_op(self): # create RNNOp - rnnop = ops.recurrent_op( + rnnop = Operator( + "recurrent_op", # inputs inlinks=["x"], boot_memories=["h_boot"], @@ -81,17 +148,25 @@ class TestRNN(unittest.TestCase): var = self.scope.new_var("stepnet") stepnet = var.get_net() - x_fc_op = ops.fc(X="x@alias", W="W", Y="Wx") - h_fc_op = ops.fc(X="h@pre", 
W="U", Y="Uh") - sum_op = ops.add_two(X="Wx", Y="Uh", Out="sum") - sig_op = ops.sigmoid(X="sum", Y="h@alias") + # x_fc_op = Operator("fc", X="x@alias", W="W", Y="Wx") + # h_fc_op = Operator("fc", X="h@pre", W="U", Y="Uh") + x_fc_op = Operator("mul", X="x@alias", Y="W", Out="Wx") + h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh") + sum_op = Operator("add_two", X="Wx", Y="Uh", Out="sum") + sig_op = Operator("sigmoid", X="sum", Y="h@alias") for op in [x_fc_op, h_fc_op, sum_op, sig_op]: stepnet.add_op(op) stepnet.complete_add_op(True) - def test_recurrent(self): - self.init() + def test_forward(self): + print 'test recurrent op forward' + pd_output = self.forward() + py_output = self.py_rnn.forward() + print 'pd_output', pd_output + print + print 'py_output', py_output + self.assertEqual(pd_output.shape, py_output.shape) if __name__ == '__main__': diff --git a/python/requirements.txt b/python/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..3df822bd76d2a64a0a35f84b4ec309ce7150c221 --- /dev/null +++ b/python/requirements.txt @@ -0,0 +1,9 @@ +requests==2.9.2 +numpy>=1.12 +protobuf==3.1 +recordio +matplotlib +rarfile +scipy>=0.19.0 +Pillow +nltk>=3.2.2 diff --git a/python/setup.py.in b/python/setup.py.in index 4110c983180937e86716324f8c92c37b7d2cc3ea..38728aa2fd77cf3c882479ed83e99688b9ffa541 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -1,5 +1,4 @@ from setuptools import setup, Distribution - class BinaryDistribution(Distribution): def has_ext_modules(foo): return True @@ -18,15 +17,8 @@ packages=['paddle', 'paddle.v2.framework.proto', 'py_paddle'] -setup_requires=["requests", - "numpy>=1.12", - "protobuf==3.1", - "recordio", - "matplotlib", - "rarfile", - "scipy>=0.19.0", - "Pillow", - "nltk>=3.2.2"] +with open('@PADDLE_SOURCE_DIR@/python/requirements.txt') as f: + setup_requires = f.read().splitlines() if '${CMAKE_SYSTEM_PROCESSOR}' not in ['arm', 'armv7-a', 'aarch64']: setup_requires+=["opencv-python"]