diff --git a/.gitignore b/.gitignore index 1512c1438e9e0b0b7b6e0c273a24b273cb652b04..7480bd53a403d74932d56409fdb0a9dd7bb6b9d6 100644 --- a/.gitignore +++ b/.gitignore @@ -21,7 +21,7 @@ third_party/ cmake-build-* # generated while compiling -python/paddle/v2/framework/core.so +python/paddle/v2/fluid/core.so paddle/pybind/pybind.h CMakeFiles cmake_install.cmake diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 913cd0f81eaef37014f38c71e7c3d23bfeec1466..b3b9c45ded95ce2e735b8898d47760956dcacdce 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -377,6 +377,12 @@ std::vector> MakeOpGrad( return grad_op_descs; } +static BlockDescBind* CreateStepBlock( + ProgramDescBind& program_desc, + std::unordered_set* no_grad_vars, + std::unordered_map* grad_to_var, + int step_block_idx); + std::vector> MakeBlockBackward( ProgramDescBind& program_desc, int block_idx, std::unordered_set* no_grad_vars, @@ -392,13 +398,13 @@ std::vector> MakeBlockBackward( if ((*it)->Type() == "recurrent") { int step_block_idx = (*it)->GetBlockAttr("step_block"); - auto backward_block_op_descs = MakeBlockBackward( - program_desc, step_block_idx, no_grad_vars, grad_to_var); + BlockDescBind* backward_block = CreateStepBlock( + program_desc, no_grad_vars, grad_to_var, step_block_idx); + op_grads = MakeOpGrad(*it, no_grad_vars, grad_to_var, {backward_block}); + } else if ((*it)->Type() == "conditional_block") { BlockDescBind* backward_block = - program_desc.AppendBlock(*program_desc.MutableBlock(step_block_idx)); - for (auto& ptr : backward_block_op_descs) { - backward_block->AppendAllocatedOp(std::move(ptr)); - } + CreateStepBlock(program_desc, no_grad_vars, grad_to_var, + (*it)->GetBlockAttr("block")); op_grads = MakeOpGrad(*it, no_grad_vars, grad_to_var, {backward_block}); } else { op_grads = MakeOpGrad(*it, no_grad_vars, grad_to_var); @@ -449,6 +455,21 @@ std::vector> MakeBlockBackward( return backward_descs; } +static BlockDescBind* CreateStepBlock( + ProgramDescBind& program_desc, + std::unordered_set* no_grad_vars, + std::unordered_map* grad_to_var, + int step_block_idx) { + auto backward_block_op_descs = MakeBlockBackward(program_desc, step_block_idx, + no_grad_vars, grad_to_var); + BlockDescBind* backward_block = + program_desc.AppendBlock(*program_desc.MutableBlock(step_block_idx)); + for (auto& ptr : backward_block_op_descs) { + backward_block->AppendAllocatedOp(move(ptr)); + } + return backward_block; +} + ParamGradInfoMap AppendBackward( ProgramDescBind& program_desc, const VarDescBind& target, const std::unordered_set& no_grad_vars) { diff --git a/paddle/framework/var_type.h b/paddle/framework/var_type.h index d060196bb2c478b776851288cb71a1880d60660d..0f19870bec3e69d07278507cc556a86bbd25d12d 100644 --- a/paddle/framework/var_type.h +++ b/paddle/framework/var_type.h @@ -27,10 +27,32 @@ inline VarDesc::VarType ToVarType(std::type_index type) { return VarDesc_VarType_LOD_RANK_TABLE; } else if (type.hash_code() == typeid(LoDTensorArray).hash_code()) { return VarDesc_VarType_LOD_TENSOR_ARRAY; + } else if (type.hash_code() == typeid(SelectedRows).hash_code()) { + return VarDesc_VarType_SELECTED_ROWS; } else { PADDLE_THROW("ToVarType:Unsupported type %s", type.name()); } } +template +inline void VisitVarType(const Variable& var, Visitor visitor) { + switch (ToVarType(var.Type())) { + case VarDesc_VarType_LOD_TENSOR: + visitor(var.Get()); + return; + case VarDesc_VarType_LOD_RANK_TABLE: + visitor(var.Get()); + return; + case VarDesc_VarType_LOD_TENSOR_ARRAY: + 
visitor(var.Get()); + return; + case VarDesc_VarType_SELECTED_ROWS: + visitor(var.Get()); + return; + default: + PADDLE_THROW("Not supported visit type, %d", ToVarType(var.Type())); + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/gserver/layers/MKLDNNAddtoLayer.cpp b/paddle/gserver/layers/MKLDNNAddtoLayer.cpp index 6ffe4fbec643e50d27924a989875454d307f5b9b..0f2b67fd758ec1513f42c4cb1a36f2f3915f4740 100644 --- a/paddle/gserver/layers/MKLDNNAddtoLayer.cpp +++ b/paddle/gserver/layers/MKLDNNAddtoLayer.cpp @@ -54,7 +54,6 @@ void MKLDNNAddtoLayer::reshape( ow = iw; reshapeOutput(oh, ow); resizeOutput(bs, oc * oh * ow); - printSizeInfo(); } void MKLDNNAddtoLayer::resetFwd(std::vector& pipeline, diff --git a/paddle/gserver/layers/MKLDNNBatchNormLayer.cpp b/paddle/gserver/layers/MKLDNNBatchNormLayer.cpp index ed3887cbf653878623764a310c9f364f4d8be27f..071bdf54d5dc9538d5ced580a73b9c0fbcea41fb 100644 --- a/paddle/gserver/layers/MKLDNNBatchNormLayer.cpp +++ b/paddle/gserver/layers/MKLDNNBatchNormLayer.cpp @@ -125,7 +125,6 @@ void MKLDNNBatchNormLayer::reshape( << "Input channel can not be changed"; reshapeOutput(oh, ow); resizeOutput(bs, oc * oh * ow); - printSizeInfo(); } void MKLDNNBatchNormLayer::resetFwd(std::vector& pipeline, diff --git a/paddle/gserver/layers/MKLDNNConvLayer.cpp b/paddle/gserver/layers/MKLDNNConvLayer.cpp index b8120eda1e2dadab943869a05546351a369af6fd..8aa54e0a9efa7adb766cbb6009f6a29410c6ae7d 100644 --- a/paddle/gserver/layers/MKLDNNConvLayer.cpp +++ b/paddle/gserver/layers/MKLDNNConvLayer.cpp @@ -102,8 +102,6 @@ void MKLDNNConvLayer::reshape( reshapeOutput(oh, ow); resizeOutput(bs, oc * oh * ow); - - printSizeInfo(); } void MKLDNNConvLayer::resetFwd(std::vector& pipeline, diff --git a/paddle/gserver/layers/MKLDNNConvLayer.h b/paddle/gserver/layers/MKLDNNConvLayer.h index 1fed0e1c6565b763a3ee73a0853f560ddfbd44c6..9c69136684e5f9005860b476ec6ed1bbc9ceff6c 100644 --- a/paddle/gserver/layers/MKLDNNConvLayer.h +++ b/paddle/gserver/layers/MKLDNNConvLayer.h @@ -92,7 +92,7 @@ public: void printSizeInfo() override { MKLDNNLayer::printSizeInfo(); VLOG(MKLDNN_SIZES) << getName() << ": fh: " << fh_ << ", fw: " << fw_ - << ": ph: " << ph_ << ", pw: " << pw_ << ", sh: " << sh_ + << ", ph: " << ph_ << ", pw: " << pw_ << ", sh: " << sh_ << ", sw: " << sw_ << ", dh: " << dh_ << ", dw: " << dw_; } diff --git a/paddle/gserver/layers/MKLDNNFcLayer.cpp b/paddle/gserver/layers/MKLDNNFcLayer.cpp index 3429c53d2396e051d62fe0ae405934758e89f9c2..350ec65fffbc73c3a6e4245f763f4c6aa868f574 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.cpp +++ b/paddle/gserver/layers/MKLDNNFcLayer.cpp @@ -84,8 +84,6 @@ void MKLDNNFcLayer::reshape( reshapeOutput(oh, ow); resizeOutput(bs, oc); - - printSizeInfo(); } void MKLDNNFcLayer::resetFwd(std::vector& pipeline, diff --git a/paddle/gserver/layers/MKLDNNPoolLayer.cpp b/paddle/gserver/layers/MKLDNNPoolLayer.cpp index 6e89260f49979d4edb4da138507a73dc2bf120de..a18c455beab96ef25b5545281bae4d48cec98d9e 100644 --- a/paddle/gserver/layers/MKLDNNPoolLayer.cpp +++ b/paddle/gserver/layers/MKLDNNPoolLayer.cpp @@ -71,8 +71,6 @@ void MKLDNNPoolLayer::reshape( reshapeOutput(oh, ow); resizeOutput(bs, oc * oh * ow); - - printSizeInfo(); } void MKLDNNPoolLayer::resetFwd(std::vector& pipeline, diff --git a/paddle/gserver/layers/ROIPoolLayer.cpp b/paddle/gserver/layers/ROIPoolLayer.cpp index 99cfddb0cf3337745a716a8c329713c18b99eda3..35d4b12d3d357800fe72899069b5377c252fac5f 100644 --- a/paddle/gserver/layers/ROIPoolLayer.cpp +++ 
b/paddle/gserver/layers/ROIPoolLayer.cpp @@ -98,7 +98,7 @@ void ROIPoolLayer::forward(PassType passType) { size_t roiStartH = round(bottomROIs[2] * spatialScale_); size_t roiEndW = round(bottomROIs[3] * spatialScale_); size_t roiEndH = round(bottomROIs[4] * spatialScale_); - CHECK_GE(roiBatchIdx, 0); + CHECK_GE(roiBatchIdx, 0UL); CHECK_LT(roiBatchIdx, batchSize); size_t roiHeight = std::max(roiEndH - roiStartH + 1, 1UL); size_t roiWidth = std::max(roiEndW - roiStartW + 1, 1UL); diff --git a/paddle/gserver/tests/test_MKLDNN.cpp b/paddle/gserver/tests/test_MKLDNN.cpp index a0e039c2a33b586e21775ad06c1278a10804d654..a859e34c8996d81f14bf1edcb6e23d5a4f687e6b 100644 --- a/paddle/gserver/tests/test_MKLDNN.cpp +++ b/paddle/gserver/tests/test_MKLDNN.cpp @@ -297,7 +297,7 @@ static void getAddtoConfig(TestConfig& cfg, } void testAddtoLayer(const testImageDesc& pm, const size_t nInputs) { - CHECK_GE(nInputs, 1); + CHECK_GE(nInputs, 1UL); TestConfig dnnConfig; getAddtoConfig(dnnConfig, pm, nInputs); dnnConfig.layerConfig.set_type("mkldnn_addto"); diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index 29ce44c23308cb5ae1c1df5c9be1412c28abe96f..709f7de2e43093114d096cbfca5b5d49293a6d3e 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -214,6 +214,7 @@ set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library") cc_test(gather_test SRCS gather_test.cc DEPS tensor) cc_test(net_op_test SRCS net_op_test.cc DEPS net_op) cc_test(scatter_test SRCS scatter_test.cc DEPS tensor) +cc_test(beam_search_decode_op_test SRCS beam_search_decode_op_test.cc DEPS lod_tensor) cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor paddle_memory) cc_test(dynamic_recurrent_op_test SRCS dynamic_recurrent_op_test.cc rnn/recurrent_op_utils.cc diff --git a/paddle/operators/assign_op.cc b/paddle/operators/assign_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..609e915b932e2bc4d5abee1e5f868cc07a7619d3 --- /dev/null +++ b/paddle/operators/assign_op.cc @@ -0,0 +1,138 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#include "paddle/framework/data_type.h" +#include "paddle/framework/op_registry.h" +#include "paddle/framework/var_type.h" + +namespace paddle { +namespace operators { +class AssignFunctor { + public: + AssignFunctor(framework::Variable *out, + const platform::DeviceContext &dev_ctx) + : out_(out), dev_ctx_(dev_ctx) {} + + void operator()(const framework::LoDTensor &lod_tensor) const { + auto &out_tensor = *out_->GetMutable(); + copy_tensor(lod_tensor, &out_tensor); + } + + void operator()(const framework::LoDTensorArray &array) const { + auto &out_array = *out_->GetMutable(); + out_array.resize(array.size()); + for (size_t i = 0; i < array.size(); ++i) { + copy_tensor(array[i], &out_array[i]); + } + } + + void operator()(const framework::SelectedRows &rows) const { + framework::SelectedRows &out_rows = + *out_->GetMutable(); + out_rows.set_rows(rows.rows()); + out_rows.set_height(rows.height()); + auto &t = rows.value(); + out_rows.mutable_value()->CopyFrom(t, t.place(), dev_ctx_); + } + + template + void operator()(const T &v) const { + PADDLE_THROW("Not support type for assign op %s", typeid(T).name()); + } + + private: + void copy_tensor(const framework::LoDTensor &lod_tensor, + framework::LoDTensor *out) const { + auto &out_tensor = *out; + out_tensor.CopyFrom(lod_tensor, lod_tensor.place(), dev_ctx_); + out_tensor.set_lod(lod_tensor.lod()); + } + + framework::Variable *out_; + const platform::DeviceContext &dev_ctx_; +}; + +class AssignOp : public framework::OperatorBase { + public: + AssignOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + void Run(const framework::Scope &scope, + const platform::DeviceContext &dev_ctx) const override { + auto *x = scope.FindVar(Input("X")); + if (x == nullptr) { + return; + } + auto *out = scope.FindVar(Output("Out")); + PADDLE_ENFORCE( + out != nullptr, + "The Output(Out) should not be null if the Input(X) is set."); + framework::VisitVarType(*x, AssignFunctor(out, dev_ctx)); + } +}; + +class AssignOpProtoMaker : public framework::OpProtoAndCheckerMaker { + public: + AssignOpProtoMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", + "(LoDTensor, SelectedRows or LoDTensorArray) The input variable " + "could be LoDTensor, SelectedRows or LoDTensorArray.") + .AsDispensable(); + AddOutput("Out", + "(LoDTensor, SelectedRows or LoDTensorArray) The type of output " + "is the same as input X."); + AddComment(R"DOC(Assign Operator + +Out = X, when type in [LoDTensor/SelectedRows/LoDTensorArray] +raise error if the type is not listed above. 
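+For example, assigning a LoDTensor copies both its data and its LoD to Out;
+assigning a SelectedRows copies its rows, height and value tensor.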
+)DOC"); + } +}; + +class AssignInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *context) const override { + if (context->HasInput("X")) { + auto type = context->GetInputsVarType("X")[0]; + if (type == framework::VarDesc_VarType_SELECTED_ROWS || + type == framework::VarDesc_VarType_LOD_TENSOR) { + context->SetOutputDim("Out", context->GetInputDim("X")); + } + } + } +}; + +class AssignGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto *op = new framework::OpDescBind(); + op->SetType("assign"); + op->SetInput("X", OutputGrad("Out")); + op->SetOutput("Out", InputGrad("X")); + return std::unique_ptr(op); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(assign, ops::AssignOp, ops::AssignGradMaker, + ops::AssignInferShape, ops::AssignOpProtoMaker); diff --git a/paddle/operators/beam_search_decode_op.cc b/paddle/operators/beam_search_decode_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..3904a97d58166cfeeb2be7d2144700dbd8bc5721 --- /dev/null +++ b/paddle/operators/beam_search_decode_op.cc @@ -0,0 +1,111 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/beam_search_decode_op.h" + +namespace paddle { +namespace operators { + +class BeamSearchDecodeOp : public framework::OperatorBase { + public: + BeamSearchDecodeOp(const std::string& type, + const framework::VariableNameMap& inputs, + const framework::VariableNameMap& outputs, + const framework::AttributeMap& attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + void Run(const framework::Scope& scope, + const platform::DeviceContext& dev_ctx) const override { + framework::ExecutionContext ctx(*this, scope, dev_ctx); + + const LoDTensorArray* ids = ctx.Input("Ids"); + const LoDTensorArray* scores = ctx.Input("Scores"); + const size_t step_num = ids->size(); + PADDLE_ENFORCE_GT(step_num, 0UL, + "beam search steps should be larger than 0"); + const size_t source_num = ids->at(0).lod().at(0).size() - 1; + PADDLE_ENFORCE_GT(source_num, 0UL, "source num should be larger than 0"); + + for (size_t i = 0; i < step_num; ++i) { + PADDLE_ENFORCE_EQ(ids->at(i).lod().size(), 2UL, + "Level of LodTensor should be 2"); + } + + // prepare output + LoDTensor* sentenceIds = ctx.Output("SentenceIds"); + LoDTensor* sentenceScores = ctx.Output("SentenceScores"); + + BeamSearchDecoder beam_search_decoder; + beam_search_decoder.PackAllSteps(*ids, *scores, sentenceIds, + sentenceScores); + } +}; + +class BeamSearchDecodeOpProtoMaker : public framework::OpProtoAndCheckerMaker { + public: + BeamSearchDecodeOpProtoMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Ids", + "(LodTensorArray)" + "score of the candidate words in each step"); + AddInput("Scores", + "(LodTensorArray)" + "score of the candidate words in each step"); + AddOutput("SentenceIds", + "(LodTensor)" + "All possible result sentences of word ids"); + AddOutput("SentenceScores", + "(LodTensor)" + "All possible result sentences of word scores"); + AddComment(R"DOC( +Pack the result of Beam search op into SentenceIds and SentenceScores. 
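+Ids holds the selected word ids of each step and Scores holds their
+corresponding scores; both are LoDTensorArrays with one element per step, and
+the Ids of every step must carry a two-level LoD (source and sentence level).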
+)DOC"); + } +}; + +class BeamSearchDecodeInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext* context) const override { + PADDLE_ENFORCE(context->HasInput("Ids"), + "BeamSearchDecodeOp must has input Ids"); + PADDLE_ENFORCE(context->HasInput("Scores"), + "BeamSearchDecodeOp must has input Scores"); + PADDLE_ENFORCE(context->HasOutput("SentenceIds"), + "BeamSearchDecodeOp must has output SentenceIds"); + PADDLE_ENFORCE(context->HasOutput("SentenceScores"), + "BeamSearchDecodeOp must has output SentenceScores"); + } +}; + +class BeamSearchDecodeInferVarType : public framework::VarTypeInference { + public: + void operator()(const framework::OpDescBind& op_desc, + framework::BlockDescBind* block) const override { + for (auto& o : op_desc.Output("SentenceIds")) { + block->Var(o)->SetType(framework::VarDesc::LOD_TENSOR); + } + for (auto& o : op_desc.Output("SentenceScores")) { + block->Var(o)->SetType(framework::VarDesc::LOD_TENSOR); + } + } +}; + +} // namespace operators +} // namespace paddle + +REGISTER_OPERATOR(beam_search_decode, paddle::operators::BeamSearchDecodeOp, + paddle::operators::BeamSearchDecodeOpProtoMaker, + paddle::operators::BeamSearchDecodeInferShape, + paddle::operators::BeamSearchDecodeInferVarType, + paddle::framework::EmptyGradOpMaker); diff --git a/paddle/operators/beam_search_decode_op.h b/paddle/operators/beam_search_decode_op.h new file mode 100644 index 0000000000000000000000000000000000000000..0f007ec22f9a66572971516a711317f348e1ec5a --- /dev/null +++ b/paddle/operators/beam_search_decode_op.h @@ -0,0 +1,280 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/framework/lod_tensor_array.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using LoDTensor = framework::LoDTensor; +using LoDTensorArray = framework::LoDTensorArray; + +// all the lod have 2 levels. +// The First is source level, the second is sentence level. +// source level describe how many candidate words for this source. 
+// sentence level describe these candidates belong to which prefix +const size_t kSourceLevel = 0; +const size_t kSentenceLevel = 1; + +template +struct BeamNode { + BeamNode(int64_t word_id, T score) : word_id_(word_id), score_(score) {} + + ~BeamNode() { + if (parent_) { + parent_->DropKid(this); + if (parent_->kids_.size() == 0UL) { + delete parent_; + } + } + VLOG(3) << "Delete BeamNode root with word_id:" << this->word_id_; + } + + void AppendTo(BeamNode* parent) { + parent_ = parent; + parent->kids_.insert(this); + } + + void DropKid(BeamNode* kid) { kids_.erase(kid); } + + BeamNode* parent_ = nullptr; + std::unordered_set kids_; + int64_t word_id_; + T score_; +}; + +template +using BeamNodeVector = std::vector>>; + +template +struct Sentence { + std::vector word_ids; + std::vector scores; +}; + +template +using SentenceVector = std::vector>; + +template +struct BeamSearchDecoder { + /** + * make a BeamNode and all it's related prefix BeanNode into a Sentence. + */ + Sentence MakeSentence(const BeamNode* node) const; + + /** + * Param: + * cur_ids: LoDTensor of One step for word ID + * cur_scores: LoDTensor of One Step for word score + * prefixes_list: prefixes for each source sentence. + * sentence_vector_list: result sentence_vector for each source sentence. + * Return: + * a new prefixes list for each source of current step + */ + std::vector> PackTwoSteps( + const LoDTensor& cur_ids, const LoDTensor& cur_scores, + std::vector>& prefixes_list, + std::vector>* sentence_vector_list) const; + + /** + * convert the result sentence_vector for each source sentence into two + * LodTensor. + * One is all candidate sentences with word id, one is all candidate sentences + * with word score. + * Param: + * sentence_vector_list: sentence_vector for each source sentence. + * id_tensor: result LoDTensor for sentences of id. + * score_tensor: result LoDTensor for sentences of score. + */ + void ConvertSentenceVectorToLodTensor( + std::vector> sentence_vector_list, LoDTensor* id_tensor, + LoDTensor* score_tensor) const; + + /** + * Pack all steps of id/score LodTensor into sentence LoDTensor + * it's main logic is: + * ```python + * prefix + * result_sentence + * result_lod_tensor + * + * for (step in steps): + * prefix = PackTwoSteps(prefix, step, &result_sentence) + * ConvertSentenceVectorToLodTensor(result_sentence, &result_lod_tensor) + * ``` + */ + void PackAllSteps(const LoDTensorArray& step_ids, + const LoDTensorArray& step_scores, LoDTensor* id_tensor, + LoDTensor* score_tensor) const; +}; + +template +Sentence BeamSearchDecoder::MakeSentence(const BeamNode* node) const { + Sentence sentence; + while (node != nullptr) { + sentence.word_ids.emplace_back(node->word_id_); + sentence.scores.emplace_back(node->score_); + node = node->parent_; + } + + std::reverse(std::begin(sentence.word_ids), std::end(sentence.word_ids)); + std::reverse(std::begin(sentence.scores), std::end(sentence.scores)); + + return sentence; +} + +template +std::vector> BeamSearchDecoder::PackTwoSteps( + const LoDTensor& cur_ids, const LoDTensor& cur_scores, + std::vector>& prefixes_list, + std::vector>* sentence_vector_list) const { + std::vector> result; + + for (size_t src_idx = 0; src_idx < cur_ids.lod()[kSourceLevel].size() - 1; + ++src_idx) { + size_t src_start = cur_ids.lod().at(kSourceLevel)[src_idx]; + size_t src_end = cur_ids.lod().at(kSourceLevel)[src_idx + 1]; + + BeamNodeVector beam_nodes; + + // if prefixes size is 0, it means this is the first step. 
In this step, + // all candidate id is the start of candidate sentences. + if (prefixes_list.empty()) { + PADDLE_ENFORCE_EQ(cur_ids.lod().at(kSourceLevel).back(), + cur_ids.lod().at(kSentenceLevel).back(), + "in the first step"); + for (size_t id_idx = src_start; id_idx < src_end; ++id_idx) { + beam_nodes.push_back(std::unique_ptr>(new BeamNode( + cur_ids.data()[id_idx], cur_scores.data()[id_idx]))); + } + } else { + BeamNodeVector& prefixes = prefixes_list[src_idx]; + SentenceVector& sentence_vector = (*sentence_vector_list)[src_idx]; + + PADDLE_ENFORCE_EQ(src_end - src_start, prefixes.size(), + "prefix and candidate set number should be the same"); + + auto candidate_offset = cur_ids.lod()[kSentenceLevel]; + for (size_t prefix_idx = 0; prefix_idx < prefixes.size(); ++prefix_idx) { + std::unique_ptr>& prefix = prefixes[prefix_idx]; + size_t candidate_start = candidate_offset[src_start + prefix_idx]; + size_t candidate_end = candidate_offset[src_start + prefix_idx + 1]; + if (candidate_start == candidate_end) { + VLOG(3) << "this sentence has no more candidate, " + "add to result sentence and rm it from beam tree"; + sentence_vector.push_back(MakeSentence(prefix.get())); + prefix.reset(); + } else { + for (size_t candidate_idx = candidate_start; + candidate_idx < candidate_end; ++candidate_idx) { + auto* candidate = + new BeamNode(cur_ids.data()[candidate_idx], + cur_scores.data()[candidate_idx]); + candidate->AppendTo(prefix.get()); + beam_nodes.push_back(std::unique_ptr>(candidate)); + } + prefix.release(); + } + } + } + result.push_back(std::move(beam_nodes)); + } + return result; +} + +template +void BeamSearchDecoder::ConvertSentenceVectorToLodTensor( + std::vector> sentence_vector_list, LoDTensor* id_tensor, + LoDTensor* score_tensor) const { + size_t src_num = sentence_vector_list.size(); + + PADDLE_ENFORCE_NE(src_num, 0, "src_num should not be 0"); + + std::vector source_level_lod = {0}; + std::vector sentence_level_lod = {0}; + std::vector id_data; + std::vector score_data; + + for (size_t src_idx = 0; src_idx < src_num; ++src_idx) { + for (Sentence& sentence : sentence_vector_list[src_idx]) { + id_data.insert(id_data.end(), sentence.word_ids.begin(), + sentence.word_ids.end()); + score_data.insert(score_data.end(), sentence.scores.begin(), + sentence.scores.end()); + sentence_level_lod.push_back(sentence_level_lod.back() + + sentence.word_ids.size()); + } + source_level_lod.push_back(source_level_lod.back() + + sentence_vector_list[src_idx].size()); + } + + auto cpu_place = new paddle::platform::CPUPlace(); + paddle::platform::CPUDeviceContext cpu_ctx(*cpu_place); + + framework::LoD lod; + lod.push_back(source_level_lod); + lod.push_back(sentence_level_lod); + + id_tensor->set_lod(lod); + id_tensor->Resize({static_cast(id_data.size())}); + id_tensor->mutable_data(paddle::platform::CPUPlace()); + id_tensor->CopyFromVector(id_data, cpu_ctx); + + score_tensor->set_lod(lod); + score_tensor->Resize({static_cast(score_data.size())}); + score_tensor->mutable_data(paddle::platform::CPUPlace()); + score_tensor->CopyFromVector(score_data, cpu_ctx); +} + +template +void BeamSearchDecoder::PackAllSteps(const LoDTensorArray& step_ids, + const LoDTensorArray& step_scores, + LoDTensor* id_tensor, + LoDTensor* score_tensor) const { + PADDLE_ENFORCE(!step_ids.empty(), "step num should be larger than 0"); + PADDLE_ENFORCE_EQ(step_ids.size(), step_scores.size(), + "step_ids and step_scores should be the same"); + const size_t step_num = step_ids.size(); + const size_t src_num = 
step_ids.at(0).lod().at(kSourceLevel).size() - 1; + + PADDLE_ENFORCE_GT(src_num, 0UL, "source num should be larger than 0"); + + // previous prefixes for each step, + // the init length is 0, means this is the first step. + std::vector> beamnode_vector_list(0); + std::vector> sentence_vector_list(src_num); + + // pack all steps for one batch first, then another batch + for (size_t step_id = 0; step_id < step_num; ++step_id) { + beamnode_vector_list = + PackTwoSteps(step_ids.at(step_id), step_scores.at(step_id), + beamnode_vector_list, &sentence_vector_list); + } + // append last beam_node to result + for (size_t src_idx = 0; src_idx < src_num; ++src_idx) { + for (auto& beam_node : beamnode_vector_list.at(src_idx)) { + sentence_vector_list[src_idx].push_back(MakeSentence(beam_node.get())); + beam_node.reset(); + } + } + + ConvertSentenceVectorToLodTensor(sentence_vector_list, id_tensor, + score_tensor); +} + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/beam_search_decode_op_test.cc b/paddle/operators/beam_search_decode_op_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..5ac23991f3c7768abaf94f3a4b750697de0ef114 --- /dev/null +++ b/paddle/operators/beam_search_decode_op_test.cc @@ -0,0 +1,221 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/beam_search_decode_op.h" +#include "gtest/gtest.h" + +using CPUPlace = paddle::platform::CPUPlace; +using LoD = paddle::framework::LoD; +using LoDTensor = paddle::framework::LoDTensor; +using LoDTensorArray = paddle::framework::LoDTensorArray; + +template +using BeamNode = paddle::operators::BeamNode; +template +using BeamSearchDecoder = paddle::operators::BeamSearchDecoder; +template +using Sentence = paddle::operators::Sentence; +template +using BeamNodeVector = paddle::operators::BeamNodeVector; +template +using SentenceVector = paddle::operators::SentenceVector; + +namespace paddle { +namespace test { + +void GenerateExample(const std::vector& level_0, + const std::vector& level_1, + const std::vector& data, LoDTensorArray* ids, + LoDTensorArray* scores) { + PADDLE_ENFORCE_EQ(level_0.back(), level_1.size() - 1, + "source level is used to describe candidate set"); + PADDLE_ENFORCE_EQ(level_1.back(), data.size(), + "the lowest level is used to describe data" + ", so it's last element should be data length"); + + CPUPlace place; + + LoD lod; + lod.push_back(level_0); + lod.push_back(level_1); + + // Ids + LoDTensor tensor_id; + tensor_id.set_lod(lod); + tensor_id.Resize({static_cast(data.size())}); + // malloc memory + int64_t* id_ptr = tensor_id.mutable_data(place); + for (size_t i = 0; i < data.size(); ++i) { + id_ptr[i] = static_cast(data.at(i)); + } + + // Scores + LoDTensor tensor_score; + tensor_score.set_lod(lod); + tensor_score.Resize({static_cast(data.size())}); + // malloc memory + float* score_ptr = tensor_score.mutable_data(place); + for (size_t i = 0; i < data.size(); ++i) { + score_ptr[i] = static_cast(data.at(i)); + } + + ids->push_back(tensor_id); + scores->push_back(tensor_score); +} + +} // namespace test +} // namespace paddle + +TEST(BeamSearchDecodeOp, DeleteBeamNode) { + auto* root = new BeamNode(0, 0); + auto* b1 = new BeamNode(1, 1); + auto* b2 = new BeamNode(2, 2); + auto* b3 = new BeamNode(3, 3); + + b1->AppendTo(root); + b2->AppendTo(root); + b3->AppendTo(b1); + + delete b3; + delete b2; +} + +TEST(BeamSearchDecodeOp, MakeSentence) { + auto* root = new BeamNode(0, 0); + auto* b1 = new BeamNode(1, 1); + auto* end = new BeamNode(2, 2); + b1->AppendTo(root); + end->AppendTo(b1); + + BeamSearchDecoder helper; + Sentence sentence = helper.MakeSentence(end); + delete end; + + std::vector expect_ids = {0, 1, 2}; + ASSERT_EQ(sentence.word_ids, expect_ids); + + std::vector expect_scores = {0, 1, 2}; + ASSERT_EQ(sentence.scores, expect_scores); +} + +TEST(BeamSearchDecodeOp, PackTwoStepsFistStep) { + CPUPlace place; + + LoDTensorArray ids; + LoDTensorArray scores; + + paddle::test::GenerateExample( + std::vector{0, 2, 6}, std::vector{0, 1, 2, 3, 4, 5, 6}, + std::vector{1, 2, 3, 4, 5, 6}, &ids, &scores); + + std::vector> beamnode_vector_list; + std::vector> sentence_vector_list( + 2, SentenceVector()); + + BeamSearchDecoder helper; + beamnode_vector_list = helper.PackTwoSteps( + ids[0], scores[0], beamnode_vector_list, &sentence_vector_list); + ASSERT_EQ(beamnode_vector_list.size(), 2UL); + ASSERT_EQ(beamnode_vector_list[0].size(), 2UL); + ASSERT_EQ(beamnode_vector_list[1].size(), 4UL); +} + +TEST(BeamSearchDecodeOp, PackTwoSteps) { + CPUPlace place; + + // first source has three prefix + BeamNodeVector source0_prefixes; + source0_prefixes.push_back( + std::unique_ptr>(new BeamNode(1, 1))); + source0_prefixes.push_back( + std::unique_ptr>(new BeamNode(0, 0))); + source0_prefixes.push_back( + std::unique_ptr>(new BeamNode(3, 3))); + + // 
second source has two prefix + BeamNodeVector source1_prefixes; + source1_prefixes.push_back( + std::unique_ptr>(new BeamNode(4, 4))); + source1_prefixes.push_back( + std::unique_ptr>(new BeamNode(5, 5))); + + std::vector> beamnode_vector_list; + std::vector> sentence_vector_list( + 2, SentenceVector()); + + beamnode_vector_list.push_back(std::move(source0_prefixes)); + beamnode_vector_list.push_back(std::move(source1_prefixes)); + + // generate data for one step + LoDTensorArray ids; + LoDTensorArray scores; + + paddle::test::GenerateExample(std::vector{0, 3, 5}, + std::vector{0, 1, 1, 3, 4, 5}, + std::vector{0, 1, 2, 3, 4}, &ids, &scores); + + BeamSearchDecoder helper1; + beamnode_vector_list = helper1.PackTwoSteps( + ids[0], scores[0], beamnode_vector_list, &sentence_vector_list); + + ASSERT_EQ(sentence_vector_list[0].size(), 1UL); + ASSERT_EQ(sentence_vector_list[1].size(), 0UL); + ASSERT_EQ(beamnode_vector_list[0].size(), 3UL); + ASSERT_EQ(beamnode_vector_list[1].size(), 2UL); +} + +TEST(BeamSearchDecodeOp, PackAllSteps) { + CPUPlace place; + + // we will constuct a sample data with 3 steps and 2 source sentences + LoDTensorArray ids; + LoDTensorArray scores; + + paddle::test::GenerateExample( + std::vector{0, 3, 6}, std::vector{0, 1, 2, 3, 4, 5, 6}, + std::vector{1, 2, 3, 4, 5, 6}, &ids, &scores); + paddle::test::GenerateExample( + std::vector{0, 3, 6}, std::vector{0, 1, 1, 3, 5, 5, 6}, + std::vector{0, 1, 2, 3, 4, 5}, &ids, &scores); + paddle::test::GenerateExample(std::vector{0, 3, 6}, + std::vector{0, 0, 1, 2, 3, 4, 5}, + std::vector{0, 1, 2, 3, 4}, &ids, &scores); + + ASSERT_EQ(ids.size(), 3UL); + ASSERT_EQ(scores.size(), 3UL); + + BeamSearchDecoder helper; + + LoDTensor id_tensor; + LoDTensor score_tensor; + helper.PackAllSteps(ids, scores, &id_tensor, &score_tensor); + + LoD lod = id_tensor.lod(); + std::vector expect_source_lod = {0, 4, 8}; + EXPECT_EQ(lod[0], expect_source_lod); + std::vector expect_sentence_lod = {0, 1, 3, 6, 9, 10, 13, 16, 19}; + EXPECT_EQ(lod[1], expect_sentence_lod); + // 2| 1, 0| 3, 1, 0| 3, 2, 1| 5| 4, 3, 2| 4, 4, 3| 6, 5, 4 + std::vector expect_data = {2, 1, 0, 3, 1, 0, 3, 2, 1, 5, + 4, 3, 2, 4, 4, 3, 6, 5, 4}; + ASSERT_EQ(id_tensor.dims()[0], static_cast(expect_data.size())); + for (size_t i = 0; i < expect_data.size(); ++i) { + ASSERT_EQ(id_tensor.data()[i], + static_cast(expect_data[i])); + } + for (int64_t i = 0; i < id_tensor.dims()[0]; ++i) { + ASSERT_EQ(score_tensor.data()[i], + static_cast(id_tensor.data()[i])); + } +} diff --git a/paddle/operators/bilinear_tensor_product_op.cc b/paddle/operators/bilinear_tensor_product_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..c65ba7eb262f3aabe2c00837b79806c0b40b60fd --- /dev/null +++ b/paddle/operators/bilinear_tensor_product_op.cc @@ -0,0 +1,159 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/bilinear_tensor_product_op.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; + +class BilinearTensorProductOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Weight"), + "Input(Weight) should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null."); + auto x_dims = ctx->GetInputDim("X"); + auto y_dims = ctx->GetInputDim("Y"); + auto weight_dims = ctx->GetInputDim("Weight"); + + PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, "The input(X) must be a 2D Tensor."); + PADDLE_ENFORCE_EQ(y_dims.size(), 2UL, "The input(Y) must be a 2D Tensor."); + PADDLE_ENFORCE_EQ(weight_dims.size(), 3UL, + "The input(Weight) must be a 3D tensor."); + PADDLE_ENFORCE_EQ(x_dims[0], y_dims[0], + "The first dimension(batch_size) of input(X) must be " + "equal to the first dimension of the input(Y)."); + PADDLE_ENFORCE_EQ(x_dims[1], weight_dims[1], + "The second dimension of input(X) must be equal to " + "the second dimension of the input(Weight)."); + PADDLE_ENFORCE_EQ(y_dims[1], weight_dims[2], + "The second dimension of input(Y) must be equal to " + "the third dimension of the input(Weight)."); + + if (ctx->HasInput("Bias")) { + auto bias_dims = ctx->GetInputDim("Bias"); + PADDLE_ENFORCE(bias_dims.size() == 2UL && bias_dims[0] == 1UL, + "The Input(Bias) must be a 2-D tensor with " + "the 2nd dimension fixed to 1 (a row vector)."); + PADDLE_ENFORCE_EQ(bias_dims[1], weight_dims[0], + "The second dimension of input(Bias) must be equal " + "to the first dimension of the input(Weight)."); + } + + ctx->SetOutputDim("Out", {x_dims[0], weight_dims[0]}); + ctx->ShareLoD("X", /*->*/ "Out"); + } +}; + +class BilinearTensorProductOpMaker : public framework::OpProtoAndCheckerMaker { + public: + BilinearTensorProductOpMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "The first input of bilinear_tensor_product operator."); + AddInput("Y", "The second input of bilinear_tensor_product operator."); + AddInput("Weight", + "The learnable parameters of bilinear_tensor_product operator."); + AddInput("Bias", "The learnable bias of bilinear_tensor_product operator.") + .AsDispensable(); + AddOutput("Out", "The output of bilinear_tensor_product operator."); + AddComment(R"DOC( +Bilinear Tensor Product operator. +Given input X and Y, a 3D tensor weight, and bias. Each column of the +output is computed by one slice i = 1, . . . 
, k of the tensor: + + M = (X W_i) \cdot Y + Out_i = \sum_i {M_i} + Bias_i + +)DOC"); + } +}; + +class BilinearTensorProductOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Weight"), + "Input(Weight) should not be null."); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) should not be null."); + auto x_dims = ctx->GetInputDim("X"); + auto y_dims = ctx->GetInputDim("Y"); + auto weight_dims = ctx->GetInputDim("Weight"); + auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); + + PADDLE_ENFORCE_EQ(out_dims.size(), 2UL, + "The input(Out@GRAD) must be a 2D Tensor."); + PADDLE_ENFORCE_EQ( + x_dims[0], out_dims[0], + "The first dimension(batch_size) of input(Out@GRAD) must be " + "equal to the first dimension of the Input(X)."); + PADDLE_ENFORCE_EQ( + weight_dims[0], out_dims[1], + "The second dimension of input(Out@GRAD) must be equal to " + "the third dimension of the Input(Weight)."); + + if (ctx->HasInput("Bias")) { + auto bias_dims = ctx->GetInputDim("Bias"); + PADDLE_ENFORCE_EQ( + bias_dims[1], out_dims[1], + "The second dimension of input(Out@GRAD) must be equal to " + "the second dimension of the Input(Bias)."); + auto bias_grad_name = framework::GradVarName("Bias"); + if (ctx->HasOutput(bias_grad_name)) + ctx->SetOutputDim(bias_grad_name, bias_dims); + } + + auto x_grad_name = framework::GradVarName("X"); + auto y_grad_name = framework::GradVarName("Y"); + auto weight_grad_name = framework::GradVarName("Weight"); + + if (ctx->HasOutput(x_grad_name)) { + ctx->SetOutputDim(x_grad_name, x_dims); + } + if (ctx->HasOutput(y_grad_name)) { + ctx->SetOutputDim(y_grad_name, y_dims); + } + if (ctx->HasOutput(weight_grad_name)) { + ctx->SetOutputDim(weight_grad_name, weight_dims); + } + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(bilinear_tensor_product, ops::BilinearTensorProductOp, + ops::BilinearTensorProductOpMaker, bilinear_tensor_product_grad, + ops::BilinearTensorProductOpGrad); +REGISTER_OP_CPU_KERNEL( + bilinear_tensor_product, + ops::BilinearTensorProductKernel, + ops::BilinearTensorProductKernel); +REGISTER_OP_CPU_KERNEL( + bilinear_tensor_product_grad, + ops::BilinearTensorProductGradKernel, + ops::BilinearTensorProductGradKernel); diff --git a/paddle/operators/bilinear_tensor_product_op.cu b/paddle/operators/bilinear_tensor_product_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..858d2668d01379afe8082cd1eda32a2a5d09bd18 --- /dev/null +++ b/paddle/operators/bilinear_tensor_product_op.cu @@ -0,0 +1,26 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#define EIGEN_USE_GPU +#include "paddle/operators/bilinear_tensor_product_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL( + bilinear_tensor_product, + ops::BilinearTensorProductKernel, + ops::BilinearTensorProductKernel); +REGISTER_OP_GPU_KERNEL( + bilinear_tensor_product_grad, + ops::BilinearTensorProductGradKernel, + ops::BilinearTensorProductGradKernel); diff --git a/paddle/operators/bilinear_tensor_product_op.h b/paddle/operators/bilinear_tensor_product_op.h new file mode 100644 index 0000000000000000000000000000000000000000..ffa4f43a327418498c1f110504127e7d2878409d --- /dev/null +++ b/paddle/operators/bilinear_tensor_product_op.h @@ -0,0 +1,184 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" +#include "paddle/operators/math/math_function.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; + +template +using EigenMatrix = framework::EigenMatrix; + +template +class BilinearTensorProductKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* x = ctx.Input("X"); + auto* y = ctx.Input("Y"); + auto* weight = ctx.Input("Weight"); + auto* bias = ctx.Input("Bias"); + auto* out = ctx.Output("Out"); + out->mutable_data(ctx.GetPlace()); + + auto y_mat = EigenMatrix::From(*y); + auto output_mat = EigenMatrix::From(*out); + + auto batch_size = x->dims()[0]; + auto weight_dims = weight->dims(); + int out_dim = weight_dims[0]; + auto x_dim = weight_dims[1]; + auto y_dim = weight_dims[2]; + auto place = ctx.GetEigenDevice(); + + // Create the intermediate variable to caculate the result of + // Input(X) multiplied by Input(Weight_i), the formula is: + // left_mul = X Weight_i. 
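+    // left_mul has shape [batch_size, y_dim] and is reused for every output
+    // slice i: the gemm below overwrites it on each iteration (beta is 0).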
+ Tensor left_mul; + left_mul.mutable_data(framework::make_ddim({batch_size, y_dim}), + ctx.GetPlace()); + auto left_mul_mat = EigenMatrix::From(left_mul); + + for (int i = 0; i < out_dim; ++i) { + auto output_col_vec = output_mat.chip(i, 1); + Tensor weight_mat = + weight->Slice(i, i + 1).Resize(framework::make_ddim({x_dim, y_dim})); + math::gemm(ctx.device_context(), CblasNoTrans, CblasNoTrans, + batch_size, y_dim, x_dim, 1, x->data(), + weight_mat.data(), 0, left_mul.data()); + output_col_vec.device(place) = + (left_mul_mat * y_mat).sum(Eigen::DSizes(1)); + } + if (bias) { + auto bias_vec = EigenMatrix::From(*bias); + Eigen::DSizes bcast(batch_size, 1); + output_mat.device(place) = bias_vec.broadcast(bcast) + output_mat; + } + } +}; + +template +class BilinearTensorProductGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + const Tensor* x = ctx.Input("X"); + const Tensor* y = ctx.Input("Y"); + const Tensor* weight = ctx.Input("Weight"); + Tensor* d_x = ctx.Output(framework::GradVarName("X")); + Tensor* d_y = ctx.Output(framework::GradVarName("Y")); + Tensor* d_weight = ctx.Output(framework::GradVarName("Weight")); + Tensor* d_bias = ctx.Output(framework::GradVarName("Bias")); + const Tensor* d_out = ctx.Input(framework::GradVarName("Out")); + + auto batch_size = x->dims()[0]; + auto weight_dims = weight->dims(); + int out_dim = weight_dims[0]; + auto x_dim = weight_dims[1]; + auto y_dim = weight_dims[2]; + + auto x_mat = EigenMatrix::From(*x); + auto y_mat = EigenMatrix::From(*y); + auto d_out_mat = EigenMatrix::From(*d_out); + auto place = ctx.GetEigenDevice(); + + // Create the intermediate variable to caculate the Output(Y@Grad). + Tensor x_scale; + x_scale.mutable_data(framework::make_ddim({batch_size, x_dim}), + ctx.GetPlace()); + auto x_scale_mat = EigenMatrix::From(x_scale); + + // Create the intermediate variable to caculate the Output(X@Grad). + Tensor y_scale; + y_scale.mutable_data(framework::make_ddim({batch_size, y_dim}), + ctx.GetPlace()); + auto y_scale_mat = EigenMatrix::From(y_scale); + + math::SetConstant set_zero; + + // Set Output(X@Grad) be zero. + if (d_x) { + d_x->mutable_data(ctx.GetPlace()); + set_zero(ctx.device_context(), d_x, static_cast(0)); + } + + // Set Output(Y@Grad) be zero. + if (d_y) { + d_y->mutable_data(ctx.GetPlace()); + set_zero(ctx.device_context(), d_y, static_cast(0)); + } + + // Caculate the Output(X@Grad) and Output(Y@Grad). + if (d_x || d_y) { + Eigen::DSizes bcast_for_x(1, y_dim); + Eigen::DSizes bcast_for_y(1, x_dim); + for (int i = 0; i < out_dim; ++i) { + Tensor weight_i = weight->Slice(i, i + 1).Resize( + framework::make_ddim({x_dim, y_dim})); + auto output_vec = d_out_mat.chip(i, 1); + if (d_x) { + y_scale_mat.device(place) = + output_vec.reshape(Eigen::DSizes(batch_size, 1)) + .broadcast(bcast_for_x) * + y_mat; + math::gemm(ctx.device_context(), CblasNoTrans, CblasTrans, + batch_size, x_dim, y_dim, 1, y_scale.data(), + weight_i.data(), 1, d_x->data()); + } + if (d_y) { + x_scale_mat.device(place) = + output_vec.reshape(Eigen::DSizes(batch_size, 1)) + .broadcast(bcast_for_y) * + x_mat; + math::gemm(ctx.device_context(), CblasNoTrans, CblasNoTrans, + batch_size, y_dim, x_dim, 1, x_scale.data(), + weight_i.data(), 1, d_y->data()); + } + } + } + + // Caculate the gradient of Input(Weight). 
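+    // For each output slice i this computes d_Weight_i = (d_out_i * X)^T * Y,
+    // where d_out_i (one column of d_out) is broadcast over the columns of X
+    // before the elementwise product.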
+ if (d_weight) { + d_weight->mutable_data(ctx.GetPlace()); + Eigen::DSizes bcast_for_weight(1, x_dim); + for (int i = 0; i < out_dim; ++i) { + Tensor d_weight_i = d_weight->Slice(i, i + 1).Resize( + framework::make_ddim({x_dim, y_dim})); + auto output_vec = d_out_mat.chip(i, 1); + x_scale_mat.device(place) = + output_vec.reshape(Eigen::DSizes(batch_size, 1)) + .broadcast(bcast_for_weight) * + x_mat; + math::gemm(ctx.device_context(), CblasTrans, CblasNoTrans, + x_dim, y_dim, batch_size, 1, x_scale.data(), + y->data(), 0, d_weight_i.data()); + } + } + + // Caculate the gradient of Input(Bias). + if (d_bias) { + d_bias->mutable_data(ctx.GetPlace()); + auto d_bias_mat = EigenMatrix::From(*d_bias); + d_bias_mat.device(place) = d_out_mat.sum(Eigen::DSizes(0)); + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/conditional_block_op.cc b/paddle/operators/conditional_block_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..d5b124682d755ffb39f32c9f001a3cf113a01a2c --- /dev/null +++ b/paddle/operators/conditional_block_op.cc @@ -0,0 +1,197 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ +#include +#include "paddle/framework/executor.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +class ConditionalOp : public framework::OperatorBase { + public: + ConditionalOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + + protected: + std::vector InputTensors( + const framework::Scope &scope) const { + std::vector retv; + auto xs = Inputs("X"); + retv.resize(xs.size(), nullptr); + std::transform( + xs.begin(), xs.end(), retv.begin(), + [&scope](const std::string &var_name) -> const framework::LoDTensor * { + auto *var = scope.FindVar(var_name); + PADDLE_ENFORCE(var != nullptr, "Cannot find variable %s", var_name); + return &var->Get(); + }); + return retv; + } +}; + +class ConditionalBlockOp : public ConditionalOp { + public: + ConditionalBlockOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : ConditionalOp(type, inputs, outputs, attrs) {} + void Run(const framework::Scope &scope, + const platform::DeviceContext &dev_ctx) const override { + auto xs = InputTensors(scope); + bool need_run = std::all_of( + xs.begin(), xs.end(), + [](const framework::LoDTensor *t) { return t->numel() != 0; }); + + if (need_run) { + auto *scope_var = scope.FindVar(Output("Scope")); + PADDLE_ENFORCE(scope_var != nullptr, "Must set scope"); + auto *scopes = scope_var->GetMutable>(); + scopes->resize(1); + scopes->front() = &scope.NewScope(); + auto &cur_scope = *scopes->front(); + + auto *block = Attr("block"); + framework::Executor exec(dev_ctx); + exec.Run(*block->Program(), &cur_scope, block->ID(), 
false); + } + } +}; + +class ConditionalBlockOpProtoMaker : public framework::OpProtoAndCheckerMaker { + public: + ConditionalBlockOpProtoMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", + "The conditional variable of this operator. If X is empty, the " + "whole sub-block will not be executed.") + .AsDuplicable(); + AddInput("Params", "The input variables of the sub-block.").AsDuplicable(); + AddOutput("Out", "The output variables of the sub-block.").AsDuplicable(); + AddOutput("Scope", + "(std::vector) The step scope of conditional block. To " + "unify the conditional block, rnn and while op, the type of " + "scope is std::vector"); + AddAttr( + "block", "The step block of conditional block operator"); + AddComment(R"DOC(Conditional block operator + +Run the sub-block if X is not empty. Params is the other inputs and Out is the +outputs of the sub-block. +)DOC"); + } +}; + +class ConditionalBlockGradOp : public ConditionalOp { + public: + ConditionalBlockGradOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : ConditionalOp(type, inputs, outputs, attrs) {} + void Run(const framework::Scope &scope, + const platform::DeviceContext &dev_ctx) const override { + auto xs = this->InputTensors(scope); + bool need_run = std::all_of( + xs.begin(), xs.end(), + [](const framework::LoDTensor *t) { return t->numel() != 0; }); + + if (need_run) { + auto *scope_var = scope.FindVar(Input("Scope")); + PADDLE_ENFORCE(scope_var != nullptr, "Must set scope"); + auto &scopes = scope_var->Get>(); + framework::Scope &cur_scope = *scopes[0]; + + auto *block = Attr("block"); + framework::Executor exec(dev_ctx); + exec.Run(*block->Program(), &cur_scope, block->ID(), false); + + AssignLocalGradientToGlobal(dev_ctx, cur_scope, Inputs("Params"), + Outputs(framework::GradVarName("Params"))); + + AssignLocalGradientToGlobal(dev_ctx, cur_scope, Inputs("X"), + Outputs(framework::GradVarName("X"))); + } + } + + private: + void AssignLocalGradientToGlobal( + const platform::DeviceContext &dev_ctx, const framework::Scope &cur_scope, + const std::vector &p_names, + const std::vector &pg_names) const { + for (size_t i = 0; i < p_names.size(); ++i) { + auto out_grad_name = pg_names[i]; + auto in_grad_name = framework::GradVarName(p_names[i]); + auto *in_var = cur_scope.FindVar(in_grad_name); + if (in_var == nullptr) { + continue; + } + auto new_in_grad_name = cur_scope.Rename(in_grad_name); + auto assign = + framework::OpRegistry::CreateOp("assign", {{"X", {new_in_grad_name}}}, + {{"Out", {out_grad_name}}}, {}); + assign->Run(cur_scope, dev_ctx); + cur_scope.Rename(new_in_grad_name, in_grad_name); + } + } +}; + +class ConditionalBlockGradInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *context) const override { + PADDLE_ENFORCE(context->HasInputs("X")); + if (context->HasInputs("Params")) { + PADDLE_ENFORCE(context->HasOutputs(framework::GradVarName("Params"))); + context->SetOutputsDim(framework::GradVarName("Params"), + context->GetInputsDim("Params")); + } + PADDLE_ENFORCE(context->HasOutputs(framework::GradVarName("X"))); + context->SetOutputsDim(framework::GradVarName("X"), + context->GetInputsDim("X")); + } +}; + +class ConditionalBlockGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + 
protected: + std::unique_ptr Apply() const override { + auto grad_op = new framework::OpDescBind(); + grad_op->SetType("conditional_block_grad"); + grad_op->SetInput("X", Input("X")); + grad_op->SetInput("Params", Input("Params")); + grad_op->SetInput("Out", Output("Out")); + grad_op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + grad_op->SetInput("Scope", Output("Scope")); + grad_op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + grad_op->SetOutput(framework::GradVarName("Params"), InputGrad("Params")); + grad_op->SetBlockAttr("block", *this->grad_block_[0]); + return std::unique_ptr(grad_op); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(conditional_block, ops::ConditionalBlockOp, + ops::ConditionalBlockOpProtoMaker, + ops::ConditionalBlockGradMaker); +REGISTER_OPERATOR(conditional_block_grad, ops::ConditionalBlockGradOp, + ops::ConditionalBlockGradInferShape); diff --git a/paddle/operators/l1_norm_op.h b/paddle/operators/l1_norm_op.h index de459818ad83d389e5a95e0303ae40b32743c4e7..3c60dc3dc7415f34ed9d238e6f41b197ec404883 100644 --- a/paddle/operators/l1_norm_op.h +++ b/paddle/operators/l1_norm_op.h @@ -29,7 +29,7 @@ class L1NormKernel : public framework::OpKernel { Out->mutable_data(context.GetPlace()); auto x = framework::EigenVector::Flatten(*X); - auto out = framework::EigenVector::Flatten(*Out); + auto out = framework::EigenScalar::From(*Out); auto place = context.GetEigenDevice(); out.device(place) = x.abs().sum(); diff --git a/paddle/operators/lod_reset_op.cc b/paddle/operators/lod_reset_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..32831cb1e2cf188a507773ef1e00b22de98d82ab --- /dev/null +++ b/paddle/operators/lod_reset_op.cc @@ -0,0 +1,120 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/lod_reset_op.h" + +namespace paddle { +namespace operators { + +class LoDResetOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + // input check + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of LoDResetOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of LoDResetOp should not be null."); + // If target LoD is not set form Input(), then it must be set from Attr(). 
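+    // The contents of Input(TargetLoD) are only known at run time, so they are
+    // checked in the kernel; at compile time only the attribute is validated.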
+ if (!ctx->HasInput("TargetLoD")) { + auto level0 = ctx->Attrs().Get<std::vector<int>>("target_lod"); + PADDLE_ENFORCE(level0.size() > 1, + "Target LoD is not found; it should be set to a valid one " + "through Input() or Attr()."); + } + ctx->SetOutputDim("Out", ctx->GetInputDim("X")); + } + + protected: + framework::OpKernelType GetKernelType( + const framework::ExecutionContext &ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()), + ctx.device_context()); + } +}; + +class LoDResetOpMaker : public framework::OpProtoAndCheckerMaker { + public: + LoDResetOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "(LoDTensor) The input tensor of lod_reset operator."); + AddInput("TargetLoD", + "(Tensor, optional) The target level 0 LoD from Input().") + .AsDispensable(); + AddOutput("Out", "(LoDTensor) The output tensor of lod_reset operator."); + AddAttr<std::vector<int>>("target_lod", + "The target level 0 LoD from Attr().") + .SetDefault(std::vector<int>{}); + AddComment(R"DOC(LoDReset operator + +Reset LoD of Input(X) into a new one specified by Input(TargetLoD) or +Attr(target_lod), or set LoD for Input(X) if it doesn't have one. +Currently the lod_reset operator only supports the reset of level 0 LoD. +At least one of Input(TargetLoD) and Attr(target_lod) must be set, +and if both of them are set, Input(TargetLoD) will be chosen as the +target LoD. + +An example: +Given a float LoDTensor X with shape (6, 1), whose transpose represents + + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], + +with LoD = [[0, 2, 5, 6]] and the three (transposed) sequences look like + + [1.0, 2.0], [3.0, 4.0, 5.0], [6.0]. + +If target LoD = [0, 4, 6], the lod_reset operator will reset the LoD and +the sequences that the LoDTensor Output(Out) contains become: + + [1.0, 2.0, 3.0, 4.0], [5.0, 6.0]. + +)DOC"); + } +}; + +class LoDResetGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) shouldn't be null."); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) shouldn't be null."); + ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); + } + + protected: + framework::OpKernelType GetKernelType( + const framework::ExecutionContext &ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()), + ctx.device_context()); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(lod_reset, ops::LoDResetOp, ops::LoDResetOpMaker, lod_reset_grad, + ops::LoDResetGradOp); +REGISTER_OP_CPU_KERNEL(lod_reset, + ops::LoDResetKernel<paddle::platform::CPUPlace, float>, + ops::LoDResetKernel<paddle::platform::CPUPlace, double>); +REGISTER_OP_CPU_KERNEL( + lod_reset_grad, ops::LoDResetGradKernel<paddle::platform::CPUPlace, float>, + ops::LoDResetGradKernel<paddle::platform::CPUPlace, double>); diff --git a/paddle/operators/lod_reset_op.cu b/paddle/operators/lod_reset_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..5244a17c3aad01909e3b8cf5f4d5abf8a44edc7f --- /dev/null +++ b/paddle/operators/lod_reset_op.cu @@ -0,0 +1,24 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
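The worked example in the DOC above can be checked mechanically: a level-0 LoD is a cumulative offset vector over the rows of X, so resetting it only re-validates the offsets and regroups the same rows. A minimal pure-Python sketch of these semantics (an illustration mirroring the checks the kernel enforces, not code from this patch):

    def lod_reset(rows, target_lod):
        # target_lod must start at 0, end at len(rows), and be strictly ascending
        assert target_lod[0] == 0 and target_lod[-1] == len(rows)
        assert all(a < b for a, b in zip(target_lod, target_lod[1:]))
        # adjacent offsets delimit the new sequences; the data itself is untouched
        return [rows[a:b] for a, b in zip(target_lod, target_lod[1:])]

    seqs = lod_reset([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [0, 4, 6])
    assert seqs == [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0]]  # matches the DOC example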
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/lod_reset_op.h" + +namespace ops = paddle::operators; + +REGISTER_OP_GPU_KERNEL(lod_reset, + ops::LoDResetKernel<paddle::platform::GPUPlace, float>, + ops::LoDResetKernel<paddle::platform::GPUPlace, double>); +REGISTER_OP_GPU_KERNEL( + lod_reset_grad, ops::LoDResetGradKernel<paddle::platform::GPUPlace, float>, + ops::LoDResetGradKernel<paddle::platform::GPUPlace, double>); diff --git a/paddle/operators/lod_reset_op.h b/paddle/operators/lod_reset_op.h new file mode 100644 index 0000000000000000000000000000000000000000..2bb916ccee80c83a02ea429fe95f5fafc86ccfa6 --- /dev/null +++ b/paddle/operators/lod_reset_op.h @@ -0,0 +1,78 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +template <typename Place, typename T> +class LoDResetKernel : public framework::OpKernel<T> { + public: + void Compute(const framework::ExecutionContext& ctx) const { + auto* out = ctx.Output<framework::LoDTensor>("Out"); + auto* in = ctx.Input<framework::LoDTensor>("X"); + auto* lod_t = ctx.Input<framework::Tensor>("TargetLoD"); + + std::vector<int> level0; + if (lod_t) { + auto* lod = lod_t->data<int>(); + if (platform::is_gpu_place(ctx.GetPlace())) { + framework::Tensor lod_cpu; + lod_cpu.CopyFrom(*lod_t, platform::CPUPlace(), ctx.device_context()); + lod = lod_cpu.data<int>(); + } + level0 = std::vector<int>(lod, lod + lod_t->numel()); + } else { + level0 = ctx.Attr<std::vector<int>>("target_lod"); + } + + PADDLE_ENFORCE(level0.size() > 1UL, + "The size of target LoD should be greater than 1."); + PADDLE_ENFORCE(level0[0] == 0, + "Target LoD should be a vector starting from 0."); + PADDLE_ENFORCE(level0.back() == in->dims()[0], + "Target LoD should be a vector ending with the " + "first dimension of Input(X)."); + for (size_t i = 0; i < level0.size() - 1; ++i) { + PADDLE_ENFORCE(level0[i + 1] > level0[i], + "Target LoD should be an ascending vector."); + } + + out->ShareDataWith(*in); + // cast level0 to size_t + std::vector<size_t> ulevel0(level0.size(), 0); + std::transform(level0.begin(), level0.end(), ulevel0.begin(), + [](int a) { return static_cast<size_t>(a); }); + framework::LoD target_lod; + target_lod.push_back(ulevel0); + out->set_lod(target_lod); + } +}; + +template <typename Place, typename T> +class LoDResetGradKernel : public framework::OpKernel<T> { + public: + void Compute(const framework::ExecutionContext& ctx) const { + auto* d_out = ctx.Input<framework::LoDTensor>(framework::GradVarName("Out")); + auto* d_x = ctx.Output<framework::LoDTensor>(framework::GradVarName("X")); + + d_x->ShareDataWith(*d_out); + } +}; +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/matmul_op.h b/paddle/operators/matmul_op.h index 5ce30740c90b5cd0bd4f8ab183cf985ed5d827c1..4f565946d596b5e5fbf90f16c0c13c780c36886c 100644 --- 
a/paddle/operators/matmul_op.h +++ b/paddle/operators/matmul_op.h @@ -74,11 +74,10 @@ Tensor CombineBatchAndN(const framework::ExecutionContext& context, Tensor output; auto in_dims = input.dims(); if (in_dims.size() == 3) { - output.Resize(in_dims); + output.Resize({in_dims[1], in_dims[0], in_dims[2]}); output.mutable_data<T>(context.GetPlace()); EigenTranspose<Place, T, 3>(context, input, output, {1, 0, 2}); - std::vector<int64_t> out_dims = {in_dims[1], in_dims[0] * in_dims[2]}; - output.Resize(make_ddim(out_dims)); + output.Resize({in_dims[1], in_dims[0] * in_dims[2]}); } else { output.ShareDataWith(input); } diff --git a/paddle/operators/merge_lod_tensor_op.cc b/paddle/operators/merge_lod_tensor_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..80460c476921b63ec5228a9780880c7db3c85217 --- /dev/null +++ b/paddle/operators/merge_lod_tensor_op.cc @@ -0,0 +1,182 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/framework/op_registry.h" +#include "paddle/memory/memcpy.h" + +namespace paddle { +namespace operators { + +using LoD = framework::LoD; + +class MergeLoDTensorOp : public framework::OperatorBase { + public: + MergeLoDTensorOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + void Run(const framework::Scope &scope, + const platform::DeviceContext &dev_ctx) const override { + auto &x = scope.FindVar(Input("X"))->Get<framework::LoDTensor>(); + auto &mask = scope.FindVar(Input("Mask"))->Get<framework::LoDTensor>(); + auto &in_true = scope.FindVar(Input("InTrue"))->Get<framework::LoDTensor>(); + auto &in_false = + scope.FindVar(Input("InFalse"))->Get<framework::LoDTensor>(); + auto *out = + scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensor>(); + auto level = static_cast<size_t>(Attr<int>("level")); + + auto &mask_dim = mask.dims(); + + std::unique_ptr<framework::LoDTensor> cpu_mask{new framework::LoDTensor()}; + if (platform::is_cpu_place(mask.place())) { + cpu_mask->ShareDataWith(mask); + } else if (platform::is_gpu_place(mask.place())) { +#ifdef PADDLE_WITH_CUDA + cpu_mask->CopyFrom(mask, platform::CPUPlace(), dev_ctx); +#else + PADDLE_THROW("Not supported GPU, Please compile WITH_GPU option"); +#endif + } + auto *mask_data = cpu_mask->data<bool>(); + + int rank = in_true.dims().size(); + platform::Place place = in_true.place(); + std::type_index data_type = in_true.type(); + framework::DDim in_true_dims = + framework::slice_ddim(in_true.dims(), 1, rank); + + int64_t batch_size = in_true.dims()[0] + in_false.dims()[0]; + + auto in_true_dim_vec = framework::vectorize(in_true_dims); + in_true_dim_vec.insert(in_true_dim_vec.begin(), batch_size); + + framework::DDim out_dims = framework::make_ddim(in_true_dim_vec); + out->Resize(out_dims); + out->mutable_data(place, data_type); + + auto *out_lod = out->mutable_lod(); + out_lod->clear(); + size_t out_offset = 0; + + // Build LoDTensor `out` + + size_t in_true_idx = 0; + size_t in_false_idx = 0; + for (size_t i = 0; i < static_cast<size_t>(mask_dim[0]); i++) { + const 
framework::LoDTensor *input = nullptr; + size_t *in_idx = nullptr; + if (static_cast<size_t>(mask_data[i]) == 0) { + input = &in_false; + in_idx = &in_false_idx; + } else { + input = &in_true; + in_idx = &in_true_idx; + } + auto lod_and_offset = framework::GetSubLoDAndAbsoluteOffset( + input->lod(), *in_idx, (*in_idx) + 1, 0); + auto &lod_length = lod_and_offset.first; + + framework::AppendLoD(out_lod, lod_length); + + size_t start_offset = lod_and_offset.second.first; + size_t end_offset = lod_and_offset.second.second; + + PADDLE_ENFORCE_GE(end_offset, start_offset); + size_t len = end_offset - start_offset; + if (len == 0) { + continue; + } + out->Slice(out_offset, out_offset + len) + .CopyFrom(input->Slice(start_offset, end_offset), place, dev_ctx); + out_offset += len; + (*in_idx) += 1; + } + + for (size_t i = 0; i < level; i++) { + out_lod->insert(out_lod->begin(), x.lod()[i]); + } + } +}; + +class MergeLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker { + public: + MergeLoDTensorOpProtoMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", + "The input LoDTensor, which contains complete lod information to " + "construct the output"); + AddInput("Mask", "A bool column vector which masks the input"); + AddInput("InTrue", "The True branch to be merged"); + AddInput("InFalse", "The False branch to be merged"); + AddOutput("Out", "The merged output LoDTensor"); + AddAttr<int>("level", "(int) the specific lod level to merge.") + .SetDefault(0) + .EqualGreaterThan(0); + AddComment( + R"DOC( + Merge True and False branches of LoDTensor into a single Output, + with a mask at a certain lod level. X is used to obtain complete + lod information. Please refer to SplitLoDTensorOp.)DOC"); + } +}; + +class MergeLoDTensorInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *context) const override { + PADDLE_ENFORCE(context->HasInput("X"), + "MergeLoDTensorOp must have input X."); + PADDLE_ENFORCE(context->HasInput("Mask"), + "MergeLoDTensorOp must have input Mask."); + PADDLE_ENFORCE(context->HasInput("InTrue"), + "MergeLoDTensorOp must have input InTrue."); + PADDLE_ENFORCE(context->HasInput("InFalse"), + "MergeLoDTensorOp must have input InFalse."); + PADDLE_ENFORCE(context->HasOutput("Out"), + "MergeLoDTensorOp must have output Out."); + + auto mask_dim = context->GetInputDim("Mask"); + PADDLE_ENFORCE_EQ(mask_dim.size(), 2); + PADDLE_ENFORCE_EQ(mask_dim[1], 1); + + context->SetOutputDim("Out", context->GetInputDim("InTrue")); + } +}; + +class MergeLoDTensorGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr<framework::OpDescBind> Apply() const override { + auto *grad_op = new framework::OpDescBind(); + grad_op->SetType("split_lod_tensor"); + grad_op->SetInput("X", OutputGrad("Out")); + grad_op->SetInput("Mask", Input("Mask")); + grad_op->SetOutput("OutTrue", InputGrad("InTrue")); + grad_op->SetOutput("OutFalse", InputGrad("InFalse")); + grad_op->SetAttrMap(Attrs()); + return std::unique_ptr<framework::OpDescBind>(grad_op); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(merge_lod_tensor, ops::MergeLoDTensorOp, + ops::MergeLoDTensorOpProtoMaker, + ops::MergeLoDTensorInferShape, ops::MergeLoDTensorGradMaker); diff --git a/paddle/operators/sequence_concat_op.cc b/paddle/operators/sequence_concat_op.cc index 
db737bed7a4d2dc5b60cbc6ac172caec95acd35e..d1de0b444712a8c304c33bd194e306dfe3c41f02 100644 --- a/paddle/operators/sequence_concat_op.cc +++ b/paddle/operators/sequence_concat_op.cc @@ -47,7 +47,7 @@ class SequenceConcatOpMaker : public framework::OpProtoAndCheckerMaker { framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", - "(vector<LoDTensor>) Input is a vector of LoDTensor, " + "(LoDTensorArray) Input is a vector of LoDTensor, " "each of which is a variable-length sequence or nested sequence.") .AsDuplicable(); AddOutput("Out", diff --git a/paddle/operators/sequence_pool_op.h b/paddle/operators/sequence_pool_op.h index 2b8a25c2414c20efaffedfc8603697b3a104634f..7f136d8cf0e1eaae7b4de32988b60ae8a5034cc6 100644 --- a/paddle/operators/sequence_pool_op.h +++ b/paddle/operators/sequence_pool_op.h @@ -126,6 +126,7 @@ class SequencePoolGradKernel : public framework::OpKernel<T> { int64_t h = static_cast<int64_t>(lod[i + 1] - lod[i]); auto in_g_e = EigenMatrix<T>::From(in_g_t, {h, w}); auto out_g_e = EigenMatrix<T>::From(out_g_t, {1, w}); + auto out_g_e_v = EigenVector<T>::Flatten(out_g_t); Eigen::DSizes<int, 2> bcast(h, 1); if (pooltype == "AVERAGE") { @@ -136,9 +137,9 @@ class SequencePoolGradKernel : public framework::OpKernel<T> { in_g_e.device(place) = (out_g_e / std::sqrt(static_cast<T>(h))).broadcast(bcast); } else if (pooltype == "LAST") { - in_g_e.chip(h - 1, 0).device(place) = out_g_e; + in_g_e.chip(h - 1, 0).device(place) = out_g_e_v; } else if (pooltype == "FIRST") { - in_g_e.chip(0, 0).device(place) = out_g_e; + in_g_e.chip(0, 0).device(place) = out_g_e_v; } else { PADDLE_THROW("unsupported pooling pooltype"); } diff --git a/paddle/operators/split_lod_tensor_op.cc b/paddle/operators/split_lod_tensor_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..db635f2ba0804143c9a2e04ff006dfbc8744f3fc --- /dev/null +++ b/paddle/operators/split_lod_tensor_op.cc @@ -0,0 +1,186 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
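The sequence_pool change above is purely about gradient assignment: chip() yields a rank-1 row view, so it has to be assigned from the flattened EigenVector out_g_e_v rather than from the 1 x w matrix out_g_e. Numerically, the LAST/FIRST backward just scatters each sequence's output gradient into that sequence's last or first row. A NumPy sketch of that math (a hypothetical helper, not the Paddle kernel):

    import numpy as np

    def seq_pool_grad(out_grad, lod, pooltype="LAST"):
        # out_grad: (num_seqs, w) gradient of the pooled output;
        # lod: level-0 offsets, e.g. [0, 2, 5, 6] for three sequences
        w = out_grad.shape[1]
        in_grad = np.zeros((lod[-1], w), dtype=out_grad.dtype)
        for i, (begin, end) in enumerate(zip(lod, lod[1:])):
            row = end - 1 if pooltype == "LAST" else begin  # "FIRST"
            in_grad[row] = out_grad[i]  # rank-1 row assignment, like chip(...) = out_g_e_v
        return in_grad

    g = seq_pool_grad(np.ones((3, 2)), [0, 2, 5, 6])
    assert g.sum() == 6.0 and g[[1, 4, 5]].sum() == 6.0  # only the last rows receive gradient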
*/ + +#include "paddle/framework/op_registry.h" +#include "paddle/memory/memcpy.h" + +namespace paddle { +namespace operators { + +struct CopyRange { + size_t begin; + size_t end; +}; + +using LoD = framework::LoD; + +class SplitLoDTensorOp : public framework::OperatorBase { + public: + SplitLoDTensorOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + void Run(const framework::Scope &scope, + const platform::DeviceContext &dev_ctx) const override { + auto &x = scope.FindVar(Input("X"))->Get<framework::LoDTensor>(); + auto &mask = scope.FindVar(Input("Mask"))->Get<framework::LoDTensor>(); + auto *out_true = + scope.FindVar(Output("OutTrue"))->GetMutable<framework::LoDTensor>(); + auto *out_false = + scope.FindVar(Output("OutFalse"))->GetMutable<framework::LoDTensor>(); + auto level = static_cast<size_t>(Attr<int>("level")); + auto &x_lod = x.lod(); + auto &mask_dim = mask.dims(); + + std::unique_ptr<framework::LoDTensor> cpu_mask{new framework::LoDTensor()}; + if (platform::is_cpu_place(mask.place())) { + cpu_mask->ShareDataWith(mask); + } else if (platform::is_gpu_place(mask.place())) { +#ifdef PADDLE_WITH_CUDA + cpu_mask->CopyFrom(mask, platform::CPUPlace(), dev_ctx); +#else + PADDLE_THROW("Not supported GPU, Please compile WITH_GPU option"); +#endif + } + auto *mask_data = cpu_mask->data<bool>(); + + std::vector<std::vector<CopyRange>> copy_ranges(mask_dim[0]); + + // set out_true/out_false lod + for (size_t t = 0; t < 2; t++) { + LoD *lod = nullptr; + if (t == 0) { + lod = out_false->mutable_lod(); + } else { + lod = out_true->mutable_lod(); + } + lod->clear(); + for (size_t i = 0; i < static_cast<size_t>(mask_dim[0]); i++) { + if (static_cast<size_t>(mask_data[i]) == t) { + size_t start_idx = i; + auto lod_and_offset = framework::GetSubLoDAndAbsoluteOffset( + x_lod, start_idx, start_idx + 1, level); + + auto &lod_length = lod_and_offset.first; + framework::AppendLoD(lod, lod_length); + + size_t start_offset = lod_and_offset.second.first; + size_t end_offset = lod_and_offset.second.second; + copy_ranges[t].emplace_back(CopyRange{start_offset, end_offset}); + } + } + } + + for (size_t t = 0; t < 2; ++t) { + framework::LoDTensor *out; + if (t == 0) { + out = out_false; + } else { + out = out_true; + } + auto &ranges = copy_ranges[t]; + size_t height = std::accumulate( + ranges.begin(), ranges.end(), 0UL, + [](size_t a, const CopyRange &b) { return a + b.end - b.begin; }); + auto x_dim = x.dims(); + x_dim[0] = static_cast<int64_t>(height); + out->Resize(x_dim); + out->mutable_data(x.place(), x.type()); + size_t offset = 0; + for (auto &each_range : ranges) { + size_t len = each_range.end - each_range.begin; + if (len == 0) { + continue; + } + // out[offset: offset+len] = x[each_range.begin: each_range.end] + out->Slice(static_cast<int>(offset), static_cast<int>(offset + len)) + .CopyFrom(x.Slice(static_cast<int>(each_range.begin), + static_cast<int>(each_range.end)), + x.place(), dev_ctx); + offset += len; + } + } + } +}; + +class SplitLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker { + public: + SplitLoDTensorOpProtoMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "The input LoDTensor"); + AddInput("Mask", "A bool column vector which masks the input"); + AddOutput("OutTrue", "True branch of input LoDTensor"); + AddOutput("OutFalse", "False branch of input LoDTensor"); + AddAttr<int>("level", "(int) the specific lod level to split.") + .SetDefault(0) + .EqualGreaterThan(0); + AddComment( + R"DOC( + Split a LoDTensor with a Mask at 
a certain level. The input LoDTensor + has 3 sequences at a certain lod level. The Mask is a bool column vector, + such as [0, 1, 0] at the same level. The first and third sequences will + be sent to the False output LoDTensor, whereas the second sequence will + be sent to the True output LoDTensor. Please refer to MergeLoDTensorOp.)DOC"); + } +}; + +class SplitLoDTensorInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *context) const override { + PADDLE_ENFORCE(context->HasInput("X"), + "SplitLoDTensorOp must have input X."); + PADDLE_ENFORCE(context->HasInput("Mask"), + "SplitLoDTensorOp must have input Mask."); + PADDLE_ENFORCE(context->HasOutput("OutTrue"), + "SplitLoDTensorOp must have output OutTrue."); + PADDLE_ENFORCE(context->HasOutput("OutFalse"), + "SplitLoDTensorOp must have output OutFalse."); + + auto mask_dim = context->GetInputDim("Mask"); + PADDLE_ENFORCE_EQ(mask_dim.size(), 2); + PADDLE_ENFORCE_EQ(mask_dim[1], 1); + + context->SetOutputDim("OutTrue", context->GetInputDim("X")); + context->SetOutputDim("OutFalse", context->GetInputDim("X")); + } +}; + +class SplitLoDTensorArrayGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr<framework::OpDescBind> Apply() const override { + auto *grad_op = new framework::OpDescBind(); + grad_op->SetType("merge_lod_tensor"); + grad_op->SetInput("InTrue", OutputGrad("OutTrue")); + grad_op->SetInput("InFalse", OutputGrad("OutFalse")); + grad_op->SetInput("Mask", Input("Mask")); + grad_op->SetInput("X", Input("X")); + grad_op->SetOutput("Out", InputGrad("X")); + grad_op->SetAttrMap(Attrs()); + return std::unique_ptr<framework::OpDescBind>(grad_op); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(split_lod_tensor, ops::SplitLoDTensorOp, + ops::SplitLoDTensorOpProtoMaker, + ops::SplitLoDTensorInferShape, + ops::SplitLoDTensorArrayGradMaker); diff --git a/paddle/operators/squared_l2_norm_op.h b/paddle/operators/squared_l2_norm_op.h index c8d37ac40c1533a77acf78e6a42e1659555127e1..48d7b1c2d56882f04330dbf27b0a92e37cb8874c 100644 --- a/paddle/operators/squared_l2_norm_op.h +++ b/paddle/operators/squared_l2_norm_op.h @@ -29,7 +29,7 @@ class SquaredL2NormKernel : public framework::OpKernel<T> { Out->mutable_data<T>(context.GetPlace()); auto x = framework::EigenVector<T>::Flatten(*X); - auto out = framework::EigenVector<T>::Flatten(*Out); + auto out = framework::EigenScalar<T>::From(*Out); auto place = context.GetEigenDevice<Place>(); out.device(place) = x.square().sum(); diff --git a/paddle/platform/call_once.h b/paddle/platform/call_once.h index 248baf6613c5caebdabcecff5d57290585238d78..d9f49527dcf150fcb35d3af512088f75dec0b5c6 100644 --- a/paddle/platform/call_once.h +++ b/paddle/platform/call_once.h @@ -27,20 +27,22 @@ namespace platform { This wrap is a hack to avoid this bug. */ -template +template inline void call_once(std::once_flag& flag, Callable&& f, Args&&... args) { bool good = false; std::exception ex; - std::call_once(flag, [&]() { - try { - f(args...); - good = true; - } catch (const std::exception& e) { - ex = e; - } catch (...) { - ex = std::runtime_error("excption caught in call_once"); - } - }); + std::call_once(flag, + [&](Args&&... args) { + try { + f(args...); + good = true; + } catch (const std::exception& e) { + ex = e; + } catch (...) 
{ + ex = std::runtime_error("exception caught in call_once"); + } + }, + args...); if (!good) { throw std::exception(ex); } diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 0f906e0e470b7f95bb2103ae55330fc1831aa78f..3d8d3f1d2fd3977f945928c723db5fcafffeae85 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -42,6 +42,9 @@ limitations under the License. */ #include "paddle/platform/gpu_info.h" #endif +// disable auto conversion to list in Python +PYBIND11_MAKE_OPAQUE(paddle::framework::LoDTensorArray); + namespace paddle { namespace pybind { static size_t UniqueIntegerGenerator(const std::string &prefix) { diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 32578ad7799c0a276972ccef7770c2eae8438069..c8632295a25b160513a8e154bf1a5453c0005031 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -37,10 +37,10 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in ${CMAKE_CURRENT_BINARY_DIR}/setup.py) -add_custom_command(OUTPUT ${PADDLE_SOURCE_DIR}/python/paddle/v2/framework/core.so - COMMAND cmake -E copy $<TARGET_FILE:paddle_pybind> ${PADDLE_SOURCE_DIR}/python/paddle/v2/framework/core.so +add_custom_command(OUTPUT ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/core.so + COMMAND cmake -E copy $<TARGET_FILE:paddle_pybind> ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/core.so DEPENDS paddle_pybind) -add_custom_target(copy_paddle_pybind ALL DEPENDS ${PADDLE_SOURCE_DIR}/python/paddle/v2/framework/core.so) +add_custom_target(copy_paddle_pybind ALL DEPENDS ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/core.so) add_custom_command(OUTPUT ${PADDLE_PYTHON_BUILD_DIR}/.timestamp @@ -66,7 +66,7 @@ if (WITH_TESTING) add_subdirectory(paddle/v2/tests) add_subdirectory(paddle/v2/reader/tests) add_subdirectory(paddle/v2/plot/tests) - add_subdirectory(paddle/v2/framework/tests) + add_subdirectory(paddle/v2/fluid/tests) endif() endif() install(DIRECTORY ${PADDLE_PYTHON_PACKAGE_DIR} diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 336ee338fae45430931cde0b19fbf50d3001ee0c..3b436117edafcdb9517bb23bf03b45c242c0f52f 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -3658,9 +3658,10 @@ def gru_step_layer(input, :param gate_act: Activation type of this layer's two gates. SigmoidActivation is the default activation. :type gate_act: BaseActivation - :param bias_attr: The bias attribute. If the parameter is set to False or an object - whose type is not ParameterAttribute, no bias is defined. If the - parameter is set to True, the bias is initialized to zero. + :param bias_attr: The parameter attribute for bias. If this parameter is set to + False or an object whose type is not ParameterAttribute, no bias + is defined. If this parameter is set to True, + the bias is initialized to zero. :type bias_attr: ParameterAttribute | None | bool | Any :param param_attr: The parameter attribute. See ParameterAttribute for details. :type param_attr: ParameterAttribute @@ -3728,9 +3729,10 @@ def gru_step_naive_layer(input, :param gate_act: Activation type of this layer's two gates. SigmoidActivation is the default activation. :type gate_act: BaseActivation - :param bias_attr: The bias attribute. If the parameter is set to False or an object - whose type is not ParameterAttribute, no bias is defined. If the - parameter is set to True, the bias is initialized to zero. + :param bias_attr: The parameter attribute for bias. 
If this parameter is set to + False or an object whose type is not ParameterAttribute, no bias + is defined. If this parameter is set to True, + the bias is initialized to zero. :type bias_attr: ParameterAttribute | None | bool | Any :param param_attr: The parameter attribute. See ParameterAttribute for details. :type param_attr: ParameterAttribute @@ -3863,9 +3865,10 @@ def recurrent_layer(input, :type input: LayerOutput :param act: Activation type. TanhActivation is the default activation. :type act: BaseActivation - :param bias_attr: The bias attribute. If the parameter is set to False or an object - whose type is not ParameterAttribute, no bias is defined. If the - parameter is set to True, the bias is initialized to zero. + :param bias_attr: The parameter attribute for bias. If this parameter is set to + False or an object whose type is not ParameterAttribute, + no bias is defined. If the parameter is set to True, + the bias is initialized to zero. :type bias_attr: ParameterAttribute | None | bool | Any :param param_attr: The parameter attribute. See ParameterAttribute for details. @@ -4885,9 +4888,10 @@ def tensor_layer(a, :param param_attr: The parameter attribute. See ParameterAttribute for details. :type param_attr: ParameterAttribute - :param bias_attr: The bias attribute. If the parameter is set to False or an object - whose type is not ParameterAttribute, no bias is defined. If the - parameter is set to True, the bias is initialized to zero. + :param bias_attr: The parameter attribute for bias. If this parameter is set to + False or an object whose type is not ParameterAttribute, + no bias is defined. If this parameter is set to True, + the bias is initialized to zero. :type bias_attr: ParameterAttribute | None | bool | Any :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for details. @@ -4961,9 +4965,10 @@ def selective_fc_layer(input, :param param_attr: The parameter attribute. See ParameterAttribute for details. :type param_attr: ParameterAttribute - :param bias_attr: The bias attribute. If the parameter is set to False or an object - whose type is not ParameterAttribute, no bias is defined. If the - parameter is set to True, the bias is initialized to zero. + :param bias_attr: The parameter attribute for bias. If this parameter is set to + False or an object whose type is not ParameterAttribute, + no bias is defined. If this parameter is set to True, + the bias is initialized to zero. :type bias_attr: ParameterAttribute | None | bool | Any :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for details. @@ -5662,10 +5667,10 @@ def nce_layer(input, to the num_classes. Each member of the list defines the probability of a class given input x. :type neg_distribution: list | tuple | collections.Sequence | None - :param bias_attr: The attribute for bias. If this parameter is set False or - any object whose type is not ParameterAttribute, no bias - is added. If this parameter is set True, the bias is - initialized to zero. + :param bias_attr: The parameter attribute for bias. If this parameter is set to + False or an object whose type is not ParameterAttribute, + no bias is defined. If this parameter is set to True, + the bias is initialized to zero. :type bias_attr: ParameterAttribute | None | bool | Any :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for details. @@ -6578,9 +6583,9 @@ def gated_unit_layer(input, :param gate_param_attr: The parameter attribute of the gate. See ParameterAttribute for details. 
:type gate_param_attr: ParameterAttribute - :param gate_bias_attr: The bias attribute of the gate. If the parameter is set to False or + :param gate_bias_attr: The bias attribute of the gate. If this parameter is set to False or an object whose type is not ParameterAttribute, no bias is defined. - If the parameter is set to True, the bias is initialized to zero. + If this parameter is set to True, the bias is initialized to zero. :type gate_bias_attr: ParameterAttribute | bool | None | Any :param inproj_attr: Extra layer attributes of the projection. See ExtraLayerAttribute for details. @@ -6588,9 +6593,9 @@ def gated_unit_layer(input, :param inproj_param_attr: The parameter attribute of the projection. See ParameterAttribute for details. :type inproj_param_attr: ParameterAttribute - :param inproj_bias_attr: The bias attribute of the projection. If the parameter is set to False + :param inproj_bias_attr: The bias attribute of the projection. If this parameter is set to False or an object whose type is not ParameterAttribute, no bias is defined. - If the parameter is set to True, the bias is initialized to zero. + If this parameter is set to True, the bias is initialized to zero. :type inproj_bias_attr: ParameterAttribute | bool | None | Any :param layer_attr: Extra layer attribute of the product. See ExtraLayerAttribute for details. diff --git a/python/paddle/trainer_config_helpers/networks.py b/python/paddle/trainer_config_helpers/networks.py index 3821d075cba5d39b5808a39093b8570d9302b667..d323d34c3ff47614342934c2a02492f66d27dc10 100644 --- a/python/paddle/trainer_config_helpers/networks.py +++ b/python/paddle/trainer_config_helpers/networks.py @@ -681,34 +681,42 @@ def lstmemory_unit(input, state_act=TanhActivation()) - :param input: input layer. + :param input: Input layer. :type input: LayerOutput - :param out_memory: output of previous time step + :param out_memory: The output of the previous time step. :type out_memory: LayerOutput | None - :param name: lstmemory unit name. + :param name: The lstmemory unit name. :type name: basestring - :param size: lstmemory unit size. + :param size: The lstmemory unit size. :type size: int - :param param_attr: parameter attribute, None means default attribute. + :param param_attr: The parameter attribute for the weights in + input to hidden projection. + None means default attribute. :type param_attr: ParameterAttribute - :param act: last activiation type of lstm. + :param act: The last activation type of lstm. :type act: BaseActivation - :param gate_act: gate activiation type of lstm. + :param gate_act: The gate activation type of lstm. :type gate_act: BaseActivation - :param state_act: state activiation type of lstm. + :param state_act: The state activation type of lstm. :type state_act: BaseActivation - :param input_proj_bias_attr: bias attribute for input to hidden projection. - False means no bias, None means default bias. - :type input_proj_bias_attr: ParameterAttribute|False|None - :param input_proj_layer_attr: extra layer attribute for input to hidden - projection of the LSTM unit, such as dropout, error clipping. + :param input_proj_bias_attr: The parameter attribute for the bias in + input to hidden projection. + False or None means no bias. + If this parameter is set to True, + the bias is initialized to zero. + :type input_proj_bias_attr: ParameterAttribute|bool|None + :param input_proj_layer_attr: The extra layer attribute for + input to hidden projection of the LSTM unit, + such as dropout, error clipping. 
:type input_proj_layer_attr: ExtraLayerAttribute - :param lstm_bias_attr: bias parameter attribute of lstm layer. - False means no bias, None means default bias. - :type lstm_bias_attr: ParameterAttribute|False|None - :param lstm_layer_attr: extra attribute of lstm layer. + :param lstm_bias_attr: The parameter attribute for the bias in lstm layer. + False or None means no bias. + If this parameter is set to True, + the bias is initialized to zero. + :type lstm_bias_attr: ParameterAttribute|bool|None + :param lstm_layer_attr: The extra attribute of lstm layer. :type lstm_layer_attr: ExtraLayerAttribute - :return: lstmemory unit name. + :return: The lstmemory unit name. :rtype: LayerOutput """ if size is None: @@ -786,34 +794,42 @@ def lstmemory_group(input, gate_act=SigmoidActivation(), state_act=TanhActivation()) - :param input: input layer. + :param input: Input layer. :type input: LayerOutput - :param size: lstmemory group size. + :param size: The lstmemory group size. :type size: int - :param name: name of lstmemory group. + :param name: The name of lstmemory group. :type name: basestring - :param out_memory: output of previous time step. + :param out_memory: The output of the previous time step. :type out_memory: LayerOutput | None - :param reverse: process the input in a reverse order or not. + :param reverse: Process the input in a reverse order or not. :type reverse: bool - :param param_attr: parameter attribute, None means default attribute. + :param param_attr: The parameter attribute for the weights in + input to hidden projection. + None means default attribute. :type param_attr: ParameterAttribute - :param act: last activiation type of lstm. + :param act: The last activation type of lstm. :type act: BaseActivation - :param gate_act: gate activiation type of lstm. + :param gate_act: The gate activation type of lstm. :type gate_act: BaseActivation - :param state_act: state activiation type of lstm. + :param state_act: The state activation type of lstm. :type state_act: BaseActivation - :param lstm_bias_attr: bias parameter attribute of lstm layer. - False means no bias, None means default bias. - :type lstm_bias_attr: ParameterAttribute|False|None - :param input_proj_bias_attr: bias attribute for input to hidden projection. - False means no bias, None means default bias. - :type input_proj_bias_attr: ParameterAttribute|False|None - :param input_proj_layer_attr: extra layer attribute for input to hidden - projection of the LSTM unit, such as dropout, error clipping. + :param input_proj_bias_attr: The parameter attribute for the bias in + input to hidden projection. + False or None means no bias. + If this parameter is set to True, + the bias is initialized to zero. + :type input_proj_bias_attr: ParameterAttribute|bool|None + :param input_proj_layer_attr: The extra layer attribute for + input to hidden projection of the LSTM unit, + such as dropout, error clipping. :type input_proj_layer_attr: ExtraLayerAttribute - :param lstm_layer_attr: lstm layer's extra attribute. + :param lstm_bias_attr: The parameter attribute for the bias in lstm layer. + False or None means no bias. + If this parameter is set to True, + the bias is initialized to zero. + :type lstm_bias_attr: ParameterAttribute|bool|None + :param lstm_layer_attr: The extra attribute of lstm layer. :type lstm_layer_attr: ExtraLayerAttribute :return: the lstmemory group. 
:rtype: LayerOutput diff --git a/python/paddle/v2/framework/.gitignore b/python/paddle/v2/fluid/.gitignore similarity index 100% rename from python/paddle/v2/framework/.gitignore rename to python/paddle/v2/fluid/.gitignore diff --git a/python/paddle/v2/framework/__init__.py b/python/paddle/v2/fluid/__init__.py similarity index 100% rename from python/paddle/v2/framework/__init__.py rename to python/paddle/v2/fluid/__init__.py diff --git a/python/paddle/v2/framework/backward.py b/python/paddle/v2/fluid/backward.py similarity index 97% rename from python/paddle/v2/framework/backward.py rename to python/paddle/v2/fluid/backward.py index 678efd5d20585355a684bb2df16fdb57a69e0eeb..f188582178f667125ec95cd230100fdb10ce7e88 100644 --- a/python/paddle/v2/framework/backward.py +++ b/python/paddle/v2/fluid/backward.py @@ -1,4 +1,4 @@ -from paddle.v2.framework import framework as framework +from paddle.v2.fluid import framework as framework __all__ = ['append_backward_ops'] diff --git a/python/paddle/v2/framework/default_scope_funcs.py b/python/paddle/v2/fluid/default_scope_funcs.py similarity index 92% rename from python/paddle/v2/framework/default_scope_funcs.py rename to python/paddle/v2/fluid/default_scope_funcs.py index c07f9a6ab96ac86fd6d20fbe0bc560845107f063..60c6165b6bd959f7bb3d92afed667f00f73f144f 100644 --- a/python/paddle/v2/framework/default_scope_funcs.py +++ b/python/paddle/v2/fluid/default_scope_funcs.py @@ -13,7 +13,7 @@ A `scoped_function` will take a `function` as input. That function will be invoked in a new local scope. """ -import paddle.v2.framework.core +import paddle.v2.fluid.core import threading __tl_scope__ = threading.local() @@ -27,13 +27,13 @@ __all__ = [ def get_cur_scope(): """ Get current scope. - :rtype: paddle.v2.framework.core.Scope + :rtype: paddle.v2.fluid.core.Scope """ cur_scope_stack = getattr(__tl_scope__, 'cur_scope', None) if cur_scope_stack is None: __tl_scope__.cur_scope = list() if len(__tl_scope__.cur_scope) == 0: - __tl_scope__.cur_scope.append(paddle.v2.framework.core.Scope()) + __tl_scope__.cur_scope.append(paddle.v2.fluid.core.Scope()) return __tl_scope__.cur_scope[-1] diff --git a/python/paddle/v2/framework/evaluator.py b/python/paddle/v2/fluid/evaluator.py similarity index 94% rename from python/paddle/v2/framework/evaluator.py rename to python/paddle/v2/fluid/evaluator.py index 254dd5f1a33eef17ad7a0117541255a4399ef23c..180d0135ffe8fa8982cfcde242033b5a69eed1cf 100644 --- a/python/paddle/v2/framework/evaluator.py +++ b/python/paddle/v2/fluid/evaluator.py @@ -1,6 +1,6 @@ -import paddle.v2.framework.op as op +import paddle.v2.fluid.op as op import numpy as np -import paddle.v2.framework.core as core +import paddle.v2.fluid.core as core def avg_accumulate(accumulated_var, per_eval, num_batches, place): @@ -22,7 +22,7 @@ class Evaluator(object): NOTE: default run on CPUPlace(), running on GPUPlace doesn't improve performance much. :param scope: the scope instance contains the input. - :type scope: paddle.v2.framework.core.scope + :type scope: paddle.v2.fluid.core.scope :param operator: operator name for caculating the evaluation for each mini-batch. :type operator: string :param input: output variable name of forward network. 
diff --git a/python/paddle/v2/framework/executor.py b/python/paddle/v2/fluid/executor.py similarity index 94% rename from python/paddle/v2/framework/executor.py rename to python/paddle/v2/fluid/executor.py index f5c833190e73a277bef2509e02c4be051768933d..ed1c2c06daa7ede97e138049a1f7044d071c31e8 100644 --- a/python/paddle/v2/framework/executor.py +++ b/python/paddle/v2/fluid/executor.py @@ -1,5 +1,5 @@ -import paddle.v2.framework.core as core -from paddle.v2.framework.framework import Block, Program, g_main_program +import paddle.v2.fluid.core as core +from paddle.v2.fluid.framework import Block, Program, g_main_program g_scope = core.Scope() diff --git a/python/paddle/v2/framework/framework.py b/python/paddle/v2/fluid/framework.py similarity index 99% rename from python/paddle/v2/framework/framework.py rename to python/paddle/v2/fluid/framework.py index b9db2707c0705659260c04ab3412f429058a1316..e2587b4f74506c6eb2b253fa9b24db4838bfedbc 100644 --- a/python/paddle/v2/framework/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -1,5 +1,5 @@ -import paddle.v2.framework.core as core -import paddle.v2.framework.proto.framework_pb2 as framework_pb2 +import paddle.v2.fluid.core as core +import paddle.v2.fluid.proto.framework_pb2 as framework_pb2 import collections import numpy as np import copy @@ -285,7 +285,7 @@ class Operator(object): self.desc.check_attrs() no_kernel_op_set = { 'feed', 'fetch', 'save', 'load', 'recurrent', - 'rnn_memory_helper_grad', 'while' + 'rnn_memory_helper_grad', 'conditional_block', 'while' } if type not in no_kernel_op_set: self.desc.infer_var_type(self.block.desc) diff --git a/python/paddle/v2/framework/initializer.py b/python/paddle/v2/fluid/initializer.py similarity index 99% rename from python/paddle/v2/framework/initializer.py rename to python/paddle/v2/fluid/initializer.py index 98a87bfa86efb39f381b9f99b2b1f0d7ec7d9833..ded144ecd5db83ce50ca0dc6243fdc52ac0b7a2f 100644 --- a/python/paddle/v2/framework/initializer.py +++ b/python/paddle/v2/fluid/initializer.py @@ -1,4 +1,4 @@ -import paddle.v2.framework.framework as framework +import paddle.v2.fluid.framework as framework import numpy as np __all__ = [ diff --git a/python/paddle/v2/framework/io.py b/python/paddle/v2/fluid/io.py similarity index 98% rename from python/paddle/v2/framework/io.py rename to python/paddle/v2/fluid/io.py index 5c247904a330e25b1a9f53db431947840db3f615..394a171c67a99ffb0c7caaf71e850fe541f8286e 100644 --- a/python/paddle/v2/framework/io.py +++ b/python/paddle/v2/fluid/io.py @@ -1,7 +1,7 @@ import os import cPickle as pickle -from paddle.v2.framework.framework import Program, Parameter, g_main_program, \ +from paddle.v2.fluid.framework import Program, Parameter, g_main_program, \ Variable __all__ = [ diff --git a/python/paddle/v2/framework/layer_helper.py b/python/paddle/v2/fluid/layer_helper.py similarity index 94% rename from python/paddle/v2/framework/layer_helper.py rename to python/paddle/v2/fluid/layer_helper.py index c38346b79fecfb2f82a60b360c505da16ecdf3c0..9dc3c119ea47ca11956d85119ce1ec6d3d6bb7e8 100644 --- a/python/paddle/v2/framework/layer_helper.py +++ b/python/paddle/v2/fluid/layer_helper.py @@ -1,10 +1,10 @@ import copy import itertools -from paddle.v2.framework.framework import Variable, g_main_program, \ +from paddle.v2.fluid.framework import Variable, g_main_program, \ g_startup_program, unique_name, Program -from paddle.v2.framework.initializer import ConstantInitializer, \ - UniformInitializer +from paddle.v2.fluid.initializer import ConstantInitializer, \ + 
UniformInitializer, XavierInitializer class LayerHelper(object): @@ -61,7 +61,7 @@ class LayerHelper(object): @property def param_attr(self): - default = {'name': None, 'initializer': UniformInitializer()} + default = {'name': None, 'initializer': XavierInitializer()} actual = self.kwargs.get('param_attr', None) if actual is None: actual = default @@ -70,10 +70,11 @@ class LayerHelper(object): actual[default_field] = default[default_field] return actual + @property def bias_attr(self): - default = {'name': None, 'initializer': ConstantInitializer()} + default = {'name': None, 'initializer': XavierInitializer()} bias_attr = self.kwargs.get('bias_attr', None) - if bias_attr is True: + if bias_attr is None: bias_attr = default if isinstance(bias_attr, dict): @@ -166,7 +167,7 @@ class LayerHelper(object): num_flatten_dims = 1 size = list(input_var.shape[num_flatten_dims:]) - bias_attr = self.bias_attr() + bias_attr = self.bias_attr if not bias_attr: return input_var diff --git a/python/paddle/v2/framework/layers.py b/python/paddle/v2/fluid/layers.py similarity index 83% rename from python/paddle/v2/framework/layers.py rename to python/paddle/v2/fluid/layers.py index 9a1999243750aca62a4ef898ae979d273902b45c..8a1aa1c42d5a006539d221f96e3535434c9a4c43 100644 --- a/python/paddle/v2/framework/layers.py +++ b/python/paddle/v2/fluid/layers.py @@ -1,22 +1,24 @@ -import paddle.v2.framework.core as core -from paddle.v2.framework.framework import OpProtoHolder, Variable, Program, \ +import paddle.v2.fluid.core as core +import paddle.v2.fluid.proto.framework_pb2 as framework_pb2 +from paddle.v2.fluid.framework import OpProtoHolder, Variable, Program, \ Operator -from paddle.v2.framework.initializer import ConstantInitializer, \ +from paddle.v2.fluid.initializer import ConstantInitializer, \ NormalInitializer -from paddle.v2.framework.layer_helper import LayerHelper, unique_name +from paddle.v2.fluid.layer_helper import LayerHelper, unique_name import re +import cStringIO __all__ = [ 'fc', 'data', 'cross_entropy', 'conv2d', 'pool2d', 'embedding', 'concat', 'StaticRNN', 'cast', 'sequence_conv', 'sequence_pool', 'sums', 'cos_sim', - 'batch_norm', 'accuracy' + 'batch_norm', 'accuracy', 'split_lod_tensor' ] def fc(input, size, param_attr=None, - bias_attr=True, + bias_attr=None, name=None, act=None, num_flatten_dims=1, @@ -125,6 +127,55 @@ def embedding(input, return tmp +# TODO(qijun): expose H0 and C0 +def dynamic_lstm(input, + size, + data_type='float32', + param_attr=None, + bias_attr=None, + use_peepholes=True, + is_reverse=False, + gate_activation='sigmoid', + cell_activation='tanh', + candidate_activation='tanh', + main_program=None, + startup_program=None): + helper = LayerHelper('lstm', **locals()) + size = size / 4 + weight = helper.create_parameter( + attr=helper.param_attr, shape=[size, 4 * size], dtype=data_type) + bias_size = [1, 7 * size] + if not use_peepholes: + bias_size[1] = 4 * size + bias = helper.create_parameter( + attr=helper.bias_attr, shape=bias_size, dtype=data_type, suffix='b') + + hidden = helper.create_tmp_variable(data_type) + cell = helper.create_tmp_variable(data_type) + batch_gate = helper.create_tmp_variable(data_type) + batch_cell_pre_act = helper.create_tmp_variable(data_type) + + helper.append_op( + type='lstm', + inputs={'Input': input, + 'Weight': weight, + 'Bias': bias}, + outputs={ + 'Hidden': hidden, + 'Cell': cell, + 'BatchGate': batch_gate, + 'BatchCellPreAct': batch_cell_pre_act + }, + attrs={ + 'use_peepholes': use_peepholes, + 'is_reverse': is_reverse, + 
'gate_activation': gate_activation, + 'cell_activation': cell_activation, + 'candidate_activation': candidate_activation + }) + return hidden, cell + + def data(name, shape, data_type='float32', @@ -175,6 +226,11 @@ def data(name, stop_gradient=stop_gradient) +def create_tensor(dtype, name=None, main_program=None): + helper = LayerHelper("create_tensor", **locals()) + return helper.create_variable(name=helper.name, dtype=dtype) + + def _convert_(name): """ Formatting. @@ -191,6 +247,58 @@ def _convert_(name): return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() +def _generate_doc_string_(op_proto): + """ + Generate docstring by OpProto + + Args: + op_proto (framework_pb2.OpProto): a protobuf message typed OpProto + + Returns: + str: the document string + """ + + def _type_to_str_(tp): + return framework_pb2.AttrType.Name(tp) + + if not isinstance(op_proto, framework_pb2.OpProto): + raise TypeError("OpProto should be `framework_pb2.OpProto`") + + buf = cStringIO.StringIO() + buf.write(op_proto.comment) + buf.write('\nArgs:\n') + for each_input in op_proto.inputs: + line_begin = ' {0}: '.format(_convert_(each_input.name)) + buf.write(line_begin) + buf.write(each_input.comment) + buf.write('\n') + buf.write(' ' * len(line_begin)) + buf.write('Duplicable: ') + buf.write(str(each_input.duplicable)) + buf.write(' Optional: ') + buf.write(str(each_input.dispensable)) + buf.write('\n') + + for each_attr in op_proto.attrs: + buf.write(' ') + buf.write(each_attr.name) + buf.write(' (') + buf.write(_type_to_str_(each_attr.type)) + buf.write('): ') + buf.write(each_attr.comment) + buf.write('\n') + + if len(op_proto.outputs) != 0: + buf.write('\nReturns:\n') + buf.write(' ') + for each_opt in op_proto.outputs: + if not each_opt.intermediate: + break + buf.write(each_opt.comment) + + return buf.getvalue() + + def _create_op_func_(op_type): """ Create an Operator for a Function. @@ -249,11 +357,6 @@ def _create_op_func_(op_type): return dtype def func(**kwargs): - """ - This function implements the function for the operator. This process - involves doing the sanity check (using the function above), reading - inputs from protobuf and applying the activations on top. 
- """ helper = LayerHelper(op_type, **kwargs) dtype = infer_and_check_data_type(op_proto, **kwargs) @@ -277,6 +380,7 @@ def _create_op_func_(op_type): func.__name__ = op_type globals()[op_type] = func + func.__doc__ = _generate_doc_string_(op_proto) global __all__ __all__.append(op_type) @@ -352,6 +456,56 @@ def sums(input, main_program=None, startup_program=None): return out +def assign(input, output, main_program=None): + helper = LayerHelper('assign', **locals()) + helper.append_op( + type='scale', + inputs={'X': [input]}, + outputs={'Out': [output]}, + attrs={'scale': 1.0}) + return output + + +def split_lod_tensor(input, + mask, + level, + main_program=None, + startup_program=None): + helper = LayerHelper('split_lod_tensor', **locals()) + out_true = helper.create_tmp_variable(dtype=input.data_type) + out_false = helper.create_tmp_variable(dtype=input.data_type) + helper.append_op( + type='split_lod_tensor', + inputs={ + 'X': input, + 'Mask': mask, + }, + outputs={'OutTrue': out_true, + 'OutFalse': out_false}, + attrs={'level': level}) + return out_true, out_false + + +def merge_lod_tensor(in_true, + in_false, + x, + mask, + level, + main_program=None, + startup_program=None): + helper = LayerHelper('merge_lod_tensor', **locals()) + out = helper.create_tmp_variable(dtype=x.data_type) + helper.append_op( + type='merge_lod_tensor', + inputs={'X': x, + 'Mask': mask, + 'InTrue': in_true, + 'InFalse': in_false}, + outputs={'Out': out}, + attrs={'level': level}) + return out + + def cos_sim(X, Y, **kwargs): """ This function performs the cosine similarity between two tensors @@ -685,6 +839,23 @@ def batch_norm(input, return helper.append_activation(batch_norm_out) +def beam_search_decode(ids, scores, main_program=None, startup_program=None): + helper = LayerHelper('beam_search_decode', **locals()) + sentence_ids = helper.create_tmp_variable(dtype=ids.data_type) + sentence_scores = helper.create_tmp_variable(dtype=ids.data_type) + + helper.append_op( + type="beam_search_decode", + inputs={"Ids": ids, + "Scores": scores}, + outputs={ + "SentenceIds": sentence_ids, + "SentenceScores": sentence_scores + }) + + return sentence_ids, sentence_scores + + class BlockGuard(object): """ BlockGuard class. 
@@ -1276,3 +1447,73 @@ def array_length(array, main_program=None): helper.append_op( type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [tmp]}) return tmp + + +class ConditionalBlockGuard(BlockGuard): + def __init__(self, block): + if not isinstance(block, ConditionalBlock): + raise TypeError("block should be conditional block") + super(ConditionalBlockGuard, self).__init__(block.helper.main_program) + self.block = block + + def __enter__(self): + return super(ConditionalBlockGuard, self).__enter__() + + def __exit__(self, exc_type, exc_val, exc_tb): + self.block.complete() + return super(ConditionalBlockGuard, self).__exit__(exc_type, exc_val, + exc_tb) + + +class ConditionalBlock(object): + def __init__(self, inputs, name=None, main_program=None): + for each_input in inputs: + if not isinstance(each_input, Variable): + raise TypeError("Each input should be variable") + self.inputs = inputs + self.helper = LayerHelper( + 'conditional_block', name=name, main_program=main_program) + + def block(self): + return ConditionalBlockGuard(self) + + def complete(self): + inside_block = self.helper.main_program.current_block() + parent_block = self.helper.main_program.block(inside_block.parent_idx) + + intermediate = set() + params = set() + + for each_op in inside_block.ops: + assert isinstance(each_op, Operator) + for iname in each_op.input_names: + for in_var_name in each_op.input(iname): + if in_var_name not in intermediate: + params.add(in_var_name) + + for oname in each_op.output_names: + for out_var_name in each_op.output(oname): + intermediate.add(out_var_name) + input_set = set([ipt.name for ipt in self.inputs]) + + param_list = [ + parent_block.var(each_name) for each_name in params + if each_name not in input_set + ] + + out_list = [ + parent_block.var(var_name) for var_name in parent_block.vars + if var_name not in intermediate + ] + + step_scope = parent_block.create_var( + type=core.VarDesc.VarType.STEP_SCOPES) + parent_block.append_op( + type='conditional_block', + inputs={ + 'X': self.inputs, + 'Params': param_list, + }, + outputs={'Out': out_list, + 'Scope': [step_scope]}, + attrs={'block': inside_block}) diff --git a/python/paddle/v2/framework/net_drawer.py b/python/paddle/v2/fluid/net_drawer.py similarity index 96% rename from python/paddle/v2/framework/net_drawer.py rename to python/paddle/v2/fluid/net_drawer.py index 045e267c253e2485e75df3fb95cc0e591ee29ea5..17ad547c2bb5b79ef8225dd1a8f1ef49a6572508 100644 --- a/python/paddle/v2/framework/net_drawer.py +++ b/python/paddle/v2/fluid/net_drawer.py @@ -3,8 +3,8 @@ import json import logging from collections import defaultdict -import paddle.v2.framework.core as core -import paddle.v2.framework.proto.framework_pb2 as framework_pb2 +import paddle.v2.fluid.core as core +import paddle.v2.fluid.proto.framework_pb2 as framework_pb2 logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) diff --git a/python/paddle/v2/framework/nets.py b/python/paddle/v2/fluid/nets.py similarity index 98% rename from python/paddle/v2/framework/nets.py rename to python/paddle/v2/fluid/nets.py index 725d2fa7f5e7a862eea0ef9172a9e63858ebd0dd..5e14ca594bc7965dc29039ba57bb7b26b1ce6871 100644 --- a/python/paddle/v2/framework/nets.py +++ b/python/paddle/v2/fluid/nets.py @@ -1,4 +1,4 @@ -import paddle.v2.framework.layers as layers +import paddle.v2.fluid.layers as layers __all__ = ["simple_img_conv_pool", "sequence_conv_pool"] diff --git a/python/paddle/v2/framework/op.py b/python/paddle/v2/fluid/op.py similarity index 98% rename from 
python/paddle/v2/framework/op.py rename to python/paddle/v2/fluid/op.py index bc771a964adf9f97cbeae87c06ce954c76051150..5828803497ec06bc7644da18ca752f61469ca53f 100644 --- a/python/paddle/v2/framework/op.py +++ b/python/paddle/v2/fluid/op.py @@ -1,5 +1,5 @@ -import paddle.v2.framework.core as core -import paddle.v2.framework.proto.framework_pb2 as framework_pb2 +import paddle.v2.fluid.core as core +import paddle.v2.fluid.proto.framework_pb2 as framework_pb2 def get_all_op_protos(): diff --git a/python/paddle/v2/framework/optimizer.py b/python/paddle/v2/fluid/optimizer.py similarity index 98% rename from python/paddle/v2/framework/optimizer.py rename to python/paddle/v2/fluid/optimizer.py index f06c0fb98d572fb54a85996668cc6f32726ec9de..4252a6f08509fec92ac5c45d32169232e1dd190f 100644 --- a/python/paddle/v2/framework/optimizer.py +++ b/python/paddle/v2/fluid/optimizer.py @@ -1,11 +1,11 @@ from collections import defaultdict -import paddle.v2.framework.framework as framework -from paddle.v2.framework.framework import unique_name, Program -from paddle.v2.framework.backward import append_backward_ops -from paddle.v2.framework.initializer import ConstantInitializer -from paddle.v2.framework.regularizer import append_regularization_ops -from paddle.v2.framework.layer_helper import LayerHelper +import paddle.v2.fluid.framework as framework +from paddle.v2.fluid.framework import unique_name, Program +from paddle.v2.fluid.backward import append_backward_ops +from paddle.v2.fluid.initializer import ConstantInitializer +from paddle.v2.fluid.regularizer import append_regularization_ops +from paddle.v2.fluid.layer_helper import LayerHelper __all__ = [ 'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer', 'AdamOptimizer', diff --git a/python/paddle/v2/framework/regularizer.py b/python/paddle/v2/fluid/regularizer.py similarity index 98% rename from python/paddle/v2/framework/regularizer.py rename to python/paddle/v2/fluid/regularizer.py index 5111ac5566feb7d334ff4cd8e70daa0cfbd6e552..098cd0dd6439554f49e429ab75fb11bfa2c9d28c 100644 --- a/python/paddle/v2/framework/regularizer.py +++ b/python/paddle/v2/fluid/regularizer.py @@ -1,4 +1,4 @@ -import paddle.v2.framework.framework as framework +import paddle.v2.fluid.framework as framework __all__ = [ 'append_regularization_ops', 'L2DecayRegularizer', 'L1DecayRegularizer' diff --git a/python/paddle/v2/framework/tests/.gitignore b/python/paddle/v2/fluid/tests/.gitignore similarity index 100% rename from python/paddle/v2/framework/tests/.gitignore rename to python/paddle/v2/fluid/tests/.gitignore diff --git a/python/paddle/v2/fluid/tests/CMakeLists.txt b/python/paddle/v2/fluid/tests/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..e795627bfe9e8ad0c196349a332e62e975f20aa3 --- /dev/null +++ b/python/paddle/v2/fluid/tests/CMakeLists.txt @@ -0,0 +1,7 @@ +file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") +string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") +foreach(src ${TEST_OPS}) + py_test(${src} SRCS ${src}.py) +endforeach() + +add_subdirectory(book) diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/fluid/tests/book/CMakeLists.txt similarity index 100% rename from python/paddle/v2/framework/tests/CMakeLists.txt rename to python/paddle/v2/fluid/tests/book/CMakeLists.txt diff --git a/python/paddle/v2/framework/tests/test_fit_a_line.py b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py similarity index 87% rename from python/paddle/v2/framework/tests/test_fit_a_line.py rename to 
diff --git a/python/paddle/v2/framework/tests/test_fit_a_line.py b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py
similarity index 87%
rename from python/paddle/v2/framework/tests/test_fit_a_line.py
rename to python/paddle/v2/fluid/tests/book/test_fit_a_line.py
index 6e09b88dca34de2579131e7bdc16b26cf6cde49c..5ef963bffa4e4fa3992e1f811d7f514662809410 100644
--- a/python/paddle/v2/framework/tests/test_fit_a_line.py
+++ b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py
@@ -1,11 +1,11 @@
 import paddle.v2 as paddle
-import paddle.v2.framework.layers as layers
-import paddle.v2.framework.core as core
-import paddle.v2.framework.optimizer as optimizer
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.core as core
+import paddle.v2.fluid.optimizer as optimizer

-from paddle.v2.framework.framework import Program
-from paddle.v2.framework.io import save_persistables, load_persistables
-from paddle.v2.framework.executor import Executor
+from paddle.v2.fluid.framework import Program
+from paddle.v2.fluid.io import save_persistables, load_persistables
+from paddle.v2.fluid.executor import Executor

 import numpy as np
diff --git a/python/paddle/v2/framework/tests/test_image_classification_train.py b/python/paddle/v2/fluid/tests/book/test_image_classification_train.py
similarity index 95%
rename from python/paddle/v2/framework/tests/test_image_classification_train.py
rename to python/paddle/v2/fluid/tests/book/test_image_classification_train.py
index a4165da9703c55ae3347123409407f0cae30856f..e253b8d27fd29746b41d82a63b11485032e77ebb 100644
--- a/python/paddle/v2/framework/tests/test_image_classification_train.py
+++ b/python/paddle/v2/fluid/tests/book/test_image_classification_train.py
@@ -1,12 +1,12 @@
 import numpy as np
 import paddle.v2 as paddle
-import paddle.v2.framework.core as core
-import paddle.v2.framework.layers as layers
-import paddle.v2.framework.nets as nets
-import paddle.v2.framework.optimizer as optimizer
-from paddle.v2.framework.executor import Executor
-from paddle.v2.framework.framework import g_startup_program, g_main_program
-from paddle.v2.framework.initializer import XavierInitializer
+import paddle.v2.fluid.core as core
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.nets as nets
+import paddle.v2.fluid.optimizer as optimizer
+from paddle.v2.fluid.executor import Executor
+from paddle.v2.fluid.framework import g_startup_program, g_main_program
+from paddle.v2.fluid.initializer import XavierInitializer


 def resnet_cifar10(input, depth=32, main_program=None, startup_program=None):
diff --git a/python/paddle/v2/framework/tests/test_recognize_digits_conv.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py
similarity index 90%
rename from python/paddle/v2/framework/tests/test_recognize_digits_conv.py
rename to python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py
index 66c629eb4261a9b971f25611d8e49f0cb671304a..2b723125412c17f3805ee3cae046b0788aa34997 100644
--- a/python/paddle/v2/framework/tests/test_recognize_digits_conv.py
+++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py
@@ -1,11 +1,11 @@
 import paddle.v2 as paddle
-import paddle.v2.framework.layers as layers
-import paddle.v2.framework.nets as nets
-import paddle.v2.framework.core as core
-import paddle.v2.framework.optimizer as optimizer
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.nets as nets
+import paddle.v2.fluid.core as core
+import paddle.v2.fluid.optimizer as optimizer

-from paddle.v2.framework.framework import Program
-from paddle.v2.framework.executor import Executor
+from paddle.v2.fluid.framework import Program
+from paddle.v2.fluid.executor import Executor

 import numpy as np
diff --git a/python/paddle/v2/framework/tests/test_recognize_digits_mlp.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py
similarity index 88%
rename from python/paddle/v2/framework/tests/test_recognize_digits_mlp.py
rename to python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py
index 076cf882160cd53f45ef291d82ba57ada843a287..2e1a9f236b6621c7334a9eb04272a6eb69c86fab 100644
--- a/python/paddle/v2/framework/tests/test_recognize_digits_mlp.py
+++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py
@@ -1,12 +1,12 @@
 import paddle.v2 as paddle
-import paddle.v2.framework.layers as layers
-import paddle.v2.framework.core as core
-import paddle.v2.framework.optimizer as optimizer
-
-from paddle.v2.framework.framework import Program
-from paddle.v2.framework.executor import Executor
-from paddle.v2.framework.regularizer import L2DecayRegularizer
-from paddle.v2.framework.initializer import UniformInitializer
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.core as core
+import paddle.v2.fluid.optimizer as optimizer
+
+from paddle.v2.fluid.framework import Program
+from paddle.v2.fluid.executor import Executor
+from paddle.v2.fluid.regularizer import L2DecayRegularizer
+from paddle.v2.fluid.initializer import UniformInitializer

 import numpy as np
diff --git a/python/paddle/v2/framework/tests/test_recommender_system.py b/python/paddle/v2/fluid/tests/book/test_recommender_system.py
similarity index 97%
rename from python/paddle/v2/framework/tests/test_recommender_system.py
rename to python/paddle/v2/fluid/tests/book/test_recommender_system.py
index 31562b4391d16b831d53801cfa21c7bdf8c3ab8d..4708dfe3e9209a3254a9e1903cbedf07ebc5d2d0 100644
--- a/python/paddle/v2/framework/tests/test_recommender_system.py
+++ b/python/paddle/v2/fluid/tests/book/test_recommender_system.py
@@ -1,11 +1,11 @@
 import paddle.v2 as paddle
-import paddle.v2.framework.layers as layers
-import paddle.v2.framework.nets as nets
-import paddle.v2.framework.core as core
-import paddle.v2.framework.optimizer as optimizer
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.nets as nets
+import paddle.v2.fluid.core as core
+import paddle.v2.fluid.optimizer as optimizer

-from paddle.v2.framework.framework import Program
-from paddle.v2.framework.executor import Executor
+from paddle.v2.fluid.framework import Program
+from paddle.v2.fluid.executor import Executor

 import numpy as np
diff --git a/python/paddle/v2/framework/tests/test_understand_sentiment_conv.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py
similarity index 90%
rename from python/paddle/v2/framework/tests/test_understand_sentiment_conv.py
rename to python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py
index eb377e9264b6031e9bf484a90b7c2b39442407f1..dc4b63da9b37aff55fc6362f239e3e61004a3866 100644
--- a/python/paddle/v2/framework/tests/test_understand_sentiment_conv.py
+++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py
@@ -1,11 +1,11 @@
 import paddle.v2 as paddle
-import paddle.v2.framework.layers as layers
-import paddle.v2.framework.nets as nets
-import paddle.v2.framework.core as core
-import paddle.v2.framework.optimizer as optimizer
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.nets as nets
+import paddle.v2.fluid.core as core
+import paddle.v2.fluid.optimizer as optimizer

-from paddle.v2.framework.framework import Program, g_main_program, g_startup_program
-from paddle.v2.framework.executor import Executor
+from paddle.v2.fluid.framework import Program, g_main_program, g_startup_program
+from paddle.v2.fluid.executor import Executor

 import numpy as np
diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d507f4c8e39ba039603a5b7618e7a82d1dcb21b
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py
@@ -0,0 +1,110 @@
+import paddle.v2 as paddle
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.nets as nets
+import paddle.v2.fluid.core as core
+import paddle.v2.fluid.optimizer as optimizer
+
+from paddle.v2.fluid.framework import Program, g_main_program, g_startup_program
+from paddle.v2.fluid.executor import Executor
+
+import numpy as np
+
+
+def stacked_lstm_net(input_dim,
+                     class_dim=2,
+                     emb_dim=128,
+                     hid_dim=512,
+                     stacked_num=3):
+    assert stacked_num % 2 == 1
+    data = layers.data(name="words", shape=[1], data_type="int64")
+    label = layers.data(name="label", shape=[1], data_type="int64")
+
+    emb = layers.embedding(input=data, size=[input_dim, emb_dim])
+    # add bias attr
+
+    # TODO(qijun) linear act
+    fc1 = layers.fc(input=emb, size=hid_dim)
+    lstm1, cell1 = layers.dynamic_lstm(input=fc1, size=hid_dim)
+
+    inputs = [fc1, lstm1]
+
+    for i in range(2, stacked_num + 1):
+        fc = layers.fc(input=inputs, size=hid_dim)
+        lstm, cell = layers.dynamic_lstm(
+            input=fc, size=hid_dim, is_reverse=(i % 2) == 0)
+        inputs = [fc, lstm]
+
+    fc_last = layers.sequence_pool(input=inputs[0], pool_type='max')
+    lstm_last = layers.sequence_pool(input=inputs[1], pool_type='max')
+
+    prediction = layers.fc(input=[fc_last, lstm_last],
+                           size=class_dim,
+                           act='softmax')
+    cost = layers.cross_entropy(input=prediction, label=label)
+    avg_cost = layers.mean(x=cost)
+    adam_optimizer = optimizer.AdamOptimizer(learning_rate=0.002)
+    opts = adam_optimizer.minimize(avg_cost)
+    acc = layers.accuracy(input=prediction, label=label)
+    return avg_cost, acc
+
+
+def to_lodtensor(data, place):
+    seq_lens = [len(seq) for seq in data]
+    cur_len = 0
+    lod = [cur_len]
+    for l in seq_lens:
+        cur_len += l
+        lod.append(cur_len)
+    flattened_data = np.concatenate(data, axis=0).astype("int64")
+    flattened_data = flattened_data.reshape([len(flattened_data), 1])
+    res = core.LoDTensor()
+    res.set(flattened_data, place)
+    res.set_lod([lod])
+    return res
+
+
+def main():
+    BATCH_SIZE = 100
+    PASS_NUM = 5
+
+    word_dict = paddle.dataset.imdb.word_dict()
+    print "load word dict successfully"
+    dict_dim = len(word_dict)
+    class_dim = 2
+
+    cost, acc = stacked_lstm_net(input_dim=dict_dim, class_dim=class_dim)
+
+    train_data = paddle.batch(
+        paddle.reader.shuffle(
+            paddle.dataset.imdb.train(word_dict), buf_size=1000),
+        batch_size=BATCH_SIZE)
+    place = core.CPUPlace()
+    exe = Executor(place)
+
+    exe.run(g_startup_program)
+
+    for pass_id in xrange(PASS_NUM):
+        for data in train_data():
+            tensor_words = to_lodtensor(map(lambda x: x[0], data), place)
+
+            label = np.array(map(lambda x: x[1], data)).astype("int64")
+            label = label.reshape([BATCH_SIZE, 1])
+
+            tensor_label = core.LoDTensor()
+            tensor_label.set(label, place)
+
+            outs = exe.run(g_main_program,
+                           feed={"words": tensor_words,
+                                 "label": tensor_label},
+                           fetch_list=[cost, acc])
+            cost_val = np.array(outs[0])
+            acc_val = np.array(outs[1])
+
+            print("cost=" + str(cost_val) + " acc=" + str(acc_val))
+            if cost_val < 1.0 and acc_val > 0.7:
+                exit(0)
+    exit(1)
+
+
+if __name__ == '__main__':
+    main()
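
to_lodtensor above packs a Python list of variable-length sequences into a single LoDTensor: the rows are concatenated into one [total_len, 1] int64 array, and the level-0 LoD records the cumulative offset of each sequence. A small worked example, reusing the helper defined in this file:

    import paddle.v2.fluid.core as core

    # three sequences of lengths 2, 3 and 1 -> offsets [0, 2, 5, 6]
    data = [[1, 2], [3, 4, 5], [6]]
    tensor = to_lodtensor(data, core.CPUPlace())
    # the tensor now holds the flattened column [[1], [2], [3], [4], [5], [6]]
    # and tensor.lod() == [[0, 2, 5, 6]]

Note also that stacked_lstm_net alternates is_reverse on every other dynamic_lstm layer, which is why stacked_num is asserted to be odd.
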
diff --git a/python/paddle/v2/framework/tests/test_understand_sentiment_lstm.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py
similarity index 92%
rename from python/paddle/v2/framework/tests/test_understand_sentiment_lstm.py
rename to python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py
index 26cbd01bc04916e53554e6f70bee7bcf25d6371c..848dcce974a107402c33013e9f84211fd4979e21 100644
--- a/python/paddle/v2/framework/tests/test_understand_sentiment_lstm.py
+++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py
@@ -1,10 +1,10 @@
 import paddle.v2 as paddle
-import paddle.v2.framework.layers as layers
-import paddle.v2.framework.core as core
-import paddle.v2.framework.optimizer as optimizer
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.core as core
+import paddle.v2.fluid.optimizer as optimizer

-from paddle.v2.framework.framework import g_main_program, g_startup_program
-from paddle.v2.framework.executor import Executor
+from paddle.v2.fluid.framework import g_main_program, g_startup_program
+from paddle.v2.fluid.executor import Executor

 import numpy as np
diff --git a/python/paddle/v2/framework/tests/test_word2vec.py b/python/paddle/v2/fluid/tests/book/test_word2vec.py
similarity index 95%
rename from python/paddle/v2/framework/tests/test_word2vec.py
rename to python/paddle/v2/fluid/tests/book/test_word2vec.py
index cb9fc2ab62b56348db7a320f7d40d2f0a7bf9d21..054dbd5a3d090ba8a08e8f101de11c69ddd36d8a 100644
--- a/python/paddle/v2/framework/tests/test_word2vec.py
+++ b/python/paddle/v2/fluid/tests/book/test_word2vec.py
@@ -1,10 +1,10 @@
 import paddle.v2 as paddle
-import paddle.v2.framework.layers as layers
-import paddle.v2.framework.core as core
-import paddle.v2.framework.optimizer as optimizer
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.core as core
+import paddle.v2.fluid.optimizer as optimizer

-from paddle.v2.framework.framework import Program
-from paddle.v2.framework.executor import Executor
+from paddle.v2.fluid.framework import Program
+from paddle.v2.fluid.executor import Executor

 import numpy as np
diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/fluid/tests/op_test.py
similarity index 98%
rename from python/paddle/v2/framework/tests/op_test.py
rename to python/paddle/v2/fluid/tests/op_test.py
index 4a269341a4be6c1b72fde5166b7dd089236700b8..90269e308a31d2606b23d741ce0d0fa91a0a6aeb 100644
--- a/python/paddle/v2/framework/tests/op_test.py
+++ b/python/paddle/v2/fluid/tests/op_test.py
@@ -2,12 +2,12 @@ import unittest
 import numpy as np
 import random
 import itertools
-import paddle.v2.framework.core as core
+import paddle.v2.fluid.core as core
 import collections
-from paddle.v2.framework.backward import append_backward_ops
-from paddle.v2.framework.op import Operator
-from paddle.v2.framework.executor import Executor
-from paddle.v2.framework.framework import Program, OpProtoHolder
+from paddle.v2.fluid.backward import append_backward_ops
+from paddle.v2.fluid.op import Operator
+from paddle.v2.fluid.executor import Executor
+from paddle.v2.fluid.framework import Program, OpProtoHolder


 def randomize_probability(batch_size, class_num, dtype='float32'):
diff --git a/python/paddle/v2/framework/tests/test_accuracy_op.py b/python/paddle/v2/fluid/tests/test_accuracy_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_accuracy_op.py
rename to python/paddle/v2/fluid/tests/test_accuracy_op.py
diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/fluid/tests/test_activation_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_activation_op.py
rename to python/paddle/v2/fluid/tests/test_activation_op.py
diff --git a/python/paddle/v2/framework/tests/test_adadelta_op.py b/python/paddle/v2/fluid/tests/test_adadelta_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_adadelta_op.py
rename to python/paddle/v2/fluid/tests/test_adadelta_op.py
diff --git a/python/paddle/v2/framework/tests/test_adagrad_op.py b/python/paddle/v2/fluid/tests/test_adagrad_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_adagrad_op.py
rename to python/paddle/v2/fluid/tests/test_adagrad_op.py
diff --git a/python/paddle/v2/framework/tests/test_adam_op.py b/python/paddle/v2/fluid/tests/test_adam_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_adam_op.py
rename to python/paddle/v2/fluid/tests/test_adam_op.py
diff --git a/python/paddle/v2/framework/tests/test_adamax_op.py b/python/paddle/v2/fluid/tests/test_adamax_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_adamax_op.py
rename to python/paddle/v2/fluid/tests/test_adamax_op.py
diff --git a/python/paddle/v2/framework/tests/test_array_read_write_op.py b/python/paddle/v2/fluid/tests/test_array_read_write_op.py
similarity index 91%
rename from python/paddle/v2/framework/tests/test_array_read_write_op.py
rename to python/paddle/v2/fluid/tests/test_array_read_write_op.py
index 79e9938216e2abda5432e525804b0bcb9a655655..e019a4e15f0e25deaedf30911b44e576c8f89013 100644
--- a/python/paddle/v2/framework/tests/test_array_read_write_op.py
+++ b/python/paddle/v2/fluid/tests/test_array_read_write_op.py
@@ -1,9 +1,9 @@
 import unittest
-import paddle.v2.framework.core as core
-import paddle.v2.framework.layers as layers
-from paddle.v2.framework.executor import Executor
-from paddle.v2.framework.backward import append_backward_ops
-from paddle.v2.framework.framework import g_main_program
+import paddle.v2.fluid.core as core
+import paddle.v2.fluid.layers as layers
+from paddle.v2.fluid.executor import Executor
+from paddle.v2.fluid.backward import append_backward_ops
+from paddle.v2.fluid.framework import g_main_program
 import numpy
diff --git a/python/paddle/v2/fluid/tests/test_assign_op.py b/python/paddle/v2/fluid/tests/test_assign_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b0c145f1a69678b228bc70e4e4e273f5bcf9888
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/test_assign_op.py
@@ -0,0 +1,21 @@
+import op_test
+import numpy
+import unittest
+
+
+class TestAssignOp(op_test.OpTest):
+    def setUp(self):
+        self.op_type = "assign"
+        x = numpy.random.random(size=(100, 10))
+        self.inputs = {'X': x}
+        self.outputs = {'Out': x}
+
+    def test_forward(self):
+        self.check_output()
+
+    def test_backward(self):
+        self.check_grad(['X'], 'Out')
+
+
+if __name__ == '__main__':
+    unittest.main()
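
check_output and check_grad come from the op_test harness moved earlier in this diff: the former compares the operator's output against self.outputs, the latter compares the operator's analytic gradient against a numeric one. A minimal sketch of the finite-difference idea behind check_grad (not OpTest's exact implementation):

    import numpy

    def numeric_grad(f, x, eps=1e-4):
        # central difference of a scalar-valued f, one element at a time
        grad = numpy.zeros_like(x)
        it = numpy.nditer(x, flags=['multi_index'])
        while not it.finished:
            idx = it.multi_index
            orig = x[idx]
            x[idx] = orig + eps
            f_plus = f(x)
            x[idx] = orig - eps
            f_minus = f(x)
            x[idx] = orig
            grad[idx] = (f_plus - f_minus) / (2 * eps)
            it.iternext()
        return grad

    # assign is an identity copy, so with f(x) = sum(assign(x)) = sum(x)
    # the gradient is all ones
    g = numeric_grad(lambda v: v.sum(), numpy.random.random((3, 2)))
    assert numpy.allclose(g, numpy.ones((3, 2)))
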
diff --git a/python/paddle/v2/framework/tests/test_auc_op.py b/python/paddle/v2/fluid/tests/test_auc_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_auc_op.py
rename to python/paddle/v2/fluid/tests/test_auc_op.py
diff --git a/python/paddle/v2/framework/tests/test_batch_norm_op.py b/python/paddle/v2/fluid/tests/test_batch_norm_op.py
similarity index 99%
rename from python/paddle/v2/framework/tests/test_batch_norm_op.py
rename to python/paddle/v2/fluid/tests/test_batch_norm_op.py
index dee339f43c2ee33fc8a691e0915bddf2c1679285..71f9599e0de83c86808f7e62547f80d3d50ffc7d 100644
--- a/python/paddle/v2/framework/tests/test_batch_norm_op.py
+++ b/python/paddle/v2/fluid/tests/test_batch_norm_op.py
@@ -1,8 +1,8 @@
 import unittest
 import numpy as np
 from op_test import OpTest
-import paddle.v2.framework.core as core
-from paddle.v2.framework.op import Operator
+import paddle.v2.fluid.core as core
+from paddle.v2.fluid.op import Operator


 def grad_var_name(var_name):
diff --git a/python/paddle/v2/fluid/tests/test_bilinear_tensor_product_op.py b/python/paddle/v2/fluid/tests/test_bilinear_tensor_product_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..080ca43b8269e0f6a9f4d0ce3973f4d4a07a8e2a
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/test_bilinear_tensor_product_op.py
@@ -0,0 +1,37 @@
+import unittest
+import numpy as np
+from op_test import OpTest
+
+
+class TestBilinearTensorProductOp(OpTest):
+    def setUp(self):
+        self.op_type = "bilinear_tensor_product"
+        batch_size = 6
+        size0 = 3
+        size1 = 4
+        size2 = 5
+        a = np.random.random((batch_size, size0)).astype("float32")
+        b = np.random.random((batch_size, size1)).astype("float32")
+        w = np.random.random((size2, size0, size1)).astype("float32")
+        bias = np.random.random((1, size2)).astype("float32")
+        output = np.zeros((batch_size, size2)).astype("float32")
+        for i in range(size2):
+            w_i = w[i, :, :]
+            output[:, i] = np.sum(np.matmul(a, w_i) * b, axis=1)
+        self.inputs = {
+            'X': a,
+            'Y': b,
+            'Weight': w,
+            'Bias': bias,
+        }
+        self.outputs = {'Out': output + bias}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'Y', 'Weight', 'Bias'], 'Out')
+
+
+if __name__ == "__main__":
+    unittest.main()
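
The reference loop above computes out[n, i] = sum_{j,k} x[n, j] * W[i, j, k] * y[n, k] + bias[i], i.e. one bilinear form x^T W_i y per output column. The same result in a single einsum call (a sketch with the test's shapes, not part of the test itself):

    import numpy as np

    batch, d0, d1, d2 = 6, 3, 4, 5
    x = np.random.random((batch, d0)).astype('float32')
    y = np.random.random((batch, d1)).astype('float32')
    w = np.random.random((d2, d0, d1)).astype('float32')

    # out[n, i] = sum_{j,k} x[n, j] * w[i, j, k] * y[n, k]
    out = np.einsum('nj,ijk,nk->ni', x, w, y)

    # matches the loop form used in the test
    ref = np.stack(
        [np.sum(np.matmul(x, w[i]) * y, axis=1) for i in range(d2)], axis=1)
    assert np.allclose(out, ref, atol=1e-4)
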
diff --git a/python/paddle/v2/framework/tests/test_cast_op.py b/python/paddle/v2/fluid/tests/test_cast_op.py
similarity index 93%
rename from python/paddle/v2/framework/tests/test_cast_op.py
rename to python/paddle/v2/fluid/tests/test_cast_op.py
index 52ee71a8a4058a1367d9e493e02d8f2469ccfc9f..0c4b6310652e84d3dd7f281a8b98ae0435072afb 100644
--- a/python/paddle/v2/framework/tests/test_cast_op.py
+++ b/python/paddle/v2/fluid/tests/test_cast_op.py
@@ -1,7 +1,7 @@
 import op_test
 import unittest
 import numpy as np
-import paddle.v2.framework.core as core
+import paddle.v2.fluid.core as core


 class TestCastOp(op_test.OpTest):
diff --git a/python/paddle/v2/framework/tests/test_chunk_eval_op.py b/python/paddle/v2/fluid/tests/test_chunk_eval_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_chunk_eval_op.py
rename to python/paddle/v2/fluid/tests/test_chunk_eval_op.py
diff --git a/python/paddle/v2/framework/tests/test_clip_by_norm_op.py b/python/paddle/v2/fluid/tests/test_clip_by_norm_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_clip_by_norm_op.py
rename to python/paddle/v2/fluid/tests/test_clip_by_norm_op.py
diff --git a/python/paddle/v2/framework/tests/test_clip_op.py b/python/paddle/v2/fluid/tests/test_clip_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_clip_op.py
rename to python/paddle/v2/fluid/tests/test_clip_op.py
diff --git a/python/paddle/v2/framework/tests/test_compare_op.py b/python/paddle/v2/fluid/tests/test_compare_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_compare_op.py
rename to python/paddle/v2/fluid/tests/test_compare_op.py
diff --git a/python/paddle/v2/framework/tests/test_concat_op.py b/python/paddle/v2/fluid/tests/test_concat_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_concat_op.py
rename to python/paddle/v2/fluid/tests/test_concat_op.py
diff --git a/python/paddle/v2/framework/tests/test_cond_op.py b/python/paddle/v2/fluid/tests/test_cond_op.py
similarity index 97%
rename from python/paddle/v2/framework/tests/test_cond_op.py
rename to python/paddle/v2/fluid/tests/test_cond_op.py
index 09a3f5dc97c342fc61cd407bb338c1696e8d6c76..9d1df44b9065f8101e90b87815660f8c0818645f 100644
--- a/python/paddle/v2/framework/tests/test_cond_op.py
+++ b/python/paddle/v2/fluid/tests/test_cond_op.py
@@ -1,8 +1,8 @@
 import logging
-import paddle.v2.framework.core as core
+import paddle.v2.fluid.core as core
 import unittest
 import numpy as np
-from paddle.v2.framework.op import Operator, CondOp
+from paddle.v2.fluid.op import Operator, CondOp


 class PySimpleCond(object):
diff --git a/python/paddle/v2/fluid/tests/test_conditional_block.py b/python/paddle/v2/fluid/tests/test_conditional_block.py
new file mode 100644
index 0000000000000000000000000000000000000000..293803f004a1513611fba30634d5552e1da84fef
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/test_conditional_block.py
@@ -0,0 +1,40 @@
+import unittest
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.core as core
+from paddle.v2.fluid.framework import g_startup_program, g_main_program
+from paddle.v2.fluid.executor import Executor
+from paddle.v2.fluid.backward import append_backward_ops
+import numpy
+
+
+class ConditionalBlock(unittest.TestCase):
+    def test_forward(self):
+        data = layers.data(name='X', shape=[1], data_type='float32')
+        data.stop_gradient = False
+        cond = layers.ConditionalBlock(inputs=[data])
+        out = layers.create_tensor(dtype='float32')
+        with cond.block():
+            hidden = layers.fc(input=data, size=10)
+            layers.assign(hidden, out)
+
+        cpu = core.CPUPlace()
+        exe = Executor(cpu)
+        exe.run(g_startup_program)
+
+        x = core.LoDTensor()
+        x.set(numpy.random.random(size=(10, 1)).astype('float32'), cpu)
+
+        outs = map(numpy.array, exe.run(feed={'X': x}, fetch_list=[out]))[0]
+        print outs
+        loss = layers.mean(x=out)
+        append_backward_ops(loss=loss)
+        outs = map(numpy.array,
+                   exe.run(feed={'X': x},
+                           fetch_list=[
+                               g_main_program.block(0).var(data.name + "@GRAD")
+                           ]))[0]
+        print outs
+
+
+if __name__ == '__main__':
+    unittest.main()
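
The backward half of this test leans on the naming convention of append_backward_ops: the gradient of a forward variable is a new variable with an @GRAD suffix in the same program, which is why the fetch target is looked up by name in block 0. A sketch of the convention, assuming the scheme used above:

    # after append_backward_ops(loss=loss), the gradient of variable 'X'
    # lives in the main program under the name 'X@GRAD'
    grad_var = g_main_program.block(0).var(data.name + "@GRAD")
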
diff --git a/python/paddle/v2/framework/tests/test_conv2d_op.py b/python/paddle/v2/fluid/tests/test_conv2d_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_conv2d_op.py
rename to python/paddle/v2/fluid/tests/test_conv2d_op.py
diff --git a/python/paddle/v2/framework/tests/test_conv2d_transpose_op.py b/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_conv2d_transpose_op.py
rename to python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py
diff --git a/python/paddle/v2/framework/tests/test_conv3d_op.py b/python/paddle/v2/fluid/tests/test_conv3d_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_conv3d_op.py
rename to python/paddle/v2/fluid/tests/test_conv3d_op.py
diff --git a/python/paddle/v2/framework/tests/test_conv3d_transpose_op.py b/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_conv3d_transpose_op.py
rename to python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py
diff --git a/python/paddle/v2/framework/tests/test_conv_shift_op.py b/python/paddle/v2/fluid/tests/test_conv_shift_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_conv_shift_op.py
rename to python/paddle/v2/fluid/tests/test_conv_shift_op.py
diff --git a/python/paddle/v2/framework/tests/test_cos_sim_op.py b/python/paddle/v2/fluid/tests/test_cos_sim_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_cos_sim_op.py
rename to python/paddle/v2/fluid/tests/test_cos_sim_op.py
diff --git a/python/paddle/v2/fluid/tests/test_create_op_doc_string.py b/python/paddle/v2/fluid/tests/test_create_op_doc_string.py
new file mode 100644
index 0000000000000000000000000000000000000000..42b6f7a3616bbce53a8cae68a5fc1eda411a7422
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/test_create_op_doc_string.py
@@ -0,0 +1,11 @@
+import unittest
+import paddle.v2.fluid.layers as layers
+
+
+class TestDocString(unittest.TestCase):
+    def test_layer_doc_string(self):
+        print layers.dropout.__doc__
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/v2/framework/tests/test_crf_decoding_op.py b/python/paddle/v2/fluid/tests/test_crf_decoding_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_crf_decoding_op.py
rename to python/paddle/v2/fluid/tests/test_crf_decoding_op.py
diff --git a/python/paddle/v2/framework/tests/test_crop_op.py b/python/paddle/v2/fluid/tests/test_crop_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_crop_op.py
rename to python/paddle/v2/fluid/tests/test_crop_op.py
diff --git a/python/paddle/v2/framework/tests/test_cross_entropy_op.py b/python/paddle/v2/fluid/tests/test_cross_entropy_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_cross_entropy_op.py
rename to python/paddle/v2/fluid/tests/test_cross_entropy_op.py
diff --git a/python/paddle/v2/framework/tests/test_decayed_adagrad_op.py b/python/paddle/v2/fluid/tests/test_decayed_adagrad_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_decayed_adagrad_op.py
rename to python/paddle/v2/fluid/tests/test_decayed_adagrad_op.py
diff --git a/python/paddle/v2/framework/tests/test_default_scope_funcs.py b/python/paddle/v2/fluid/tests/test_default_scope_funcs.py
similarity index 94%
rename from python/paddle/v2/framework/tests/test_default_scope_funcs.py
rename to python/paddle/v2/fluid/tests/test_default_scope_funcs.py
index 09a9850d054e3d7e6bf6db363fc577bdff8e9f43..738e69529ea447e87516d5e0efc098910b966ded 100644
--- a/python/paddle/v2/framework/tests/test_default_scope_funcs.py
+++ b/python/paddle/v2/fluid/tests/test_default_scope_funcs.py
@@ -1,4 +1,4 @@
-from paddle.v2.framework.default_scope_funcs import *
+from paddle.v2.fluid.default_scope_funcs import *
 import unittest
diff --git a/python/paddle/v2/framework/tests/test_dropout_op.py b/python/paddle/v2/fluid/tests/test_dropout_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_dropout_op.py
rename to python/paddle/v2/fluid/tests/test_dropout_op.py
diff --git a/python/paddle/v2/framework/tests/test_dynamic_recurrent_op.py b/python/paddle/v2/fluid/tests/test_dynamic_recurrent_op.py
similarity index 98%
rename from python/paddle/v2/framework/tests/test_dynamic_recurrent_op.py
rename to python/paddle/v2/fluid/tests/test_dynamic_recurrent_op.py
index 70af9dbc49f5ff3222cf3d549a110931140b43c4..c2d8b48ea944ae40a451492b8e9fad38dda0835c 100644
--- a/python/paddle/v2/framework/tests/test_dynamic_recurrent_op.py
+++ b/python/paddle/v2/fluid/tests/test_dynamic_recurrent_op.py
@@ -1,7 +1,7 @@
 import logging
-import paddle.v2.framework.core as core
+import paddle.v2.fluid.core as core
 import unittest
-from paddle.v2.framework.op import Operator, DynamicRecurrentOp
+from paddle.v2.fluid.op import Operator, DynamicRecurrentOp
 import numpy as np

 # for simplicity, just one level LoD
diff --git a/python/paddle/v2/framework/tests/test_elementwise_add_op.py b/python/paddle/v2/fluid/tests/test_elementwise_add_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_elementwise_add_op.py
rename to python/paddle/v2/fluid/tests/test_elementwise_add_op.py
diff --git a/python/paddle/v2/framework/tests/test_elementwise_div_op.py b/python/paddle/v2/fluid/tests/test_elementwise_div_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_elementwise_div_op.py
rename to python/paddle/v2/fluid/tests/test_elementwise_div_op.py
diff --git a/python/paddle/v2/framework/tests/test_elementwise_mul_op.py b/python/paddle/v2/fluid/tests/test_elementwise_mul_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_elementwise_mul_op.py
rename to python/paddle/v2/fluid/tests/test_elementwise_mul_op.py
diff --git a/python/paddle/v2/framework/tests/test_elementwise_sub_op.py b/python/paddle/v2/fluid/tests/test_elementwise_sub_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_elementwise_sub_op.py
rename to python/paddle/v2/fluid/tests/test_elementwise_sub_op.py
diff --git a/python/paddle/v2/framework/tests/test_evaluator.py b/python/paddle/v2/fluid/tests/test_evaluator.py
similarity index 92%
rename from python/paddle/v2/framework/tests/test_evaluator.py
rename to python/paddle/v2/fluid/tests/test_evaluator.py
index 37dbfbc06bcd0da7e11924a048679c74a1cfb373..1d51205b703f83ec32c2e948394e5d3f5c87d1d9 100644
--- a/python/paddle/v2/framework/tests/test_evaluator.py
+++ b/python/paddle/v2/fluid/tests/test_evaluator.py
@@ -1,6 +1,6 @@
-from paddle.v2.framework.evaluator import Evaluator
-from paddle.v2.framework.op import Operator
-import paddle.v2.framework.core as core
+from paddle.v2.fluid.evaluator import Evaluator
+from paddle.v2.fluid.op import Operator
+import paddle.v2.fluid.core as core
 import unittest
 import op_test
 import numpy as np
diff --git a/python/paddle/v2/framework/tests/test_exception.py b/python/paddle/v2/fluid/tests/test_exception.py
similarity index 89%
rename from python/paddle/v2/framework/tests/test_exception.py
rename to python/paddle/v2/fluid/tests/test_exception.py
index 5ae048817cfcc1ec85e0d0e0c5db749da4521012..b871f40c4a07ae2db7559e5a0f15664b21e94402 100644
--- a/python/paddle/v2/framework/tests/test_exception.py
+++ b/python/paddle/v2/fluid/tests/test_exception.py
@@ -1,4 +1,4 @@
-import paddle.v2.framework.core as core
+import paddle.v2.fluid.core as core
 import unittest
diff --git a/python/paddle/v2/framework/tests/test_executor_and_mul.py b/python/paddle/v2/fluid/tests/test_executor_and_mul.py
similarity index 83%
rename from python/paddle/v2/framework/tests/test_executor_and_mul.py
rename to python/paddle/v2/fluid/tests/test_executor_and_mul.py
index c885cfbebd4b665ddf50adbc43673942dc949a0b..709250d0c86dde84ac22c37d8e2385ca4a80a40a 100644
--- a/python/paddle/v2/framework/tests/test_executor_and_mul.py
+++ b/python/paddle/v2/fluid/tests/test_executor_and_mul.py
@@ -1,8 +1,8 @@
 import unittest
-from paddle.v2.framework.layers import mul, data
-import paddle.v2.framework.core as core
-from paddle.v2.framework.executor import Executor
-from paddle.v2.framework.framework import g_main_program
+from paddle.v2.fluid.layers import mul, data
+import paddle.v2.fluid.core as core
+from paddle.v2.fluid.executor import Executor
+from paddle.v2.fluid.framework import g_main_program
 import numpy
diff --git a/python/paddle/v2/framework/tests/test_expand_op.py b/python/paddle/v2/fluid/tests/test_expand_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_expand_op.py
rename to python/paddle/v2/fluid/tests/test_expand_op.py
diff --git a/python/paddle/v2/framework/tests/test_feed_fetch_method.py b/python/paddle/v2/fluid/tests/test_feed_fetch_method.py
similarity index 95%
rename from python/paddle/v2/framework/tests/test_feed_fetch_method.py
rename to python/paddle/v2/fluid/tests/test_feed_fetch_method.py
index fbd659ece0188140e197982ea818d7c3897daf4e..178c85b0dd50df61b1fd35ef5d53ebbf39445cb4 100644
--- a/python/paddle/v2/framework/tests/test_feed_fetch_method.py
+++ b/python/paddle/v2/fluid/tests/test_feed_fetch_method.py
@@ -1,4 +1,4 @@
-import paddle.v2.framework.core as core
+import paddle.v2.fluid.core as core
 import unittest
 import numpy as np
diff --git a/python/paddle/v2/framework/tests/test_fill_constant_batch_size_like_op.py b/python/paddle/v2/fluid/tests/test_fill_constant_batch_size_like_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_fill_constant_batch_size_like_op.py
rename to python/paddle/v2/fluid/tests/test_fill_constant_batch_size_like_op.py
diff --git a/python/paddle/v2/framework/tests/test_fill_constant_op.py b/python/paddle/v2/fluid/tests/test_fill_constant_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_fill_constant_op.py
rename to python/paddle/v2/fluid/tests/test_fill_constant_op.py
diff --git a/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py b/python/paddle/v2/fluid/tests/test_fill_zeros_like_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_fill_zeros_like_op.py
rename to python/paddle/v2/fluid/tests/test_fill_zeros_like_op.py
diff --git a/python/paddle/v2/framework/tests/test_framework_debug_str.py b/python/paddle/v2/fluid/tests/test_framework_debug_str.py
similarity index 85%
rename from python/paddle/v2/framework/tests/test_framework_debug_str.py
rename to python/paddle/v2/fluid/tests/test_framework_debug_str.py
index 8fdf8f91171ee334fac93c05a4d49056fa0e803d..a4cbabdb36362c4ca14b76f366b648d6dbdbf7b3 100644
--- a/python/paddle/v2/framework/tests/test_framework_debug_str.py
+++ b/python/paddle/v2/fluid/tests/test_framework_debug_str.py
@@ -1,5 +1,5 @@
 import unittest
-from paddle.v2.framework.framework import Program
+from paddle.v2.fluid.framework import Program


 class TestDebugStringFramework(unittest.TestCase):
diff --git a/python/paddle/v2/framework/tests/test_gather_op.py b/python/paddle/v2/fluid/tests/test_gather_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_gather_op.py
rename to python/paddle/v2/fluid/tests/test_gather_op.py
diff --git a/python/paddle/v2/framework/tests/test_gaussian_random_op.py b/python/paddle/v2/fluid/tests/test_gaussian_random_op.py
similarity index 91%
rename from python/paddle/v2/framework/tests/test_gaussian_random_op.py
rename to python/paddle/v2/fluid/tests/test_gaussian_random_op.py
index 0dc7e091a5c8dd046f36cab7f79a15b2281cdd90..627ab4e23562f14538d85f2e21edeb7d72d940bb 100644
--- a/python/paddle/v2/framework/tests/test_gaussian_random_op.py
+++ b/python/paddle/v2/fluid/tests/test_gaussian_random_op.py
@@ -1,6 +1,6 @@
 import unittest
-import paddle.v2.framework.core as core
-from paddle.v2.framework.op import Operator
+import paddle.v2.fluid.core as core
+from paddle.v2.fluid.op import Operator
 import numpy
diff --git a/python/paddle/v2/framework/tests/test_gru_op.py b/python/paddle/v2/fluid/tests/test_gru_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_gru_op.py
rename to python/paddle/v2/fluid/tests/test_gru_op.py
diff --git a/python/paddle/v2/framework/tests/test_gru_unit_op.py b/python/paddle/v2/fluid/tests/test_gru_unit_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_gru_unit_op.py
rename to python/paddle/v2/fluid/tests/test_gru_unit_op.py
diff --git a/python/paddle/v2/framework/tests/test_huber_loss_op.py b/python/paddle/v2/fluid/tests/test_huber_loss_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_huber_loss_op.py
rename to python/paddle/v2/fluid/tests/test_huber_loss_op.py
diff --git a/python/paddle/v2/framework/tests/test_image_classification_layer.py b/python/paddle/v2/fluid/tests/test_image_classification_layer.py
similarity index 95%
rename from python/paddle/v2/framework/tests/test_image_classification_layer.py
rename to python/paddle/v2/fluid/tests/test_image_classification_layer.py
index b1a267ec32b1c937b946bee82e41b846ebbf1288..bf5444107fa1609e67b09823b82e5fb92234b0a4 100644
--- a/python/paddle/v2/framework/tests/test_image_classification_layer.py
+++ b/python/paddle/v2/fluid/tests/test_image_classification_layer.py
@@ -1,8 +1,8 @@
 import unittest

-import paddle.v2.framework.layers as layers
-import paddle.v2.framework.nets as nets
-from paddle.v2.framework.framework import Program
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.nets as nets
+from paddle.v2.fluid.framework import Program


 def conv_block(input,
diff --git a/python/paddle/v2/framework/tests/test_infer_shape.py b/python/paddle/v2/fluid/tests/test_infer_shape.py
similarity index 98%
rename from python/paddle/v2/framework/tests/test_infer_shape.py
rename to python/paddle/v2/fluid/tests/test_infer_shape.py
index 2b2995f5e22d8c50d67498688c069252bf6e02fc..9f6695ce02de749178046fbb613a58ba591b3dbc 100644
--- a/python/paddle/v2/framework/tests/test_infer_shape.py
+++ b/python/paddle/v2/fluid/tests/test_infer_shape.py
@@ -1,6 +1,6 @@
 import unittest

-import paddle.v2.framework.core as core
+import paddle.v2.fluid.core as core


 class TestInferShape(unittest.TestCase):
diff --git a/python/paddle/v2/framework/tests/test_inference_model_io.py b/python/paddle/v2/fluid/tests/test_inference_model_io.py
similarity index 90%
rename from python/paddle/v2/framework/tests/test_inference_model_io.py
rename to python/paddle/v2/fluid/tests/test_inference_model_io.py
index 48984f86a1864baade58aeb8e35c6065cc2a4bbb..98b95713b73e8eba93bd6a58eaaed603cfae7952 100644
--- a/python/paddle/v2/framework/tests/test_inference_model_io.py
+++ b/python/paddle/v2/fluid/tests/test_inference_model_io.py
@@ -1,11 +1,11 @@
 import paddle.v2 as paddle
-import paddle.v2.framework.layers as layers
-import paddle.v2.framework.core as core
-import paddle.v2.framework.optimizer as optimizer
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.core as core
+import paddle.v2.fluid.optimizer as optimizer

-from paddle.v2.framework.framework import Program
-from paddle.v2.framework.io import save_inference_model, load_inference_model
-import paddle.v2.framework.executor as executor
+from paddle.v2.fluid.framework import Program
+from paddle.v2.fluid.io import save_inference_model, load_inference_model
+import paddle.v2.fluid.executor as executor

 import unittest
 import numpy as np
diff --git a/python/paddle/v2/framework/tests/test_initializer.py b/python/paddle/v2/fluid/tests/test_initializer.py
similarity index 98%
rename from python/paddle/v2/framework/tests/test_initializer.py
rename to python/paddle/v2/fluid/tests/test_initializer.py
index bd4d2e39d770aebb7468d516f463533185ea8680..f2eb79b209627f5814847db6d96c0a17300d9b5a 100644
--- a/python/paddle/v2/framework/tests/test_initializer.py
+++ b/python/paddle/v2/fluid/tests/test_initializer.py
@@ -1,8 +1,8 @@
 import numpy as np
 import unittest

-import paddle.v2.framework.framework as framework
-import paddle.v2.framework.initializer as initializer
+import paddle.v2.fluid.framework as framework
+import paddle.v2.fluid.initializer as initializer

 DELTA = 0.00001
diff --git a/python/paddle/v2/framework/tests/test_l1_norm_op.py b/python/paddle/v2/fluid/tests/test_l1_norm_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_l1_norm_op.py
rename to python/paddle/v2/fluid/tests/test_l1_norm_op.py
diff --git a/python/paddle/v2/framework/tests/test_layers.py b/python/paddle/v2/fluid/tests/test_layers.py
similarity index 97%
rename from python/paddle/v2/framework/tests/test_layers.py
rename to python/paddle/v2/fluid/tests/test_layers.py
index b42af5ea45d54723e96279f9e16f82a1d52ad236..3d18e7ce3a4dc6c6b917a1000de39fca71f6ac18 100644
--- a/python/paddle/v2/framework/tests/test_layers.py
+++ b/python/paddle/v2/fluid/tests/test_layers.py
@@ -1,7 +1,7 @@
-import paddle.v2.framework.layers as layers
-import paddle.v2.framework.nets as nets
-from paddle.v2.framework.framework import Program
-import paddle.v2.framework.core as core
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.nets as nets
+from paddle.v2.fluid.framework import Program
+import paddle.v2.fluid.core as core
 import unittest
diff --git a/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py b/python/paddle/v2/fluid/tests/test_linear_chain_crf_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_linear_chain_crf_op.py
rename to python/paddle/v2/fluid/tests/test_linear_chain_crf_op.py
diff --git a/python/paddle/v2/framework/tests/test_lod_array_length_op.py b/python/paddle/v2/fluid/tests/test_lod_array_length_op.py
similarity index 79%
rename from python/paddle/v2/framework/tests/test_lod_array_length_op.py
rename to python/paddle/v2/fluid/tests/test_lod_array_length_op.py
index af2b4d705e7ec121bd5f1350f0a642ae8c44bf1e..a01ae83772185df218b8c453557dc0cac719673b 100644
--- a/python/paddle/v2/framework/tests/test_lod_array_length_op.py
+++ b/python/paddle/v2/fluid/tests/test_lod_array_length_op.py
@@ -1,7 +1,7 @@
 import unittest
-import paddle.v2.framework.layers as layers
-from paddle.v2.framework.executor import Executor
-import paddle.v2.framework.core as core
+import paddle.v2.fluid.layers as layers
+from paddle.v2.fluid.executor import Executor
+import paddle.v2.fluid.core as core
 import numpy
diff --git a/python/paddle/v2/framework/tests/test_lod_rank_table.py b/python/paddle/v2/fluid/tests/test_lod_rank_table.py
similarity index 78%
rename from python/paddle/v2/framework/tests/test_lod_rank_table.py
rename to python/paddle/v2/fluid/tests/test_lod_rank_table.py
index 408145c10f46e24e8a54b05b4f3afa9231b6ffd6..bbc11930b9e804c2769cc590c298c6e90dc36ca6 100644
--- a/python/paddle/v2/framework/tests/test_lod_rank_table.py
+++ b/python/paddle/v2/fluid/tests/test_lod_rank_table.py
@@ -1,7 +1,7 @@
-from paddle.v2.framework.layers import lod_rank_table, data
-from paddle.v2.framework.executor import Executor
-from paddle.v2.framework.framework import g_main_program
-import paddle.v2.framework.core as core
+from paddle.v2.fluid.layers import lod_rank_table, data
+from paddle.v2.fluid.executor import Executor
+from paddle.v2.fluid.framework import g_main_program
+import paddle.v2.fluid.core as core
 import numpy
 import unittest
diff --git a/python/paddle/v2/fluid/tests/test_lod_reset_op.py b/python/paddle/v2/fluid/tests/test_lod_reset_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..652ccecfa443fc95f08f52df766709cb550f4049
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/test_lod_reset_op.py
@@ -0,0 +1,64 @@
+import unittest
+import numpy as np
+from op_test import OpTest
+
+
+class TestLodResetOpByAttr(OpTest):
+    def setUp(self):
+        self.op_type = "lod_reset"
+        x = np.random.random((10, 20)).astype("float32")
+        lod = [[0, 3, 5, 10]]
+        target_lod_0 = [0, 7, 10]
+        self.inputs = {'X': (x, lod)}
+        self.attrs = {'target_lod': target_lod_0}
+        self.outputs = {'Out': (x, [target_lod_0])}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(["X"], "Out")
+
+
+class TestLodResetOpByInput(OpTest):
+    def setUp(self):
+        self.op_type = "lod_reset"
+        x = np.random.random((10, 20)).astype("float32")
+        lod = [[0, 3, 5, 10]]
+        target_lod_0 = [0, 4, 7, 10]
+        self.inputs = {
+            'X': (x, lod),
+            'TargetLoD': np.array([target_lod_0]).astype('int32')
+        }
+        self.outputs = {'Out': (x, [target_lod_0])}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(["X"], "Out", no_grad_set=set("TargetLoD"))
+
+
+class TestLodResetOpBoth(OpTest):
+    def setUp(self):
+        self.op_type = "lod_reset"
+        x = np.random.random((10, 20)).astype("float32")
+        lod = [[0, 3, 5, 10]]
+        target_lod_0_attr = [0, 7, 10]
+        target_lod_0_in = [0, 4, 7, 10]
+        self.inputs = {
+            'X': (x, lod),
+            'TargetLoD': np.array(target_lod_0_in).astype('int32')
+        }
+        self.attrs = {'target_lod': target_lod_0_attr}
+        self.outputs = {'Out': (x, [target_lod_0_in])}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(["X"], "Out", no_grad_set=set("TargetLoD"))
+
+
+if __name__ == '__main__':
+    unittest.main()
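
lod_reset keeps the data untouched and replaces the outermost LoD level: with source offsets [0, 3, 5, 10] and target [0, 7, 10], the same ten rows are regrouped from three sequences of lengths 3, 2 and 5 into two sequences of lengths 7 and 3. In numpy terms (a sketch of the semantics, not of the kernel):

    import numpy as np

    x = np.random.random((10, 20)).astype('float32')
    src_lod = [0, 3, 5, 10]    # three sequences: rows 0:3, 3:5, 5:10
    target_lod = [0, 7, 10]    # two sequences: rows 0:7, 7:10

    out_data = x               # data passes through unchanged
    out_lod = [target_lod]
    # a valid target LoD must start at 0 and end at the row count
    assert target_lod[0] == 0 and target_lod[-1] == x.shape[0]

When both the TargetLoD input and the target_lod attribute are supplied, the input takes precedence, which is what TestLodResetOpBoth asserts.
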
diff --git a/python/paddle/v2/framework/tests/test_lod_tensor_array.py b/python/paddle/v2/fluid/tests/test_lod_tensor_array.py
similarity index 96%
rename from python/paddle/v2/framework/tests/test_lod_tensor_array.py
rename to python/paddle/v2/fluid/tests/test_lod_tensor_array.py
index a433bcf622b14a1d2d33b5b98d555e1a21e4b9e8..d6d3e23fd8898a62528d63795d1bff1b72752477 100644
--- a/python/paddle/v2/framework/tests/test_lod_tensor_array.py
+++ b/python/paddle/v2/fluid/tests/test_lod_tensor_array.py
@@ -1,5 +1,5 @@
 import unittest
-import paddle.v2.framework.core as core
+import paddle.v2.fluid.core as core
 import numpy
diff --git a/python/paddle/v2/framework/tests/test_lod_tensor_array_ops.py b/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py
similarity index 96%
rename from python/paddle/v2/framework/tests/test_lod_tensor_array_ops.py
rename to python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py
index e9713666b3f64d7a39afadab7da6b22f149b8cf8..b18cb6b49fa41f26e1b6de1128690507c5a2f099 100644
--- a/python/paddle/v2/framework/tests/test_lod_tensor_array_ops.py
+++ b/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py
@@ -1,10 +1,10 @@
 import unittest
-import paddle.v2.framework.core as core
+import paddle.v2.fluid.core as core
 import numpy
-import paddle.v2.framework.layers as layers
-from paddle.v2.framework.framework import Program
-from paddle.v2.framework.executor import Executor
-from paddle.v2.framework.backward import append_backward_ops
+import paddle.v2.fluid.layers as layers
+from paddle.v2.fluid.framework import Program
+from paddle.v2.fluid.executor import Executor
+from paddle.v2.fluid.backward import append_backward_ops


 class TestCPULoDTensorArrayOps(unittest.TestCase):
diff --git a/python/paddle/v2/framework/tests/test_lookup_table_op.py b/python/paddle/v2/fluid/tests/test_lookup_table_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_lookup_table_op.py
rename to python/paddle/v2/fluid/tests/test_lookup_table_op.py
diff --git a/python/paddle/v2/framework/tests/test_lrn_op.py b/python/paddle/v2/fluid/tests/test_lrn_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_lrn_op.py
rename to python/paddle/v2/fluid/tests/test_lrn_op.py
diff --git a/python/paddle/v2/framework/tests/test_lstm_op.py b/python/paddle/v2/fluid/tests/test_lstm_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_lstm_op.py
rename to python/paddle/v2/fluid/tests/test_lstm_op.py
diff --git a/python/paddle/v2/framework/tests/test_lstm_unit_op.py b/python/paddle/v2/fluid/tests/test_lstm_unit_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_lstm_unit_op.py
rename to python/paddle/v2/fluid/tests/test_lstm_unit_op.py
diff --git a/python/paddle/v2/framework/tests/test_margin_rank_loss_op.py b/python/paddle/v2/fluid/tests/test_margin_rank_loss_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_margin_rank_loss_op.py
rename to python/paddle/v2/fluid/tests/test_margin_rank_loss_op.py
diff --git a/python/paddle/v2/framework/tests/test_matmul_op.py b/python/paddle/v2/fluid/tests/test_matmul_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_matmul_op.py
rename to python/paddle/v2/fluid/tests/test_matmul_op.py
diff --git a/python/paddle/v2/framework/tests/test_mean_op.py b/python/paddle/v2/fluid/tests/test_mean_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_mean_op.py
rename to python/paddle/v2/fluid/tests/test_mean_op.py
diff --git a/python/paddle/v2/framework/tests/test_minus_op.py b/python/paddle/v2/fluid/tests/test_minus_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_minus_op.py
rename to python/paddle/v2/fluid/tests/test_minus_op.py
diff --git a/python/paddle/v2/framework/tests/test_modified_huber_loss_op.py b/python/paddle/v2/fluid/tests/test_modified_huber_loss_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_modified_huber_loss_op.py
rename to python/paddle/v2/fluid/tests/test_modified_huber_loss_op.py
diff --git a/python/paddle/v2/framework/tests/test_momentum_op.py b/python/paddle/v2/fluid/tests/test_momentum_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_momentum_op.py
rename to python/paddle/v2/fluid/tests/test_momentum_op.py
diff --git a/python/paddle/v2/framework/tests/test_mul_op.py b/python/paddle/v2/fluid/tests/test_mul_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_mul_op.py
rename to python/paddle/v2/fluid/tests/test_mul_op.py
diff --git a/python/paddle/v2/framework/tests/test_multiplex_op.py b/python/paddle/v2/fluid/tests/test_multiplex_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_multiplex_op.py
rename to python/paddle/v2/fluid/tests/test_multiplex_op.py
diff --git a/python/paddle/v2/framework/tests/test_nccl_init_op.py b/python/paddle/v2/fluid/tests/test_nccl_init_op.py
similarity index 91%
rename from python/paddle/v2/framework/tests/test_nccl_init_op.py
rename to python/paddle/v2/fluid/tests/test_nccl_init_op.py
index 054909fdf5517a68c6a07971c65a1d5bdc20d4fa..a536800ccd81fdc2f3b7c8320cede4f8ecf3a8cb 100644
--- a/python/paddle/v2/framework/tests/test_nccl_init_op.py
+++ b/python/paddle/v2/fluid/tests/test_nccl_init_op.py
@@ -1,8 +1,8 @@
 import unittest, os
 import numpy as np
 import paddle.v2 as paddle
-from paddle.v2.framework.op import Operator
-import paddle.v2.framework.core as core
+from paddle.v2.fluid.op import Operator
+import paddle.v2.fluid.core as core
 from op_test import OpTest, create_op, set_input

 if not core.is_compile_gpu():
diff --git a/python/paddle/v2/framework/tests/test_net.py b/python/paddle/v2/fluid/tests/test_net.py
similarity index 93%
rename from python/paddle/v2/framework/tests/test_net.py
rename to python/paddle/v2/fluid/tests/test_net.py
index 8503257feb8e1a5802f3f889f72c559a2aaa583a..318df08a9e73ac95cab73c34182bc6220ef6c681 100644
--- a/python/paddle/v2/framework/tests/test_net.py
+++ b/python/paddle/v2/fluid/tests/test_net.py
@@ -1,5 +1,5 @@
-import paddle.v2.framework.core as core
-from paddle.v2.framework.op import Operator
+import paddle.v2.fluid.core as core
+from paddle.v2.fluid.op import Operator
 import unittest
diff --git a/python/paddle/v2/framework/tests/test_op_support_gpu.py b/python/paddle/v2/fluid/tests/test_op_support_gpu.py
similarity index 84%
rename from python/paddle/v2/framework/tests/test_op_support_gpu.py
rename to python/paddle/v2/fluid/tests/test_op_support_gpu.py
index dd36c666c440a5c378dfceac4502cd8277417412..a0eb4bd5fd2cc178ffe0763efdee61524ad6d4bd 100644
--- a/python/paddle/v2/framework/tests/test_op_support_gpu.py
+++ b/python/paddle/v2/fluid/tests/test_op_support_gpu.py
@@ -1,5 +1,5 @@
 import unittest
-import paddle.v2.framework.core as core
+import paddle.v2.fluid.core as core


 class TestOpSupportGPU(unittest.TestCase):
diff --git a/python/paddle/v2/framework/tests/test_operator.py b/python/paddle/v2/fluid/tests/test_operator.py
similarity index 97%
rename from python/paddle/v2/framework/tests/test_operator.py
rename to python/paddle/v2/fluid/tests/test_operator.py
index 98f6b2f5ee639120557cb85b3ada6d2931f7d0d2..4aa022ef90159cd96eed4e4dbe30cf5d1e8a41a7 100644
--- a/python/paddle/v2/framework/tests/test_operator.py
+++ b/python/paddle/v2/fluid/tests/test_operator.py
@@ -1,7 +1,7 @@
 import unittest
-import paddle.v2.framework.op as op
-import paddle.v2.framework.core as core
-import paddle.v2.framework.proto.framework_pb2 as framework_pb2
+import paddle.v2.fluid.op as op
+import paddle.v2.fluid.core as core
+import paddle.v2.fluid.proto.framework_pb2 as framework_pb2


 class TestGetAllProtos(unittest.TestCase):
diff --git a/python/paddle/v2/framework/tests/test_operator_desc.py b/python/paddle/v2/fluid/tests/test_operator_desc.py
similarity index 96%
rename from python/paddle/v2/framework/tests/test_operator_desc.py
rename to python/paddle/v2/fluid/tests/test_operator_desc.py
index a0bc4e0b91602cfc90f91a1e2dd4bce22c0dbf6d..e8362d2e9c6038c04c24dce35de8c53bfde78142 100644
--- a/python/paddle/v2/framework/tests/test_operator_desc.py
+++ b/python/paddle/v2/fluid/tests/test_operator_desc.py
@@ -1,6 +1,6 @@
 import unittest
-from paddle.v2.framework.framework import Variable, Program, g_main_program
-import paddle.v2.framework.core as core
+from paddle.v2.fluid.framework import Variable, Program, g_main_program
+import paddle.v2.fluid.core as core


 class TestOperator(unittest.TestCase):
diff --git a/python/paddle/v2/framework/tests/test_optimizer.py b/python/paddle/v2/fluid/tests/test_optimizer.py
similarity index 98%
rename from python/paddle/v2/framework/tests/test_optimizer.py
rename to python/paddle/v2/fluid/tests/test_optimizer.py
index a39e7402600c7a94301de030c90ea51264248cf1..0ebf7cdf208c41eacfdff88f59455584eff4ff8f 100644
--- a/python/paddle/v2/framework/tests/test_optimizer.py
+++ b/python/paddle/v2/fluid/tests/test_optimizer.py
@@ -1,8 +1,8 @@
 import unittest

-import paddle.v2.framework.framework as framework
-import paddle.v2.framework.optimizer as optimizer
-from paddle.v2.framework.backward import append_backward_ops
+import paddle.v2.fluid.framework as framework
+import paddle.v2.fluid.optimizer as optimizer
+from paddle.v2.fluid.backward import append_backward_ops


 class TestOptimizer(unittest.TestCase):
diff --git a/python/paddle/v2/framework/tests/test_pad_op.py b/python/paddle/v2/fluid/tests/test_pad_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_pad_op.py
rename to python/paddle/v2/fluid/tests/test_pad_op.py
diff --git a/python/paddle/v2/framework/tests/test_parameter.py b/python/paddle/v2/fluid/tests/test_parameter.py
similarity index 87%
rename from python/paddle/v2/framework/tests/test_parameter.py
rename to python/paddle/v2/fluid/tests/test_parameter.py
index f04eb4cf27276b0f7da0793c97742ac42e4583be..71a1bd2aaf5a9c6362ce0d35c256ed228e942fce 100644
--- a/python/paddle/v2/framework/tests/test_parameter.py
+++ b/python/paddle/v2/fluid/tests/test_parameter.py
@@ -1,6 +1,6 @@
 import unittest
-from paddle.v2.framework.framework import g_main_program
-import paddle.v2.framework.core as core
+from paddle.v2.fluid.framework import g_main_program
+import paddle.v2.fluid.core as core


 class TestParameter(unittest.TestCase):
diff --git a/python/paddle/v2/framework/tests/test_pool2d_op.py b/python/paddle/v2/fluid/tests/test_pool2d_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_pool2d_op.py
rename to python/paddle/v2/fluid/tests/test_pool2d_op.py
diff --git a/python/paddle/v2/framework/tests/test_pool3d_op.py b/python/paddle/v2/fluid/tests/test_pool3d_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_pool3d_op.py
rename to python/paddle/v2/fluid/tests/test_pool3d_op.py
diff --git a/python/paddle/v2/framework/tests/test_pool_max_op.py b/python/paddle/v2/fluid/tests/test_pool_max_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_pool_max_op.py
rename to python/paddle/v2/fluid/tests/test_pool_max_op.py
diff --git a/python/paddle/v2/framework/tests/test_positive_negative_pair_op.py b/python/paddle/v2/fluid/tests/test_positive_negative_pair_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_positive_negative_pair_op.py
rename to python/paddle/v2/fluid/tests/test_positive_negative_pair_op.py
diff --git a/python/paddle/v2/framework/tests/test_precision_recall_op.py b/python/paddle/v2/fluid/tests/test_precision_recall_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_precision_recall_op.py
rename to python/paddle/v2/fluid/tests/test_precision_recall_op.py
diff --git a/python/paddle/v2/framework/tests/test_prelu_op.py b/python/paddle/v2/fluid/tests/test_prelu_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_prelu_op.py
rename to python/paddle/v2/fluid/tests/test_prelu_op.py
diff --git a/python/paddle/v2/framework/tests/test_program.py b/python/paddle/v2/fluid/tests/test_program.py
similarity index 96%
rename from python/paddle/v2/framework/tests/test_program.py
rename to python/paddle/v2/fluid/tests/test_program.py
index 7be67b6614ee3302a319289b821a214a81b6f64e..ef2daf6916e14c015a39ae0193948e7ff6531449 100644
--- a/python/paddle/v2/framework/tests/test_program.py
+++ b/python/paddle/v2/fluid/tests/test_program.py
@@ -1,8 +1,8 @@
 import unittest

-import paddle.v2.framework.core as core
-from paddle.v2.framework.framework import Program
-from paddle.v2.framework.framework import g_main_program
+import paddle.v2.fluid.core as core
+from paddle.v2.fluid.framework import Program
+from paddle.v2.fluid.framework import g_main_program


 class TestProgram(unittest.TestCase):
diff --git a/python/paddle/v2/framework/tests/test_protobuf.py b/python/paddle/v2/fluid/tests/test_protobuf.py
similarity index 92%
rename from python/paddle/v2/framework/tests/test_protobuf.py
rename to python/paddle/v2/fluid/tests/test_protobuf.py
index 848a396b3b6eec57d500b464780b64f339b09e94..e064374176fa221cfd042b7dbd2ddcb3b5ec41ec 100644
--- a/python/paddle/v2/framework/tests/test_protobuf.py
+++ b/python/paddle/v2/fluid/tests/test_protobuf.py
@@ -1,4 +1,4 @@
-import paddle.v2.framework.proto.framework_pb2 as framework_pb2
+import paddle.v2.fluid.proto.framework_pb2 as framework_pb2
 import unittest
diff --git a/python/paddle/v2/framework/tests/test_protobuf_descs.py b/python/paddle/v2/fluid/tests/test_protobuf_descs.py
similarity index 99%
rename from python/paddle/v2/framework/tests/test_protobuf_descs.py
rename to python/paddle/v2/fluid/tests/test_protobuf_descs.py
index 2fd3d5d165ada5026510e0dc3e2c55b6e0596ff3..098a9802dfc6763ce2a2356b7267a439145b7939 100644
--- a/python/paddle/v2/framework/tests/test_protobuf_descs.py
+++ b/python/paddle/v2/fluid/tests/test_protobuf_descs.py
@@ -1,5 +1,5 @@
 import unittest
-import paddle.v2.framework.core as core
+import paddle.v2.fluid.core as core


 class TestOpDesc(unittest.TestCase):
diff --git a/python/paddle/v2/framework/tests/test_proximal_adagrad_op.py b/python/paddle/v2/fluid/tests/test_proximal_adagrad_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_proximal_adagrad_op.py
rename to python/paddle/v2/fluid/tests/test_proximal_adagrad_op.py
diff --git a/python/paddle/v2/framework/tests/test_proximal_gd_op.py b/python/paddle/v2/fluid/tests/test_proximal_gd_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_proximal_gd_op.py
rename to python/paddle/v2/fluid/tests/test_proximal_gd_op.py
diff --git a/python/paddle/v2/framework/tests/test_rank_loss_op.py b/python/paddle/v2/fluid/tests/test_rank_loss_op.py
similarity index 100%
rename from python/paddle/v2/framework/tests/test_rank_loss_op.py
rename to python/paddle/v2/fluid/tests/test_rank_loss_op.py
diff --git a/python/paddle/v2/framework/tests/test_recurrent_op.py b/python/paddle/v2/fluid/tests/test_recurrent_op.py
similarity index 98%
rename from python/paddle/v2/framework/tests/test_recurrent_op.py
rename to python/paddle/v2/fluid/tests/test_recurrent_op.py
100644 --- a/python/paddle/v2/framework/tests/test_recurrent_op.py +++ b/python/paddle/v2/fluid/tests/test_recurrent_op.py @@ -1,11 +1,11 @@ import unittest -import paddle.v2.framework.layers as layers -from paddle.v2.framework.framework import Program -from paddle.v2.framework.executor import Executor -from paddle.v2.framework.backward import append_backward_ops +import paddle.v2.fluid.layers as layers +from paddle.v2.fluid.framework import Program +from paddle.v2.fluid.executor import Executor +from paddle.v2.fluid.backward import append_backward_ops import numpy as np -import paddle.v2.framework.core as core +import paddle.v2.fluid.core as core class PyRNNBase(object): diff --git a/python/paddle/v2/framework/tests/test_reduce_op.py b/python/paddle/v2/fluid/tests/test_reduce_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_reduce_op.py rename to python/paddle/v2/fluid/tests/test_reduce_op.py diff --git a/python/paddle/v2/framework/tests/test_regularizer.py b/python/paddle/v2/fluid/tests/test_regularizer.py similarity index 92% rename from python/paddle/v2/framework/tests/test_regularizer.py rename to python/paddle/v2/fluid/tests/test_regularizer.py index b21dceb584bdc660e48598a600f57cb6095b3802..f5d1eb3b96211bd7c7335dbe116a1d765d7bae50 100644 --- a/python/paddle/v2/framework/tests/test_regularizer.py +++ b/python/paddle/v2/fluid/tests/test_regularizer.py @@ -1,9 +1,9 @@ import unittest -import paddle.v2.framework.framework as framework -import paddle.v2.framework.optimizer as optimizer -import paddle.v2.framework.regularizer as regularizer -from paddle.v2.framework.backward import append_backward_ops +import paddle.v2.fluid.framework as framework +import paddle.v2.fluid.optimizer as optimizer +import paddle.v2.fluid.regularizer as regularizer +from paddle.v2.fluid.backward import append_backward_ops class TestL2DecayRegularizer(unittest.TestCase): diff --git a/python/paddle/v2/framework/tests/test_reshape_op.py b/python/paddle/v2/fluid/tests/test_reshape_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_reshape_op.py rename to python/paddle/v2/fluid/tests/test_reshape_op.py diff --git a/python/paddle/v2/framework/tests/test_rmsprop_op.py b/python/paddle/v2/fluid/tests/test_rmsprop_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_rmsprop_op.py rename to python/paddle/v2/fluid/tests/test_rmsprop_op.py diff --git a/python/paddle/v2/framework/tests/test_rnn_memory_helper_op.py b/python/paddle/v2/fluid/tests/test_rnn_memory_helper_op.py similarity index 95% rename from python/paddle/v2/framework/tests/test_rnn_memory_helper_op.py rename to python/paddle/v2/fluid/tests/test_rnn_memory_helper_op.py index 731beff17cc96d26c2d9390a956c774b8676b179..a3cba92504a28590083df57e69f7662a887d94a6 100644 --- a/python/paddle/v2/framework/tests/test_rnn_memory_helper_op.py +++ b/python/paddle/v2/fluid/tests/test_rnn_memory_helper_op.py @@ -1,10 +1,10 @@ import unittest -from paddle.v2.framework.framework import Program -from paddle.v2.framework.executor import Executor -from paddle.v2.framework.backward import append_backward_ops +from paddle.v2.fluid.framework import Program +from paddle.v2.fluid.executor import Executor +from paddle.v2.fluid.backward import append_backward_ops import numpy as np -import paddle.v2.framework.core as core +import paddle.v2.fluid.core as core def create_tensor(np_data, place): diff --git a/python/paddle/v2/framework/tests/test_scale_op.py b/python/paddle/v2/fluid/tests/test_scale_op.py 
similarity index 100% rename from python/paddle/v2/framework/tests/test_scale_op.py rename to python/paddle/v2/fluid/tests/test_scale_op.py diff --git a/python/paddle/v2/framework/tests/test_scatter_op.py b/python/paddle/v2/fluid/tests/test_scatter_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_scatter_op.py rename to python/paddle/v2/fluid/tests/test_scatter_op.py diff --git a/python/paddle/v2/framework/tests/test_scope.py b/python/paddle/v2/fluid/tests/test_scope.py similarity index 81% rename from python/paddle/v2/framework/tests/test_scope.py rename to python/paddle/v2/fluid/tests/test_scope.py index 14743654792716e4a7ebce5238b142addc86337e..e4857b590aa6e09f1fa37c4a8a70a3ec9495b085 100644 --- a/python/paddle/v2/framework/tests/test_scope.py +++ b/python/paddle/v2/fluid/tests/test_scope.py @@ -1,22 +1,22 @@ -import paddle.v2.framework.core +import paddle.v2.fluid.core import unittest class TestScope(unittest.TestCase): def test_create_destroy(self): - paddle_c = paddle.v2.framework.core + paddle_c = paddle.v2.fluid.core scope = paddle_c.Scope() self.assertIsNotNone(scope) scope_with_parent = scope.new_scope() self.assertIsNotNone(scope_with_parent) def test_none_variable(self): - paddle_c = paddle.v2.framework.core + paddle_c = paddle.v2.fluid.core scope = paddle_c.Scope() self.assertIsNone(scope.find_var("test")) def test_create_var_get_var(self): - paddle_c = paddle.v2.framework.core + paddle_c = paddle.v2.fluid.core scope = paddle_c.Scope() var_a = scope.var("var_a") self.assertIsNotNone(var_a) @@ -25,7 +25,7 @@ class TestScope(unittest.TestCase): self.assertIsNotNone(scope2.find_var('var_a')) def test_var_get_int(self): - paddle_c = paddle.v2.framework.core + paddle_c = paddle.v2.fluid.core scope = paddle_c.Scope() var = scope.var("test_int") var.set_int(10) diff --git a/python/paddle/v2/framework/tests/test_selected_rows.py b/python/paddle/v2/fluid/tests/test_selected_rows.py similarity index 96% rename from python/paddle/v2/framework/tests/test_selected_rows.py rename to python/paddle/v2/fluid/tests/test_selected_rows.py index e8a930cb08c42b48f678bdd7bdb7698923535d4f..93daf37aa2ceb8a599973f7b02874f23fe0763ff 100644 --- a/python/paddle/v2/framework/tests/test_selected_rows.py +++ b/python/paddle/v2/fluid/tests/test_selected_rows.py @@ -1,4 +1,4 @@ -import paddle.v2.framework.core as core +import paddle.v2.fluid.core as core import unittest import numpy as np diff --git a/python/paddle/v2/framework/tests/test_seq_concat_op.py b/python/paddle/v2/fluid/tests/test_seq_concat_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_seq_concat_op.py rename to python/paddle/v2/fluid/tests/test_seq_concat_op.py diff --git a/python/paddle/v2/framework/tests/test_seq_conv.py b/python/paddle/v2/fluid/tests/test_seq_conv.py similarity index 100% rename from python/paddle/v2/framework/tests/test_seq_conv.py rename to python/paddle/v2/fluid/tests/test_seq_conv.py diff --git a/python/paddle/v2/framework/tests/test_seq_expand.py b/python/paddle/v2/fluid/tests/test_seq_expand.py similarity index 100% rename from python/paddle/v2/framework/tests/test_seq_expand.py rename to python/paddle/v2/fluid/tests/test_seq_expand.py diff --git a/python/paddle/v2/framework/tests/test_seq_pool.py b/python/paddle/v2/fluid/tests/test_seq_pool.py similarity index 100% rename from python/paddle/v2/framework/tests/test_seq_pool.py rename to python/paddle/v2/fluid/tests/test_seq_pool.py diff --git a/python/paddle/v2/framework/tests/test_sequence_softmax_op.py 
b/python/paddle/v2/fluid/tests/test_sequence_softmax_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_sequence_softmax_op.py rename to python/paddle/v2/fluid/tests/test_sequence_softmax_op.py diff --git a/python/paddle/v2/framework/tests/test_sgd_op.py b/python/paddle/v2/fluid/tests/test_sgd_op.py similarity index 97% rename from python/paddle/v2/framework/tests/test_sgd_op.py rename to python/paddle/v2/fluid/tests/test_sgd_op.py index 01262bba4d43adaed179baef88ccab6e69b0884b..ca05a381f06cfd40b7939dbda8d4f1f4aacd0271 100644 --- a/python/paddle/v2/framework/tests/test_sgd_op.py +++ b/python/paddle/v2/fluid/tests/test_sgd_op.py @@ -1,7 +1,7 @@ import unittest import numpy as np -import paddle.v2.framework.core as core -from paddle.v2.framework.op import Operator +import paddle.v2.fluid.core as core +from paddle.v2.fluid.op import Operator from op_test import OpTest diff --git a/python/paddle/v2/framework/tests/test_shrink_rnn_memory.py b/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py similarity index 86% rename from python/paddle/v2/framework/tests/test_shrink_rnn_memory.py rename to python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py index 2090455b969806685b525f1e588b6570e3072430..1a3b88e18e38b88d75ad17a0bb6a2965d1e60406 100644 --- a/python/paddle/v2/framework/tests/test_shrink_rnn_memory.py +++ b/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py @@ -1,9 +1,9 @@ import unittest -import paddle.v2.framework.core as core -from paddle.v2.framework.executor import Executor -import paddle.v2.framework.layers as layers -from paddle.v2.framework.backward import append_backward_ops -from paddle.v2.framework.framework import g_main_program +import paddle.v2.fluid.core as core +from paddle.v2.fluid.executor import Executor +import paddle.v2.fluid.layers as layers +from paddle.v2.fluid.backward import append_backward_ops +from paddle.v2.fluid.framework import g_main_program import numpy diff --git a/python/paddle/v2/framework/tests/test_sigmoid_cross_entropy_with_logits_op.py b/python/paddle/v2/fluid/tests/test_sigmoid_cross_entropy_with_logits_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_sigmoid_cross_entropy_with_logits_op.py rename to python/paddle/v2/fluid/tests/test_sigmoid_cross_entropy_with_logits_op.py diff --git a/python/paddle/v2/framework/tests/test_sign_op.py b/python/paddle/v2/fluid/tests/test_sign_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_sign_op.py rename to python/paddle/v2/fluid/tests/test_sign_op.py diff --git a/python/paddle/v2/framework/tests/test_smooth_l1_loss_op.py b/python/paddle/v2/fluid/tests/test_smooth_l1_loss_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_smooth_l1_loss_op.py rename to python/paddle/v2/fluid/tests/test_smooth_l1_loss_op.py diff --git a/python/paddle/v2/framework/tests/test_softmax_op.py b/python/paddle/v2/fluid/tests/test_softmax_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_softmax_op.py rename to python/paddle/v2/fluid/tests/test_softmax_op.py diff --git a/python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py b/python/paddle/v2/fluid/tests/test_softmax_with_cross_entropy_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py rename to python/paddle/v2/fluid/tests/test_softmax_with_cross_entropy_op.py diff --git a/python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py 
b/python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py new file mode 100644 index 0000000000000000000000000000000000000000..3aed83b2ea3418c54f9540279ae6e2e0045421fa --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py @@ -0,0 +1,181 @@ +import unittest +import paddle.v2.fluid.core as core +import numpy as np +import paddle.v2.fluid.layers as layers +from paddle.v2.fluid.framework import Program +from paddle.v2.fluid.executor import Executor +from paddle.v2.fluid.backward import append_backward_ops + + +class TestCPULoDTensorArrayOps(unittest.TestCase): + def place(self): + return core.CPUPlace() + + def test_split_and_merge_lod_tensor_no_lod(self): + tensor = core.LoDTensor() + tensor.set(np.arange(10).reshape(10, 1).astype('int32'), self.place()) + + mask_np = np.array([0, 0, 1, 1, 1, 1, 0, 0, 0, 0]).astype('bool') + mask_np = np.expand_dims(mask_np, axis=1) + + mask = core.LoDTensor() + mask.set(mask_np, self.place()) + + expect_true_tensor = np.array([2, 3, 4, 5]).astype('int32') + expect_true_tensor = np.expand_dims(expect_true_tensor, axis=1) + expect_true = core.LoDTensor() + expect_true.set(expect_true_tensor, self.place()) + + expect_false_tensor = np.array([0, 1, 6, 7, 8, 9]).astype('int32') + expect_false_tensor = np.expand_dims(expect_false_tensor, axis=1) + + expect_false = core.LoDTensor() + expect_false.set(expect_false_tensor, self.place()) + + self.main( + tensor=tensor, + mask=mask, + expect_true=expect_true, + expect_false=expect_false, + expect_out=tensor) + + def test_split_and_merge_lod_tensor_level_0(self): + tensor = core.LoDTensor() + tensor.set(np.arange(10).reshape(10, 1).astype('int32'), self.place()) + tensor.set_lod([[0, 3, 9, 10]]) + + mask_np = np.array([0, 1, 0]).astype('bool') + mask_np = np.expand_dims(mask_np, axis=1) + + mask = core.LoDTensor() + mask.set(mask_np, self.place()) + + expect_true_tensor = np.array([3, 4, 5, 6, 7, 8]).astype('int32') + expect_true_tensor = np.expand_dims(expect_true_tensor, axis=1) + expect_true = core.LoDTensor() + expect_true.set(expect_true_tensor, self.place()) + expect_true.set_lod([[0, 6]]) + + expect_false_tensor = np.array([0, 1, 2, 9]).astype('int32') + expect_false_tensor = np.expand_dims(expect_false_tensor, axis=1) + expect_false_lod = [[0, 3, 4]] + + expect_false = core.LoDTensor() + expect_false.set(expect_false_tensor, self.place()) + expect_false.set_lod(expect_false_lod) + + self.main( + tensor=tensor, + mask=mask, + expect_true=expect_true, + expect_false=expect_false, + expect_out=tensor) + + def main(self, tensor, mask, expect_true, expect_false, expect_out, + level=0): + place = self.place() + program = Program() + x = layers.data(name='x', shape=[1], main_program=program) + x.persistable = True + + y = layers.data(name='y', shape=[1], main_program=program) + y.persistable = True + + out_true, out_false = layers.split_lod_tensor( + input=x, mask=y, level=level, main_program=program) + out_true.persistable = True + out_false.persistable = True + + out = layers.merge_lod_tensor( + in_true=out_true, + in_false=out_false, + mask=y, + x=x, + level=level, + main_program=program) + + out.persistable = True + + exe = Executor(place) + scope = core.Scope() + exe.run(program, feed={'x': tensor, 'y': mask}, scope=scope) + + var_true = scope.find_var(out_true.name).get_tensor() + + var_false = scope.find_var(out_false.name).get_tensor() + + var_out = scope.find_var(out.name).get_tensor() + + self.check_tensor_same(var_true, expect_true) + 
self.check_tensor_same(var_false, expect_false) + self.check_tensor_same(var_out, expect_out) + + def check_tensor_same(self, actual, expect): + self.assertTrue(np.allclose(np.array(actual), np.array(expect))) + self.assertEqual(actual.lod(), expect.lod()) + + +class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase): + def test_grad(self): + place = core.CPUPlace() + program = Program() + + x = layers.data( + name='x', + shape=[1], + data_type='float32', + main_program=program, + stop_gradient=False) + y = layers.data( + name='y', + shape=[1], + data_type='bool', + main_program=program, + stop_gradient=False) + + level = 0 + + out_true, out_false = layers.split_lod_tensor( + input=x, mask=y, level=level, main_program=program) + out = layers.merge_lod_tensor( + in_true=out_true, + in_false=out_false, + mask=y, + x=x, + level=level, + main_program=program) + mean = layers.mean(x=out, main_program=program) + + append_backward_ops(mean) + + tensor = core.LoDTensor() + tensor.set(np.arange(10).reshape(10, 1).astype('float32'), place) + tensor.set_lod([[0, 3, 9, 10]]) + + mask_np = np.array([0, 1, 0]).astype('bool') + mask_np = np.expand_dims(mask_np, axis=1) + + mask = core.LoDTensor() + mask.set(mask_np, place) + + exe = Executor(place) + scope = core.Scope() + + g_vars = program.global_block().var(x.name + "@GRAD") + g_out = [ + item.sum() + for item in map(np.array, + exe.run(program, + feed={'x': tensor, + 'y': mask}, + fetch_list=[g_vars], + scope=scope)) + ] + + g_out_sum = np.array(g_out).sum() + + self.assertAlmostEqual(1.0, g_out_sum, delta=0.1) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_split_op.py b/python/paddle/v2/fluid/tests/test_split_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_split_op.py rename to python/paddle/v2/fluid/tests/test_split_op.py diff --git a/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py b/python/paddle/v2/fluid/tests/test_squared_l2_distance_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_squared_l2_distance_op.py rename to python/paddle/v2/fluid/tests/test_squared_l2_distance_op.py diff --git a/python/paddle/v2/framework/tests/test_squared_l2_norm_op.py b/python/paddle/v2/fluid/tests/test_squared_l2_norm_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_squared_l2_norm_op.py rename to python/paddle/v2/fluid/tests/test_squared_l2_norm_op.py diff --git a/python/paddle/v2/framework/tests/test_sum_op.py b/python/paddle/v2/fluid/tests/test_sum_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_sum_op.py rename to python/paddle/v2/fluid/tests/test_sum_op.py diff --git a/python/paddle/v2/framework/tests/test_tensor.py b/python/paddle/v2/fluid/tests/test_tensor.py similarity index 98% rename from python/paddle/v2/framework/tests/test_tensor.py rename to python/paddle/v2/fluid/tests/test_tensor.py index e0cd2fa8aaf2db2991ad2b9a3053f0d00b509cd4..9f870d9eb3485aa0b54eb781b906f4232d12c49e 100644 --- a/python/paddle/v2/framework/tests/test_tensor.py +++ b/python/paddle/v2/fluid/tests/test_tensor.py @@ -1,4 +1,4 @@ -import paddle.v2.framework.core as core +import paddle.v2.fluid.core as core import unittest import numpy diff --git a/python/paddle/v2/framework/tests/test_tensor_array.py b/python/paddle/v2/fluid/tests/test_tensor_array.py similarity index 98% rename from python/paddle/v2/framework/tests/test_tensor_array.py rename to python/paddle/v2/fluid/tests/test_tensor_array.py index 
50b3e09162a24201ee45cbd017dfef8a60f0da78..d6929ba16e4dae0c57adcceb4f0e78c094eee55c 100644 --- a/python/paddle/v2/framework/tests/test_tensor_array.py +++ b/python/paddle/v2/fluid/tests/test_tensor_array.py @@ -1,5 +1,5 @@ import logging -import paddle.v2.framework.core as core +import paddle.v2.fluid.core as core import unittest import numpy as np diff --git a/python/paddle/v2/framework/tests/test_top_k_op.py b/python/paddle/v2/fluid/tests/test_top_k_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_top_k_op.py rename to python/paddle/v2/fluid/tests/test_top_k_op.py diff --git a/python/paddle/v2/framework/tests/test_transpose_op.py b/python/paddle/v2/fluid/tests/test_transpose_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_transpose_op.py rename to python/paddle/v2/fluid/tests/test_transpose_op.py diff --git a/python/paddle/v2/framework/tests/test_uniform_random_op.py b/python/paddle/v2/fluid/tests/test_uniform_random_op.py similarity index 90% rename from python/paddle/v2/framework/tests/test_uniform_random_op.py rename to python/paddle/v2/fluid/tests/test_uniform_random_op.py index ded777105e0fc64eb82bf4013bfba7ba9d0ddefa..f736dfb2e85552b321403c961da517f3b3efb100 100644 --- a/python/paddle/v2/framework/tests/test_uniform_random_op.py +++ b/python/paddle/v2/fluid/tests/test_uniform_random_op.py @@ -1,6 +1,6 @@ import unittest -from paddle.v2.framework.op import Operator -import paddle.v2.framework.core as core +from paddle.v2.fluid.op import Operator +import paddle.v2.fluid.core as core import numpy diff --git a/python/paddle/v2/framework/tests/test_variable.py b/python/paddle/v2/fluid/tests/test_variable.py similarity index 93% rename from python/paddle/v2/framework/tests/test_variable.py rename to python/paddle/v2/fluid/tests/test_variable.py index 03115f10a5a494424c6f8310c544c569be818e5b..a3e60a751719666bdca56a3096b688125d09f4b2 100644 --- a/python/paddle/v2/framework/tests/test_variable.py +++ b/python/paddle/v2/fluid/tests/test_variable.py @@ -1,6 +1,6 @@ import unittest -from paddle.v2.framework.framework import Variable, g_main_program, Program -import paddle.v2.framework.core as core +from paddle.v2.fluid.framework import Variable, g_main_program, Program +import paddle.v2.fluid.core as core import numpy as np diff --git a/python/paddle/v2/framework/tests/test_while_op.py b/python/paddle/v2/fluid/tests/test_while_op.py similarity index 94% rename from python/paddle/v2/framework/tests/test_while_op.py rename to python/paddle/v2/fluid/tests/test_while_op.py index 1c344eae49705ecce586154c30c4d4f770022e7e..0f01acb3b94dc55a3536e751108e785ddc6e47bb 100644 --- a/python/paddle/v2/framework/tests/test_while_op.py +++ b/python/paddle/v2/fluid/tests/test_while_op.py @@ -1,7 +1,7 @@ import unittest -import paddle.v2.framework.layers as layers -from paddle.v2.framework.executor import Executor -import paddle.v2.framework.core as core +import paddle.v2.fluid.layers as layers +from paddle.v2.fluid.executor import Executor +import paddle.v2.fluid.core as core import numpy diff --git a/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py b/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py new file mode 100644 index 0000000000000000000000000000000000000000..e9f180bbaea0f5922bee0a3e2a8c715d683c0d16 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py @@ -0,0 +1,75 @@ +import unittest + +import numpy as np +import paddle.v2.fluid.core as core +from paddle.v2.fluid.op import Operator
+ + +class TestBeamSearchDecodeOp(unittest.TestCase): + def setUp(self): + self.scope = core.Scope() + self.cpu_place = core.CPUPlace() + + def append_lod_tensor(self, tensor_array, lod, data): + lod_tensor = core.LoDTensor() + lod_tensor.set_lod(lod) + lod_tensor.set(data, self.cpu_place) + tensor_array.append(lod_tensor) + + def test_get_set(self): + ids = self.scope.var("ids").get_lod_tensor_array() + self.append_lod_tensor( + ids, [[0, 3, 6], [0, 1, 2, 3, 4, 5, 6]], + np.array( + [1, 2, 3, 4, 5, 6], dtype="int64")) + self.append_lod_tensor( + ids, [[0, 3, 6], [0, 1, 1, 3, 5, 5, 6]], + np.array( + [0, 1, 2, 3, 4, 5], dtype="int64")) + self.append_lod_tensor( + ids, [[0, 3, 6], [0, 0, 1, 2, 3, 4, 5]], + np.array( + [0, 1, 2, 3, 4], dtype="int64")) + + scores = self.scope.var("scores").get_lod_tensor_array() + self.append_lod_tensor( + scores, [[0, 3, 6], [0, 1, 2, 3, 4, 5, 6]], + np.array( + [1, 2, 3, 4, 5, 6], dtype="float32")) + self.append_lod_tensor( + scores, [[0, 3, 6], [0, 1, 1, 3, 5, 5, 6]], + np.array( + [0, 1, 2, 3, 4, 5], dtype="float32")) + self.append_lod_tensor( + scores, [[0, 3, 6], [0, 0, 1, 2, 3, 4, 5]], + np.array( + [0, 1, 2, 3, 4], dtype="float32")) + + sentence_ids = self.scope.var("sentence_ids").get_tensor() + sentence_scores = self.scope.var("sentence_scores").get_tensor() + + beam_search_decode_op = Operator( + "beam_search_decode", + # inputs + Ids="ids", + Scores="scores", + # outputs + SentenceIds="sentence_ids", + SentenceScores="sentence_scores") + + ctx = core.DeviceContext.create(self.cpu_place) + beam_search_decode_op.run(self.scope, ctx) + + expected_lod = [[0, 4, 8], [0, 1, 3, 6, 9, 10, 13, 16, 19]] + self.assertEqual(sentence_ids.lod(), expected_lod) + self.assertEqual(sentence_scores.lod(), expected_lod) + + expected_data = np.array( + [2, 1, 0, 3, 1, 0, 3, 2, 1, 5, 4, 3, 2, 4, 4, 3, 6, 5, 4], "int64") + self.assertTrue(np.array_equal(np.array(sentence_ids), expected_data)) + self.assertTrue( + np.array_equal(np.array(sentence_scores), expected_data)) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/setup.py.in b/python/setup.py.in index 5348c2d8d7e9b5adc5fe93e2943bef149ba047cc..fe91df10daf303bb14d1e5f28817984d261e0880 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -13,8 +13,8 @@ packages=['paddle', 'paddle.v2.reader', 'paddle.v2.master', 'paddle.v2.plot', - 'paddle.v2.framework', - 'paddle.v2.framework.proto', + 'paddle.v2.fluid', + 'paddle.v2.fluid.proto', 'py_paddle'] with open('@PADDLE_SOURCE_DIR@/python/requirements.txt') as f: @@ -44,14 +44,14 @@ setup(name='paddlepaddle', ext_modules=[Extension('_foo', ['stub.cc'])], package_data={ 'paddle.v2.master': ['libpaddle_master.so'], - 'paddle.v2.framework': ['core.so'], + 'paddle.v2.fluid': ['core.so'], 'py_paddle':['*.py','_swig_paddle.so'] }, package_dir={ '': '${CMAKE_CURRENT_SOURCE_DIR}', - # The paddle.v2.framework.proto will be generated while compiling. + # The paddle.v2.fluid.proto will be generated while compiling. # So that package points to other directory. - 'paddle.v2.framework.proto': '${PADDLE_BINARY_DIR}/paddle/framework', + 'paddle.v2.fluid.proto': '${PADDLE_BINARY_DIR}/paddle/framework', 'py_paddle': '${PADDLE_SOURCE_DIR}/paddle/py_paddle' }, scripts=paddle_bins,