From 78af6e601181449f434d9fc4af791b373bcde47a Mon Sep 17 00:00:00 2001
From: Yu Yang
Date: Wed, 9 Aug 2017 17:11:01 +0800
Subject: [PATCH] Add OutputVars method to get all outputs or outputs without
 intermediate

---
 paddle/framework/op_registry.h  | 25 ++---------------------
 paddle/framework/operator.cc    | 12 +++++++++--
 paddle/framework/operator.h     | 31 +++++++++++++++++++++++++++++
 paddle/operators/net_op.cc      | 35 ++++++++++++++++++---------------
 paddle/operators/net_op.h       |  4 ++++
 paddle/operators/net_op_test.cc | 19 +++++------------
 6 files changed, 71 insertions(+), 55 deletions(-)

diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h
index f11ce8fd377..03b14ea0216 100644
--- a/paddle/framework/op_registry.h
+++ b/paddle/framework/op_registry.h
@@ -22,6 +22,7 @@ limitations under the License. */
 #include "paddle/framework/attribute.h"
 #include "paddle/framework/framework.pb.h"
 #include "paddle/framework/grad_op_builder.h"
+#include "paddle/framework/operator.h"
 #include "paddle/framework/scope.h"
 
 namespace paddle {
@@ -127,25 +128,14 @@ class OpRegistry {
   static void RegisterOp(const std::string& op_type) {
     op_creators()[op_type] = [] { return new OpType; };
     OpAttrChecker& op_checker = op_checkers()[op_type];
-    OpProto& op_proto = protos()[op_type];
+    OpProto& op_proto = OpProtos()[op_type];
     auto maker = ProtoMakerType(&op_proto, &op_checker);
     maker.Validate();
     *op_proto.mutable_type() = op_type;
     PADDLE_ENFORCE(
         op_proto.IsInitialized(),
         "Fail to initialize %s's OpProto, because %s is not initialized",
         op_type, op_proto.InitializationErrorString());
-
-    VarIndexMaps()[op_type].reset(new VarIndexMap());
-    auto& varmap = *VarIndexMaps()[op_type];
-    int idx = 0;
-    for (auto& var : op_proto.inputs()) {
-      varmap[var.name()] = idx++;
-    }
-    idx = 0;
-    for (auto& var : op_proto.outputs()) {
-      varmap[var.name()] = idx++;
-    }
   }
 
   template <typename GradOpType>
@@ -212,22 +202,11 @@ class OpRegistry {
     return grad_op;
   }
 
-  static std::unordered_map<std::string, OpProto>& protos() {
-    static std::unordered_map<std::string, OpProto> protos_;
-    return protos_;
-  }
-
   static std::unordered_map<std::string, std::string>& grad_ops() {
     static std::unordered_map<std::string, std::string> grad_ops_;
     return grad_ops_;
   }
 
-  static std::unordered_map<std::string, std::unique_ptr<VarIndexMap>>&
-  VarIndexMaps() {
-    static std::unordered_map<std::string, std::unique_ptr<VarIndexMap>> maps_;
-    return maps_;
-  }
-
   static std::unordered_map<std::string, OpCreator>& op_creators() {
     static std::unordered_map<std::string, OpCreator> op_creators_;
     return op_creators_;
diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc
index e69db305b49..1210ee1ec4c 100644
--- a/paddle/framework/operator.cc
+++ b/paddle/framework/operator.cc
@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include <algorithm>
-
 #include "paddle/framework/operator.h"
+#include <algorithm>
+#include "paddle/framework/op_registry.h"
 
 namespace paddle {
 namespace framework {
@@ -33,6 +33,14 @@ ExecutionContext::GetEigenDevice() const {
 }
 #endif
 
+static std::unordered_map<std::string, OpProto>* g_op_protos = nullptr;
+std::unordered_map<std::string, OpProto>& OpProtos() {
+  if (g_op_protos == nullptr) {
+    g_op_protos = new std::unordered_map<std::string, OpProto>();
+  }
+  return *g_op_protos;
+}
+
 const std::string& OperatorBase::Input(const std::string& name) const {
   auto it = inputs_.find(name);
   PADDLE_ENFORCE(it != inputs_.end(), "Op %s does not have output %s", type_,
diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index 499bb7ef77e..15b1c736761 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -50,6 +50,8 @@ inline std::string GradVarName(const std::string& var_name) {
   return var_name + kGradVarSuffix;
 }
 
+extern std::unordered_map<std::string, OpProto>& OpProtos();
+
 class OperatorBase;
 class InferShapeContext;
 class ExecutionContext;
@@ -103,6 +105,35 @@ class OperatorBase {
   //! TODO add a vector_view to prevent memory copy.
   const std::vector<std::string>& Outputs(const std::string& name) const;
 
+  virtual std::vector<std::string> OutputVars(bool has_intermediate) const {
+    std::vector<std::string> ret_val;
+    if (has_intermediate) {
+      // push all outputs into ret_val
+      for (auto& o : outputs_) {
+        ret_val.reserve(ret_val.size() + o.second.size());
+        ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
+      }
+      return ret_val;
+    }
+    auto it = OpProtos().find(type_);
+    PADDLE_ENFORCE(
+        it != OpProtos().end(),
+        "Operator %s not registered, cannot figure out intermediate outputs",
+        type_);
+
+    // get all OpProto::Var for outputs
+    for (auto& o : it->second.outputs()) {
+      // ignore all intermediate output
+      if (o.intermediate()) continue;
+      auto out = outputs_.find(o.name());
+      if (out != outputs_.end()) {
+        ret_val.reserve(ret_val.size() + out->second.size());
+        ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
+      }
+    }
+    return ret_val;
+  }
+
  public:
   std::string type_;
   // NOTE: in case of OpGrad, inputs_ contains:
diff --git a/paddle/operators/net_op.cc b/paddle/operators/net_op.cc
index b0746883d04..6a118087a73 100644
--- a/paddle/operators/net_op.cc
+++ b/paddle/operators/net_op.cc
@@ -21,19 +21,20 @@
 namespace paddle {
 namespace operators {
 
+const char NetOp::kAll[] = "all";
+
 void NetOp::CompleteAddOp(bool calc) {
   add_op_done_ = true;
   if (!calc) return;
   std::set<std::string> input_set;
   std::set<std::string> output_set;
-  std::set<std::string> temp_output;
   for (auto& op : ops_) {
     for (auto& ipt : op->inputs_) {
       for (auto& var_name : ipt.second) {
         if (!Contains(output_set, var_name)) {  // Not other op's output
           input_set.insert(var_name);
         } else {
-          temp_output.insert(var_name);
+          intermediate_outputs_.insert(var_name);
         }
       }
     }
@@ -44,24 +45,12 @@ void NetOp::CompleteAddOp(bool calc) {
       }
     }
   }
-  auto& inputs = inputs_["all"];
+  auto& inputs = inputs_[kAll];
   inputs.reserve(input_set.size());
   std::copy(input_set.begin(), input_set.end(), std::back_inserter(inputs));
-  auto& outputs = outputs_["all"];
+  auto& outputs = outputs_[kAll];
   outputs.reserve(output_set.size());
   std::copy(output_set.begin(), output_set.end(), std::back_inserter(outputs));
-
-  //! TODO figure out how to generate temporary_index in Network.
-  std::vector<int> tmp_index;
-  tmp_index.reserve(temp_output.size());
-  int output_len = static_cast<int>(outputs.size());
-  for (int i = 0; i < output_len; ++i) {
-    if (Contains(temp_output, outputs[i])) {
-      tmp_index.push_back(i);
-    }
-  }
-
-  attrs_["temporary_index"] = tmp_index;
 }
 
 std::string NetOp::DebugString() const {
@@ -78,5 +67,19 @@
 
 bool NetOp::IsNetOp() const { return true; }
 
+std::vector<std::string> NetOp::OutputVars(bool has_intermediate) const {
+  if (has_intermediate) {
+    return this->outputs_.at(kAll);
+  }
+  auto& all = this->outputs_.at(kAll);
+  std::vector<std::string> ret_val;
+  for (auto& each : all) {
+    if (!Contains(intermediate_outputs_, each)) {
+      ret_val.push_back(each);
+    }
+  }
+  return ret_val;
+}
+
 }  // namespace operators
 }  // namespace paddle
diff --git a/paddle/operators/net_op.h b/paddle/operators/net_op.h
index 4e2353aa2bd..61f6187aec9 100644
--- a/paddle/operators/net_op.h
+++ b/paddle/operators/net_op.h
@@ -36,6 +36,8 @@ namespace operators {
  */
 class NetOp : public framework::OperatorBase {
  public:
+  static const char kAll[];
+
   /**
    * Infer all the operators' input and output variables' shapes, will be called
    * before every mini-batch
@@ -91,11 +93,13 @@ class NetOp : public framework::OperatorBase {
   std::string DebugString() const override;
 
   bool IsNetOp() const override;
+  std::vector<std::string> OutputVars(bool has_intermediate) const override;
 
   std::vector<std::shared_ptr<OperatorBase>> ops_;
 
  private:
   bool add_op_done_{false};
+  std::set<std::string> intermediate_outputs_;
 
   template <typename T, typename KeyType>
   static bool Contains(T container, KeyType key) {
diff --git a/paddle/operators/net_op_test.cc b/paddle/operators/net_op_test.cc
index 977f3de706f..c167f908241 100644
--- a/paddle/operators/net_op_test.cc
+++ b/paddle/operators/net_op_test.cc
@@ -54,22 +54,13 @@ TEST(OpKernel, all) {
   net->CompleteAddOp();
 
   AssertSameVectorWithoutOrder({"x", "w1", "b1", "w2", "b2"},
-                               net->inputs_.at("__all__"));
-  AssertSameVectorWithoutOrder({"y", "z"}, net->outputs_.at("__all__"));
-  auto tmp_idx_iter = net->attrs_.find("temporary_index");
-  ASSERT_NE(net->attrs_.end(), tmp_idx_iter);
-  auto& tmp_idx = boost::get<std::vector<int>>(tmp_idx_iter->second);
-  ASSERT_EQ(1UL, tmp_idx.size());
-  ASSERT_EQ("y", net->outputs_.at("__all__")[tmp_idx[0]]);
+                               net->inputs_.at(NetOp::kAll));
+  AssertSameVectorWithoutOrder({"y", "z"}, net->outputs_.at(NetOp::kAll));
 
-  Scope scope;
-  platform::CPUDeviceContext dev_ctx;
+  auto final_outs = net->OutputVars(false);
 
-  net->InferShape(scope);
-  net->Run(scope, dev_ctx);
-  ASSERT_EQ(2, infer_shape_cnt);
-  ASSERT_EQ(2, run_cnt);
-  ASSERT_THROW(net->AddOp(op2), platform::EnforceNotMet);
+  ASSERT_EQ(final_outs.size(), 1UL);
+  ASSERT_EQ(final_outs[0], "z");
 }
 
 TEST(NetOp, insert_op) {
-- 
GitLab
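
The rule this patch implements can be reproduced outside the Paddle tree in a few
lines of standalone C++: during CompleteAddOp, any variable that one operator in
the net produces and a later operator consumes is recorded as an intermediate
output, and OutputVars(false) filters those names out of the net's "all" output
list. The sketch below is illustrative only and is not part of the patch; the
Op struct, the free OutputVars function, and main are hypothetical stand-ins for
NetOp, its ops_ list, and the test graph, written against plain C++11 with no
Paddle dependencies.

#include <iostream>
#include <set>
#include <string>
#include <vector>

// Hypothetical mini-op: just the input and output variable names.
struct Op {
  std::vector<std::string> inputs;
  std::vector<std::string> outputs;
};

// Mirrors the patch's rule: an output consumed by a later op is intermediate,
// and OutputVars(false) drops it from the result.
std::vector<std::string> OutputVars(const std::vector<Op>& ops,
                                    bool has_intermediate) {
  std::set<std::string> produced;
  std::set<std::string> intermediate;
  std::vector<std::string> all_outputs;
  for (const auto& op : ops) {
    for (const auto& in : op.inputs) {
      // An input that an earlier op produced is an intermediate output.
      if (produced.count(in)) intermediate.insert(in);
    }
    for (const auto& out : op.outputs) {
      produced.insert(out);
      all_outputs.push_back(out);
    }
  }
  if (has_intermediate) return all_outputs;
  std::vector<std::string> ret;
  for (const auto& name : all_outputs) {
    if (!intermediate.count(name)) ret.push_back(name);  // keep final outputs
  }
  return ret;
}

int main() {
  // Same shape as the patched test: the first op produces "y", which the
  // second op consumes to produce "z".
  std::vector<Op> net = {{{"x", "w1", "b1"}, {"y"}},
                         {{"y", "w2", "b2"}, {"z"}}};
  for (const auto& name : OutputVars(net, false)) {
    std::cout << name << "\n";  // prints only "z"; "y" is intermediate
  }
  return 0;
}

Run against this two-operator net, the sketch prints only "z", which matches the
new assertions in net_op_test.cc (final_outs.size() == 1UL and final_outs[0] ==
"z"), while OutputVars(true) would return both "y" and "z", i.e. the full
outputs_.at(NetOp::kAll) list.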