From 51ef0ad7662c4b2e7e611ded471978c7ff04af5c Mon Sep 17 00:00:00 2001
From: Xin Pan
Date: Tue, 28 Aug 2018 18:38:12 +0800
Subject: [PATCH] allow to use name_scope for debugging and visualization

---
 paddle/fluid/API.spec                         |  1 +
 paddle/fluid/framework/ir/graph_viz_pass.cc   | 18 ++++-
 paddle/fluid/framework/ir/node.h              |  2 +-
 paddle/fluid/framework/op_proto_maker.cc      |  3 +
 paddle/fluid/framework/op_proto_maker.h       |  1 +
 paddle/fluid/pybind/const_value.cc            |  3 +
 python/paddle/fluid/framework.py              | 68 +++++++++++++++++++
 python/paddle/fluid/optimizer.py              |  4 +-
 .../fluid/tests/unittests/test_name_scope.py  | 45 ++++++++++++
 .../unittests/test_parallel_executor_mnist.py | 26 +++----
 10 files changed, 154 insertions(+), 17 deletions(-)
 create mode 100644 python/paddle/fluid/tests/unittests/test_name_scope.py

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 106198362f3..9b65a9b4581 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -36,6 +36,7 @@ paddle.fluid.default_startup_program ArgSpec(args=[], varargs=None, keywords=Non
 paddle.fluid.default_main_program ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
 paddle.fluid.program_guard ArgSpec(args=['args', varargs='args', keywords='kwds', defaults=None)
 paddle.fluid.get_var ArgSpec(args=['name', 'program'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.name_scope ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
 paddle.fluid.Executor.__init__ ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Executor.close ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Executor.run ArgSpec(args=['self', 'program', 'feed', 'fetch_list', 'feed_var_name', 'fetch_var_name', 'scope', 'return_numpy', 'use_program_cache'], varargs=None, keywords=None, defaults=(None, None, None, 'feed', 'fetch', None, True, False))
diff --git a/paddle/fluid/framework/ir/graph_viz_pass.cc b/paddle/fluid/framework/ir/graph_viz_pass.cc
index 3a114c6a237..78068df8789 100644
--- a/paddle/fluid/framework/ir/graph_viz_pass.cc
+++ b/paddle/fluid/framework/ir/graph_viz_pass.cc
@@ -17,12 +17,26 @@ limitations under the License. */
 
 #include "paddle/fluid/framework/ir/graph_viz_pass.h"
 #include "paddle/fluid/inference/analysis/dot.h"
+#include "paddle/fluid/framework/op_proto_maker.h"
+#include "paddle/fluid/string/printf.h"
 
 namespace paddle {
 namespace framework {
 namespace ir {
-static const char kGraphVizPath[] = "graph_viz_path";
 using inference::analysis::Dot;
+namespace {
+const char kGraphVizPath[] = "graph_viz_path";
+
+std::string FormatName(const Node* op) {
+  if (!op->Op() ||
+      !op->Op()->HasAttr(OpProtoAndCheckerMaker::OpNamescopeAttrName())) {
+    return op->Name();
+  }
+  const std::string full_scope = boost::get<std::string>(
+      op->Op()->GetAttr(OpProtoAndCheckerMaker::OpNamescopeAttrName()));
+  return string::Sprintf("%s%s", full_scope.c_str(), op->Name().c_str());
+}
+}  // namespace
 
 std::unique_ptr<ir::Graph> GraphVizPass::ApplyImpl(
     std::unique_ptr<ir::Graph> graph) const {
@@ -54,7 +68,7 @@ std::unique_ptr<ir::Graph> GraphVizPass::ApplyImpl(
   auto marked_nodes = ConsumeMarkedNodes(graph.get());
   // Create nodes
   for (const Node* n : graph->Nodes()) {
-    std::string node_id = n->Name() + "(" + std::to_string(n->id()) + ")";
+    std::string node_id = FormatName(n) + "(" + std::to_string(n->id()) + ")";
     if (n->IsOp()) {
       decltype(op_attrs) attr =
           marked_nodes.count(n) ? marked_op_attrs : op_attrs;
diff --git a/paddle/fluid/framework/ir/node.h b/paddle/fluid/framework/ir/node.h
index 6d40e385229..545ff3b4a4b 100644
--- a/paddle/fluid/framework/ir/node.h
+++ b/paddle/fluid/framework/ir/node.h
@@ -59,7 +59,7 @@ class Node {
     return var_desc_.get();
   }
 
-  OpDesc* Op() {
+  OpDesc* Op() const {
     PADDLE_ENFORCE(IsOp());
     return op_desc_.get();
   }
diff --git a/paddle/fluid/framework/op_proto_maker.cc b/paddle/fluid/framework/op_proto_maker.cc
index 2288c7fe660..4fa047bf3ee 100644
--- a/paddle/fluid/framework/op_proto_maker.cc
+++ b/paddle/fluid/framework/op_proto_maker.cc
@@ -129,6 +129,9 @@ void OpProtoAndCheckerMaker::operator()(proto::OpProto* proto,
                                     "Optimized for variable")
       .SetDefault({});
 
+  AddAttr<std::string>(OpNamescopeAttrName(), "Operator name with name scope.")
+      .SetDefault("");
+
   Validate();
 }
 
diff --git a/paddle/fluid/framework/op_proto_maker.h b/paddle/fluid/framework/op_proto_maker.h
index 80970291c9c..18827385ad6 100644
--- a/paddle/fluid/framework/op_proto_maker.h
+++ b/paddle/fluid/framework/op_proto_maker.h
@@ -39,6 +39,7 @@ class OpProtoAndCheckerMaker {
  public:
   static const char *OpRoleAttrName() { return "op_role"; }
   static const char *OpRoleVarAttrName() { return "op_role_var"; }
+  static const char *OpNamescopeAttrName() { return "op_namescope"; }
 
   void operator()(proto::OpProto *proto, OpAttrChecker *attr_checker);
 
diff --git a/paddle/fluid/pybind/const_value.cc b/paddle/fluid/pybind/const_value.cc
index e4415ed15c7..f577068d1f3 100644
--- a/paddle/fluid/pybind/const_value.cc
+++ b/paddle/fluid/pybind/const_value.cc
@@ -43,6 +43,9 @@ void BindConstValue(pybind11::module* m) {
   op_proto_and_checker_maker.def(
       "kOpRoleVarAttrName",
       framework::OpProtoAndCheckerMaker::OpRoleVarAttrName);
+  op_proto_and_checker_maker.def(
+      "kOpNameScopeAttrName",
+      framework::OpProtoAndCheckerMaker::OpNamescopeAttrName);
 }
 
 }  // namespace pybind
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index fbe766336b1..b0e0d27ff7a 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -43,6 +43,7 @@ __all__ = [
     'default_main_program',
     'program_guard',
     'get_var',
+    'name_scope',
 ]
 
 EMPTY_VAR_NAME = core.kEmptyVarName()
@@ -52,6 +53,70 @@ ZERO_VAR_SUFFIX = core.kZeroVarSuffix()
 CONTROL_DEP_VAR_PREFIX = core.kControlDepVarName()
 
 
+class NameScope(object):
+    def __init__(self, name="", parent=None):
+        self._children = dict()
+        self._name = name
+        self._parent = parent
+
+    def child(self, prefix):
+        if prefix not in self._children:
+            new_child = NameScope(prefix, self)
+            self._children[prefix] = [new_child]
+        else:
+            new_child = NameScope(prefix + "_%d" % len(self._children[prefix]),
+                                  self)
+            self._children[prefix].append(new_child)
+        return new_child
+
+    def parent(self):
+        return self._parent
+
+    def name(self):
+        return self._name
+
+
+_name_scope = NameScope()
+
+
+@contextlib.contextmanager
+def name_scope(prefix=None):
+    """
+    Generate hierarchical name prefix for the operators.
+
+    Note: This should only be used for debugging and visualization purposes.
+    Don't use it for serious analysis such as graph/program transformations.
+
+    Args:
+        prefix(str): prefix of the name scope.
+
+    Examples:
+        .. code-block:: python
+
+          with name_scope("encoder"):
+             ...
+          with name_scope("decoder"):
+             ...
+             with name_scope("attention"):
+                ...
+    """
+    # TODO(panyx0718): Only [0-9a-z].
+    assert prefix, "namescope prefix cannot be empty."
+    global _name_scope
+    _name_scope = _name_scope.child(prefix)
+    yield
+    _name_scope = _name_scope.parent()
+
+
+def _full_name_scope():
+    global _name_scope
+    scope = _name_scope
+    name = ""
+    while scope:
+        name = scope.name() + "/" + name
+        scope = scope.parent()
+    return name
+
+
 def generate_control_dev_var_name():
     import random
     return CONTROL_DEP_VAR_PREFIX + "@" + str(random.random())
@@ -515,6 +580,9 @@ class Operator(object):
         self.desc.set_type(type)
         proto = OpProtoHolder.instance().get_op_proto(type)
 
+        namescope_var_name = op_maker.kOpNameScopeAttrName()
+        op_attrs[namescope_var_name] = _full_name_scope()
+
         def find_name(var_list, name):
             for var_name in var_list:
                 if var_list[var_name] is not None and var_name == name:
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index 6b9749a5799..33d6311b971 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -15,7 +15,7 @@
 from __future__ import print_function
 import re
 from collections import defaultdict
-from paddle.fluid.framework import Program, Variable
+from paddle.fluid.framework import Program, Variable, name_scope
 from . import framework
 from . import layers
 from .backward import append_backward
@@ -237,7 +237,7 @@ class Optimizer(object):
             if param_and_grad[1] is None:
                 continue
             with param_and_grad[0].block.program.optimized_guard(
-                    param_and_grad):
+                    param_and_grad), name_scope("optimizer"):
                 if param_and_grad[0].trainable is True:
                     optimize_op = self._append_optimize_op(loss.block,
                                                            param_and_grad)
diff --git a/python/paddle/fluid/tests/unittests/test_name_scope.py b/python/paddle/fluid/tests/unittests/test_name_scope.py
new file mode 100644
index 00000000000..08c802e20d2
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_name_scope.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import paddle.fluid as fluid
+
+
+class TestNameScope(unittest.TestCase):
+    def test_name_scope(self):
+        with fluid.name_scope("s1"):
+            a = fluid.layers.data(name='data', shape=[1], dtype='int32')
+            b = a + 1
+            with fluid.name_scope("s2"):
+                c = b * 1
+            with fluid.name_scope("s3"):
+                d = c / 1
+        with fluid.name_scope("s1"):
+            f = fluid.layers.pow(d, 2.0)
+        with fluid.name_scope("s4"):
+            g = f - 1
+
+        for op in fluid.default_main_program().block(0).ops:
+            if op.type == 'elementwise_add':
+                self.assertEqual(op.desc.attr("op_namescope"), '/s1/')
+            elif op.type == 'elementwise_mul':
+                self.assertEqual(op.desc.attr("op_namescope"), '/s1/s2/')
+            elif op.type == 'elementwise_div':
+                self.assertEqual(op.desc.attr("op_namescope"), '/s1/s3/')
+            elif op.type == 'elementwise_sub':
+                self.assertEqual(op.desc.attr("op_namescope"), '/s4/')
+            elif op.type == 'pow':
+                self.assertEqual(op.desc.attr("op_namescope"), '/s1_1/')
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
index 5b96d641d66..af3745987aa 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
@@ -67,18 +67,20 @@ def fc_with_batchnorm(use_feed):
 
     hidden = img
     for _ in range(1):
-        hidden = fluid.layers.fc(
-            hidden,
-            size=200,
-            act='tanh',
-            bias_attr=fluid.ParamAttr(
-                initializer=fluid.initializer.Constant(value=1.0)))
-
-        hidden = fluid.layers.batch_norm(input=hidden)
-
-    prediction = fluid.layers.fc(hidden, size=10, act='softmax')
-    loss = fluid.layers.cross_entropy(input=prediction, label=label)
-    loss = fluid.layers.mean(loss)
+        with fluid.name_scope("hidden"):
+            hidden = fluid.layers.fc(
+                hidden,
+                size=200,
+                act='tanh',
+                bias_attr=fluid.ParamAttr(
+                    initializer=fluid.initializer.Constant(value=1.0)))
+
+            hidden = fluid.layers.batch_norm(input=hidden)
+    with fluid.name_scope("fc_layer"):
+        prediction = fluid.layers.fc(hidden, size=10, act='softmax')
+    with fluid.name_scope("loss"):
+        loss = fluid.layers.cross_entropy(input=prediction, label=label)
+        loss = fluid.layers.mean(loss)
     return loss
-- 
GitLab
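
For reference, a minimal usage sketch of the fluid.name_scope API added by this patch, mirroring the new unit test; the variable names, layer sizes, and scope names below are illustrative only, not part of the change:

import paddle.fluid as fluid

# Scopes nest; reusing the same prefix at one level yields "encoder", "encoder_1", ...
with fluid.name_scope("encoder"):
    x = fluid.layers.data(name='x', shape=[32], dtype='float32')
    hidden = fluid.layers.fc(input=x, size=64, act='relu')
    with fluid.name_scope("attention"):
        hidden = fluid.layers.fc(input=hidden, size=64)

# Every operator created inside a scope carries the new "op_namescope" attribute,
# e.g. '/encoder/' or '/encoder/attention/', which graph_viz_pass prepends to the
# node name when rendering the graph.
for op in fluid.default_main_program().block(0).ops:
    print(op.type, op.desc.attr("op_namescope"))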