Commit 51ef0ad7 authored by Xin Pan

Allow using name_scope for debugging and visualization

Parent 9ae55dd7
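In short, this commit lets user code wrap layer construction in nested fluid.name_scope blocks; each operator created inside records its scope path in a new op_namescope attribute, which the graph visualization pass then uses to prefix node names. A minimal usage sketch (based on the Python API and the unit test added below; the printed values are illustrative):

import paddle.fluid as fluid

with fluid.name_scope("encoder"):
    # ops built here get op_namescope == "/encoder/"
    x = fluid.layers.data(name='x', shape=[1], dtype='float32')
    y = x + 1
    with fluid.name_scope("attention"):
        # nested scopes are joined with "/": "/encoder/attention/"
        z = y * 2

for op in fluid.default_main_program().block(0).ops:
    print(op.type, op.desc.attr("op_namescope"))
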
@@ -36,6 +36,7 @@
 paddle.fluid.default_main_program ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
 paddle.fluid.program_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
 paddle.fluid.get_var ArgSpec(args=['name', 'program'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.name_scope ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
 paddle.fluid.Executor.__init__ ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Executor.close ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Executor.run ArgSpec(args=['self', 'program', 'feed', 'fetch_list', 'feed_var_name', 'fetch_var_name', 'scope', 'return_numpy', 'use_program_cache'], varargs=None, keywords=None, defaults=(None, None, None, 'feed', 'fetch', None, True, False))
...
@@ -17,12 +17,26 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/graph_viz_pass.h"
 #include "paddle/fluid/inference/analysis/dot.h"
+#include "paddle/fluid/framework/op_proto_maker.h"
+#include "paddle/fluid/string/printf.h"
 
 namespace paddle {
 namespace framework {
 namespace ir {
-static const char kGraphVizPath[] = "graph_viz_path";
 using inference::analysis::Dot;
+namespace {
+const char kGraphVizPath[] = "graph_viz_path";
+
+std::string FormatName(const Node* op) {
+  if (!op->Op() ||
+      !op->Op()->HasAttr(OpProtoAndCheckerMaker::OpNamescopeAttrName())) {
+    return op->Name();
+  }
+  const std::string full_scope = boost::get<std::string>(
+      op->Op()->GetAttr(OpProtoAndCheckerMaker::OpNamescopeAttrName()));
+  return string::Sprintf("%s%s", full_scope.c_str(), op->Name().c_str());
+}
+}  // namespace
 
 std::unique_ptr<ir::Graph> GraphVizPass::ApplyImpl(
     std::unique_ptr<ir::Graph> graph) const {
@@ -54,7 +68,7 @@ std::unique_ptr<ir::Graph> GraphVizPass::ApplyImpl(
   auto marked_nodes = ConsumeMarkedNodes(graph.get());
   // Create nodes
   for (const Node* n : graph->Nodes()) {
-    std::string node_id = n->Name() + "(" + std::to_string(n->id()) + ")";
+    std::string node_id = FormatName(n) + "(" + std::to_string(n->id()) + ")";
     if (n->IsOp()) {
       decltype(op_attrs) attr =
           marked_nodes.count(n) ? marked_op_attrs : op_attrs;
...
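The effect of the FormatName helper above is simply to prepend an op's op_namescope attribute (when present) to its name before the graphviz node label is built. A rough Python mirror, for illustration only (the real implementation is the C++ above):

def graphviz_node_label(op_name, op_namescope="", node_id=0):
    # op_namescope already ends with "/", e.g. "/encoder/attention/",
    # so the label becomes "/encoder/attention/elementwise_mul(7)".
    return "%s%s(%d)" % (op_namescope, op_name, node_id)
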
@@ -59,7 +59,7 @@ class Node {
     return var_desc_.get();
   }
 
-  OpDesc* Op() {
+  OpDesc* Op() const {
     PADDLE_ENFORCE(IsOp());
     return op_desc_.get();
   }
...
@@ -129,6 +129,9 @@ void OpProtoAndCheckerMaker::operator()(proto::OpProto* proto,
       "Optimized for variable")
       .SetDefault({});
 
+  AddAttr<std::string>(OpNamescopeAttrName(), "Operator name with namescope.")
+      .SetDefault("");
+
   Validate();
 }
...
@@ -39,6 +39,7 @@ class OpProtoAndCheckerMaker {
  public:
   static const char *OpRoleAttrName() { return "op_role"; }
   static const char *OpRoleVarAttrName() { return "op_role_var"; }
+  static const char *OpNamescopeAttrName() { return "op_namescope"; }
 
   void operator()(proto::OpProto *proto, OpAttrChecker *attr_checker);
...
@@ -43,6 +43,9 @@ void BindConstValue(pybind11::module* m) {
   op_proto_and_checker_maker.def(
       "kOpRoleVarAttrName",
       framework::OpProtoAndCheckerMaker::OpRoleVarAttrName);
+  op_proto_and_checker_maker.def(
+      "kOpNameScopeAttrName",
+      framework::OpProtoAndCheckerMaker::OpNamescopeAttrName);
 }
 
 }  // namespace pybind
...
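On the Python side, this binding is reached through fluid's core module (framework.py below calls it as op_maker.kOpNameScopeAttrName()). A minimal sketch of how it can be queried, assuming core is importable as in the rest of fluid:

from paddle.fluid import core

op_maker = core.op_proto_and_checker_maker
# Should return the attribute name registered in OpProtoAndCheckerMaker,
# i.e. "op_namescope".
print(op_maker.kOpNameScopeAttrName())
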
@@ -43,6 +43,7 @@ __all__ = [
     'default_main_program',
     'program_guard',
     'get_var',
+    'name_scope',
 ]
 
 EMPTY_VAR_NAME = core.kEmptyVarName()
@@ -52,6 +53,70 @@
 ZERO_VAR_SUFFIX = core.kZeroVarSuffix()
 CONTROL_DEP_VAR_PREFIX = core.kControlDepVarName()
 
+class NameScope(object):
+    def __init__(self, name="", parent=None):
+        self._children = dict()
+        self._name = name
+        self._parent = parent
+
+    def child(self, prefix):
+        if prefix not in self._children:
+            new_child = NameScope(prefix, self)
+            self._children[prefix] = [new_child]
+        else:
+            new_child = NameScope(prefix + "_%d" % len(self._children[prefix]),
+                                  self)
+            self._children[prefix].append(new_child)
+        return new_child
+
+    def parent(self):
+        return self._parent
+
+    def name(self):
+        return self._name
+
+
+_name_scope = NameScope()
+
+
+@contextlib.contextmanager
+def name_scope(prefix=None):
+    """
+    Generate hierarchical name prefix for the operators.
+
+    Note: This should only be used for debugging and visualization purposes.
+    Don't use it for serious analysis such as graph/program transformations.
+
+    Args:
+        prefix(str): prefix.
+
+    Examples:
+        .. code-block:: python
+
+          with name_scope("encoder"):
+             ...
+          with name_scope("decoder"):
+             ...
+             with name_scope("attention"):
+                ...
+    """
+    # TODO(panyx0718): Only [0-9a-z].
+    assert prefix, "namescope prefix cannot be empty."
+    global _name_scope
+    _name_scope = _name_scope.child(prefix)
+    yield
+    _name_scope = _name_scope.parent()
+
+
+def _full_name_scope():
+    global _name_scope
+    scope = _name_scope
+    name = ""
+    while scope:
+        name = scope.name() + "/" + name
+        scope = scope.parent()
+    return name
+
 def generate_control_dev_var_name():
     import random
     return CONTROL_DEP_VAR_PREFIX + "@" + str(random.random())
@@ -515,6 +580,9 @@
         self.desc.set_type(type)
         proto = OpProtoHolder.instance().get_op_proto(type)
+
+        namescope_var_name = op_maker.kOpNameScopeAttrName()
+        op_attrs[namescope_var_name] = _full_name_scope()
 
         def find_name(var_list, name):
             for var_name in var_list:
                 if var_list[var_name] is not None and var_name == name:
...
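One behavior of NameScope worth calling out: entering the same prefix twice at the same level creates a numbered sibling (s1, then s1_1) instead of reusing the earlier scope, which is what the '/s1_1/' assertion in the test below relies on. A small sketch, assuming the module-private helper _full_name_scope() shown above:

import paddle.fluid as fluid
from paddle.fluid import framework

with fluid.name_scope("s1"):
    print(framework._full_name_scope())  # "/s1/"
with fluid.name_scope("s1"):
    print(framework._full_name_scope())  # "/s1_1/", a new sibling scope
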
@@ -15,7 +15,7 @@
 from __future__ import print_function
 
 import re
 from collections import defaultdict
-from paddle.fluid.framework import Program, Variable
+from paddle.fluid.framework import Program, Variable, name_scope
 from . import framework
 from . import layers
 from .backward import append_backward
@@ -237,7 +237,7 @@ class Optimizer(object):
             if param_and_grad[1] is None:
                 continue
             with param_and_grad[0].block.program.optimized_guard(
-                    param_and_grad):
+                    param_and_grad), name_scope("optimizer"):
                 if param_and_grad[0].trainable is True:
                     optimize_op = self._append_optimize_op(loss.block,
                                                            param_and_grad)
...
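With the change above, the ops appended by _append_optimize_op are created inside name_scope("optimizer"), so they should carry that prefix in op_namescope. A rough check for SGD (illustrative; later parameters land in numbered sibling scopes such as "/optimizer_1/"):

import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[1], dtype='float32')
y = fluid.layers.fc(input=x, size=1)
loss = fluid.layers.mean(y)
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

for op in fluid.default_main_program().block(0).ops:
    if op.type == 'sgd':
        # expected to start with "/optimizer"
        print(op.desc.attr("op_namescope"))
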
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid as fluid
class TestNameScope(unittest.TestCase):
    def test_name_scope(self):
        with fluid.name_scope("s1"):
            a = fluid.layers.data(name='data', shape=[1], dtype='int32')
            b = a + 1
            with fluid.name_scope("s2"):
                c = b * 1
            with fluid.name_scope("s3"):
                d = c / 1
        with fluid.name_scope("s1"):
            f = fluid.layers.pow(d, 2.0)
        with fluid.name_scope("s4"):
            g = f - 1

        for op in fluid.default_main_program().block(0).ops:
            if op.type == 'elementwise_add':
                self.assertEqual(op.desc.attr("op_namescope"), '/s1/')
            elif op.type == 'elementwise_mul':
                self.assertEqual(op.desc.attr("op_namescope"), '/s1/s2/')
            elif op.type == 'elementwise_div':
                self.assertEqual(op.desc.attr("op_namescope"), '/s1/s3/')
            elif op.type == 'elementwise_sub':
                self.assertEqual(op.desc.attr("op_namescope"), '/s4/')
            elif op.type == 'pow':
                self.assertEqual(op.desc.attr("op_namescope"), '/s1_1/')
@@ -67,6 +67,7 @@ def fc_with_batchnorm(use_feed):
 
     hidden = img
     for _ in range(1):
+        with fluid.name_scope("hidden"):
             hidden = fluid.layers.fc(
                 hidden,
                 size=200,
@@ -75,8 +76,9 @@ def fc_with_batchnorm(use_feed):
                     initializer=fluid.initializer.Constant(value=1.0)))
             hidden = fluid.layers.batch_norm(input=hidden)
+    with fluid.name_scope("fc_layer"):
         prediction = fluid.layers.fc(hidden, size=10, act='softmax')
+    with fluid.name_scope("loss"):
         loss = fluid.layers.cross_entropy(input=prediction, label=label)
         loss = fluid.layers.mean(loss)
     return loss
...