Unverified commit c709a04a, authored by Xin Pan, committed by GitHub

Merge pull request #13018 from panyx0718/name

support op_namescope for better debugging
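Ops created inside a fluid.name_scope(...) block are tagged with a hierarchical op_namescope attribute, which the graph visualization pass uses to print qualified node names. A minimal usage sketch (variable names and shapes are illustrative, not from this PR):

    import paddle.fluid as fluid

    with fluid.name_scope("encoder"):
        a = fluid.layers.data(name='data', shape=[1], dtype='int32')
        b = a + 1  # this elementwise_add op gets op_namescope '/encoder/'

    # Inspect the attribute on every op in the default main program.
    for op in fluid.default_main_program().block(0).ops:
        print(op.type, op.desc.attr("op_namescope"))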
......@@ -36,6 +36,7 @@ paddle.fluid.default_startup_program ArgSpec(args=[], varargs=None, keywords=Non
paddle.fluid.default_main_program ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
paddle.fluid.program_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
paddle.fluid.get_var ArgSpec(args=['name', 'program'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.name_scope ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
paddle.fluid.Executor.__init__ ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None)
paddle.fluid.Executor.close ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.Executor.run ArgSpec(args=['self', 'program', 'feed', 'fetch_list', 'feed_var_name', 'fetch_var_name', 'scope', 'return_numpy', 'use_program_cache'], varargs=None, keywords=None, defaults=(None, None, None, 'feed', 'fetch', None, True, False))
......
......@@ -16,13 +16,27 @@ limitations under the License. */
#include <unordered_set>
#include "paddle/fluid/framework/ir/graph_viz_pass.h"
#include "paddle/fluid/framework/op_proto_maker.h"
#include "paddle/fluid/inference/analysis/dot.h"
#include "paddle/fluid/string/printf.h"
namespace paddle {
namespace framework {
namespace ir {
static const char kGraphVizPath[] = "graph_viz_path";
using inference::analysis::Dot;
namespace {
const char kGraphVizPath[] = "graph_viz_path";

std::string FormatName(const Node* node) {
  if (!node->IsOp() || !node->Op() ||
      !node->Op()->HasAttr(OpProtoAndCheckerMaker::OpNamescopeAttrName())) {
    return node->Name();
  }
  const std::string full_scope = boost::get<std::string>(
      node->Op()->GetAttr(OpProtoAndCheckerMaker::OpNamescopeAttrName()));
  return string::Sprintf("%s%s", full_scope.c_str(), node->Name().c_str());
}
}  // namespace
std::unique_ptr<ir::Graph> GraphVizPass::ApplyImpl(
    std::unique_ptr<ir::Graph> graph) const {
......@@ -54,7 +68,7 @@ std::unique_ptr<ir::Graph> GraphVizPass::ApplyImpl(
  auto marked_nodes = ConsumeMarkedNodes(graph.get());
  // Create nodes
  for (const Node* n : graph->Nodes()) {
    std::string node_id = n->Name() + "(" + std::to_string(n->id()) + ")";
    std::string node_id = FormatName(n) + "(" + std::to_string(n->id()) + ")";
    if (n->IsOp()) {
      decltype(op_attrs) attr =
          marked_nodes.count(n) ? marked_op_attrs : op_attrs;
......
......@@ -55,11 +55,11 @@ class Node {
  std::string Name() const { return name_; }

  VarDesc* Var() {
    PADDLE_ENFORCE(type_ == Type::kVariable);
    PADDLE_ENFORCE(IsVar());
    return var_desc_.get();
  }

  OpDesc* Op() {
  OpDesc* Op() const {
    PADDLE_ENFORCE(IsOp());
    return op_desc_.get();
  }
......
......@@ -129,6 +129,9 @@ void OpProtoAndCheckerMaker::operator()(proto::OpProto* proto,
"Optimized for variable")
.SetDefault({});
AddAttr<std::string>(OpNamescopeAttrName(), "Operator name with namesope.")
.SetDefault("");
Validate();
}
......
......@@ -39,6 +39,7 @@ class OpProtoAndCheckerMaker {
 public:
  static const char *OpRoleAttrName() { return "op_role"; }
  static const char *OpRoleVarAttrName() { return "op_role_var"; }
  static const char *OpNamescopeAttrName() { return "op_namescope"; }

  void operator()(proto::OpProto *proto, OpAttrChecker *attr_checker);
......
......@@ -43,6 +43,9 @@ void BindConstValue(pybind11::module* m) {
  op_proto_and_checker_maker.def(
      "kOpRoleVarAttrName",
      framework::OpProtoAndCheckerMaker::OpRoleVarAttrName);
  op_proto_and_checker_maker.def(
      "kOpNameScopeAttrName",
      framework::OpProtoAndCheckerMaker::OpNamescopeAttrName);
}
} // namespace pybind
......
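The new binding exposes the attribute key to Python; framework.py fetches it via op_maker.kOpNameScopeAttrName(). A quick sanity-check sketch (assuming a build that includes this change):

    import paddle.fluid.core as core

    op_maker = core.op_proto_and_checker_maker
    # OpNamescopeAttrName() returns "op_namescope" on the C++ side.
    assert op_maker.kOpNameScopeAttrName() == "op_namescope"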
......@@ -43,6 +43,7 @@ __all__ = [
    'default_main_program',
    'program_guard',
    'get_var',
    'name_scope',
]
EMPTY_VAR_NAME = core.kEmptyVarName()
......@@ -52,6 +53,70 @@ ZERO_VAR_SUFFIX = core.kZeroVarSuffix()
CONTROL_DEP_VAR_PREFIX = core.kControlDepVarName()
class NameScope(object):
    def __init__(self, name="", parent=None):
        self._children = dict()
        self._name = name
        self._parent = parent

    def child(self, prefix):
        if prefix not in self._children:
            new_child = NameScope(prefix, self)
            self._children[prefix] = [new_child]
        else:
            new_child = NameScope(prefix + "_%d" % len(self._children[prefix]),
                                  self)
            self._children[prefix].append(new_child)
        return new_child

    def parent(self):
        return self._parent

    def name(self):
        return self._name


_name_scope = NameScope()
@contextlib.contextmanager
def name_scope(prefix=None):
    """
    Generate hierarchical name prefixes for the operators.

    Note: This should only be used for debugging and visualization purposes.
    Don't use it for serious analysis such as graph/program transformations.

    Args:
        prefix(str): prefix of the name scope.

    Examples:
        .. code-block:: python

          with name_scope("encoder"):
              ...
          with name_scope("decoder"):
              ...
              with name_scope("attention"):
                  ...
    """
    # TODO(panyx0718): Only [0-9a-z].
    assert prefix, "namescope prefix cannot be empty."
    global _name_scope
    _name_scope = _name_scope.child(prefix)
    yield
    _name_scope = _name_scope.parent()


def _full_name_scope():
    global _name_scope
    scope = _name_scope
    name = ""
    while scope:
        name = scope.name() + "/" + name
        scope = scope.parent()
    return name
def generate_control_dev_var_name():
    import random
    return CONTROL_DEP_VAR_PREFIX + "@" + str(random.random())
......@@ -515,6 +580,9 @@ class Operator(object):
        self.desc.set_type(type)
        proto = OpProtoHolder.instance().get_op_proto(type)

        namescope_var_name = op_maker.kOpNameScopeAttrName()
        op_attrs[namescope_var_name] = _full_name_scope()

        def find_name(var_list, name):
            for var_name in var_list:
                if var_list[var_name] is not None and var_name == name:
......
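Note the disambiguation rule in NameScope.child: the first child created for a prefix keeps the bare name, while re-entering the same prefix at the same level appends a numeric suffix. A small sketch of the behavior, reusing the NameScope class from the diff above:

    root = NameScope()
    first = root.child("s1")   # named "s1"
    second = root.child("s1")  # sibling with the same prefix becomes "s1_1"
    assert first.name() == "s1"
    assert second.name() == "s1_1"

This is why the pow op in the test further down ends up under '/s1_1/'.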
......@@ -15,7 +15,7 @@
from __future__ import print_function
import re
from collections import defaultdict
from paddle.fluid.framework import Program, Variable
from paddle.fluid.framework import Program, Variable, name_scope
from . import framework
from . import layers
from .backward import append_backward
......@@ -237,7 +237,7 @@ class Optimizer(object):
            if param_and_grad[1] is None:
                continue
            with param_and_grad[0].block.program.optimized_guard(
                    param_and_grad):
                    param_and_grad), name_scope("optimizer"):
                if param_and_grad[0].trainable is True:
                    optimize_op = self._append_optimize_op(loss.block,
                                                           param_and_grad)
......
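Since optimize ops are now created under name_scope("optimizer"), their op_namescope attribute records where they were appended; the scope is re-entered once per parameter, so later entries pick up numeric suffixes. A sketch for inspecting this (the toy network and SGD choice are illustrative):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[1], dtype='float32')
    loss = fluid.layers.mean(fluid.layers.fc(x, size=1))
    fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

    for op in fluid.default_main_program().block(0).ops:
        if op.type == 'sgd':
            # e.g. '/optimizer/' for the first parameter, '/optimizer_1/' next
            print(op.desc.attr("op_namescope"))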
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid as fluid
class TestNameScope(unittest.TestCase):
    def test_name_scope(self):
        with fluid.name_scope("s1"):
            a = fluid.layers.data(name='data', shape=[1], dtype='int32')
            b = a + 1
            with fluid.name_scope("s2"):
                c = b * 1
            with fluid.name_scope("s3"):
                d = c / 1
        with fluid.name_scope("s1"):
            f = fluid.layers.pow(d, 2.0)
        with fluid.name_scope("s4"):
            g = f - 1

        for op in fluid.default_main_program().block(0).ops:
            if op.type == 'elementwise_add':
                self.assertEqual(op.desc.attr("op_namescope"), '/s1/')
            elif op.type == 'elementwise_mul':
                self.assertEqual(op.desc.attr("op_namescope"), '/s1/s2/')
            elif op.type == 'elementwise_div':
                self.assertEqual(op.desc.attr("op_namescope"), '/s1/s3/')
            elif op.type == 'elementwise_sub':
                self.assertEqual(op.desc.attr("op_namescope"), '/s4/')
            elif op.type == 'pow':
                self.assertEqual(op.desc.attr("op_namescope"), '/s1_1/')
......@@ -67,7 +67,10 @@ class TestOperator(unittest.TestCase):
        self.assertEqual(mul_op.output("Out"), ["mul.out"])
        self.assertEqual(
            set(mul_op.attr_names),
            set(["x_num_col_dims", "y_num_col_dims", "op_role", "op_role_var"]))
            set([
                "x_num_col_dims", "y_num_col_dims", "op_role", "op_role_var",
                "op_namescope"
            ]))
        self.assertEqual(mul_op.has_attr("x_num_col_dims"), True)
        self.assertEqual(mul_op.attr_type("x_num_col_dims"), core.AttrType.INT)
        self.assertEqual(mul_op.attr("x_num_col_dims"), 1)
......
......@@ -67,6 +67,7 @@ def fc_with_batchnorm(use_feed):
    hidden = img
    for _ in range(1):
        with fluid.name_scope("hidden"):
            hidden = fluid.layers.fc(
                hidden,
                size=200,
......@@ -75,8 +76,9 @@ def fc_with_batchnorm(use_feed):
                    initializer=fluid.initializer.Constant(value=1.0)))

            hidden = fluid.layers.batch_norm(input=hidden)
    with fluid.name_scope("fc_layer"):
        prediction = fluid.layers.fc(hidden, size=10, act='softmax')
    with fluid.name_scope("loss"):
        loss = fluid.layers.cross_entropy(input=prediction, label=label)
        loss = fluid.layers.mean(loss)
    return loss
......