Commit cf2f23cc authored by dangqingqing

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into bp_test

@@ -30,6 +30,7 @@ static void ForEachVarName(Map& names, T callback) {
   }
 }
 
+// return whether all the names + suffixes are in the set
 static bool AllInSet(
     const std::map<std::string, std::vector<std::string>>& names,
     const std::string& suffix, const std::unordered_set<std::string>& set) {
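
The body of AllInSet falls outside this hunk. For orientation only, a plausible sketch consistent with the new comment might look like the following; this is an illustration, not the committed implementation:

    #include <map>
    #include <string>
    #include <unordered_set>
    #include <vector>

    // Illustrative only: every name in `names`, with `suffix` appended,
    // must already be a member of `set`.
    static bool AllInSet(
        const std::map<std::string, std::vector<std::string>>& names,
        const std::string& suffix, const std::unordered_set<std::string>& set) {
      for (const auto& entry : names) {
        for (const auto& name : entry.second) {
          if (set.find(name + suffix) == set.end()) return false;
        }
      }
      return true;
    }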
@@ -48,7 +49,7 @@ static std::shared_ptr<OperatorBase> NOP() {
   return net_op;
 }
 
-//  Get backward operator from a forward operator, recursively implementation.
+//  Get backward operator from a forward operator, a recursive implementation.
 //
 //  no_grad_names the gradient variable names without gradient calculating.
 //
@@ -56,27 +57,30 @@ static std::shared_ptr<OperatorBase> NOP() {
 //  BackwardRecursive. use `uid = uniq_id++;` to get the unique index, and
 //  pass `uniq_id` through recursive calling.
 //
-//  returns The backward operator. For simple situation, it is a simple
-//  operator. For complex situation, it is a NetOp.
+//  returns The backward operator. In a simple situation, it may be a simple
+//  operator; in a complex situation, it may be a NetOp.
 //
 //  See Backward.h for details
 static std::shared_ptr<OperatorBase> BackwardRecursive(
     const OperatorBase& forwardOp,
     std::unordered_set<std::string>& no_grad_names, size_t& uniq_id);
 std::shared_ptr<OperatorBase> BackwardRecursive(
     const OperatorBase& forwardOp,
     std::unordered_set<std::string>& no_grad_names, size_t& uniq_id) {
   // If all input gradients of forwarding operator do not need to calculate,
   // just return an NOP. Not return null ptr because NOP does not take
-  // too much time for calculation, but it is useful for simplifying logic.
-  if (AllInSet(forwardOp.inputs_, kGradVarSuffix, no_grad_names)) {
+  // much time for calculation, but it is useful for simplifying logic.
+  if (AllInSet(forwardOp.inputs_ /*names*/, kGradVarSuffix /*suffix*/,
+               no_grad_names /*set*/)) {
     return NOP();
   }
 
   // All output gradients of forwarding operator do not need to calculate.
   // Then all input gradients cannot be computed at all, and we put them into
   // `no_grad_names` set. Return an NOP.
-  if (AllInSet(forwardOp.outputs_, kGradVarSuffix, no_grad_names)) {
+  if (AllInSet(forwardOp.outputs_ /*names*/, kGradVarSuffix /*suffix*/,
+               no_grad_names /*set*/)) {
     ForEachVarName(forwardOp.inputs_,
                    [&no_grad_names](const std::string& name) -> bool {
                      no_grad_names.insert(GradVarName(name));
@@ -93,11 +97,11 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
     auto& forwardNet = static_cast<const operators::NetOp&>(forwardOp);
 
     // Map from output gradient variable name to operator's indices in
-    // backward net. That operator generates that variable.
+    // backward net's ops_. That operator generates that variable.
     std::unordered_map<std::string, std::vector<size_t>> dup_output_ops;
 
     size_t local_op_id = 0;
-    // reversely travel forwardNet
+    // traverse forwardNet in reverse order and collect all duplicate outputs.
     for (auto it = forwardNet.ops_.rbegin(); it != forwardNet.ops_.rend();
          ++it, ++local_op_id) {
       auto fwd = *it;
@@ -112,35 +116,41 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
     // Get unique ID for this method.
     auto uid = uniq_id++;
     // TODO(dzh): more comment
+    // multiple operators which have the same output (y for example) may
+    // overwrite the same y variable when backward; special operations are taken
+    // to handle this case. For each duplicate output, rename it to an alias
+    // (original name with an offset), append an `add` op for its operator,
+    // and finally sum all the alias variables into the final output variable y.
     using Pos = std::pair<size_t, std::shared_ptr<OperatorBase>>;
     std::list<Pos> insert_position;
     for (auto& dup_output_op : dup_output_ops) {
       const std::string& name = dup_output_op.first;
       auto& dup_op = dup_output_op.second;
+      // no duplicate output
       if (dup_op.size() == 1) continue;
-      std::vector<std::string> dup_outputs;
+      // process the duplicate outputs
+      std::vector<std::string> dup_outputs;
       for (size_t i = 0; i < dup_op.size(); ++i) {
+        // rename each duplicate output to an alias
         auto op_offset = dup_op[i];
         dup_outputs.push_back(name + "@RENAME@" + std::to_string(uid) + "@" +
                               std::to_string(i));
         net->ops_[op_offset]->Rename(name, dup_outputs.back());
       }
+      // collect all the offsets to append an `add` op for each alias
       insert_position.push_back(
-          {dup_op.back(),
-           OpRegistry::CreateOp(
-               "add", {{"X", {dup_outputs}}}, {{"Out", {name}}},
-               {{"input_format",
-                 std::vector<int>{0, static_cast<int>(dup_outputs.size())}}})});
+          {dup_op.back(), OpRegistry::CreateOp("add", {{"X", {dup_outputs}}},
+                                               {{"Out", {name}}}, {})});
     }
+    // make sure the inserted `add` ops follow the BFS order.
     insert_position.sort(
         [](const Pos& l, const Pos& r) { return l.first > r.first; });
 
     for (auto& pos : insert_position) {
       net->InsertOp(pos.first + 1, pos.second);
     }
   } else {
     std::shared_ptr<OperatorBase> grad_op = OpRegistry::CreateGradOp(forwardOp);
@@ -176,7 +186,7 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
   net->type_ = "@GENERATED_BACKWARD@";
   net->CompleteAddOp();
   return net;
-}
+}  // namespace framework
 
 // See header for comments
 std::shared_ptr<OperatorBase> Backward(
......
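
The alias scheme above is easiest to see with concrete names. The stand-alone sketch below is illustrative only: the variable `y@GRAD`, the uid value, and the count of two writers are assumed for the demo. It builds the same `<name>@RENAME@<uid>@<i>` aliases that `Rename` installs; the inserted `add` op then sums them back into the original output:

    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      const std::string name = "y@GRAD";  // assumed duplicate output for the demo
      const size_t uid = 0;               // unique id of this BackwardRecursive call
      std::vector<std::string> dup_outputs;
      for (size_t i = 0; i < 2; ++i) {    // pretend two ops both wrote y@GRAD
        dup_outputs.push_back(name + "@RENAME@" + std::to_string(uid) + "@" +
                              std::to_string(i));
      }
      for (const auto& alias : dup_outputs) std::cout << alias << "\n";
      // Prints:
      //   y@GRAD@RENAME@0@0
      //   y@GRAD@RENAME@0@1
      // An `add` op with X = dup_outputs and Out = {"y@GRAD"} is then inserted
      // right after the last op that produced an alias.
    }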
@@ -14,14 +14,21 @@ limitations under the License. */
 
 #pragma once
 
-#include <execinfo.h>
+#include <dlfcn.h>     // for dladdr
+#include <execinfo.h>  // for backtrace
 #include <iomanip>
+#include <memory>
 #include <sstream>
 #include <stdexcept>
 #include <string>
 
 #include "paddle/string/printf.h"
 #include "paddle/string/to_string.h"
 
+#ifdef __GNUC__
+#include <cxxabi.h>  // for __cxa_demangle
+#endif
+
 #ifndef PADDLE_ONLY_CPU
 
 #include "paddle/platform/dynload/cublas.h"
@@ -39,6 +46,19 @@ limitations under the License. */
 namespace paddle {
 namespace platform {
 
+namespace {
+#ifdef __GNUC__
+inline std::string demangle(std::string name) {
+  int status = -4;  // some arbitrary value to eliminate the compiler warning
+  std::unique_ptr<char, void (*)(void*)> res{
+      abi::__cxa_demangle(name.c_str(), NULL, NULL, &status), std::free};
+  return (status == 0) ? res.get() : name;
+}
+#else
+inline std::string demangle(std::string name) { return name; }
+#endif
+}  // namespace
+
 struct EnforceNotMet : public std::exception {
   std::exception_ptr exp_;
   std::string err_str_;
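
A quick stand-alone check of what the `abi::__cxa_demangle` call above returns, assuming a GCC/Clang toolchain (the exact mangled spelling is compiler-specific):

    #include <cxxabi.h>
    #include <cstdlib>
    #include <iostream>
    #include <memory>
    #include <typeinfo>
    #include <vector>

    int main() {
      // typeid(...).name() yields the mangled spelling,
      // e.g. "St6vectorIiSaIiEE" under GCC.
      const char* mangled = typeid(std::vector<int>).name();
      int status = -1;
      std::unique_ptr<char, void (*)(void*)> res{
          abi::__cxa_demangle(mangled, nullptr, nullptr, &status), std::free};
      // Prints something like: std::vector<int, std::allocator<int> >
      std::cout << (status == 0 ? res.get() : mangled) << std::endl;
    }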
@@ -48,15 +68,29 @@ struct EnforceNotMet : public std::exception {
       std::rethrow_exception(exp_);
     } catch (const std::exception& exp) {
       std::ostringstream sout;
+
       sout << string::Sprintf("%s at [%s:%d]", exp.what(), f, l) << std::endl;
-      sout << "Call Stacks: " << std::endl;
+      sout << "PaddlePaddle Call Stacks: " << std::endl;
+
       void* call_stack[TRACE_STACK_LIMIT];
-      int sz = backtrace(call_stack, TRACE_STACK_LIMIT);
-      auto line = backtrace_symbols(call_stack, sz);
-      for (int i = 0; i < sz; ++i) {
-        sout << line[i] << std::endl;
+      auto size = backtrace(call_stack, TRACE_STACK_LIMIT);
+      auto symbols = backtrace_symbols(call_stack, size);
+
+      Dl_info info;
+      for (int i = 0; i < size; ++i) {
+        if (dladdr(call_stack[i], &info)) {
+          auto demangled = demangle(info.dli_sname);
+          auto addr_offset = static_cast<char*>(call_stack[i]) -
+                             static_cast<char*>(info.dli_saddr);
+          sout << string::Sprintf("%-3d %*0p %s + %zd\n", i,
+                                  2 + sizeof(void*) * 2, call_stack[i],
+                                  demangled, addr_offset);
+        } else {
+          sout << string::Sprintf("%-3d %*0p %s\n", i, 2 + sizeof(void*) * 2,
+                                  call_stack[i]);
+        }
       }
-      free(line);
+      free(symbols);
+
       err_str_ = sout.str();
     }
   }
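
For readers unfamiliar with the three calls combined above, here is a minimal stand-alone sketch of the same technique: capture frames with `backtrace`, resolve each with `dladdr`, and demangle when a symbol is available. It assumes glibc and a GCC/Clang toolchain; build with something like `g++ demo.cc -rdynamic -ldl` so symbol names are exported:

    #include <cxxabi.h>    // abi::__cxa_demangle
    #include <dlfcn.h>     // dladdr
    #include <execinfo.h>  // backtrace
    #include <cstdio>
    #include <cstdlib>
    #include <memory>
    #include <string>

    static std::string Demangle(const char* name) {
      int status = -1;
      std::unique_ptr<char, void (*)(void*)> res{
          abi::__cxa_demangle(name, nullptr, nullptr, &status), std::free};
      return status == 0 ? res.get() : name;
    }

    static void PrintStack() {
      void* frames[64];
      int depth = backtrace(frames, 64);
      for (int i = 0; i < depth; ++i) {
        Dl_info info;
        // dladdr may find no symbol (dli_sname == NULL), e.g. in stripped
        // binaries, so both cases are handled.
        if (dladdr(frames[i], &info) && info.dli_sname != nullptr) {
          std::printf("%-3d %p %s\n", i, frames[i],
                      Demangle(info.dli_sname).c_str());
        } else {
          std::printf("%-3d %p ??\n", i, frames[i]);
        }
      }
    }

    int main() { PrintStack(); }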
@@ -170,7 +204,7 @@ inline void throw_on_error(T e) {
  *     PADDLE_ENFORCE_EQ(a, b);
  *
  * will raise an expression described as follows:
- * "enforce a == b failed, 1 != 2" with detailed stack infomation.
+ * "enforce a == b failed, 1 != 2" with detailed stack information.
  *
  * extra messages are also supported, for example:
  *     PADDLE_ENFORCE(a, b, "some simple enforce failed between %d numbers", 2)
......
@@ -298,8 +298,8 @@ def pnpair_evaluator(
         input,
         label,
         info,
-        name=None,
-        weight=None, ):
+        weight=None,
+        name=None, ):
     """
     Positive-negative pair rate Evaluator which adapts to rank task like
     learning to rank. This evaluator must contain at least three layers.
@@ -308,27 +308,31 @@ def pnpair_evaluator(
 
     .. code-block:: python
 
-       eval = pnpair_evaluator(input, info, label)
+       eval = pnpair_evaluator(input, label, info)
 
-    :param name: Evaluator name.
-    :type name: None|basestring
     :param input: Input Layer name. The output prediction of network.
     :type input: LayerOutput
     :param label: Label layer name.
     :type label: LayerOutput
-    :param info: Label layer name. (TODO, explaination)
+    :param info: Info layer name. (TODO, explanation)
     :type info: LayerOutput
     :param weight: Weight Layer name. It should be a matrix with size
                   [sample_num, 1]. (TODO, explanation)
     :type weight: LayerOutput
+    :param name: Evaluator name.
+    :type name: None|basestring
     """
+    if not isinstance(input, list):
+        input = [input]
+    if label:
+        input.append(label)
+    if info:
+        input.append(info)
     evaluator_base(
-        name=name,
-        type="pnpair",
         input=input,
-        label=label,
-        info=info,
-        weight=weight)
+        type="pnpair",
+        weight=weight,
+        name=name, )
 
 @evaluator(EvaluatorAttribute.FOR_CLASSIFICATION)
@@ -429,12 +433,12 @@ def chunk_evaluator(
 
     .. code-block:: text
 
        Scheme    Description
        plain     Use the same label for the whole chunk.
        IOB       Two labels for chunk type X, B-X for chunk beginning and I-X for chunk inside.
        IOE       Two labels for chunk type X, E-X for chunk ending and I-X for chunk inside.
        IOBES     Four labels for chunk type X, B-X for chunk beginning, I-X for chunk inside, E-X for chunk end and S-X for single word chunk.
 
     To make it clear, let's illustrate by an NER example.
     Assuming that there are three named entity types including ORG, PER and LOC which are called 'chunk type' here,
     if 'IOB' scheme were used, the label set will be extended to a set including B-ORG, I-ORG, B-PER, I-PER, B-LOC, I-LOC and O,
@@ -451,7 +455,7 @@ def chunk_evaluator(
        tagType = label % numTagType
        chunkType = label / numTagType
        otherChunkType = numChunkTypes
 
     The following table shows the mapping rule between tagType and tag type in each scheme.
 
     .. code-block:: text
@@ -475,7 +479,7 @@ def chunk_evaluator(
        O      6
 
     In this example, chunkType has three values: 0 for ORG, 1 for PER, 2 for LOC, because the scheme is
     "IOB" so tagType has two values: 0 for B and 1 for I.
 
     Here we will use I-LOC to explain the above mapping rules in detail.
     For I-LOC, the label id is 5, so we can get tagType=1 and chunkType=2, which means I-LOC is a part of NER chunk LOC
     and the tag is I.
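
The I-LOC walk-through can be checked mechanically with the mapping formulas above; a tiny sketch, with all values taken from the example (IOB scheme, three chunk types):

    #include <cstdio>

    int main() {
      const int numTagType = 2;  // IOB scheme: tag B = 0, tag I = 1
      const int label = 5;       // I-LOC in the example table
      std::printf("tagType=%d chunkType=%d\n",
                  label % numTagType,   // 5 % 2 = 1 -> tag I
                  label / numTagType);  // 5 / 2 = 2 -> chunk type LOC
    }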
@@ -486,7 +490,7 @@ def chunk_evaluator(
 
        eval = chunk_evaluator(input, label, chunk_scheme, num_chunk_types)
 
     :param input: The input layers.
     :type input: LayerOutput
     :param label: An input layer containing the ground truth label.
......