diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc
index a82dc4ef4b99b8d18aa24ba2259ca2a89440bacf..1fddad8d16f9db09eaaba40229847e74ea028a8d 100644
--- a/paddle/framework/backward.cc
+++ b/paddle/framework/backward.cc
@@ -30,6 +30,7 @@ static void ForEachVarName(const Map& names, T callback) {
   }
 }
 
+// returns whether all the names + suffixes are in the set
 static bool AllInSet(
     const std::map<std::string, std::vector<std::string>>& names,
     const std::string& suffix, const std::unordered_set<std::string>& set) {
@@ -48,7 +49,7 @@ static std::shared_ptr<OperatorBase> NOP() {
   return net_op;
 }
 
-// Get backward operator from a forward operator, recursively implementation.
+// Get backward operator from a forward operator, a recursive implementation.
 //
 // no_grad_names the gradient variable names without gradient calculating.
 //
@@ -56,28 +57,31 @@ static std::shared_ptr<OperatorBase> NOP() {
 // BackwardRecursive. use `uid = uniq_id++;` to get the unique index, and
 // pass `uniq_id` through recursive calling.
 //
-// returns The backward operator. For simple situation, it is a simple
-// operator. For complex situation, it is a NetOp.
+// returns The backward operator. In a simple situation, it may be a simple
+// operator; in a complex situation, it may be a NetOp.
 //
 // See Backward.h for details
 static std::shared_ptr<OperatorBase> BackwardRecursive(
     const OperatorBase& forwardOp,
     std::unordered_set<std::string>& no_grad_names, size_t& uniq_id);
+
 std::shared_ptr<OperatorBase> BackwardRecursive(
     const OperatorBase& forwardOp,
     std::unordered_set<std::string>& no_grad_names, size_t& uniq_id) {
   // If all input gradients of forwarding operator do not need to calculate,
   // just return an NOP. Not return null ptr because NOP does not take
-  // too much time for calculation, but it is useful for simplifying logic.
-  if (AllInSet(forwardOp.Inputs(), kGradVarSuffix, no_grad_names)) {
+  // too much time for calculation, but it is useful for simplifying logic.
+  if (AllInSet(forwardOp.Inputs() /*names*/, kGradVarSuffix /*suffix*/,
+               no_grad_names /*set*/)) {
     return NOP();
   }
 
   // All output gradients of forwarding operator do not need to calculate.
   // Then all input gradients cannot be computed at all, and we put them into
   // `no_grad_names` set. Return an NOP.
-  if (AllInSet(forwardOp.Outputs(), kGradVarSuffix, no_grad_names)) {
-    ForEachVarName(forwardOp.Inputs(),
+  if (AllInSet(forwardOp.Outputs() /*names*/, kGradVarSuffix /*suffix*/,
+               no_grad_names /*set*/)) {
+    ForEachVarName(forwardOp.inputs_,
                    [&no_grad_names](const std::string& name) -> bool {
                      no_grad_names.insert(GradVarName(name));
                      return false;
@@ -93,11 +97,11 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
     auto& forwardNet = static_cast<const operators::NetOp&>(forwardOp);
 
     // Map from output gradient variable name to operator's indices in
-    // backward net. That operator generates that variable.
+    // backward net's ops_. That operator generates that variable.
     std::unordered_map<std::string, std::vector<size_t>> dup_output_ops;
 
     size_t local_op_id = 0;
-    // reversely travel forwardNet
+    // traverse forwardNet in reverse order and collect all duplicate outputs.
    for (auto it = forwardNet.ops_.rbegin(); it != forwardNet.ops_.rend();
         ++it, ++local_op_id) {
      auto fwd = *it;
@@ -112,35 +116,41 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
     // Get unique ID for this method.
     auto uid = uniq_id++;
     // TODO(dzh): more comment
+    // multiple operators which have the same output (y for example) may
+    // overwrite the same y variable in the backward pass; special operations
+    // are taken to handle this case. For each duplicate output, rename it to
+    // an alias (the original name with an offset), append an `add` op, and
+    // finally sum all the alias variables into the final output variable y.
     using Pos = std::pair<size_t, std::shared_ptr<OperatorBase>>;
     std::list<Pos> insert_position;
     for (auto& dup_output_op : dup_output_ops) {
       const std::string& name = dup_output_op.first;
       auto& dup_op = dup_output_op.second;
+      // no duplicate output
       if (dup_op.size() == 1) continue;
-      std::vector<std::string> dup_outputs;
+      // process the duplicate outputs
+      std::vector<std::string> dup_outputs;
 
       for (size_t i = 0; i < dup_op.size(); ++i) {
+        // rename each duplicate output to an alias
         auto op_offset = dup_op[i];
         dup_outputs.push_back(name + "@RENAME@" + std::to_string(uid) + "@" +
                               std::to_string(i));
         net->ops_[op_offset]->Rename(name, dup_outputs.back());
       }
+      // record where to insert the `add` op that sums the aliases
       insert_position.push_back(
-          {dup_op.back(),
-           OpRegistry::CreateOp(
-               "add", {{"X", {dup_outputs}}}, {{"Out", {name}}},
-               {{"input_format",
-                 std::vector<int>{0, static_cast<int>(dup_outputs.size())}}})});
+          {dup_op.back(), OpRegistry::CreateOp("add", {{"X", {dup_outputs}}},
+                                               {{"Out", {name}}}, {})});
     }
 
+    // make sure the inserted `add` ops follow the BFS order.
     insert_position.sort(
         [](const Pos& l, const Pos& r) { return l.first > r.first; });
 
     for (auto& pos : insert_position) {
       net->InsertOp(pos.first + 1, pos.second);
     }
-
   } else {
     std::shared_ptr<OperatorBase> grad_op = OpRegistry::CreateGradOp(forwardOp);
@@ -176,7 +186,7 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
   net->SetType("@GENERATED_BACKWARD@");
   net->CompleteAddOp();
   return net;
-}
+}  // namespace framework
 
 // See header for comments
 std::shared_ptr<OperatorBase> Backward(
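Note: the rename-and-sum scheme described in the new comment is the heart of this hunk. Below is a minimal Python sketch of that scheme, assuming a toy `ops` list of dicts standing in for the backward net's `ops_`; `dedup_outputs` and the record layout are made up for illustration and are not the real `NetOp`/`OpRegistry` API.

```python
from collections import defaultdict

def dedup_outputs(ops, uid):
    """Toy model of the duplicate-output handling above.

    `ops` is a list of dicts like {"type": ..., "outputs": [...]}.
    """
    dup = defaultdict(list)
    for idx, op in enumerate(ops):
        for name in op["outputs"]:
            dup[name].append(idx)

    inserts = []
    for name, op_ids in dup.items():
        if len(op_ids) == 1:  # no duplicate output, nothing to do
            continue
        aliases = []
        for i, op_id in enumerate(op_ids):
            # rename each duplicate output to a unique alias
            alias = "%s@RENAME@%d@%d" % (name, uid, i)
            outs = ops[op_id]["outputs"]
            outs[outs.index(name)] = alias
            aliases.append(alias)
        # an `add` op sums the aliases back into the original variable;
        # it goes right after the last op that wrote one of them
        inserts.append((op_ids[-1] + 1,
                        {"type": "add", "inputs": aliases, "outputs": [name]}))

    # insert from the back so earlier positions stay valid, mirroring the
    # descending sort on `insert_position` in the C++ code
    for pos, add_op in sorted(inserts, key=lambda p: p[0], reverse=True):
        ops.insert(pos, add_op)
    return ops
```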
diff --git a/python/paddle/trainer_config_helpers/evaluators.py b/python/paddle/trainer_config_helpers/evaluators.py
index 44d52edfa7bae49bea196eba9387391b171840d8..57979db4de08989ab583b0ab41589c09789a0921 100644
--- a/python/paddle/trainer_config_helpers/evaluators.py
+++ b/python/paddle/trainer_config_helpers/evaluators.py
@@ -298,8 +298,8 @@ def pnpair_evaluator(
         input,
         label,
         info,
-        name=None,
-        weight=None, ):
+        weight=None,
+        name=None, ):
     """
     Positive-negative pair rate Evaluator which adapts to rank task like
     learning to rank. This evaluator must contain at least three layers.
@@ -308,27 +308,31 @@ def pnpair_evaluator(
 
     .. code-block:: python
 
-       eval = pnpair_evaluator(input, info, label)
+       eval = pnpair_evaluator(input, label, info)
 
-    :param name: Evaluator name.
-    :type name: None|basestring
     :param input: Input Layer name. The output prediction of network.
     :type input: LayerOutput
     :param label: Label layer name.
     :type label: LayerOutput
-    :param info: Label layer name. (TODO, explaination)
+    :param info: Info layer name. (TODO, explanation)
     :type info: LayerOutput
     :param weight: Weight Layer name. It should be a matrix with size
                   [sample_num, 1]. (TODO, explaination)
     :type weight: LayerOutput
+    :param name: Evaluator name.
+    :type name: None|basestring
     """
+    if not isinstance(input, list):
+        input = [input]
+    if label:
+        input.append(label)
+    if info:
+        input.append(info)
     evaluator_base(
-        name=name,
-        type="pnpair",
         input=input,
-        label=label,
-        info=info,
-        weight=weight)
+        type="pnpair",
+        weight=weight,
+        name=name, )
 
 
 @evaluator(EvaluatorAttribute.FOR_CLASSIFICATION)
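With this change `pnpair_evaluator` folds `label` and `info` into the `input` list before calling `evaluator_base`, instead of forwarding them as separate keyword arguments. A hypothetical call, where `prediction`, `label` and `query_info` are placeholder LayerOutput variables:

```python
eval = pnpair_evaluator(prediction, label, query_info)
# is now equivalent to calling the base helper with
#   evaluator_base(input=[prediction, label, query_info],
#                  type="pnpair", weight=None, name=None)
```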
@@ -429,12 +433,12 @@ def chunk_evaluator(
 
     .. code-block:: text
 
-       Scheme    Description 
+       Scheme    Description
        plain     Use the same label for the whole chunk.
-       IOB       Two labels for chunk type X, B-X for chunk begining and I-X for chunk inside. 
+       IOB       Two labels for chunk type X, B-X for chunk beginning and I-X for chunk inside.
        IOE       Two labels for chunk type X, E-X for chunk ending and I-X for chunk inside.
-       IOBES     Four labels for chunk type X, B-X for chunk begining, I-X for chunk inside, E-X for chunk end and S-X for single word chunk. 
-   
+       IOBES     Four labels for chunk type X, B-X for chunk beginning, I-X for chunk inside, E-X for chunk end and S-X for single word chunk.
+
     To make it clear, let's illustrate by an NER example.
     Assuming that there are three named entity types including ORG, PER and LOC which are called 'chunk type' here,
     if 'IOB' scheme were used, the label set will be extended to a set including B-ORG, I-ORG, B-PER, I-PER, B-LOC, I-LOC and O,
@@ -451,7 +455,7 @@ def chunk_evaluator(
         tagType = label % numTagType
         chunkType = label / numTagType
         otherChunkType = numChunkTypes
-    
+
     The following table shows the mapping rule between tagType and tag type in each scheme.
 
     .. code-block:: text
@@ -475,7 +479,7 @@ def chunk_evaluator(
         O     6
 
     In this example, chunkType has three values: 0 for ORG, 1 for PER, 2 for LOC, because the scheme is
-    "IOB" so tagType has two values: 0 for B and 1 for I. 
+    "IOB" so tagType has two values: 0 for B and 1 for I.
     Here we will use I-LOC to explain the above mapping rules in detail.
     For I-LOC, the label id is 5, so we can get tagType=1 and chunkType=2, which means I-LOC is a part of NER chunk LOC
     and the tag is I.
@@ -486,7 +490,7 @@ def chunk_evaluator(
 
         eval = chunk_evaluator(input, label, chunk_scheme, num_chunk_types)
 
-    
+
     :param input: The input layers.
     :type input: LayerOutput
     :param label: An input layer containing the ground truth label.
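The mapping rule quoted in the docstring is easy to sanity-check. A small self-contained snippet, assuming the IOB scheme (numTagType = 2) and the three chunk types ORG, PER and LOC from the example; `decode` is a throwaway helper, not part of the API:

```python
NUM_TAG_TYPES = 2              # IOB scheme: B and I
CHUNK_TYPES = ["ORG", "PER", "LOC"]

def decode(label):
    if label == NUM_TAG_TYPES * len(CHUNK_TYPES):
        return "O"                        # otherChunkType
    tag_type = label % NUM_TAG_TYPES      # 0 -> B, 1 -> I
    chunk_type = label // NUM_TAG_TYPES   # index into CHUNK_TYPES
    return "%s-%s" % ("BI"[tag_type], CHUNK_TYPES[chunk_type])

assert decode(5) == "I-LOC"   # the I-LOC example: tagType=1, chunkType=2
assert decode(0) == "B-ORG"
assert decode(6) == "O"
```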