Unverified commit 8f872863, authored by qingqing01, committed by GitHub

Fix bug for backward transpiler when using parallel_do operator. (#9282)

* Temporarily fix bug for backward transpiler when using parallel_do operator.

* Fix bug for backward transpiler when using parallel_do operator
Parent 466f28a6
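For context, the fix pairs two patterns. On the C++ side, each forward-only detection operator is registered via REGISTER_OPERATOR with paddle::framework::EmptyGradOpMaker instead of REGISTER_OP_WITHOUT_GRADIENT, so the operator carries an explicitly empty gradient maker. On the Python side, forward-only outputs are flagged with stop_gradient = True so the backward transpiler emits no gradient ops for them under parallel_do. A minimal sketch of the Python-side pattern, assuming the fluid-era static-graph API (the variable name, shape, and dtype are illustrative, not taken from the commit):

import paddle.fluid as fluid

# Illustrative forward-only tensor (e.g. decoded prior boxes); the name,
# shape, and dtype here are assumptions for this sketch.
box = fluid.layers.data(name='box', shape=[4], dtype='float32')

# The pattern this commit applies throughout detection.py: flag forward-only
# outputs so that append_backward / the backward transpiler skips them.
box.stop_gradient = True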
@@ -126,6 +126,7 @@ width and height.
 } // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP_WITHOUT_GRADIENT(box_coder, ops::BoxCoderOp, ops::BoxCoderOpMaker);
+REGISTER_OPERATOR(box_coder, ops::BoxCoderOp, ops::BoxCoderOpMaker,
+                  paddle::framework::EmptyGradOpMaker);
 REGISTER_OP_CPU_KERNEL(box_coder, ops::BoxCoderKernel<float>,
                        ops::BoxCoderKernel<double>);
@@ -188,8 +188,8 @@ The general steps are as follows. First, calculate the true positive and
 } // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP_WITHOUT_GRADIENT(detection_map, ops::DetectionMAPOp,
-                             ops::DetectionMAPOpMaker);
+REGISTER_OPERATOR(detection_map, ops::DetectionMAPOp, ops::DetectionMAPOpMaker,
+                  paddle::framework::EmptyGradOpMaker);
 REGISTER_OP_CPU_KERNEL(
     detection_map, ops::DetectionMAPOpKernel<paddle::platform::CPUPlace, float>,
     ops::DetectionMAPOpKernel<paddle::platform::CPUPlace, double>);
@@ -87,8 +87,9 @@ $$
 } // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP_WITHOUT_GRADIENT(iou_similarity, ops::IOUSimilarityOp,
-                             ops::IOUSimilarityOpMaker);
+REGISTER_OPERATOR(iou_similarity, ops::IOUSimilarityOp,
+                  ops::IOUSimilarityOpMaker,
+                  paddle::framework::EmptyGradOpMaker);
 REGISTER_OP_CPU_KERNEL(
     iou_similarity,
...
@@ -324,8 +324,9 @@ MatchIndices elements with value -1.
 } // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP_WITHOUT_GRADIENT(mine_hard_examples, ops::MineHardExamplesOp,
-                             ops::MineHardExamplesOpMaker);
+REGISTER_OPERATOR(mine_hard_examples, ops::MineHardExamplesOp,
+                  ops::MineHardExamplesOpMaker,
+                  paddle::framework::EmptyGradOpMaker);
 REGISTER_OP_CPU_KERNEL(
     mine_hard_examples,
...
@@ -168,7 +168,9 @@ https://arxiv.org/abs/1512.02325.
 } // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP_WITHOUT_GRADIENT(prior_box, ops::PriorBoxOp, ops::PriorBoxOpMaker);
+REGISTER_OPERATOR(prior_box, ops::PriorBoxOp, ops::PriorBoxOpMaker,
+                  paddle::framework::EmptyGradOpMaker);
 REGISTER_OP_CPU_KERNEL(
     prior_box, ops::PriorBoxOpKernel<paddle::platform::CPUPlace, float>,
     ops::PriorBoxOpKernel<paddle::platform::CPUPlace, double>);
@@ -153,8 +153,8 @@ template struct NegTargetAssignFunctor<platform::CPUDeviceContext, float,
 } // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP_WITHOUT_GRADIENT(target_assign, ops::TargetAssignOp,
-                             ops::TargetAssignOpMaker);
+REGISTER_OPERATOR(target_assign, ops::TargetAssignOp, ops::TargetAssignOpMaker,
+                  paddle::framework::EmptyGradOpMaker);
 REGISTER_OP_CPU_KERNEL(
     target_assign,
     ops::TargetAssignKernel<paddle::platform::CPUDeviceContext, int, float>,
...
@@ -129,13 +129,11 @@ def detection_output(loc,
         prior_box_var=prior_box_var,
         target_box=loc,
         code_type='decode_center_size')
-
     old_shape = scores.shape
     scores = ops.reshape(x=scores, shape=(-1, old_shape[-1]))
     scores = nn.softmax(input=scores)
     scores = ops.reshape(x=scores, shape=old_shape)
     scores = nn.transpose(scores, perm=[0, 2, 1])
-
     nmsed_outs = helper.create_tmp_variable(dtype=decoded_box.dtype)
     helper.append_op(
         type="multiclass_nms",
@@ -475,6 +473,7 @@ def ssd_loss(location,
     # 2. Compute confidence for mining hard examples
     # 2.1. Get the target label based on matched indices
     gt_label = ops.reshape(x=gt_label, shape=gt_label.shape + (1, ))
+    gt_label.stop_gradient = True
     target_label, _ = target_assign(
         gt_label, matched_indices, mismatch_value=background_label)
     # 2.2. Compute confidence loss.
@@ -482,10 +481,12 @@ def ssd_loss(location,
     confidence = __reshape_to_2d(confidence)
     target_label = tensor.cast(x=target_label, dtype='int64')
     target_label = __reshape_to_2d(target_label)
+    target_label.stop_gradient = True
     conf_loss = nn.softmax_with_cross_entropy(confidence, target_label)

     # 3. Mining hard examples
     conf_loss = ops.reshape(x=conf_loss, shape=(num, num_prior))
+    conf_loss.stop_gradient = True
     neg_indices = helper.create_tmp_variable(dtype='int32')
     dtype = matched_indices.dtype
     updated_matched_indices = helper.create_tmp_variable(dtype=dtype)
@@ -695,6 +696,8 @@ def multi_box_head(inputs,
         outputs={"Boxes": box,
                  "Variances": var},
         attrs=attrs, )
+    box.stop_gradient = True
+    var.stop_gradient = True
     return box, var

 def _reshape_with_axis_(input, axis=1):
...