diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc
index 6b378ec1bcb75aa1c925f830ce1e6823051e07e7..2ecece707314f1e8b1b0bc9ad28f53ec5e1d405e 100644
--- a/paddle/fluid/operators/conv_op.cc
+++ b/paddle/fluid/operators/conv_op.cc
@@ -60,8 +60,9 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const {
                    "Due to the settings of paddings, filter_dims and "
                    "dilations, the output size is less than 0, please check "
                    "again.");
-    output_shape.push_back(OutputSize(in_dims[i + 2], filter_dims[i + 2],
-                                      dilations[i], paddings[i], strides[i]));
+    output_shape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
+                                          dilations[i], paddings[i],
+                                          strides[i]));
   }
   ctx->SetOutputDim("Output", framework::make_ddim(output_shape));
   ctx->ShareLoD("Input", "Output");
diff --git a/paddle/fluid/operators/conv_op.h b/paddle/fluid/operators/conv_op.h
index ecbe3d505ac0f50f3871cc29c0274cb681a0bdfd..c93c2e73f720ae025a4ad4f8146a7c6c3c382eea 100644
--- a/paddle/fluid/operators/conv_op.h
+++ b/paddle/fluid/operators/conv_op.h
@@ -28,8 +28,8 @@ using Tensor = framework::Tensor;
 
 // Base convolution operator definations for other conv
 // like operators to reuse the implementation.
-inline int OutputSize(int input_size, int filter_size, int dilation,
-                      int padding, int stride) {
+inline int ConvOutputSize(int input_size, int filter_size, int dilation,
+                          int padding, int stride) {
   const int dkernel = dilation * (filter_size - 1) + 1;
   const int output_size = (input_size + 2 * padding - dkernel) / stride + 1;
   return output_size;
diff --git a/paddle/fluid/operators/parallel_do_op.cc b/paddle/fluid/operators/parallel_do_op.cc
index d63962bb524256e05a1fceff16da03499ab8b5b9..f09a79ffc5262a0ab50cf3f4001dd6a2912fb970 100644
--- a/paddle/fluid/operators/parallel_do_op.cc
+++ b/paddle/fluid/operators/parallel_do_op.cc
@@ -256,6 +256,10 @@ class ParallelDoGradOp : public framework::OperatorBase {
       }
     }
     for (auto &s : Outputs(framework::GradVarName(kParameters))) {
+      if (s == "@EMPTY@") {
+        continue;
+      }
+      VLOG(3) << "Moving " << s;
       CopyOrShare(*sub_scopes[0]->FindVar(s), place, scope.FindVar(s));
     }
     WaitOnPlaces(places);
@@ -266,6 +270,9 @@ class ParallelDoGradOp : public framework::OperatorBase {
                       const std::vector<framework::Scope *> &sub_scopes,
                       const platform::PlaceList &places) const {
     for (auto &s : Outputs(framework::GradVarName(kParameters))) {
+      if (s == "@EMPTY@") {
+        continue;
+      }
       VLOG(3) << "Accumulating " << s;
       if (s == framework::kEmptyVarName) continue;
       std::string tmp_name;
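
Note (not part of the patch): the renamed ConvOutputSize helper computes the standard dilated-convolution output size, dkernel = dilation * (filter_size - 1) + 1 and output = (input_size + 2 * padding - dkernel) / stride + 1. Below is a minimal standalone sketch of that same arithmetic with illustrative sample values; the main driver and the numbers are hypothetical and only demonstrate the formula.

#include <cassert>
#include <cstdio>

// Same arithmetic as ConvOutputSize in conv_op.h: dilation widens the
// effective kernel extent, then the usual (in + 2*pad - k) / stride + 1.
inline int ConvOutputSize(int input_size, int filter_size, int dilation,
                          int padding, int stride) {
  const int dkernel = dilation * (filter_size - 1) + 1;
  return (input_size + 2 * padding - dkernel) / stride + 1;
}

int main() {
  // 32-wide input, 3-wide filter, dilation 1, padding 1, stride 1 -> 32.
  assert(ConvOutputSize(32, 3, 1, 1, 1) == 32);
  // Dilation 2 makes the effective kernel 5 wide, shrinking the output to 30.
  assert(ConvOutputSize(32, 3, 2, 1, 1) == 30);
  std::printf("output sizes: %d, %d\n", ConvOutputSize(32, 3, 1, 1, 1),
              ConvOutputSize(32, 3, 2, 1, 1));
  return 0;
}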