From 36aeb30d12b2cdb7a763b59c70bc427eec7a49e3 Mon Sep 17 00:00:00 2001
From: dangqingqing
Date: Thu, 21 Sep 2017 11:07:56 +0800
Subject: [PATCH] Remove LoDTensor in some operators' InferShape and refine
 ShareLoD function.

---
 paddle/framework/operator.cc               | 15 +++----
 paddle/framework/operator.h                | 51 +++++++---------
 paddle/operators/accuracy_op.cc            |  4 +-
 paddle/operators/add_op.cc                 |  2 +-
 paddle/operators/concat_op.cc              |  2 +-
 paddle/operators/cos_sim_op.cc             | 14 +++---
 paddle/operators/cross_entropy_op.cc       |  2 +-
 paddle/operators/dropout_op.cc             |  2 +-
 paddle/operators/elementwise_mul_op.cc     | 10 ++---
 paddle/operators/fill_zeros_like_op.cc     |  4 +-
 paddle/operators/gather_op.cc              |  4 +-
 paddle/operators/gaussian_random_op.cc     |  2 +-
 paddle/operators/lookup_table_op.cc        |  6 +--
 paddle/operators/mean_op.cc                |  4 +-
 paddle/operators/minus_op.cc               |  4 +-
 paddle/operators/mul_op.cc                 | 10 ++---
 paddle/operators/pad_op.cc                 |  6 +--
 paddle/operators/prelu_op.cc               |  8 ++--
 paddle/operators/reshape_op.cc             |  6 +--
 paddle/operators/rowwise_add_op.cc         |  8 ++--
 paddle/operators/scale_op.cc               |  4 +-
 paddle/operators/scatter_op.cc             |  7 ++-
 paddle/operators/sequence_avg_pool_op.cc   |  5 +--
 paddle/operators/sgd_op.cc                 |  2 +-
 paddle/operators/sigmoid_op.cc             |  7 ++-
 paddle/operators/softmax_op.cc             |  5 +--
 paddle/operators/split_op.cc               |  2 +-
 paddle/operators/squared_l2_distance_op.cc | 12 +++--
 paddle/operators/sum_op.cc                 |  6 +--
 paddle/operators/top_k_op.cc               |  4 +-
 paddle/operators/uniform_random_op.cc      |  2 +-
 31 files changed, 93 insertions(+), 127 deletions(-)

diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc
index f8a64a7866..fdc0660837 100644
--- a/paddle/framework/operator.cc
+++ b/paddle/framework/operator.cc
@@ -207,23 +207,22 @@ const std::vector<const Tensor*> InferShapeContext::MultiInput<Tensor>(
 }
 
 template <>
-Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
-  auto* var = OutputVar(name);
-  return var == nullptr ? nullptr : const_cast<Tensor*>(GetTensorFromVar(var));
+Tensor* InferShapeContext::Output<Tensor>(const std::string& name) const {
+  auto var = OutputVar(name);
+  return var == nullptr ? nullptr : var->GetMutable<LoDTensor>();
 }
 
 template <>
-std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
+std::vector<Tensor*> InferShapeContext::MultiOutput<Tensor>(
     const std::string& name) const {
   auto names = op().Outputs(name);
   std::vector<Tensor*> res;
   res.reserve(names.size());
   std::transform(names.begin(), names.end(), std::back_inserter(res),
                  [&](const std::string& sub_name) {
-                   auto var = scope().FindVar(sub_name);
-                   return var == nullptr
-                              ? nullptr
-                              : const_cast<Tensor*>(GetTensorFromVar(var));
+                   auto var = scope_.FindVar(sub_name);
+                   return var == nullptr ? nullptr
+                                         : var->GetMutable<LoDTensor>();
                  });
   return res;
 }
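Note: the Output<Tensor> specialization above can hand back the Variable's
LoDTensor through a Tensor* because LoDTensor derives from Tensor, so the LoD
stays attached to the very storage that InferShape resizes. A minimal
illustration of that aliasing, assuming only the framework types (the variable
names are illustrative, not part of the patch):

    framework::Variable var;
    framework::LoDTensor* lod_t = var.GetMutable<framework::LoDTensor>();
    framework::Tensor* t = lod_t;  // upcast: LoDTensor is-a Tensor
    t->Resize({4, 2});  // resizes the same storage the LoD is attached to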
diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index 28a253ec0b..4a078258d2 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -277,9 +277,9 @@ class InferShapeContext {
     return res;
   }
 
-  std::vector<const Variable*> MultiOutputVar(const std::string& name) const {
+  std::vector<Variable*> MultiOutputVar(const std::string& name) const {
     auto names = op_.Outputs(name);
-    std::vector<const Variable*> res;
+    std::vector<Variable*> res;
     res.reserve(names.size());
     std::transform(names.begin(), names.end(), std::back_inserter(res),
                    [this](const std::string& name) {
@@ -336,12 +336,19 @@ class InferShapeContext {
     return &var->Get<T>();
   }
 
-  void ShareLoD(const std::string& in, const std::string& out) const {
-    PADDLE_ENFORCE(InputVar(in)->IsType<LoDTensor>(),
-                   "The Input(%s) must be LoDTensor.", in);
-    PADDLE_ENFORCE(OutputVar(out)->IsType<LoDTensor>(),
-                   "The Output(%s) must be LoDTensor.", out);
-    Output<LoDTensor>(out)->set_lod(Input<LoDTensor>(in)->lod());
+  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
+                size_t j = 0) const {
+    PADDLE_ENFORCE_LT(i, InputSize(in));
+    PADDLE_ENFORCE_LT(j, OutputSize(out));
+    auto* in_var = MultiInputVar(in)[i];
+    auto* out_var = MultiOutputVar(out)[j];
+    PADDLE_ENFORCE(in_var->IsType<LoDTensor>(),
+                   "The %d-th input of Input(%s) must be LoDTensor.", i, in);
+    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
+                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
+    auto in_tensor = in_var->Get<LoDTensor>();
+    auto* out_tensor = out_var->GetMutable<LoDTensor>();
+    out_tensor->set_lod(in_tensor.lod());
   }
 
  private:
@@ -388,38 +395,10 @@ class ExecutionContext : public InferShapeContext {
     return device_context_;
   }
 
-  // redefine Output function,
-  // use Variable::Get instead of Variable::GetMutable
-  template <typename T>
-  T* Output(const std::string& name) const {
-    auto var = OutputVar(name);
-    return var == nullptr ? nullptr : const_cast<T*>(&var->Get<T>());
-  }
-
-  // redefine MultiOutput function.
-  // use Variable::Get instead of Variable::GetMutable
-  template <typename T>
-  std::vector<T*> MultiOutput(const std::string& name) const {
-    auto names = op().Outputs(name);
-    std::vector<T*> res;
-    res.reserve(names.size());
-    std::transform(
-        names.begin(), names.end(), std::back_inserter(res),
-        [&](const std::string& sub_name) { return Output<T>(sub_name); });
-    return res;
-  }
-
  private:
   const platform::DeviceContext& device_context_;
 };
 
-template <>
-Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const;
-
-template <>
-std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
-    const std::string& name) const;
-
 class OpKernel {
  public:
   /**
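Note: the refined ShareLoD takes optional indices i and j, so LoD can be copied
from the i-th tensor of a multi-input slot to the j-th tensor of a multi-output
slot; the defaults i = 0, j = 0 keep single-tensor call sites such as
ctx.ShareLoD("X", /*->*/ "Out") unchanged. A minimal sketch of an indexed
caller, assuming a hypothetical operator whose "Out" slot mirrors its "X" slot
one-to-one (not an operator from this patch):

    void InferShape(const framework::InferShapeContext &ctx) const override {
      auto ins = ctx.MultiInput<framework::Tensor>("X");
      auto outs = ctx.MultiOutput<framework::Tensor>("Out");
      for (size_t i = 0; i < ins.size(); ++i) {
        outs[i]->Resize(ins[i]->dims());
        // Copy the LoD of the i-th input to the i-th output.
        ctx.ShareLoD("X", /*->*/ "Out", i, /*j=*/i);
      }
    }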
diff --git a/paddle/operators/accuracy_op.cc b/paddle/operators/accuracy_op.cc
index 391258b40b..70e4f9da12 100644
--- a/paddle/operators/accuracy_op.cc
+++ b/paddle/operators/accuracy_op.cc
@@ -39,8 +39,8 @@ class AccuracyOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(inference->dims()[0], label->dims()[0],
                       "inference size must be the same as label size");
 
-    ctx.Output<framework::LoDTensor>("Accuracy")->Resize({1});
-    ctx.ShareLoD("Inference", "Accuracy");
+    ctx.Output<framework::Tensor>("Accuracy")->Resize({1});
+    ctx.ShareLoD("Inference", /*->*/ "Accuracy");
   }
 };
diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc
index e83c1efeaf..ed11d09697 100644
--- a/paddle/operators/add_op.cc
+++ b/paddle/operators/add_op.cc
@@ -33,7 +33,7 @@ class AddOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(),
                       ctx.Input<Tensor>("Y")->dims(),
                       "Two input of Add Op's dimension must be same.");
-    ctx.Output<framework::LoDTensor>("Out")->Resize(
+    ctx.Output<framework::Tensor>("Out")->Resize(
         ctx.Input<Tensor>("X")->dims());
   }
 };
diff --git a/paddle/operators/concat_op.cc b/paddle/operators/concat_op.cc
index 223bb0ffe6..07f847079e 100644
--- a/paddle/operators/concat_op.cc
+++ b/paddle/operators/concat_op.cc
@@ -29,7 +29,7 @@ class ConcatOp : public framework::OperatorWithKernel {
                             "Output(Out) of ConcatOp should not be null.");
 
     auto ins = ctx.MultiInput<framework::Tensor>("X");
-    auto *out = ctx.Output<framework::LoDTensor>("Out");
+    auto *out = ctx.Output<framework::Tensor>("Out");
     size_t axis = static_cast<size_t>(ctx.Attr<int>("axis"));
     size_t n = ins.size();
diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc
index 840848fa08..b56ee2047b 100644
--- a/paddle/operators/cos_sim_op.cc
+++ b/paddle/operators/cos_sim_op.cc
@@ -54,10 +54,10 @@ class CosSimOp : public framework::OperatorWithKernel {
                    " just 1 (which will be broadcasted to match Input(X)).");
 
     // resize tensor
-    ctx.Output<framework::LoDTensor>("Out")->Resize({x_dims[0], 1});
-    ctx.Output<framework::LoDTensor>("XNorm")->Resize({x_dims[0], 1});
-    ctx.Output<framework::LoDTensor>("YNorm")->Resize({y_dims[0], 1});
-    ctx.ShareLoD("X", "Out");
+    ctx.Output<framework::Tensor>("Out")->Resize({x_dims[0], 1});
+    ctx.Output<framework::Tensor>("XNorm")->Resize({x_dims[0], 1});
+    ctx.Output<framework::Tensor>("YNorm")->Resize({y_dims[0], 1});
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
@@ -143,10 +143,8 @@ class CosSimOpGrad : public framework::OperatorWithKernel {
                    "Shape of Input(Out@Grad) must be [X.Dim(0), 1].");
 
     // resize tensor
-    auto *x_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
-    auto *y_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Y"));
+    auto *x_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
+    auto *y_grad = ctx.Output<framework::Tensor>(framework::GradVarName("Y"));
     if (x_grad) x_grad->Resize(x_dims);
     if (y_grad) y_grad->Resize(y_dims);
   }
diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc
index 5de8f1489d..fd91d39d5f 100644
--- a/paddle/operators/cross_entropy_op.cc
+++ b/paddle/operators/cross_entropy_op.cc
@@ -52,7 +52,7 @@ class CrossEntropyOp : public framework::OperatorWithKernel {
     }
ctx.Output("Y")->Resize({x->dims()[0], 1}); - ctx.ShareLoD("X", "Y"); + ctx.ShareLoD("X", /*->*/ "Y"); } }; diff --git a/paddle/operators/dropout_op.cc b/paddle/operators/dropout_op.cc index bfa1992d79..dc773e510e 100644 --- a/paddle/operators/dropout_op.cc +++ b/paddle/operators/dropout_op.cc @@ -38,7 +38,7 @@ class DropoutOp : public framework::OperatorWithKernel { if (ctx.Attr("is_training") == 1) { ctx.Output("Mask")->Resize(dims); } - ctx.ShareLoD("X", "Out"); + ctx.ShareLoD("X", /*->*/ "Out"); } }; diff --git a/paddle/operators/elementwise_mul_op.cc b/paddle/operators/elementwise_mul_op.cc index 304e45fa5b..02bd4c7b85 100644 --- a/paddle/operators/elementwise_mul_op.cc +++ b/paddle/operators/elementwise_mul_op.cc @@ -37,8 +37,8 @@ class ElementWiseMulOp : public framework::OperatorWithKernel { auto y_dim = ctx.Input("Y")->dims(); PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(), "Rank of first input must >= rank of second input.") - ctx.Output("Out")->Resize(x_dim); - ctx.ShareLoD("X", "Out"); + ctx.Output("Out")->Resize(x_dim); + ctx.ShareLoD("X", /*->*/ "Out"); } }; @@ -91,10 +91,8 @@ class ElementWiseMulOpGrad : public framework::OperatorWithKernel { auto x_dims = ctx.Input("X")->dims(); auto y_dims = ctx.Input("Y")->dims(); auto out_dims = ctx.Input(framework::GradVarName("Out"))->dims(); - auto *x_grad = - ctx.Output(framework::GradVarName("X")); - auto *y_grad = - ctx.Output(framework::GradVarName("Y")); + auto *x_grad = ctx.Output(framework::GradVarName("X")); + auto *y_grad = ctx.Output(framework::GradVarName("Y")); PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), "Rank of first input must >= rank of second input.") diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/operators/fill_zeros_like_op.cc index a238b59b78..761a527a55 100644 --- a/paddle/operators/fill_zeros_like_op.cc +++ b/paddle/operators/fill_zeros_like_op.cc @@ -28,9 +28,9 @@ class FillZerosLikeOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Y"), "Output(Y) of FillZerosLikeOp should not be null."); - ctx.Output("Y")->Resize( + ctx.Output("Y")->Resize( ctx.Input("X")->dims()); - ctx.ShareLoD("X", "Y"); + ctx.ShareLoD("X", /*->*/ "Y"); } }; diff --git a/paddle/operators/gather_op.cc b/paddle/operators/gather_op.cc index d445b61c16..fecd1ce214 100644 --- a/paddle/operators/gather_op.cc +++ b/paddle/operators/gather_op.cc @@ -35,7 +35,7 @@ class GatherOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_GE(batch_size, 0, "Batch size must be >0"); framework::DDim output_dims(ctx.Input("X")->dims()); output_dims[0] = batch_size; - ctx.Output("Out")->Resize(output_dims); + ctx.Output("Out")->Resize(output_dims); } }; @@ -45,7 +45,7 @@ class GatherGradOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - auto X_grad = ctx.Output(framework::GradVarName("X")); + auto X_grad = ctx.Output(framework::GradVarName("X")); auto X = ctx.Input("X"); X_grad->Resize(X->dims()); diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc index c0e161bbc0..5b7cbb5cc7 100644 --- a/paddle/operators/gaussian_random_op.cc +++ b/paddle/operators/gaussian_random_op.cc @@ -48,7 +48,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel { ctx.OutputVar("Out"), "Output(Out) of GaussianRandomOp should not be null."); - auto* tensor = ctx.Output("Out"); + auto* tensor = ctx.Output("Out"); auto dims = Attr>("dims"); std::vector temp; temp.reserve(dims.size()); diff --git 
diff --git a/paddle/operators/lookup_table_op.cc b/paddle/operators/lookup_table_op.cc
index 8f533f1cc3..04ac24662e 100644
--- a/paddle/operators/lookup_table_op.cc
+++ b/paddle/operators/lookup_table_op.cc
@@ -32,10 +32,10 @@ class LookupTableOp : public framework::OperatorWithKernel {
 
     auto table_t = ctx.Input<Tensor>("W");
     auto ids_t = ctx.Input<Tensor>("Ids");
-    auto output_t = ctx.Output<framework::LoDTensor>("Out");
+    auto output_t = ctx.Output<framework::Tensor>("Out");
 
     output_t->Resize({ids_t->dims()[0], table_t->dims()[1]});
-    ctx.ShareLoD("Ids", "Out");
+    ctx.ShareLoD("Ids", /*->*/ "Out");
   }
 };
@@ -69,7 +69,7 @@ class LookupTableOpGrad : public framework::OperatorWithKernel {
   void InferShape(const framework::InferShapeContext &context) const override {
     auto table = context.Input<Tensor>("W");
     auto d_table =
-        context.Output<framework::LoDTensor>(framework::GradVarName("W"));
+        context.Output<framework::Tensor>(framework::GradVarName("W"));
     d_table->Resize(table->dims());
   }
 };
diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc
index 96540ff454..b04384bda8 100644
--- a/paddle/operators/mean_op.cc
+++ b/paddle/operators/mean_op.cc
@@ -27,7 +27,7 @@ class MeanOp : public framework::OperatorWithKernel {
                             "Input(X) of MeanOp should not be null.");
     PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
                             "Output(Out) of MeanOp should not be null.");
-    ctx.Output<framework::LoDTensor>("Out")->Resize({1});
+    ctx.Output<framework::Tensor>("Out")->Resize({1});
   }
 };
@@ -48,7 +48,7 @@ class MeanGradOp : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
-    ctx.Output<framework::LoDTensor>(framework::GradVarName("X"))
+    ctx.Output<framework::Tensor>(framework::GradVarName("X"))
         ->Resize(ctx.Input<Tensor>("X")->dims());
   }
 };
diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc
index 5036f9f98a..29cb85489b 100644
--- a/paddle/operators/minus_op.cc
+++ b/paddle/operators/minus_op.cc
@@ -40,8 +40,8 @@ class MinusOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         left_tensor->numel(), right_tensor->numel(),
         "Minus operator must take two tensor with same num of elements");
-    ctx.Output<framework::LoDTensor>("Out")->Resize(left_tensor->dims());
-    ctx.ShareLoD("X", "Out");
+    ctx.Output<framework::Tensor>("Out")->Resize(left_tensor->dims());
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc
index b2409a1870..5303a31501 100644
--- a/paddle/operators/mul_op.cc
+++ b/paddle/operators/mul_op.cc
@@ -53,9 +53,9 @@ class MulOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         x_mat_dims[1], y_mat_dims[0],
         "First matrix's width must be equal with second matrix's height.");
-    ctx.Output<framework::LoDTensor>("Out")->Resize(
+    ctx.Output<framework::Tensor>("Out")->Resize(
         {x_mat_dims[0], y_mat_dims[1]});
-    ctx.ShareLoD("X", "Out");
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
@@ -109,10 +109,8 @@ class MulOpGrad : public framework::OperatorWithKernel {
     auto x_dims = ctx.Input<Tensor>("X")->dims();
     auto y_dims = ctx.Input<Tensor>("Y")->dims();
     auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
-    auto *x_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
-    auto *y_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Y"));
+    auto *x_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
+    auto *y_grad = ctx.Output<framework::Tensor>(framework::GradVarName("Y"));
 
     auto x_mat_dims =
         framework::flatten_to_2d(x_dims, Attr<int>("x_num_col_dims"));
diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc
index 98de18fb9f..375d8a35ac 100644
--- a/paddle/operators/pad_op.cc
+++ b/paddle/operators/pad_op.cc
@@ -39,12 +39,12 @@ class PadOp : public framework::OperatorWithKernel {
     for (int i = 0; i < x_dim.size(); ++i) {
       out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1];
     }
-    ctx.Output<framework::LoDTensor>("Out")->Resize(
+    ctx.Output<framework::Tensor>("Out")->Resize(
         framework::make_ddim(out_dims));
     if (out_dims[0] == x_dim[0]) {
       // Only pass LoD when the first dimension is equal between
       // output and input.
-      ctx.ShareLoD("X", "Out");
+      ctx.ShareLoD("X", /*->*/ "Out");
     }
   }
 };
@@ -106,7 +106,7 @@ class PadOpGrad : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
                             "Input(Out@GRAD) should not be null");
     auto x_dims = ctx.Input<Tensor>("X")->dims();
-    auto *x_g = ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
+    auto *x_g = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
     if (x_g != nullptr) {
       x_g->Resize(x_dims);
     }
diff --git a/paddle/operators/prelu_op.cc b/paddle/operators/prelu_op.cc
index 2b7b82a3e1..912196c190 100644
--- a/paddle/operators/prelu_op.cc
+++ b/paddle/operators/prelu_op.cc
@@ -36,9 +36,9 @@ class PReluOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
                             "Output(Out) should not be null");
-    auto *out = ctx.Output<framework::LoDTensor>("Out");
+    auto *out = ctx.Output<framework::Tensor>("Out");
     out->Resize(in->dims());
-    ctx.ShareLoD("X", "Out");
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
@@ -72,11 +72,11 @@ class PReluGradOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
                             "Input(Out@GRAD) should not be null");
-    auto *dx = ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
+    auto *dx = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
     auto *x = ctx.Input<Tensor>("X");
 
     auto *dalpha =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Alpha"));
+        ctx.Output<framework::Tensor>(framework::GradVarName("Alpha"));
     auto *alpha = ctx.Input<Tensor>("Alpha");
 
     dx->Resize(x->dims());
diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc
index c090758619..ddb93007e2 100644
--- a/paddle/operators/reshape_op.cc
+++ b/paddle/operators/reshape_op.cc
@@ -50,11 +50,11 @@ class ReshapeOp : public framework::OperatorWithKernel {
     std::transform(shape.begin(), shape.end(), shape_int64.begin(),
                    [](int a) { return static_cast<int64_t>(a); });
     auto out_dims = framework::make_ddim(shape_int64);
-    ctx.Output<framework::LoDTensor>("Out")->Resize(out_dims);
+    ctx.Output<framework::Tensor>("Out")->Resize(out_dims);
     if (shape[0] == in->dims()[0]) {
       // Only pass LoD when the first dimension is equal between
       // output and input.
- ctx.ShareLoD("X", "Out"); + ctx.ShareLoD("X", /*->*/ "Out"); } } }; @@ -99,7 +99,7 @@ class ReshapeGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), "Input(Out@GRAD) shouldn't be null."); auto dims = ctx.Input("X")->dims(); - auto *d_in = ctx.Output(framework::GradVarName("X")); + auto *d_in = ctx.Output(framework::GradVarName("X")); d_in->Resize(dims); } }; diff --git a/paddle/operators/rowwise_add_op.cc b/paddle/operators/rowwise_add_op.cc index 90cdb2558b..fc3ad721f2 100644 --- a/paddle/operators/rowwise_add_op.cc +++ b/paddle/operators/rowwise_add_op.cc @@ -44,8 +44,8 @@ class RowwiseAddOp : public framework::OperatorWithKernel { framework::slice_ddim(x_dims, num_col_dims, x_dims.size()), b_dims, "The width of two operands must be same"); PADDLE_ENFORCE_EQ(ctx.OutputSize("Out"), 1, "The output size must be 1"); - ctx.Output("Out")->Resize(x_dims); - ctx.ShareLoD("X", "Out"); + ctx.Output("Out")->Resize(x_dims); + ctx.ShareLoD("X", /*->*/ "Out"); } }; @@ -84,8 +84,8 @@ class RowwiseAddGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ( framework::slice_ddim(x_dims, num_col_dims, x_dims.size()), b_dims, "The width of two operands must be same"); - auto *dx = ctx.Output(framework::GradVarName("X")); - auto *db = ctx.Output(framework::GradVarName("b")); + auto *dx = ctx.Output(framework::GradVarName("X")); + auto *db = ctx.Output(framework::GradVarName("b")); if (dx) dx->Resize(x_dims); if (db) db->Resize(b_dims); } diff --git a/paddle/operators/scale_op.cc b/paddle/operators/scale_op.cc index ca1bc4ac80..3940037c37 100644 --- a/paddle/operators/scale_op.cc +++ b/paddle/operators/scale_op.cc @@ -33,9 +33,9 @@ class ScaleOp : public framework::OperatorWithKernel { "Output(Out) of ScaleOp should not be null."); auto *in = ctx.Input("X"); - auto *out = ctx.Output("Out"); + auto *out = ctx.Output("Out"); out->Resize(in->dims()); - ctx.ShareLoD("X", "Out"); + // ctx.ShareLoD("X", /*->*/ "Out"); } }; diff --git a/paddle/operators/scatter_op.cc b/paddle/operators/scatter_op.cc index 8820262732..3f02081a06 100644 --- a/paddle/operators/scatter_op.cc +++ b/paddle/operators/scatter_op.cc @@ -44,7 +44,7 @@ class ScatterOp : public framework::OperatorWithKernel { framework::DDim data_dim(ctx.Input("Updates")->dims()); for (int i = 1; i < data_dim.size(); ++i) PADDLE_ENFORCE_EQ(data_dim[i], ctx.Input("Updates")->dims()[i]); - ctx.Output("Out")->Resize( + ctx.Output("Out")->Resize( ctx.Input("Ref")->dims()); } }; @@ -56,10 +56,9 @@ class ScatterGradOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { auto *dUpdates = - ctx.Output(framework::GradVarName("Updates")); + ctx.Output(framework::GradVarName("Updates")); auto *Updates = ctx.Input("Updates"); - auto *dRef = - ctx.Output(framework::GradVarName("Ref")); + auto *dRef = ctx.Output(framework::GradVarName("Ref")); auto *Ref = ctx.Input("Ref"); dRef->Resize(Ref->dims()); diff --git a/paddle/operators/sequence_avg_pool_op.cc b/paddle/operators/sequence_avg_pool_op.cc index 9815b8f3a8..11d42ac44e 100644 --- a/paddle/operators/sequence_avg_pool_op.cc +++ b/paddle/operators/sequence_avg_pool_op.cc @@ -38,7 +38,7 @@ class SequenceAvgPoolOp : public framework::OperatorWithKernel { /*batch size = */ static_cast(lod[0].size() - 1), "The first dimension of Input(X) must be large than batch size."); dims[0] = lod[0].size() - 1; - ctx.Output("Out")->Resize({dims}); + ctx.Output("Out")->Resize({dims}); } }; 
@@ -74,8 +74,7 @@ class SequenceAvgPoolGradOp : public framework::OperatorWithKernel {
     for (int64_t i = 1; i < og_dims.size(); ++i) {
       PADDLE_ENFORCE_EQ(og_dims[i], x_dims[i], "The dimension mismatch.");
     }
-    auto* x_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
+    auto* x_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
     x_grad->Resize(x_dims);
   }
 };
diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc
index 1232e64c7f..b063e24272 100644
--- a/paddle/operators/sgd_op.cc
+++ b/paddle/operators/sgd_op.cc
@@ -33,7 +33,7 @@ class SGDOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("param")->dims(),
                       ctx.Input<Tensor>("grad")->dims(),
                       "Two input of SGD Op's dimension must be same.");
-    ctx.Output<framework::LoDTensor>("param_out")
+    ctx.Output<framework::Tensor>("param_out")
         ->Resize(ctx.Input<Tensor>("param")->dims());
   }
 };
diff --git a/paddle/operators/sigmoid_op.cc b/paddle/operators/sigmoid_op.cc
index 42befa22d0..d2a38d1ebe 100644
--- a/paddle/operators/sigmoid_op.cc
+++ b/paddle/operators/sigmoid_op.cc
@@ -28,9 +28,8 @@ class SigmoidOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Y"),
                             "Output(Y) of SigmoidOp should not be null.");
 
-    ctx.Output<framework::LoDTensor>("Y")->Resize(
-        ctx.Input<Tensor>("X")->dims());
-    ctx.ShareLoD("X", "Y");
+    ctx.Output<framework::Tensor>("Y")->Resize(ctx.Input<Tensor>("X")->dims());
+    ctx.ShareLoD("X", /*->*/ "Y");
   }
 };
@@ -51,7 +50,7 @@ class SigmoidOpGrad : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
-    ctx.Output<framework::LoDTensor>(framework::GradVarName("X"))
+    ctx.Output<framework::Tensor>(framework::GradVarName("X"))
         ->Resize(ctx.Input<Tensor>("Y")->dims());
   }
 };
diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc
index c67eb028c8..e15cfe4850 100644
--- a/paddle/operators/softmax_op.cc
+++ b/paddle/operators/softmax_op.cc
@@ -30,8 +30,7 @@ class SoftmaxOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(ctx.Input<Tensor>("X")->dims().size() == 2UL,
                    "The input of softmax op must be a matrix.");
-    ctx.Output<framework::LoDTensor>("Y")->Resize(
-        ctx.Input<Tensor>("X")->dims());
+    ctx.Output<framework::Tensor>("Y")->Resize(ctx.Input<Tensor>("X")->dims());
   }
 };
@@ -77,7 +76,7 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
         ctx.Input<Tensor>(framework::GradVarName("Y"))->dims(),
         "Input(Y) and its gradients should have a same shape.");
 
-    ctx.Output<framework::LoDTensor>(framework::GradVarName("X"))
+    ctx.Output<framework::Tensor>(framework::GradVarName("X"))
         ->Resize(ctx.Input<Tensor>("X")->dims());
   }
 };
diff --git a/paddle/operators/split_op.cc b/paddle/operators/split_op.cc
index 61296f5c81..a9d35b4fb7 100644
--- a/paddle/operators/split_op.cc
+++ b/paddle/operators/split_op.cc
@@ -27,7 +27,7 @@ class SplitOp : public framework::OperatorWithKernel {
   void InferShape(const framework::InferShapeContext &ctx) const override {
     // infershape
     auto *in = ctx.Input<framework::Tensor>("X");
-    auto outs = ctx.MultiOutput<framework::LoDTensor>("Out");
+    auto outs = ctx.MultiOutput<framework::Tensor>("Out");
     size_t axis = static_cast<size_t>(ctx.Attr<int>("axis"));
     size_t num = static_cast<size_t>(ctx.Attr<int>("num"));
     std::vector<int> sections =
diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc
index dfe8e6decd..33a564b05b 100644
--- a/paddle/operators/squared_l2_distance_op.cc
+++ b/paddle/operators/squared_l2_distance_op.cc
@@ -54,10 +54,10 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel {
                    "First dimension of target must be equal to input "
                    "or to 1.");
 
-    ctx.Output<framework::LoDTensor>("sub_result")
+    ctx.Output<framework::Tensor>("sub_result")
         ->Resize({x_dims[0], x->numel() / x_dims[0]});
-    ctx.Output<framework::LoDTensor>("Out")->Resize({x_dims[0], 1});
-    ctx.ShareLoD("X", "Out");
+    ctx.Output<framework::Tensor>("Out")->Resize({x_dims[0], 1});
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
@@ -104,10 +104,8 @@ class SquaredL2DistanceGradOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(out_dims[1], 1,
                       "Second dimension of output gradient "
                       "must be 1.");
-    auto* x_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
-    auto* y_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Y"));
+    auto* x_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
+    auto* y_grad = ctx.Output<framework::Tensor>(framework::GradVarName("Y"));
     if (x_grad) x_grad->Resize(x_dims);
     if (y_grad) y_grad->Resize(y_dims);
   }
diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc
index ebc57d6b7b..437fc262f3 100644
--- a/paddle/operators/sum_op.cc
+++ b/paddle/operators/sum_op.cc
@@ -28,7 +28,7 @@ class SumOp : public framework::OperatorWithKernel {
                             "Output(Out) of SumOp should not be null.");
 
     auto ins = ctx.MultiInput<framework::Tensor>("X");
-    auto *out = ctx.Output<framework::LoDTensor>("Out");
+    auto *out = ctx.Output<framework::Tensor>("Out");
     int N = ins.size();
 
     auto in_dim = ins[0]->dims();
@@ -39,7 +39,7 @@ class SumOp : public framework::OperatorWithKernel {
       PADDLE_ENFORCE(in_dim == dim, "Input tensors must have same shape");
     }
     out->Resize(in_dim);
-    ctx.ShareLoD(ctx.op().Inputs("X")[0], "Out");
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
@@ -65,7 +65,7 @@ class SumGradOp : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
     auto outputs =
-        ctx.MultiOutput<framework::LoDTensor>(framework::GradVarName("X"));
+        ctx.MultiOutput<framework::Tensor>(framework::GradVarName("X"));
     auto dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
     for (auto output : outputs) {
       output->Resize(dims);
diff --git a/paddle/operators/top_k_op.cc b/paddle/operators/top_k_op.cc
index 169b815fef..a6e43964e9 100644
--- a/paddle/operators/top_k_op.cc
+++ b/paddle/operators/top_k_op.cc
@@ -40,8 +40,8 @@ class TopkOp : public framework::OperatorWithKernel {
     framework::DDim dims = input->dims();
     dims[dims.size() - 1] = k;
 
-    ctx.Output<framework::LoDTensor>("Out")->Resize(dims);
-    ctx.Output<framework::LoDTensor>("Indices")->Resize(dims);
+    ctx.Output<framework::Tensor>("Out")->Resize(dims);
+    ctx.Output<framework::Tensor>("Indices")->Resize(dims);
   }
 };
diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc
index 184bcbc29c..17ea48361b 100644
--- a/paddle/operators/uniform_random_op.cc
+++ b/paddle/operators/uniform_random_op.cc
@@ -54,7 +54,7 @@ class UniformRandomOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(Attr<float>("min") < Attr<float>("max"),
                    "uniform_random's min must less then max");
-    auto* tensor = ctx.Output<framework::LoDTensor>("Out");
+    auto* tensor = ctx.Output<framework::Tensor>("Out");
     auto dims = Attr<std::vector<int>>("dims");
     std::vector<int64_t> temp;
     temp.reserve(dims.size());
-- 
GitLab
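Note: the net effect of the patch is that operator InferShape code can stay
LoDTensor-agnostic; it asks for plain framework::Tensor while
InferShapeContext::Output<Tensor> still returns the Variable's LoDTensor
underneath. A minimal sketch of the resulting pattern, using a hypothetical
single-output operator (the op and slot names are illustrative, not from the
patch):

    class MyOp : public framework::OperatorWithKernel {
     protected:
      void InferShape(const framework::InferShapeContext &ctx) const override {
        PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
                                "Input(X) of MyOp should not be null.");
        // Plain Tensor is enough here; the LoD lives on the Variable.
        ctx.Output<framework::Tensor>("Out")->Resize(
            ctx.Input<framework::Tensor>("X")->dims());
        // Propagate the LoD explicitly where the output is sequence-shaped.
        ctx.ShareLoD("X", /*->*/ "Out");
      }
    };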