diff --git a/paddle/operators/block_expand_op.cc b/paddle/operators/block_expand_op.cc
index 317a43bb7b0b3859352c70bc3f8028adac787bbb..bef82183b8caa6692d1434a3a7da13f8d9656208 100644
--- a/paddle/operators/block_expand_op.cc
+++ b/paddle/operators/block_expand_op.cc
@@ -23,7 +23,6 @@ class BlockExpandOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    using namespace framework;
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input of BlockExpandOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
@@ -142,7 +141,6 @@ class BlockExpandGradOp : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    using namespace framework;
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                    "Input(Out@GRAD) shouldn't be null.");
diff --git a/paddle/operators/block_expand_op.h b/paddle/operators/block_expand_op.h
index 022dc3a1233f8ff0e8e7b61182ecc9de9d550b6c..2e4f0cb6f1d1df92e6392196afb368c234385bb2 100644
--- a/paddle/operators/block_expand_op.h
+++ b/paddle/operators/block_expand_op.h
@@ -23,6 +23,9 @@ namespace paddle {
 namespace operators {
 
+using Tensor = framework::Tensor;
+using LoDTensor = framework::LoDTensor;
+
 inline int get_output_size(int img_size, int block_size, int stride,
                            int padding) {
   return (1 + (img_size + 2 * padding - block_size + stride - 1) / stride);
@@ -32,7 +35,6 @@ template <typename Place, typename T>
 class BlockExpandKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    using namespace framework;
     const Tensor* in = ctx.Input<Tensor>("X");
     LoDTensor* out = ctx.Output<LoDTensor>("Out");
     out->mutable_data<T>(ctx.GetPlace());
@@ -89,11 +91,10 @@ template <typename Place, typename T>
 class BlockExpandGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    using namespace framework;
     auto* in = ctx.Input<Tensor>("X");
     Tensor* d_out =
         const_cast<Tensor*>(ctx.Input<Tensor>(framework::GradVarName("Out")));
-    auto* d_x = ctx.Output<Tensor>(GradVarName("X"));
+    auto* d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
     d_x->mutable_data<T>(ctx.GetPlace());
     auto x_v = framework::EigenVector<T>::Flatten(*d_x);
 