diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index 8397570d26f06f0238e9c5afc85d721df7679257..9874a7c295d07601cffc776c24f5334df1d8c83a 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -229,6 +229,10 @@ class InferShapeContext {
   InferShapeContext(const OperatorBase& op, const Scope& scope)
       : op_(op), scope_(scope) {}
 
+  const OperatorBase& op() const { return op_; }
+
+  const Scope& scope() const { return scope_; }
+
   size_t InputSize(const std::string& name) const {
     return op_.Inputs(name).size();
   }
@@ -312,6 +316,7 @@ class InferShapeContext {
     return res;
   }
 
+ private:
   const OperatorBase& op_;
   const Scope& scope_;
 };
diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc
index 1d7efb7b9403f7c1c6bdbb27a0258f79ae032f43..f7c9e6b196a9d63c91d83fb6d985472a4e8976c4 100644
--- a/paddle/framework/operator_test.cc
+++ b/paddle/framework/operator_test.cc
@@ -122,10 +122,10 @@ class CPUKernelTest : public OpKernel {
  public:
   void Compute(const ExecutionContext& ctx) const {
     std::cout << "this is cpu kernel" << std::endl;
-    std::cout << ctx.op_.DebugString() << std::endl;
+    std::cout << ctx.op().DebugString() << std::endl;
     cpu_kernel_run_num++;
-    ASSERT_EQ(ctx.op_.Input("x"), "IN1");
-    ASSERT_EQ(ctx.op_.Output("y"), "OUT1");
+    ASSERT_EQ(ctx.op().Input("x"), "IN1");
+    ASSERT_EQ(ctx.op().Output("y"), "OUT1");
   }
 };
 
@@ -148,7 +148,7 @@ class OpKernelTestMultiInputsProtoAndCheckerMaker
 class CPUKernalMultiInputsTest : public OpKernel {
  public:
   void Compute(const ExecutionContext& ctx) const {
-    auto xs = ctx.op_.Inputs("xs");
+    auto xs = ctx.op().Inputs("xs");
     ASSERT_EQ(xs.size(), 3UL);
     ASSERT_EQ(xs[0], "x0");
     ASSERT_EQ(xs[1], "x1");
@@ -172,10 +172,10 @@ class CPUKernalMultiInputsTest : public OpKernel {
     auto outTensor0 = ctx.MultiOutput<Tensor>("ys");
     ASSERT_EQ(outTensor0.size(), 2U);
 
-    auto k = ctx.op_.Input("k");
+    auto k = ctx.op().Input("k");
     ASSERT_EQ(k, "k0");
 
-    auto ys = ctx.op_.Outputs("ys");
+    auto ys = ctx.op().Outputs("ys");
     ASSERT_EQ(ys.size(), 2UL);
     ASSERT_EQ(ys[0], "y0");
     ASSERT_EQ(ys[1], "y1");
diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc
index a85363ad81d2a23e7267026c067f74f8c94c4786..9eb976fed1719f1bb5e51cb38dd4052153fe6851 100644
--- a/paddle/operators/gaussian_random_op.cc
+++ b/paddle/operators/gaussian_random_op.cc
@@ -19,13 +19,13 @@ template <typename T>
 class CPUGaussianRandomKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    float mean = context.op_.GetAttr<float>("mean");
-    float std = context.op_.GetAttr<float>("std");
+    float mean = context.op().GetAttr<float>("mean");
+    float std = context.op().GetAttr<float>("std");
     auto* tensor = context.Output<framework::Tensor>("Out");
     T* data = tensor->mutable_data<T>(context.GetPlace());
 
     unsigned int seed =
-        static_cast<unsigned int>(context.op_.GetAttr<int>("seed"));
+        static_cast<unsigned int>(context.op().GetAttr<int>("seed"));
     std::minstd_rand engine;
     if (seed == 0) {
       seed = std::random_device()();
diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc
index 173cc3850ca9d97200e272ec59d1bd3fe09b5053..5b8b5f6c118cbe213e1783256a940dff6fdccc46 100644
--- a/paddle/operators/mul_op.cc
+++ b/paddle/operators/mul_op.cc
@@ -29,10 +29,10 @@ class MulOp : public framework::OperatorWithKernel {
     auto dim1 = ctx.Input<Tensor>("Y")->dims();
     PADDLE_ENFORCE_EQ(dim0.size(), 2,
                       "input X(%s) should be a tensor with 2 dims, a matrix",
-                      ctx.op_.Input("X"));
+                      ctx.op().Input("X"));
     PADDLE_ENFORCE_EQ(dim1.size(), 2,
                       "input Y(%s) should be a tensor with 2 dims, a matrix",
-                      ctx.op_.Input("Y"));
+                      ctx.op().Input("Y"));
     PADDLE_ENFORCE_EQ(
         dim0[1], dim1[0],
         "First matrix's width must be equal with second matrix's height.");
diff --git a/paddle/operators/scale_op.h b/paddle/operators/scale_op.h
index aea64f1b0428ffe79ba8d90cf79dbfd2b5ef36f4..760b26f5e9ab132be5039ea9edafe9517f357261 100644
--- a/paddle/operators/scale_op.h
+++ b/paddle/operators/scale_op.h
@@ -27,7 +27,7 @@ class ScaleKernel : public framework::OpKernel {
     auto* in = context.Input<framework::Tensor>("X");
     tensor->mutable_data<T>(in->place());
 
-    auto scale = static_cast<T>(context.op_.GetAttr<AttrType>("scale"));
+    auto scale = static_cast<T>(context.op().GetAttr<AttrType>("scale"));
 
     auto eigen_out = framework::EigenVector<T>::Flatten(*tensor);
     auto eigen_in = framework::EigenVector<T>::Flatten(*in);
diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h
index a0b5000ffbf54364e15f87870913926a071fa972..b54a7864b056b332582b7be1fec0e86c571d9717 100644
--- a/paddle/operators/sgd_op.h
+++ b/paddle/operators/sgd_op.h
@@ -31,7 +31,7 @@ class SGDOpKernel : public framework::OpKernel {
     auto param = ctx.Input<Tensor>("param");
     auto grad = ctx.Input<Tensor>("grad");
     auto param_out = ctx.Output<Tensor>("param_out");
-    float lr = ctx.op_.GetAttr<float>("learning_rate");
+    float lr = ctx.op().GetAttr<float>("learning_rate");
 
     param_out->mutable_data<T>(ctx.GetPlace());
 
diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc
index 29491137e6d8b4bfa2d0d07d48ffed1212a6131f..dbff2a1365fbd38f243a1a756f5ab1fe61f09430 100644
--- a/paddle/operators/uniform_random_op.cc
+++ b/paddle/operators/uniform_random_op.cc
@@ -27,15 +27,15 @@ class CPUUniformRandomKernel : public framework::OpKernel {
     auto* tensor = context.Output<framework::Tensor>("Out");
     T* data = tensor->mutable_data<T>(context.GetPlace());
     unsigned int seed =
-        static_cast<unsigned int>(context.op_.GetAttr<int>("seed"));
+        static_cast<unsigned int>(context.op().GetAttr<int>("seed"));
     std::minstd_rand engine;
     if (seed == 0) {
       seed = std::random_device()();
     }
     engine.seed(seed);
     std::uniform_real_distribution<T> dist(
-        static_cast<T>(context.op_.GetAttr<float>("min")),
-        static_cast<T>(context.op_.GetAttr<float>("max")));
+        static_cast<T>(context.op().GetAttr<float>("min")),
+        static_cast<T>(context.op().GetAttr<float>("max")));
     ssize_t size = framework::product(tensor->dims());
     for (ssize_t i = 0; i < size; ++i) {
       data[i] = dist(engine);