From 9de6a4b375270f2e066147bfa92c84b84916ea56 Mon Sep 17 00:00:00 2001
From: Yu Yang
Date: Tue, 5 Sep 2017 22:03:51 -0700
Subject: [PATCH] Change `Op::GetAttr` to `Op::Attr`

Fix #3902
---
 paddle/framework/op_registry_test.cc       |  6 +++---
 paddle/framework/operator.h                |  6 +++---
 paddle/operators/gaussian_random_op.cc     |  8 ++++----
 paddle/operators/gaussian_random_op.cu     |  4 ++--
 paddle/operators/rnn/recurrent_op_utils.cc |  8 ++++----
 paddle/operators/scale_op.cc               |  2 +-
 paddle/operators/scale_op.h                |  2 +-
 paddle/operators/sgd_op.h                  |  2 +-
 paddle/operators/uniform_random_op.cc      | 10 +++++-----
 paddle/operators/uniform_random_op.cu      |  4 ++--
 10 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc
index b43f6a8cc..0e2fb27b6 100644
--- a/paddle/framework/op_registry_test.cc
+++ b/paddle/framework/op_registry_test.cc
@@ -80,7 +80,7 @@ TEST(OpRegistry, CreateOp) {
   paddle::framework::Scope scope;
   paddle::platform::CPUDeviceContext dev_ctx;
   op->Run(scope, dev_ctx);
-  float scale_get = op->GetAttr<float>("scale");
+  float scale_get = op->Attr<float>("scale");
   ASSERT_EQ(scale_get, scale);
 }
 
@@ -121,7 +121,7 @@ TEST(OpRegistry, DefaultValue) {
   paddle::framework::Scope scope;
   paddle::platform::CPUDeviceContext dev_ctx;
   op->Run(scope, dev_ctx);
-  ASSERT_EQ(op->GetAttr<float>("scale"), 1.0);
+  ASSERT_EQ(op->Attr<float>("scale"), 1.0);
 }
 
 TEST(OpRegistry, CustomChecker) {
@@ -172,6 +172,6 @@ TEST(OpRegistry, CustomChecker) {
   paddle::platform::CPUDeviceContext dev_ctx;
   paddle::framework::Scope scope;
   op->Run(scope, dev_ctx);
-  int test_attr = op->GetAttr<int>("test_attr");
+  int test_attr = op->Attr<int>("test_attr");
   ASSERT_EQ(test_attr, 4);
 }
\ No newline at end of file
diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index da92220b0..9a98d4d3b 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -69,7 +69,7 @@ class OperatorBase {
   virtual ~OperatorBase() {}
 
   template <typename T>
-  inline const T& GetAttr(const std::string& name) const {
+  inline const T& Attr(const std::string& name) const {
     PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should be in AttributeMap",
                    name);
     return boost::get<T>(attrs_.at(name));
@@ -238,8 +238,8 @@ class InferShapeContext {
   const Scope& scope() const { return scope_; }
 
   template <typename T>
-  inline const T& GetAttr(const std::string& name) const {
-    return op_.GetAttr<T>(name);
+  inline const T& Attr(const std::string& name) const {
+    return op_.Attr<T>(name);
   }
 
   size_t InputSize(const std::string& name) const {
diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc
index 056447901..193b176c6 100644
--- a/paddle/operators/gaussian_random_op.cc
+++ b/paddle/operators/gaussian_random_op.cc
@@ -19,12 +19,12 @@ template <typename T>
 class CPUGaussianRandomKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    float mean = context.GetAttr<float>("mean");
-    float std = context.GetAttr<float>("std");
+    float mean = context.Attr<float>("mean");
+    float std = context.Attr<float>("std");
     auto* tensor = context.Output<framework::Tensor>("Out");
     T* data = tensor->mutable_data<T>(context.GetPlace());
 
-    unsigned int seed = static_cast<unsigned int>(context.GetAttr<int>("seed"));
+    unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
     std::minstd_rand engine;
     if (seed == 0) {
       seed = std::random_device()();
@@ -45,7 +45,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext& context) const override {
     auto* tensor = context.Output<framework::Tensor>("Out");
-    auto dims = GetAttr<std::vector<int>>("dims");
+    auto dims = Attr<std::vector<int>>("dims");
     PADDLE_ENFORCE(dims.size() > 0UL,
                    "dims can be one int or array. dims must be set.");
     tensor->Resize(framework::make_ddim(dims));
diff --git a/paddle/operators/gaussian_random_op.cu b/paddle/operators/gaussian_random_op.cu
index 833a82bbf..9788d8940 100644
--- a/paddle/operators/gaussian_random_op.cu
+++ b/paddle/operators/gaussian_random_op.cu
@@ -47,8 +47,8 @@ class GPUGaussianRandomKernel : public framework::OpKernel {
       std::random_device rd;
       seed = rd();
     }
-    T mean = static_cast<T>(context.GetAttr<float>("mean"));
-    T std = static_cast<T>(context.GetAttr<float>("std"));
+    T mean = static_cast<T>(context.Attr<float>("mean"));
+    T std = static_cast<T>(context.Attr<float>("std"));
     thrust::counting_iterator<unsigned int> index_sequence_begin(0);
     ssize_t N = framework::product(tensor->dims());
     thrust::transform(index_sequence_begin, index_sequence_begin + N,
diff --git a/paddle/operators/rnn/recurrent_op_utils.cc b/paddle/operators/rnn/recurrent_op_utils.cc
index a9b65c30f..ddc963faa 100644
--- a/paddle/operators/rnn/recurrent_op_utils.cc
+++ b/paddle/operators/rnn/recurrent_op_utils.cc
@@ -109,7 +109,7 @@ void InitArgument(const ArgumentName& name, Argument* arg,
   arg->step_scopes = op.Output(name.step_scopes);
 
   auto inlinks = op.Inputs(name.inlinks);
-  auto inlink_alias = op.GetAttr<std::vector<std::string>>(name.inlink_alias);
+  auto inlink_alias = op.Attr<std::vector<std::string>>(name.inlink_alias);
   PADDLE_ENFORCE(inlinks.size() == inlink_alias.size(),
                  "the size of inlinks and inlink_alias don't match:%d,%d",
                  inlinks.size(), inlink_alias.size());
@@ -121,7 +121,7 @@ void InitArgument(const ArgumentName& name, Argument* arg,
   }
 
   auto outlinks = op.Outputs(name.outlinks);
-  auto outlink_alias = op.GetAttr<std::vector<std::string>>(name.outlink_alias);
+  auto outlink_alias = op.Attr<std::vector<std::string>>(name.outlink_alias);
   PADDLE_ENFORCE(outlinks.size() == outlink_alias.size(),
                  "the size of outlinks and outlink_alias don't match:%d,%d",
                  outlinks.size(), outlink_alias.size());
@@ -135,8 +135,8 @@ void InitArgument(const ArgumentName& name, Argument* arg,
   auto boot_memories = op.Inputs(name.boot_memories);
 
   // attributes
-  auto memories = op.GetAttr<std::vector<std::string>>(name.memories);
-  auto pre_memories = op.GetAttr<std::vector<std::string>>(name.pre_memories);
+  auto memories = op.Attr<std::vector<std::string>>(name.memories);
+  auto pre_memories = op.Attr<std::vector<std::string>>(name.pre_memories);
 
   PADDLE_ENFORCE(memories.size() == boot_memories.size(),
                  "the size of memories, boot_memories don't match:%d,%d",
diff --git a/paddle/operators/scale_op.cc b/paddle/operators/scale_op.cc
index 8e96a74c9..8d65e2754 100644
--- a/paddle/operators/scale_op.cc
+++ b/paddle/operators/scale_op.cc
@@ -60,7 +60,7 @@ class ScaleGradOp : public NetOp {
     AppendOp(framework::OpRegistry::CreateOp(
         "scale", {{"X", {Input(framework::GradVarName("Out"))}}},
         {{"Out", {Output(framework::GradVarName("X"))}}},
-        {{"scale", GetAttr<AttrType>("scale")}}));
+        {{"scale", Attr<AttrType>("scale")}}));
     CompleteAddOp(false);
   }
 };
diff --git a/paddle/operators/scale_op.h b/paddle/operators/scale_op.h
index 65fb77eef..02fbdc52b 100644
--- a/paddle/operators/scale_op.h
+++ b/paddle/operators/scale_op.h
@@ -27,7 +27,7 @@ class ScaleKernel : public framework::OpKernel {
     auto* in = context.Input<framework::Tensor>("X");
     tensor->mutable_data<T>(in->place());
 
-    auto scale = static_cast<T>(context.GetAttr<AttrType>("scale"));
+    auto scale = static_cast<T>(context.Attr<AttrType>("scale"));
 
     auto eigen_out = framework::EigenVector<T>::Flatten(*tensor);
     auto eigen_in = framework::EigenVector<T>::Flatten(*in);
diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h
index 8422b622e..f8888f9c3 100644
--- a/paddle/operators/sgd_op.h
+++ b/paddle/operators/sgd_op.h
@@ -31,7 +31,7 @@ class SGDOpKernel : public framework::OpKernel {
     auto param = ctx.Input<Tensor>("param");
     auto grad = ctx.Input<Tensor>("grad");
     auto param_out = ctx.Output<Tensor>("param_out");
-    float lr = ctx.GetAttr<float>("learning_rate");
+    float lr = ctx.Attr<float>("learning_rate");
 
     param_out->mutable_data<T>(ctx.GetPlace());
 
diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc
index 2d943c450..8dbc8b064 100644
--- a/paddle/operators/uniform_random_op.cc
+++ b/paddle/operators/uniform_random_op.cc
@@ -26,15 +26,15 @@ class CPUUniformRandomKernel : public framework::OpKernel {
   void Compute(const framework::ExecutionContext& context) const override {
     auto* tensor = context.Output<framework::Tensor>("Out");
     T* data = tensor->mutable_data<T>(context.GetPlace());
-    unsigned int seed = static_cast<unsigned int>(context.GetAttr<int>("seed"));
+    unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
     std::minstd_rand engine;
     if (seed == 0) {
       seed = std::random_device()();
     }
     engine.seed(seed);
     std::uniform_real_distribution<T> dist(
-        static_cast<T>(context.GetAttr<float>("min")),
-        static_cast<T>(context.GetAttr<float>("max")));
+        static_cast<T>(context.Attr<float>("min")),
+        static_cast<T>(context.Attr<float>("max")));
     ssize_t size = framework::product(tensor->dims());
     for (ssize_t i = 0; i < size; ++i) {
       data[i] = dist(engine);
@@ -48,10 +48,10 @@ class UniformRandomOp : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(const framework::InferShapeContext& ctx) const override {
-    PADDLE_ENFORCE(GetAttr<float>("min") < GetAttr<float>("max"),
+    PADDLE_ENFORCE(Attr<float>("min") < Attr<float>("max"),
                    "uniform_random's min must less then max");
     auto* tensor = ctx.Output<framework::Tensor>("Out");
-    auto dims = GetAttr<std::vector<int>>("dims");
+    auto dims = Attr<std::vector<int>>("dims");
     tensor->Resize(framework::make_ddim(dims));
   }
 };
diff --git a/paddle/operators/uniform_random_op.cu b/paddle/operators/uniform_random_op.cu
index df993c077..fc6f856fa 100644
--- a/paddle/operators/uniform_random_op.cu
+++ b/paddle/operators/uniform_random_op.cu
@@ -50,8 +50,8 @@ class GPUUniformRandomKernel : public framework::OpKernel {
       std::random_device rd;
       seed = rd();
     }
-    T min = static_cast<T>(context.GetAttr<float>("min"));
-    T max = static_cast<T>(context.GetAttr<float>("max"));
+    T min = static_cast<T>(context.Attr<float>("min"));
+    T max = static_cast<T>(context.Attr<float>("max"));
     thrust::counting_iterator<unsigned int> index_sequence_begin(0);
     ssize_t N = framework::product(tensor->dims());
     thrust::transform(index_sequence_begin, index_sequence_begin + N,
-- 
GitLab
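
The rename is mechanical: every call site keeps its template argument and
attribute name, only `GetAttr` becomes `Attr`. A minimal before/after sketch,
assuming an `OperatorBase* op` carrying a float "scale" attribute as in the
tests above:

    // Before this patch: read a typed attribute via the old accessor.
    float scale = op->GetAttr<float>("scale");
    // After this patch: identical semantics, shorter accessor name.
    float scale = op->Attr<float>("scale");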