diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc
index b43f6a8cc56fdd2dc483bef303cf1213b171a5e4..0e2fb27b653e88846c71a025e694bfe3d4613641 100644
--- a/paddle/framework/op_registry_test.cc
+++ b/paddle/framework/op_registry_test.cc
@@ -80,7 +80,7 @@ TEST(OpRegistry, CreateOp) {
   paddle::framework::Scope scope;
   paddle::platform::CPUDeviceContext dev_ctx;
   op->Run(scope, dev_ctx);
-  float scale_get = op->GetAttr<float>("scale");
+  float scale_get = op->Attr<float>("scale");
   ASSERT_EQ(scale_get, scale);
 }
 
@@ -121,7 +121,7 @@ TEST(OpRegistry, DefaultValue) {
   paddle::framework::Scope scope;
   paddle::platform::CPUDeviceContext dev_ctx;
   op->Run(scope, dev_ctx);
-  ASSERT_EQ(op->GetAttr<float>("scale"), 1.0);
+  ASSERT_EQ(op->Attr<float>("scale"), 1.0);
 }
 
 TEST(OpRegistry, CustomChecker) {
@@ -172,6 +172,6 @@ TEST(OpRegistry, CustomChecker) {
   paddle::platform::CPUDeviceContext dev_ctx;
   paddle::framework::Scope scope;
   op->Run(scope, dev_ctx);
-  int test_attr = op->GetAttr<int>("test_attr");
+  int test_attr = op->Attr<int>("test_attr");
   ASSERT_EQ(test_attr, 4);
 }
\ No newline at end of file
diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index da92220b04e313e4743cc77241755b685d0791ad..9a98d4d3be0d1cb875d614b263f1e4365ede4113 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -69,7 +69,7 @@ class OperatorBase {
   virtual ~OperatorBase() {}
 
   template <typename T>
-  inline const T& GetAttr(const std::string& name) const {
+  inline const T& Attr(const std::string& name) const {
     PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should be in AttributeMap",
                    name);
     return boost::get<T>(attrs_.at(name));
@@ -238,8 +238,8 @@ class InferShapeContext {
   const Scope& scope() const { return scope_; }
 
   template <typename T>
-  inline const T& GetAttr(const std::string& name) const {
-    return op_.GetAttr<T>(name);
+  inline const T& Attr(const std::string& name) const {
+    return op_.Attr<T>(name);
   }
 
   size_t InputSize(const std::string& name) const {
diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc
index 8bb61275badfccec49953015a47b87b0879153bf..6574880c0eb6324b2dd175e39a364d2ef46e735e 100644
--- a/paddle/operators/gaussian_random_op.cc
+++ b/paddle/operators/gaussian_random_op.cc
@@ -19,12 +19,12 @@ template <typename T>
 class CPUGaussianRandomKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    float mean = context.GetAttr<float>("mean");
-    float std = context.GetAttr<float>("std");
+    float mean = context.Attr<float>("mean");
+    float std = context.Attr<float>("std");
     auto* tensor = context.Output<framework::Tensor>("Out");
     T* data = tensor->mutable_data<T>(context.GetPlace());
 
-    unsigned int seed = static_cast<unsigned int>(context.GetAttr<int>("seed"));
+    unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
     std::minstd_rand engine;
     if (seed == 0) {
       seed = std::random_device()();
@@ -45,7 +45,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext& context) const override {
     auto* tensor = context.Output<framework::Tensor>("Out");
-    auto dims = GetAttr<std::vector<int>>("dims");
+    auto dims = Attr<std::vector<int>>("dims");
     std::vector<int64_t> temp;
     temp.reserve(dims.size());
     for (auto dim : dims) {
diff --git a/paddle/operators/gaussian_random_op.cu b/paddle/operators/gaussian_random_op.cu
index 833a82bbf293a0892531283dc681ca2edd72f6a1..d9dbc1dcfe6a6676938d64be93c879ea69148018 100644
--- a/paddle/operators/gaussian_random_op.cu
+++ b/paddle/operators/gaussian_random_op.cu
@@ -42,13 +42,13 @@ class GPUGaussianRandomKernel : public framework::OpKernel {
   void Compute(const framework::ExecutionContext& context) const override {
     auto* tensor = context.Output<framework::Tensor>("Out");
     T* data = tensor->mutable_data<T>(context.GetPlace());
-    unsigned int seed = static_cast<unsigned int>(context.GetAttr<int>("seed"));
+    unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
     if (seed == 0) {
       std::random_device rd;
       seed = rd();
     }
-    T mean = static_cast<T>(context.GetAttr<float>("mean"));
-    T std = static_cast<T>(context.GetAttr<float>("std"));
+    T mean = static_cast<T>(context.Attr<float>("mean"));
+    T std = static_cast<T>(context.Attr<float>("std"));
     thrust::counting_iterator<unsigned int> index_sequence_begin(0);
     ssize_t N = framework::product(tensor->dims());
     thrust::transform(index_sequence_begin, index_sequence_begin + N,
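The operator.h hunks above are the core of the patch: the templated accessor on OperatorBase and InferShapeContext is renamed from GetAttr to Attr, and every call site follows suit. A minimal sketch of the new call style follows; MyKernel and its "scale" and "dims" attributes are hypothetical, not part of this patch:

    // Hypothetical kernel illustrating the renamed accessor.
    class MyKernel : public framework::OpKernel {
     public:
      void Compute(const framework::ExecutionContext& ctx) const override {
        // Attr<T> replaces GetAttr<T>; the template argument must match the
        // attribute's declared type, or the boost::get inside
        // OperatorBase::Attr will throw.
        float scale = ctx.Attr<float>("scale");
        std::vector<int> dims = ctx.Attr<std::vector<int>>("dims");
        (void)scale;
        (void)dims;
      }
    };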
diff --git a/paddle/operators/rnn/recurrent_op_utils.cc b/paddle/operators/rnn/recurrent_op_utils.cc
index 69e723b4019fe553426bafbf02b3334ea4acfcf1..97872c67ac99fbf6c9c177d52f1d4069163e8548 100644
--- a/paddle/operators/rnn/recurrent_op_utils.cc
+++ b/paddle/operators/rnn/recurrent_op_utils.cc
@@ -109,7 +109,7 @@ void InitArgument(const ArgumentName& name, Argument* arg,
   arg->step_scopes = op.Output(name.step_scopes);
 
   auto inlinks = op.Inputs(name.inlinks);
-  auto inlink_alias = op.GetAttr<std::vector<std::string>>(name.inlink_alias);
+  auto inlink_alias = op.Attr<std::vector<std::string>>(name.inlink_alias);
   PADDLE_ENFORCE(inlinks.size() == inlink_alias.size(),
                  "the size of inlinks and inlink_alias don't match:%d,%d",
                  inlinks.size(), inlink_alias.size());
@@ -121,7 +121,7 @@ void InitArgument(const ArgumentName& name, Argument* arg,
   }
 
   auto outlinks = op.Outputs(name.outlinks);
-  auto outlink_alias = op.GetAttr<std::vector<std::string>>(name.outlink_alias);
+  auto outlink_alias = op.Attr<std::vector<std::string>>(name.outlink_alias);
   PADDLE_ENFORCE(outlinks.size() == outlink_alias.size(),
                  "the size of outlinks and outlink_alias don't match:%d,%d",
                  outlinks.size(), outlink_alias.size());
@@ -135,8 +135,8 @@ void InitArgument(const ArgumentName& name, Argument* arg,
   auto boot_memories = op.Inputs(name.boot_memories);
 
   // attributes
-  auto memories = op.GetAttr<std::vector<std::string>>(name.memories);
-  auto pre_memories = op.GetAttr<std::vector<std::string>>(name.pre_memories);
+  auto memories = op.Attr<std::vector<std::string>>(name.memories);
+  auto pre_memories = op.Attr<std::vector<std::string>>(name.pre_memories);
 
   PADDLE_ENFORCE(memories.size() == boot_memories.size(),
                  "the size of memories, boot_memories don't match:%d,%d",
diff --git a/paddle/operators/scale_op.cc b/paddle/operators/scale_op.cc
index 005152ed71f79a5a592b942ebe7ce1d460892a55..3d82b345829b0a554a204ada91c807e42b71dc58 100644
--- a/paddle/operators/scale_op.cc
+++ b/paddle/operators/scale_op.cc
@@ -60,7 +60,7 @@ class ScaleGradOp : public NetOp {
     AppendOp(framework::OpRegistry::CreateOp(
         "scale", {{"X", {Input(framework::GradVarName("Out"))}}},
         {{"Out", {Output(framework::GradVarName("X"))}}},
-        {{"scale", GetAttr<AttrType>("scale")}}));
+        {{"scale", Attr<AttrType>("scale")}}));
     CompleteAddOp(false);
   }
 };
diff --git a/paddle/operators/scale_op.h b/paddle/operators/scale_op.h
index 65fb77eefad812fa52ac053b791ba1b8f480375f..02fbdc52bbf89c9f2acc5eeaa1197e4ccbca9d31 100644
--- a/paddle/operators/scale_op.h
+++ b/paddle/operators/scale_op.h
@@ -27,7 +27,7 @@ class ScaleKernel : public framework::OpKernel {
     auto* in = context.Input<framework::Tensor>("X");
     tensor->mutable_data<T>(in->place());
 
-    auto scale = static_cast<T>(context.GetAttr<AttrType>("scale"));
+    auto scale = static_cast<T>(context.Attr<AttrType>("scale"));
 
     auto eigen_out = framework::EigenVector<T>::Flatten(*tensor);
     auto eigen_in = framework::EigenVector<T>::Flatten(*in);
diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h
index 8422b622ee54ba76fb98b7dacfa9618031c1c88c..f8888f9c362e1c39af42236bb3a23be37aa3ae15 100644
--- a/paddle/operators/sgd_op.h
+++ b/paddle/operators/sgd_op.h
@@ -31,7 +31,7 @@ class SGDOpKernel : public framework::OpKernel {
     auto param = ctx.Input<Tensor>("param");
     auto grad = ctx.Input<Tensor>("grad");
     auto param_out = ctx.Output<Tensor>("param_out");
-    float lr = ctx.GetAttr<float>("learning_rate");
+    float lr = ctx.Attr<float>("learning_rate");
 
     param_out->mutable_data<T>(ctx.GetPlace());
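The random kernels touched by this patch all share one seed convention: the "seed" attribute is an int cast to unsigned int, and a value of 0 means "pick a nondeterministic seed". A self-contained sketch of that pattern in plain C++, with a function name of my own choosing:

    #include <random>

    // Sketch of the seed convention used by the random kernels above:
    // seed == 0 requests a nondeterministic run, so fall back to
    // std::random_device; any nonzero seed gives reproducible output.
    std::minstd_rand MakeEngine(unsigned int seed) {
      if (seed == 0) {
        seed = std::random_device()();
      }
      std::minstd_rand engine;
      engine.seed(seed);
      return engine;
    }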
diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc
index 40cef8942a3648af5629e5a5db0f021ae3d6f1c1..f2aeef6c310df8535e67fa3906301a87f8ec4694 100644
--- a/paddle/operators/uniform_random_op.cc
+++ b/paddle/operators/uniform_random_op.cc
@@ -26,15 +26,15 @@ class CPUUniformRandomKernel : public framework::OpKernel {
   void Compute(const framework::ExecutionContext& context) const override {
     auto* tensor = context.Output<framework::Tensor>("Out");
     T* data = tensor->mutable_data<T>(context.GetPlace());
-    unsigned int seed = static_cast<unsigned int>(context.GetAttr<int>("seed"));
+    unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
     std::minstd_rand engine;
     if (seed == 0) {
       seed = std::random_device()();
     }
     engine.seed(seed);
     std::uniform_real_distribution<T> dist(
-        static_cast<T>(context.GetAttr<float>("min")),
-        static_cast<T>(context.GetAttr<float>("max")));
+        static_cast<T>(context.Attr<float>("min")),
+        static_cast<T>(context.Attr<float>("max")));
     int64_t size = framework::product(tensor->dims());
     for (int64_t i = 0; i < size; ++i) {
       data[i] = dist(engine);
@@ -48,10 +48,10 @@ class UniformRandomOp : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(const framework::InferShapeContext& ctx) const override {
-    PADDLE_ENFORCE(GetAttr<float>("min") < GetAttr<float>("max"),
+    PADDLE_ENFORCE(Attr<float>("min") < Attr<float>("max"),
                    "uniform_random's min must less then max");
     auto* tensor = ctx.Output<framework::Tensor>("Out");
-    auto dims = GetAttr<std::vector<int>>("dims");
+    auto dims = Attr<std::vector<int>>("dims");
     std::vector<int64_t> temp;
     temp.reserve(dims.size());
     for (auto dim : dims) {
diff --git a/paddle/operators/uniform_random_op.cu b/paddle/operators/uniform_random_op.cu
index df993c07794b0b2408e4edc8a45fae9a17aef01c..c2c041b144b6ca1f019f972e1301b756ec1c9301 100644
--- a/paddle/operators/uniform_random_op.cu
+++ b/paddle/operators/uniform_random_op.cu
@@ -45,13 +45,13 @@ class GPUUniformRandomKernel : public framework::OpKernel {
   void Compute(const framework::ExecutionContext& context) const override {
     auto* tensor = context.Output<framework::Tensor>("Out");
     T* data = tensor->mutable_data<T>(context.GetPlace());
-    unsigned int seed = static_cast<unsigned int>(context.GetAttr<int>("seed"));
+    unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
     if (seed == 0) {
       std::random_device rd;
       seed = rd();
     }
-    T min = static_cast<T>(context.GetAttr<float>("min"));
-    T max = static_cast<T>(context.GetAttr<float>("max"));
+    T min = static_cast<T>(context.Attr<float>("min"));
+    T max = static_cast<T>(context.Attr<float>("max"));
     thrust::counting_iterator<unsigned int> index_sequence_begin(0);
     ssize_t N = framework::product(tensor->dims());
     thrust::transform(index_sequence_begin, index_sequence_begin + N,
diff --git a/paddle/platform/cudnn_helper.h b/paddle/platform/cudnn_helper.h
index 24ddf3441caa6e5f45a7b96af26a23ed324dc1b6..2841d2a2dbec5c17ef098a06c976ca01247820f5 100644
--- a/paddle/platform/cudnn_helper.h
+++ b/paddle/platform/cudnn_helper.h
@@ -14,6 +14,7 @@ limitations under the License. */
 
 #pragma once
 
+#include <cudnn.h>
 #include "paddle/platform/dynload/cudnn.h"
 #include "paddle/platform/enforce.h"
 #include "paddle/platform/macros.h"
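For reference, the CPU uniform_random kernel above reduces to the standard-library pattern below. UniformFill is a hypothetical standalone equivalent written for illustration, not code from the patch:

    #include <cstdint>
    #include <random>
    #include <vector>

    // Standalone sketch of the CPU uniform_random logic: seed the engine
    // (0 means nondeterministic), then draw values from [min, max).
    std::vector<float> UniformFill(int64_t size, float min, float max,
                                   unsigned int seed) {
      std::minstd_rand engine(seed != 0 ? seed : std::random_device()());
      std::uniform_real_distribution<float> dist(min, max);
      std::vector<float> out(size);
      for (auto& v : out) {
        v = dist(engine);
      }
      return out;
    }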