diff --git a/paddle/fluid/framework/framework.proto b/paddle/fluid/framework/framework.proto
index 2545e6c6f186b67ef1d22e7b5714d6f3913716ac..423fe5e69b4be7beb3a9e46ffe09ba31e2b4a98a 100644
--- a/paddle/fluid/framework/framework.proto
+++ b/paddle/fluid/framework/framework.proto
@@ -25,17 +25,17 @@ message Version { optional int64 version = 1 [ default = 0 ]; }
 
 enum AttrType {
   INT = 0;
-  FLOAT = 1;
-  STRING = 2;
-  INTS = 3;
-  FLOATS = 4;
-  STRINGS = 5;
-  BOOLEAN = 6;
-  BOOLEANS = 7;
-  BLOCK = 8;
-  LONG = 9;
-  BLOCKS = 10;
-  LONGS = 11;
+  LONG = 1;
+  FLOAT = 2;
+  STRING = 3;
+  INTS = 4;
+  LONGS = 5;
+  FLOATS = 6;
+  STRINGS = 7;
+  BOOLEAN = 8;
+  BOOLEANS = 9;
+  BLOCK = 10;
+  BLOCKS = 11;
 }
 
 // OpDesc describes an instance of a C++ framework::OperatorBase
@@ -46,17 +46,17 @@ message OpDesc {
     required string name = 1;
     required AttrType type = 2;
    optional int32 i = 3;
-    optional float f = 4;
-    optional string s = 5;
-    repeated int32 ints = 6;
-    repeated float floats = 7;
-    repeated string strings = 8;
-    optional bool b = 10;
-    repeated bool bools = 11;
-    optional int32 block_idx = 12;
-    optional int64 l = 13;
+    optional int64 l = 4;
+    optional float f = 5;
+    optional string s = 6;
+    repeated int32 ints = 7;
+    repeated int64 longs = 8;
+    repeated float floats = 9;
+    repeated string strings = 10;
+    optional bool b = 11;
+    repeated bool bools = 12;
+    optional int32 block_idx = 13;
     repeated int32 blocks_idx = 14;
-    optional int64 longs = 15;
   };
 
   message Var {
diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc
index 7f81fb8641545e88750f118d6e80d1f1ae9fc860..29b00612585cb85ec0a307aaabb08baa18d3f926 100644
--- a/paddle/fluid/framework/op_desc.cc
+++ b/paddle/fluid/framework/op_desc.cc
@@ -415,11 +415,13 @@ struct SetAttrDescVisitor : public boost::static_visitor<void> {
   void operator()(const std::vector<BlockDesc *> &v) const {
     std::vector<int> blocks_idx;
     for (auto blk : v) {
       blocks_idx.push_back(blk->ID());
     }
     VectorToRepeated(blocks_idx, attr_->mutable_blocks_idx());
   }
-  void operator()(BlockDesc *desc) const { attr_->set_block_idx(desc->ID()); }
+  void operator()(BlockDesc *desc) const {
+    attr_->set_block_idx(desc->ID());
+  }
 
   void operator()(int64_t v) const { attr_->set_l(v); }
   void operator()(const std::vector<int64_t> &v) const {
diff --git a/paddle/fluid/framework/type_defs.h b/paddle/fluid/framework/type_defs.h
index 2de6233a9e0d320ec9a06d547db3575eb61925c0..1cbf6c32aba52ea4a3cd83e12c95804bac40e0e6 100644
--- a/paddle/fluid/framework/type_defs.h
+++ b/paddle/fluid/framework/type_defs.h
@@ -33,10 +33,10 @@ using VariableNameMap = std::map<std::string, std::vector<std::string>>;
 
 // The order should be as same as framework.proto
 using Attribute =
-    boost::variant<boost::blank, int, float, std::string, std::vector<int>,
-                   std::vector<float>, std::vector<std::string>, bool,
-                   std::vector<bool>, BlockDesc*, int64_t,
-                   std::vector<BlockDesc*>, std::vector<int64_t>>;
+    boost::variant<boost::blank, int, int64_t, float, std::string,
+                   std::vector<int>, std::vector<int64_t>, std::vector<float>,
+                   std::vector<std::string>, bool, std::vector<bool>,
+                   BlockDesc*, std::vector<BlockDesc*>>;
 
 using AttributeMap = std::unordered_map<std::string, Attribute>;
diff --git a/paddle/fluid/operators/fill_constant_op.cc b/paddle/fluid/operators/fill_constant_op.cc
index e04a68717b351ddb0be5a7e70aa9297e5eb0125f..252f313440296bd9e5eebf26f67b08bbe7decce8 100644
--- a/paddle/fluid/operators/fill_constant_op.cc
+++ b/paddle/fluid/operators/fill_constant_op.cc
@@ -24,7 +24,7 @@ class FillConstantInferShape : public framework::InferShapeBase {
   void operator()(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of FillConstantOp should not be null.");
-    auto &shape = ctx->Attrs().Get<std::vector<int>>("shape");
+    auto &shape = ctx->Attrs().Get<std::vector<int64_t>>("shape");
     ctx->SetOutputDim("Out", framework::make_ddim(shape));
   }
 };
@@ -47,10 +47,10 @@ class FillConstantOp : public framework::OperatorBase {
 
     if (out_var.IsType<framework::LoDTensor>()) {
       tensor = out_var.GetMutable<framework::LoDTensor>();
-      tensor->Resize(framework::make_ddim(Attr<std::vector<int>>("shape")));
+      tensor->Resize(framework::make_ddim(Attr<std::vector<int64_t>>("shape")));
     } else if (out_var.IsType<framework::SelectedRows>()) {
       tensor = out_var.GetMutable<framework::SelectedRows>()->mutable_value();
-      tensor->Resize(framework::make_ddim(Attr<std::vector<int>>("shape")));
+      tensor->Resize(framework::make_ddim(Attr<std::vector<int64_t>>("shape")));
     } else {
       PADDLE_THROW(
           "fill constant op's output only"
@@ -83,7 +83,8 @@ class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker {
                  "(int, default 5 (FP32)) "
                  "Output data type")
         .SetDefault(framework::proto::VarType::FP32);
-    AddAttr<std::vector<int>>("shape", "(vector<int>) The shape of the output");
+    AddAttr<std::vector<int64_t>>("shape",
+                                  "(vector<int64_t>) The shape of the output");
     AddAttr<float>("value", "(float, default 0) The value to be filled")
         .SetDefault(0.0f);
     AddAttr<bool>("force_cpu",
diff --git a/paddle/fluid/operators/gaussian_random_op.cc b/paddle/fluid/operators/gaussian_random_op.cc
index 1488aab1926b5b4ba7bceed582700f5a11fc6c93..c70d5b8bc7569c38cbc003aca7c62dc503df11cf 100644
--- a/paddle/fluid/operators/gaussian_random_op.cc
+++ b/paddle/fluid/operators/gaussian_random_op.cc
@@ -52,7 +52,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of GaussianRandomOp should not be null.");
-    auto shape = ctx->Attrs().Get<std::vector<int>>("shape");
+    auto shape = ctx->Attrs().Get<std::vector<int64_t>>("shape");
     std::vector<int64_t> temp;
     temp.reserve(shape.size());
     for (auto dim : shape) {
@@ -88,9 +88,9 @@ class GaussianRandomOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddOutput("Out", "Output matrix of gaussian random op");
 
-    AddAttr<std::vector<int>>("shape",
-                              "(vector<int>) "
-                              "The dimension of random tensor.");
+    AddAttr<std::vector<int64_t>>("shape",
+                                  "(vector<int64_t>) "
+                                  "The dimension of random tensor.");
     AddAttr<float>("mean",
                    "(float, default 0.0) "
                    "mean of random tensor.")
diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc
index aa907595cb7cf165974caa69fe8eb0370471732d..e3132ae76f624f3338d749e4fcebbd0ecd7ffe79 100644
--- a/paddle/fluid/operators/uniform_random_op.cc
+++ b/paddle/fluid/operators/uniform_random_op.cc
@@ -29,7 +29,7 @@ class CPUUniformRandomKernel : public framework::OpKernel<T> {
     if (out_var->IsType<framework::LoDTensor>()) {
       tensor = out_var->GetMutable<framework::LoDTensor>();
     } else if (out_var->IsType<framework::SelectedRows>()) {
-      auto shape = ctx.Attr<std::vector<int>>("shape");
+      auto shape = ctx.Attr<std::vector<int64_t>>("shape");
       auto *selected_rows = out_var->GetMutable<framework::SelectedRows>();
       tensor = selected_rows->mutable_value();
       tensor->Resize(framework::make_ddim(shape));
@@ -67,7 +67,7 @@ class UniformRandomOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(
         ctx->Attrs().Get<float>("min") < ctx->Attrs().Get<float>("max"),
         "uniform_random's min must less then max");
-    auto &shape = ctx->Attrs().Get<std::vector<int>>("shape");
+    auto &shape = ctx->Attrs().Get<std::vector<int64_t>>("shape");
     std::vector<int64_t> temp;
     temp.reserve(shape.size());
     for (auto dim : shape) {
@@ -94,7 +94,7 @@ This operator initializes a tensor with random values sampled from a
 uniform distribution. The random result is in set [min, max].
 
 )DOC");
-    AddAttr<std::vector<int>>("shape", "The shape of the output tensor");
+    AddAttr<std::vector<int64_t>>("shape", "The shape of the output tensor");
     AddAttr<float>("min", "Minimum value of uniform random. [default -1.0].")
         .SetDefault(-1.0f);
     AddAttr<float>("max", "Maximun value of uniform random. [default 1.0].")
diff --git a/paddle/fluid/pybind/protobuf.cc b/paddle/fluid/pybind/protobuf.cc
index 0edfbfaa875d1258f2c61915d26362d684320941..cbc83106fc36d3e03a05d1172b4574c09178435b 100644
--- a/paddle/fluid/pybind/protobuf.cc
+++ b/paddle/fluid/pybind/protobuf.cc
@@ -259,8 +259,8 @@ void BindOpDesc(pybind11::module *m) {
   pybind11::enum_<pd::proto::AttrType>(*m, "AttrType", "")
       .value("INT", pd::proto::AttrType::INT)
       .value("INTS", pd::proto::AttrType::INTS)
-      .value("LONG", pd::proto::AttrType::FLOAT)
-      .value("LONGS", pd::proto::AttrType::FLOAT)
+      .value("LONG", pd::proto::AttrType::LONG)
+      .value("LONGS", pd::proto::AttrType::LONGS)
       .value("FLOAT", pd::proto::AttrType::FLOAT)
       .value("FLOATS", pd::proto::AttrType::FLOATS)
       .value("STRING", pd::proto::AttrType::STRING)
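
Reviewer note (not part of the patch): the comment kept in type_defs.h, "The order should be as same as framework.proto", is the invariant this change restores, which suggests the AttrType value is derived from the variant's which() index. Below is a minimal standalone sketch of that check. It is not Paddle code: AttrTypeID() is a simplified stand-in for the framework's attribute-type lookup, BlockDesc is left opaque, and the only assumptions are a C++11 compiler and the header-only Boost.Variant library.

// Standalone sanity check: the Attribute variant must keep the same type
// order as the AttrType enum, because the enum value is recovered from
// boost::variant::which() (index 0 is boost::blank, hence the "- 1").
#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

#include <boost/blank.hpp>
#include <boost/variant.hpp>

enum AttrType {
  INT = 0, LONG, FLOAT, STRING, INTS, LONGS,
  FLOATS, STRINGS, BOOLEAN, BOOLEANS, BLOCK, BLOCKS
};

class BlockDesc;  // opaque stand-in for framework::BlockDesc

using Attribute =
    boost::variant<boost::blank, int, int64_t, float, std::string,
                   std::vector<int>, std::vector<int64_t>, std::vector<float>,
                   std::vector<std::string>, bool, std::vector<bool>,
                   BlockDesc*, std::vector<BlockDesc*>>;

// Simplified stand-in for Paddle's attribute-type lookup.
AttrType AttrTypeID(const Attribute& attr) {
  return static_cast<AttrType>(attr.which() - 1);
}

int main() {
  assert(AttrTypeID(Attribute(int64_t{42})) == LONG);
  assert(AttrTypeID(Attribute(std::vector<int64_t>{2, 3})) == LONGS);
  assert(AttrTypeID(Attribute(std::vector<int>{2, 3})) == INTS);
  return 0;
}

Compiled with, for example, g++ -std=c++11 check_attr_order.cc (file name hypothetical), the asserts fire if the enum and the variant ever drift apart again.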