diff --git a/paddle/fluid/operators/benchmark/op_tester.cc b/paddle/fluid/operators/benchmark/op_tester.cc
index 064903c299d947df3c6b42d916fce8dcbd85eebb..fec091255f6391b77cd2858905f3aa2e5dd8baff 100644
--- a/paddle/fluid/operators/benchmark/op_tester.cc
+++ b/paddle/fluid/operators/benchmark/op_tester.cc
@@ -42,8 +42,8 @@ void OpTester::Init(const OpTesterConfig &config) {
   // Initialize the OpDesc
   if (op_desc_info.Has(config_.op_type)) {
     type_ = config_.op_type;
-    op_desc_.SetType(config_.op_type);
 
+    CreateOpDesc();
     CreateInputVarDesc();
     CreateOutputVarDesc();
   } else {
@@ -131,6 +131,40 @@ std::vector<std::string> OpTester::GetOpProtoOutputNames() {
   return output_names;
 }
 
+std::unordered_map<std::string, framework::proto::AttrType>
+OpTester::GetOpProtoAttrNames() {
+  std::unordered_map<std::string, framework::proto::AttrType> attr_types;
+  const framework::proto::OpProto &proto =
+      framework::OpInfoMap::Instance().Get(type_).Proto();
+  const std::vector<std::string> skipped_attrs = {
+      framework::OpProtoAndCheckerMaker::OpRoleAttrName(),
+      framework::OpProtoAndCheckerMaker::OpRoleVarAttrName(),
+      framework::OpProtoAndCheckerMaker::OpNamescopeAttrName(),
+      framework::OpProtoAndCheckerMaker::OpCreationCallstackAttrName()};
+  for (int i = 0; i != proto.attrs_size(); ++i) {
+    const auto &attr = proto.attrs(i);
+    if (!Has(skipped_attrs, attr.name())) {
+      VLOG(4) << "attr: " << attr.name() << ", type: " << attr.type();
+      attr_types[attr.name()] = attr.type();
+    }
+  }
+  return attr_types;
+}
+
+framework::proto::VarType::Type OpTester::TransToVarType(std::string str) {
+  if (str == "int32") {
+    return framework::proto::VarType::INT32;
+  } else if (str == "int64") {
+    return framework::proto::VarType::INT64;
+  } else if (str == "fp32") {
+    return framework::proto::VarType::FP32;
+  } else if (str == "fp64") {
+    return framework::proto::VarType::FP64;
+  } else {
+    PADDLE_THROW("Unsupported dtype %s.", str.c_str());
+  }
+}
+
 void OpTester::CreateInputVarDesc() {
   std::vector<std::string> input_names = GetOpProtoInputNames();
   for (auto &name : input_names) {
@@ -145,11 +179,11 @@
     // Need to support more type
     var->SetType(framework::proto::VarType::LOD_TENSOR);
     var->SetPersistable(false);
-    var->SetDataType(framework::proto::VarType::FP32);
+    var->SetDataType(TransToVarType(input->dtype));
     var->SetShape(input->dims);
 
     op_desc_.SetInput(name, {var_name});
-    input_lods_[var_name] = input->lod;
+    inputs_[var_name] = *input;
   }
 }
@@ -167,6 +201,49 @@ void OpTester::CreateOutputVarDesc() {
   }
 }
 
+void OpTester::CreateOpDesc() {
+  op_desc_.SetType(config_.op_type);
+  std::unordered_map<std::string, framework::proto::AttrType> attr_types =
+      GetOpProtoAttrNames();
+  for (auto item : config_.attrs) {
+    const std::string &name = item.first;
+    if (attr_types.find(name) == attr_types.end()) {
+      LOG(FATAL) << "Operator " << type_ << " does not have attr " << name;
+    }
+
+    const std::string &value_str = item.second;
+    const framework::proto::AttrType &type = attr_types[name];
+    switch (type) {
+      case framework::proto::AttrType::BOOLEAN:
+        break;
+      case framework::proto::AttrType::INT: {
+        int value = StringTo<int>(value_str);
+        op_desc_.SetAttr(name, {value});
+      } break;
+      case framework::proto::AttrType::FLOAT: {
+        float value = StringTo<float>(value_str);
+        op_desc_.SetAttr(name, {value});
+      } break;
+      case framework::proto::AttrType::STRING: {
+        op_desc_.SetAttr(name, {value_str});
+      } break;
+      case framework::proto::AttrType::BOOLEANS:
+      case framework::proto::AttrType::INTS:
+      case framework::proto::AttrType::FLOATS:
+      case framework::proto::AttrType::STRINGS:
+        LOG(FATAL) << "Not supported yet.";
+        break;
+      case framework::proto::AttrType::LONG: {
+        int64_t value = StringTo<int64_t>(value_str);
+        op_desc_.SetAttr(name, value);
+      } break;
+      case framework::proto::AttrType::LONGS:
+      default:
+        PADDLE_THROW("Unsupported attr type %d", type);
+    }
+  }
+}
+
 framework::VarDesc *OpTester::Var(const std::string &name) {
   auto it = vars_.find(name);
   if (it != vars_.end()) {
@@ -179,24 +256,41 @@
 template <typename T>
 void OpTester::SetupTensor(framework::LoDTensor *tensor,
-                           const std::vector<int64_t> &shape, T lower,
-                           T upper) {
+                           const std::vector<int64_t> &shape, T lower, T upper,
+                           const std::string &initializer) {
   static unsigned int seed = 100;
   std::mt19937 rng(seed++);
   std::uniform_real_distribution<double> uniform_dist(0, 1);
 
   T *ptr = tensor->mutable_data<T>(framework::make_ddim(shape), place_);
-  if (platform::is_cpu_place(place_)) {
-    for (int i = 0; i < tensor->numel(); ++i) {
-      ptr[i] = static_cast<T>(uniform_dist(rng) * (upper - lower) + lower);
-    }
+
+  framework::LoDTensor cpu_tensor;
+  T *cpu_ptr = nullptr;
+
+  if (!platform::is_cpu_place(place_)) {
+    cpu_ptr = cpu_tensor.mutable_data<T>(framework::make_ddim(shape),
+                                         platform::CPUPlace());
   } else {
-    framework::LoDTensor cpu_tensor;
-    T *cpu_ptr = cpu_tensor.mutable_data<T>(framework::make_ddim(shape),
-                                            platform::CPUPlace());
+    cpu_ptr = ptr;
+  }
+
+  if (initializer == "random") {
     for (int i = 0; i < cpu_tensor.numel(); ++i) {
       cpu_ptr[i] = static_cast<T>(uniform_dist(rng) * (upper - lower) + lower);
     }
+  } else if (initializer == "natural") {
+    for (int i = 0; i < cpu_tensor.numel(); ++i) {
+      cpu_ptr[i] = lower + i;
+    }
+  } else if (initializer == "zeros") {
+    for (int i = 0; i < cpu_tensor.numel(); ++i) {
+      cpu_ptr[i] = 0;
+    }
+  } else {
+    PADDLE_THROW("Unsupported initializer %s.", initializer.c_str());
+  }
+
+  if (!platform::is_cpu_place(place_)) {
     TensorCopySync(cpu_tensor, place_, tensor);
   }
 }
@@ -219,7 +313,7 @@ void OpTester::CreateVariables(framework::Scope *scope) {
     }
   }
 
-  for (auto &item : input_lods_) {
+  for (auto &item : inputs_) {
     // Allocate memory for input tensor
     auto &var_name = item.first;
     VLOG(3) << "Allocate memory for tensor " << var_name;
@@ -229,11 +323,23 @@
 
     auto *var = scope->Var(var_name);
     auto *tensor = var->GetMutable<framework::LoDTensor>();
-    SetupTensor<float>(tensor, shape, static_cast<float>(0.0),
-                       static_cast<float>(1.0));
+    const auto &data_type = var_desc->GetDataType();
+    if (data_type == framework::proto::VarType::INT32) {
+      SetupTensor<int>(tensor, shape, 0, 1, item.second.initializer);
+    } else if (data_type == framework::proto::VarType::INT64) {
+      SetupTensor<int64_t>(tensor, shape, 0, 1, item.second.initializer);
+    } else if (data_type == framework::proto::VarType::FP32) {
+      SetupTensor<float>(tensor, shape, static_cast<float>(0.0),
+                         static_cast<float>(1.0), item.second.initializer);
+    } else if (data_type == framework::proto::VarType::FP64) {
+      SetupTensor<double>(tensor, shape, static_cast<double>(0.0),
+                          static_cast<double>(1.0), item.second.initializer);
+    } else {
+      PADDLE_THROW("Unsupported dtype %d.", data_type);
+    }
 
     VLOG(3) << "Set lod for tensor " << var_name;
-    std::vector<std::vector<size_t>> &lod_vec = item.second;
+    std::vector<std::vector<size_t>> &lod_vec = item.second.lod;
     framework::LoD lod;
     for (size_t i = 0; i < lod_vec.size(); ++i) {
       lod.push_back(lod_vec[i]);
@@ -261,7 +367,16 @@ std::string OpTester::DebugString() {
     ss << GenSpaces(count) << "type: LOD_TENSOR\n";
     ss << GenSpaces(count++) << "lod_tensor {\n";
     ss << GenSpaces(count++) << "tensor {\n";
-    ss << GenSpaces(count) << "data_type: FP32\n";
+    const auto &data_type = var->GetDataType();
+    if (data_type == framework::proto::VarType::INT32) {
+      ss << GenSpaces(count) << "data_type: INT32\n";
+    } else if (data_type == framework::proto::VarType::INT64) {
+      ss << GenSpaces(count) << "data_type: INT64\n";
+    } else if (data_type == framework::proto::VarType::FP32) {
+      ss << GenSpaces(count) << "data_type: FP32\n";
+    } else if (data_type == framework::proto::VarType::FP64) {
+      ss << GenSpaces(count) << "data_type: FP64\n";
+    }
     std::vector<int64_t> shape = var->GetShape();
     for (auto d : shape) {
       ss << GenSpaces(count) << "dims: " << d << "\n";
@@ -288,6 +403,63 @@
     ss << GenSpaces(--count) << "}\n";
   }
   ss << GenSpaces(count) << "type: " << op_desc_.Type() << "\n";
+  for (auto &name : op_desc_.AttrNames()) {
+    ss << GenSpaces(count++) << "attrs {\n";
+    const auto &attr_type = op_desc_.GetAttrType(name);
+    const auto &attr = op_desc_.GetAttr(name);
+    ss << GenSpaces(count) << "name: \"" << name << "\"\n";
+    switch (attr_type) {
+      case framework::proto::AttrType::BOOLEAN: {
+        ss << GenSpaces(count) << "type: BOOLEAN\n";
+        ss << GenSpaces(count) << "b: " << boost::get<bool>(attr) << "\n";
+      } break;
+      case framework::proto::AttrType::INT: {
+        ss << GenSpaces(count) << "type: INT\n";
+        ss << GenSpaces(count) << "i: " << boost::get<int>(attr) << "\n";
+      } break;
+      case framework::proto::AttrType::FLOAT: {
+        ss << GenSpaces(count) << "type: FLOAT\n";
+        ss << GenSpaces(count) << "f: " << boost::get<float>(attr) << "\n";
+      } break;
+      case framework::proto::AttrType::STRING: {
+        ss << GenSpaces(count) << "type: STRING\n";
+        ss << GenSpaces(count) << "s: \"" << boost::get<std::string>(attr)
+           << "\"\n";
+      } break;
+      case framework::proto::AttrType::BOOLEANS: {
+        ss << GenSpaces(count) << "type: BOOLEANS\n";
+        ss << GenSpaces(count) << "bools: "
+           << "\n";
+      } break;
+      case framework::proto::AttrType::INTS: {
+        ss << GenSpaces(count) << "type: INTS\n";
+        ss << GenSpaces(count) << "ints: "
+           << "\n";
+      } break;
+      case framework::proto::AttrType::FLOATS: {
+        ss << GenSpaces(count) << "type: FLOATS\n";
+        ss << GenSpaces(count) << "floats: "
+           << "\n";
+      } break;
+      case framework::proto::AttrType::STRINGS: {
+        ss << GenSpaces(count) << "type: STRINGS\n";
+        ss << GenSpaces(count) << "strings: "
+           << "\n";
+      } break;
+      case framework::proto::AttrType::LONG: {
+        ss << GenSpaces(count) << "type: LONG\n";
+        ss << GenSpaces(count) << "l: " << boost::get<int64_t>(attr) << "\n";
+      } break;
+      case framework::proto::AttrType::LONGS: {
+        ss << GenSpaces(count) << "type: LONGS\n";
+        ss << GenSpaces(count) << "longs: "
+           << "\n";
+      } break;
+      default:
+        PADDLE_THROW("Unsupported attr type %d", attr_type);
+    }
+    ss << GenSpaces(--count) << "}\n";
+  }
   ss << GenSpaces(--count) << "}\n";
   return ss.str();
 }
@@ -299,6 +471,7 @@ TEST(op_tester, base) {
                  FLAGS_op_config_list.c_str());
   std::vector<OpTesterConfig> op_configs;
   while (!fin.eof()) {
+    VLOG(4) << "Reading config " << op_configs.size() << "...";
     OpTesterConfig config;
     bool result = config.Init(fin);
     if (result) {
diff --git a/paddle/fluid/operators/benchmark/op_tester.h b/paddle/fluid/operators/benchmark/op_tester.h
index 8f150b23ad783acdfd203d471d578ab6aae71494..328389293c4b71a2f1fefbc3bf26fd46b79ec6e2 100644
--- a/paddle/fluid/operators/benchmark/op_tester.h
+++ b/paddle/fluid/operators/benchmark/op_tester.h
@@ -14,7 +14,9 @@ limitations under the License. */
 
 #pragma once
 
+#include <memory>
 #include <string>
+#include <unordered_map>
 #include <vector>
 #include "paddle/fluid/framework/ddim.h"
 #include "paddle/fluid/framework/op_desc.h"
@@ -39,16 +41,21 @@ class OpTester {
  private:
   std::vector<std::string> GetOpProtoInputNames();
   std::vector<std::string> GetOpProtoOutputNames();
+  std::unordered_map<std::string, framework::proto::AttrType>
+  GetOpProtoAttrNames();
+  framework::proto::VarType::Type TransToVarType(std::string str);
 
   void CreateInputVarDesc();
   void CreateOutputVarDesc();
+  void CreateOpDesc();
 
   framework::VarDesc *Var(const std::string &name);
   void CreateVariables(framework::Scope *scope);
 
   template <typename T>
   void SetupTensor(framework::LoDTensor *input,
-                   const std::vector<int64_t> &shape, T lower, T upper);
+                   const std::vector<int64_t> &shape, T lower, T upper,
+                   const std::string &initializer);
 
   void RunImpl();
@@ -57,7 +64,7 @@
   std::string type_;
   framework::OpDesc op_desc_;
   std::unordered_map<std::string, std::unique_ptr<framework::VarDesc>> vars_;
-  std::unordered_map<std::string, std::vector<std::vector<size_t>>> input_lods_;
+  std::unordered_map<std::string, OpInputConfig> inputs_;
   std::unique_ptr<framework::OperatorBase> op_;
   platform::Place place_;
   std::unique_ptr<framework::Scope> scope_;
diff --git a/paddle/fluid/operators/benchmark/op_tester_config.cc b/paddle/fluid/operators/benchmark/op_tester_config.cc
index 8336804ec07d2b7b176f55ad4113452086296494..b4878ab04244cf6b54d323943fc1fbf4e3882660 100644
--- a/paddle/fluid/operators/benchmark/op_tester_config.cc
+++ b/paddle/fluid/operators/benchmark/op_tester_config.cc
@@ -14,7 +14,6 @@ limitations under the License. */
 
 #include "paddle/fluid/operators/benchmark/op_tester_config.h"
 #include <fstream>
-#include "glog/logging.h"
 #include "paddle/fluid/platform/enforce.h"
 
 namespace paddle {
@@ -40,6 +39,62 @@ static void EraseEndSep(std::string* str,
   }
 }
 
+OpInputConfig::OpInputConfig(std::istream& is) {
+  std::string sep;
+  is >> sep;
+  if (sep == kStartSeparator) {
+    while (sep != kEndSeparator) {
+      is >> sep;
+      if (sep == "name" || sep == "name:") {
+        is >> name;
+        EraseEndSep(&name);
+      } else if (sep == "dtype" || sep == "dtype:") {
+        ParseDType(is);
+      } else if (sep == "initializer" || sep == "initializer:") {
+        ParseInitializer(is);
+      } else if (sep == "dims" || sep == "dims:") {
+        ParseDims(is);
+      } else if (sep == "lod" || sep == "lod:") {
+        ParseLoD(is);
+      }
+    }
+  }
+}
+
+void OpInputConfig::ParseDType(std::istream& is) {
+  std::string dtype_str;
+  is >> dtype_str;
+  EraseEndSep(&dtype_str);
+
+  if (dtype_str == "int32" || dtype_str == "int") {
+    dtype = "int32";
+  } else if (dtype_str == "int64" || dtype_str == "long") {
+    dtype = "int64";
+  } else if (dtype_str == "fp32" || dtype_str == "float") {
+    dtype = "fp32";
+  } else if (dtype_str == "fp64" || dtype_str == "double") {
+    dtype = "fp64";
+  } else {
+    PADDLE_THROW("Unsupported dtype %s", dtype_str.c_str());
+  }
+  VLOG(4) << "dtype of input " << name << " is: " << dtype;
+}
+
+void OpInputConfig::ParseInitializer(std::istream& is) {
+  std::string initializer_str;
+  is >> initializer_str;
+  EraseEndSep(&initializer_str);
+
+  const std::vector<std::string> supported_initializers = {"random", "natural",
+                                                           "zeros"};
+  if (!Has(supported_initializers, initializer_str)) {
+    PADDLE_THROW("Unsupported initializer %s", initializer_str.c_str());
+  }
+
+  initializer = initializer_str;
+  VLOG(4) << "initializer of input " << name << " is: " << initializer;
+}
+
 void OpInputConfig::ParseDims(std::istream& is) {
   std::string dims_str;
   is >> dims_str;
@@ -84,7 +139,7 @@ void OpInputConfig::ParseLoD(std::istream& is) {
         number += lod_str[i];
         ++i;
       }
-      level.push_back(atoi(number.c_str()));
+      level.push_back(StringTo<size_t>(number));
     }
     lod.push_back(level);
   } else if (lod_str[i] == '}') {
@@ -93,24 +148,6 @@ void OpInputConfig::ParseLoD(std::istream& is) {
   }
 }
 
-OpInputConfig::OpInputConfig(std::istream& is) {
-  std::string sep;
-  is >> sep;
-  if (sep == kStartSeparator) {
-    while (sep != kEndSeparator) {
-      is >> sep;
-      if (sep == "name" || sep == "name:") {
-        is >> name;
-        EraseEndSep(&name);
-      } else if (sep == "dims" || sep == "dims:") {
-        ParseDims(is);
-      } else if (sep == "lod" || sep == "lod:") {
-        ParseLoD(is);
-      }
-    }
-  }
-}
-
 OpTesterConfig::OpTesterConfig(const std::string& filename) {
   std::ifstream fin(filename, std::ios::in | std::ios::binary);
   PADDLE_ENFORCE(static_cast<bool>(fin), "Cannot open file %s",
@@ -167,6 +204,7 @@ bool OpTesterConfig::ParseAttrs(std::istream& is) {
     is >> value;
     EraseEndSep(&key, ":");
     EraseEndSep(&value);
+    VLOG(4) << "attrs: " << key << ", " << value;
 
     attrs[key] = value;
   }
diff --git a/paddle/fluid/operators/benchmark/op_tester_config.h b/paddle/fluid/operators/benchmark/op_tester_config.h
index c2ff6dafc053eb7202a686954d53ae6f3d62d02e..5803f82ac28867a481875c2af607290c5d366146 100644
--- a/paddle/fluid/operators/benchmark/op_tester_config.h
+++ b/paddle/fluid/operators/benchmark/op_tester_config.h
@@ -15,6 +15,7 @@ limitations under the License. */
 #pragma once
 
 #include <istream>
+#include <sstream>
 #include <string>
 #include <unordered_map>
 #include <vector>
@@ -27,10 +28,14 @@ struct OpInputConfig {
   OpInputConfig() {}
   explicit OpInputConfig(std::istream& is);
 
+  void ParseDType(std::istream& is);
+  void ParseInitializer(std::istream& is);
   void ParseDims(std::istream& is);
   void ParseLoD(std::istream& is);
 
   std::string name;
+  std::string dtype{"fp32"};  // int32/int, int64/long, fp32/float, fp64/double
+  std::string initializer{"random"};  // random, natural, zeros
   std::vector<int64_t> dims;
   std::vector<std::vector<size_t>> lod;
 };
@@ -55,6 +60,23 @@
   double runtime{0.0};
 };
 
+static bool Has(const std::vector<std::string>& vec, const std::string& item) {
+  for (size_t i = 0; i < vec.size(); ++i) {
+    if (vec[i] == item) {
+      return true;
+    }
+  }
+  return false;
+}
+
+template <typename T>
+T StringTo(const std::string& str) {
+  std::istringstream is(str);
+  T value;
+  is >> value;
+  return value;
+}
+
 }  // namespace benchmark
 }  // namespace operators
 }  // namespace paddle
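
Usage sketch (illustrative; not part of the patch): with this change, an input block in the file passed via --op_config_list can pin a per-input dtype and initializer, falling back to the defaults declared in OpInputConfig (dtype "fp32", initializer "random"). A minimal input block might look as follows, assuming the whitespace-separated "key: value" token grammar consumed by OpInputConfig(std::istream&) above; the enclosing "input" keyword and the exact kStartSeparator/kEndSeparator/EraseEndSep conventions are defined outside these hunks, so treat the layout as a sketch rather than the authoritative syntax:

    input {
      name: X
      dtype: fp32
      initializer: natural
      dims: [64, 32]
    }

ParseDType() accepts int32/int, int64/long, fp32/float and fp64/double; ParseInitializer() accepts random, natural and zeros; anything else fails with PADDLE_THROW. CreateVariables() then dispatches on the resulting VarDesc data type to the matching SetupTensor<T> instantiation, so an int64 input with initializer "natural" is filled with 0, 1, 2, ... before the benchmark runs.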