Unverified commit 36e2d324, authored by Yiqun Liu, committed by GitHub

Enhance the op benchmark: (#16066)

- Support setting attr in config
- Support setting dtype and initializer for input in config
test=develop
Parent 9be825a9
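For illustration, one entry in the config file read by these parsers could now look like the sketch below. The op and values are hypothetical; `{` and `}` are assumed to be kStartSeparator and kEndSeparator, and the dims value format follows ParseDims, whose body is not shown in this diff. The field keys (name, dtype, initializer, dims, lod, and key: value pairs under attrs) match the parsers in op_tester_config.cc below.

    op_type: elementwise_add
    input {
      name: X
      dtype: fp32
      initializer: random
      dims: 64x64
    }
    input {
      name: Y
      dtype: fp32
      initializer: zeros
      dims: 64x64
    }
    attrs {
      axis: -1
    }

Unrecognized tokens are skipped by the input parser, and omitted fields keep their defaults (dtype fp32, initializer random).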
@@ -42,8 +42,8 @@ void OpTester::Init(const OpTesterConfig &config) {
// Initialize the OpDesc
if (op_desc_info.Has(config_.op_type)) {
type_ = config_.op_type;
CreateOpDesc();
CreateInputVarDesc();
CreateOutputVarDesc();
} else {
@@ -131,6 +131,40 @@ std::vector<std::string> OpTester::GetOpProtoOutputNames() {
return output_names;
}
std::unordered_map<std::string, framework::proto::AttrType>
OpTester::GetOpProtoAttrNames() {
std::unordered_map<std::string, framework::proto::AttrType> attr_types;
const framework::proto::OpProto &proto =
framework::OpInfoMap::Instance().Get(type_).Proto();
const std::vector<std::string> skipped_attrs = {
framework::OpProtoAndCheckerMaker::OpRoleAttrName(),
framework::OpProtoAndCheckerMaker::OpRoleVarAttrName(),
framework::OpProtoAndCheckerMaker::OpNamescopeAttrName(),
framework::OpProtoAndCheckerMaker::OpCreationCallstackAttrName()};
for (int i = 0; i != proto.attrs_size(); ++i) {
const auto &attr = proto.attrs(i);
if (!Has(skipped_attrs, attr.name())) {
VLOG(4) << "attr: " << attr.name() << ", type: " << attr.type();
attr_types[attr.name()] = attr.type();
}
}
return attr_types;
}
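// Maps a dtype string from the input config (int32/int64/fp32/fp64) to the
// corresponding framework::proto::VarType::Type.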
framework::proto::VarType::Type OpTester::TransToVarType(std::string str) {
if (str == "int32") {
return framework::proto::VarType::INT32;
} else if (str == "int64") {
return framework::proto::VarType::INT64;
} else if (str == "fp32") {
return framework::proto::VarType::FP32;
} else if (str == "fp64") {
return framework::proto::VarType::FP64;
} else {
PADDLE_THROW("Unsupported dtype %s.", str.c_str());
}
}
void OpTester::CreateInputVarDesc() {
std::vector<std::string> input_names = GetOpProtoInputNames();
for (auto &name : input_names) {
@@ -145,11 +179,11 @@ void OpTester::CreateInputVarDesc() {
// Need to support more type
var->SetType(framework::proto::VarType::LOD_TENSOR);
var->SetPersistable(false);
var->SetDataType(TransToVarType(input->dtype));
var->SetShape(input->dims);
op_desc_.SetInput(name, {var_name});
inputs_[var_name] = *input;
}
}
@@ -167,6 +201,49 @@ void OpTester::CreateOutputVarDesc() {
}
}
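// Builds the OpDesc from the config: sets the op type, then converts each
// configured attribute string into the typed value declared in the op proto.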
void OpTester::CreateOpDesc() {
op_desc_.SetType(config_.op_type);
std::unordered_map<std::string, framework::proto::AttrType> attr_types =
GetOpProtoAttrNames();
for (auto item : config_.attrs) {
const std::string &name = item.first;
if (attr_types.find(name) == attr_types.end()) {
LOG(FATAL) << "Operator " << type_ << " do not have attr " << name;
}
const std::string &value_str = item.second;
const framework::proto::AttrType &type = attr_types[name];
switch (type) {
case framework::proto::AttrType::BOOLEAN:
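// BOOLEAN attrs are recognized but not applied yet.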
break;
case framework::proto::AttrType::INT: {
int value = StringTo<int>(value_str);
op_desc_.SetAttr(name, {value});
} break;
case framework::proto::AttrType::FLOAT: {
float value = StringTo<float>(value_str);
op_desc_.SetAttr(name, {value});
} break;
case framework::proto::AttrType::STRING: {
op_desc_.SetAttr(name, {value_str});
} break;
case framework::proto::AttrType::BOOLEANS:
case framework::proto::AttrType::INTS:
case framework::proto::AttrType::FLOATS:
case framework::proto::AttrType::STRINGS:
LOG(FATAL) << "Not supported yet.";
break;
case framework::proto::AttrType::LONG: {
int64_t value = StringTo<int64_t>(value_str);
op_desc_.SetAttr(name, value);
} break;
case framework::proto::AttrType::LONGS:
default:
PADDLE_THROW("Unsupport attr type %d", type);
}
}
}
framework::VarDesc *OpTester::Var(const std::string &name) {
auto it = vars_.find(name);
if (it != vars_.end()) {
@@ -179,24 +256,41 @@ framework::VarDesc *OpTester::Var(const std::string &name) {
template <typename T>
void OpTester::SetupTensor(framework::LoDTensor *tensor,
const std::vector<int64_t> &shape, T lower, T upper,
const std::string &initializer) {
static unsigned int seed = 100;
std::mt19937 rng(seed++);
std::uniform_real_distribution<double> uniform_dist(0, 1);
T *ptr = tensor->mutable_data<T>(framework::make_ddim(shape), place_);
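// Fill the data on CPU first; for a non-CPU place, write into a staging CPU
// tensor and copy it to the target device afterwards.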
framework::LoDTensor cpu_tensor;
T *cpu_ptr = nullptr;
if (!platform::is_cpu_place(place_)) {
cpu_ptr = cpu_tensor.mutable_data<T>(framework::make_ddim(shape),
platform::CPUPlace());
} else {
cpu_ptr = ptr;
}
if (initializer == "random") {
for (int i = 0; i < tensor->numel(); ++i) {
cpu_ptr[i] = static_cast<T>(uniform_dist(rng) * (upper - lower) + lower);
}
} else if (initializer == "natural") {
for (int i = 0; i < tensor->numel(); ++i) {
cpu_ptr[i] = lower + i;
}
} else if (initializer == "zeros") {
for (int i = 0; i < tensor->numel(); ++i) {
cpu_ptr[i] = 0;
}
} else {
PADDLE_THROW("Unsupported initializer %s.", initializer.c_str());
}
if (!platform::is_cpu_place(place_)) {
TensorCopySync(cpu_tensor, place_, tensor);
}
}
@@ -219,7 +313,7 @@ void OpTester::CreateVariables(framework::Scope *scope) {
}
}
for (auto &item : inputs_) {
// Allocate memory for input tensor
auto &var_name = item.first;
VLOG(3) << "Allocate memory for tensor " << var_name;
@@ -229,11 +323,23 @@ void OpTester::CreateVariables(framework::Scope *scope) {
auto *var = scope->Var(var_name);
auto *tensor = var->GetMutable<framework::LoDTensor>();
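// Choose the element type and value range according to the dtype and
// initializer given in the input config.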
const auto &data_type = var_desc->GetDataType();
if (data_type == framework::proto::VarType::INT32) {
SetupTensor<int>(tensor, shape, 0, 1, item.second.initializer);
} else if (data_type == framework::proto::VarType::INT64) {
SetupTensor<int64_t>(tensor, shape, 0, 1, item.second.initializer);
} else if (data_type == framework::proto::VarType::FP32) {
SetupTensor<float>(tensor, shape, static_cast<float>(0.0),
static_cast<float>(1.0), item.second.initializer);
} else if (data_type == framework::proto::VarType::FP64) {
SetupTensor<double>(tensor, shape, static_cast<double>(0.0),
static_cast<double>(1.0), item.second.initializer);
} else {
PADDLE_THROW("Unsupported dtype %d.", data_type);
}
VLOG(3) << "Set lod for tensor " << var_name;
std::vector<std::vector<size_t>> &lod_vec = item.second.lod;
framework::LoD lod;
for (size_t i = 0; i < lod_vec.size(); ++i) {
lod.push_back(lod_vec[i]);
@@ -261,7 +367,16 @@ std::string OpTester::DebugString() {
ss << GenSpaces(count) << "type: LOD_TENSOR\n";
ss << GenSpaces(count++) << "lod_tensor {\n";
ss << GenSpaces(count++) << "tensor {\n";
ss << GenSpaces(count) << "data_type: FP32\n";
const auto &data_type = var->GetDataType();
if (data_type == framework::proto::VarType::INT32) {
ss << GenSpaces(count) << "data_type: INT32\n";
} else if (data_type == framework::proto::VarType::INT64) {
ss << GenSpaces(count) << "data_type: INT64\n";
} else if (data_type == framework::proto::VarType::FP32) {
ss << GenSpaces(count) << "data_type: FP32\n";
} else if (data_type == framework::proto::VarType::FP64) {
ss << GenSpaces(count) << "data_type: FP64\n";
}
std::vector<int64_t> shape = var->GetShape();
for (auto d : shape) {
ss << GenSpaces(count) << "dims: " << d << "\n";
@@ -288,6 +403,63 @@ std::string OpTester::DebugString() {
ss << GenSpaces(--count) << "}\n";
}
ss << GenSpaces(count) << "type: " << op_desc_.Type() << "\n";
for (auto &name : op_desc_.AttrNames()) {
ss << GenSpaces(count++) << "attrs {\n";
const auto &attr_type = op_desc_.GetAttrType(name);
const auto &attr = op_desc_.GetAttr(name);
ss << GenSpaces(count) << "name: \"" << name << "\"\n";
switch (attr_type) {
case framework::proto::AttrType::BOOLEAN: {
ss << GenSpaces(count) << "type: BOOLEAN\n";
ss << GenSpaces(count) << "b: " << boost::get<bool>(attr) << "\n";
} break;
case framework::proto::AttrType::INT: {
ss << GenSpaces(count) << "type: INT\n";
ss << GenSpaces(count) << "i: " << boost::get<int>(attr) << "\n";
} break;
case framework::proto::AttrType::FLOAT: {
ss << GenSpaces(count) << "type: FLOAT\n";
ss << GenSpaces(count) << "f: " << boost::get<float>(attr) << "\n";
} break;
case framework::proto::AttrType::STRING: {
ss << GenSpaces(count) << "type: STRING\n";
ss << GenSpaces(count) << "s: \"" << boost::get<std::string>(attr)
<< "\"\n";
} break;
case framework::proto::AttrType::BOOLEANS: {
ss << GenSpaces(count) << "type: BOOLEANS\n";
ss << GenSpaces(count) << "bools: "
<< "\n";
} break;
case framework::proto::AttrType::INTS: {
ss << GenSpaces(count) << "type: INTS\n";
ss << GenSpaces(count) << "ints: "
<< "\n";
} break;
case framework::proto::AttrType::FLOATS: {
ss << GenSpaces(count) << "type: FLOATS\n";
ss << GenSpaces(count) << "floats: "
<< "\n";
} break;
case framework::proto::AttrType::STRINGS: {
ss << GenSpaces(count) << "type: STRINGS\n";
ss << GenSpaces(count) << "strings: "
<< "\n";
} break;
case framework::proto::AttrType::LONG: {
ss << GenSpaces(count) << "type: LONG\n";
ss << GenSpaces(count) << "l: " << boost::get<int64_t>(attr) << "\n";
} break;
case framework::proto::AttrType::LONGS: {
ss << GenSpaces(count) << "type: LONGS\n";
ss << GenSpaces(count) << "longs: "
<< "\n";
} break;
default:
PADDLE_THROW("Unsupport attr type %d", attr_type);
}
ss << GenSpaces(--count) << "}\n";
}
ss << GenSpaces(--count) << "}\n";
return ss.str();
}
@@ -299,6 +471,7 @@ TEST(op_tester, base) {
FLAGS_op_config_list.c_str());
std::vector<OpTesterConfig> op_configs;
while (!fin.eof()) {
VLOG(4) << "Reading config " << op_configs.size() << "...";
OpTesterConfig config;
bool result = config.Init(fin);
if (result) {
@@ -14,7 +14,9 @@ limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/op_desc.h"
@@ -39,16 +41,21 @@ class OpTester {
private:
std::vector<std::string> GetOpProtoInputNames();
std::vector<std::string> GetOpProtoOutputNames();
std::unordered_map<std::string, framework::proto::AttrType>
GetOpProtoAttrNames();
framework::proto::VarType::Type TransToVarType(std::string str);
void CreateInputVarDesc();
void CreateOutputVarDesc();
void CreateOpDesc();
framework::VarDesc *Var(const std::string &name);
void CreateVariables(framework::Scope *scope);
template <typename T>
void SetupTensor(framework::LoDTensor *input,
const std::vector<int64_t> &shape, T lower, T upper,
const std::string &initializer);
void RunImpl();
@@ -57,7 +64,7 @@ class OpTester {
std::string type_;
framework::OpDesc op_desc_;
std::unordered_map<std::string, std::unique_ptr<framework::VarDesc>> vars_;
std::unordered_map<std::string, OpInputConfig> inputs_;
std::unique_ptr<framework::OperatorBase> op_;
platform::Place place_;
std::unique_ptr<framework::Scope> scope_;
@@ -14,7 +14,6 @@ limitations under the License. */
#include "paddle/fluid/operators/benchmark/op_tester_config.h"
#include <fstream>
#include "glog/logging.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
@@ -40,6 +39,62 @@ static void EraseEndSep(std::string* str,
}
}
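// Parses one input block, reading the name, dtype, initializer, dims and
// lod fields between the start and end separators.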
OpInputConfig::OpInputConfig(std::istream& is) {
std::string sep;
is >> sep;
if (sep == kStartSeparator) {
while (sep != kEndSeparator) {
is >> sep;
if (sep == "name" || sep == "name:") {
is >> name;
EraseEndSep(&name);
} else if (sep == "dtype" || sep == "dtype:") {
ParseDType(is);
} else if (sep == "initializer" || sep == "initializer:") {
ParseInitializer(is);
} else if (sep == "dims" || sep == "dims:") {
ParseDims(is);
} else if (sep == "lod" || sep == "lod:") {
ParseLoD(is);
}
}
}
}
void OpInputConfig::ParseDType(std::istream& is) {
std::string dtype_str;
is >> dtype_str;
EraseEndSep(&dtype_str);
if (dtype_str == "int32" || dtype_str == "int") {
dtype = "int32";
} else if (dtype_str == "int64" || dtype_str == "long") {
dtype = "int64";
} else if (dtype_str == "fp32" || dtype_str == "float") {
dtype = "fp32";
} else if (dtype_str == "fp64" || dtype_str == "double") {
dtype = "fp64";
} else {
PADDLE_THROW("Unsupported dtype %s", dtype_str.c_str());
}
VLOG(4) << "dtype of input " << name << " is: " << dtype;
}
void OpInputConfig::ParseInitializer(std::istream& is) {
std::string initializer_str;
is >> initializer_str;
EraseEndSep(&initializer_str);
const std::vector<std::string> supported_initializers = {"random", "natural",
"zeros"};
if (!Has(supported_initializers, initializer_str)) {
PADDLE_THROW("Unsupported initializer %s", initializer_str.c_str());
}
initializer = initializer_str;
VLOG(4) << "initializer of input " << name << " is: " << initializer;
}
void OpInputConfig::ParseDims(std::istream& is) {
std::string dims_str;
is >> dims_str;
@@ -84,7 +139,7 @@ void OpInputConfig::ParseLoD(std::istream& is) {
number += lod_str[i];
++i;
}
level.push_back(StringTo<size_t>(number));
}
lod.push_back(level);
} else if (lod_str[i] == '}') {
@@ -93,24 +148,6 @@ void OpInputConfig::ParseLoD(std::istream& is) {
}
}
OpTesterConfig::OpTesterConfig(const std::string& filename) {
std::ifstream fin(filename, std::ios::in | std::ios::binary);
PADDLE_ENFORCE(static_cast<bool>(fin), "Cannot open file %s",
@@ -167,6 +204,7 @@ bool OpTesterConfig::ParseAttrs(std::istream& is) {
is >> value;
EraseEndSep(&key, ":");
EraseEndSep(&value);
VLOG(4) << "attrs: " << key << ", " << value;
attrs[key] = value;
}
@@ -15,6 +15,7 @@ limitations under the License. */
#pragma once
#include <istream>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>
@@ -27,10 +28,14 @@ struct OpInputConfig {
OpInputConfig() {}
explicit OpInputConfig(std::istream& is);
void ParseDType(std::istream& is);
void ParseInitializer(std::istream& is);
void ParseDims(std::istream& is);
void ParseLoD(std::istream& is);
std::string name;
std::string dtype{"fp32"}; // int32/int, int64/long, fp32/float, fp64/double
std::string initializer{"random"}; // random, natural
std::vector<int64_t> dims;
std::vector<std::vector<size_t>> lod;
};
@@ -55,6 +60,23 @@
double runtime{0.0};
};
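// Returns true if `vec` contains `item`.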
static bool Has(const std::vector<std::string>& vec, const std::string& item) {
for (size_t i = 0; i < vec.size(); ++i) {
if (vec[i] == item) {
return true;
}
}
return false;
}
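// Parses a value of type T from `str` via stream extraction,
// e.g. StringTo<int>("42") or StringTo<size_t>("100").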
template <typename T>
T StringTo(const std::string& str) {
std::istringstream is(str);
T value;
is >> value;
return value;
}
} // namespace benchmark
} // namespace operators
} // namespace paddle