Commit 5ffdfeda authored by cc, committed by GitHub

Support gnustl, replace string funcs with ours, test=develop (#3263)

Parent 73ca2e00
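Background: gnustl, the GNU STL variant bundled with older Android NDK toolchains, does not implement C++11 string helpers such as `std::to_string` and `std::stoi`. This commit therefore switches every call site to a project-local `paddle::lite::to_string` (added to `lite/utils/string.h` in the last hunk below) and to C's `atoi`. Here is a simplified, self-contained sketch of the new helper; the real version (shown at the bottom of this diff) uses the project's `STL::stringstream` wrapper so it also builds under `LITE_ON_TINY_PUBLISH`, and `std::ostringstream` stands in here only to keep the sketch compilable on its own:

```cpp
#include <sstream>
#include <string>

namespace paddle {
namespace lite {

// Stand-in for the helper this patch adds in lite/utils/string.h:
// format any streamable value without relying on std::to_string,
// which gnustl does not provide.
template <typename T>
std::string to_string(const T& v) {
  std::ostringstream ss;
  ss << v;
  return ss.str();
}

}  // namespace lite
}  // namespace paddle
```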
......@@ -67,7 +67,7 @@ void Run(const char* model_dir, int repeat) {
int main(int argc, char** argv) {
CHECK_EQ(argc, 3) << "usage: ./cmd <model_dir> <repeat>";
-  paddle::lite::Run(argv[1], std::stoi(argv[2]));
+  paddle::lite::Run(argv[1], atoi(argv[2]));
return 0;
}
......
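Note that `atoi` and `std::stoi` differ on bad input: `std::stoi` throws `std::invalid_argument` or `std::out_of_range`, while `atoi` silently returns 0 and reports no error. For this CLI entry point the trade-off is acceptable, but callers lose error detection; a minimal illustration:

```cpp
#include <cstdlib>
#include <iostream>

int main() {
  // atoi yields 0 on malformed input where std::stoi would throw
  // std::invalid_argument; the swap trades error checking for
  // gnustl compatibility.
  std::cout << atoi("42") << "\n";   // prints 42
  std::cout << atoi("abc") << "\n";  // prints 0, no error raised
  return 0;
}
```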
......@@ -138,7 +138,7 @@ void Run(const std::vector<std::vector<int64_t>>& input_shapes,
std::ofstream out(FLAGS_arg_name + ".txt");
for (size_t i = 0; i < arg_num; ++i) {
sum += arg_tensor->data<float>()[i];
-    out << std::to_string(arg_tensor->data<float>()[i]) << "\n";
+    out << paddle::lite::to_string(arg_tensor->data<float>()[i]) << "\n";
}
LOG(INFO) << FLAGS_arg_name << " shape is " << os.str()
<< ", mean value is " << sum * 1. / arg_num;
......
......@@ -250,7 +250,7 @@ void Run(const std::vector<std::vector<int64_t>>& input_shapes,
std::ofstream out(FLAGS_arg_name + ".txt");
for (size_t i = 0; i < arg_num; ++i) {
sum += arg_tensor->data<float>()[i];
-    out << std::to_string(arg_tensor->data<float>()[i]) << "\n";
+    out << paddle::lite::to_string(arg_tensor->data<float>()[i]) << "\n";
}
LOG(INFO) << FLAGS_arg_name << " shape is " << os.str()
<< ", mean value is " << sum * 1. / arg_num;
......
......@@ -264,7 +264,7 @@ void Run(const std::vector<std::vector<int64_t>>& input_shapes,
std::ofstream out(FLAGS_arg_name + ".txt");
for (size_t i = 0; i < arg_num; ++i) {
sum += arg_tensor->data<float>()[i];
-    out << std::to_string(arg_tensor->data<float>()[i]) << "\n";
+    out << paddle::lite::to_string(arg_tensor->data<float>()[i]) << "\n";
}
LOG(INFO) << FLAGS_arg_name << " shape is " << os.str()
<< ", mean value is " << sum * 1. / arg_num;
......
......@@ -106,7 +106,7 @@ inline void read_from_file(lite::Tensor* t, const std::string& path) {
inline void save_float(float* data, const std::string& name, int len) {
static int counter = 0;
-  std::string old_string = std::to_string(counter);
+  std::string old_string = paddle::lite::to_string(counter);
std::string new_string =
std::string(3 - old_string.length(), '0') + old_string;
......
......@@ -351,10 +351,10 @@ class Tensor {
void printScale(std::string type) { printScale(); }
std::string dimsFileName() {
-    return std::to_string(shape_->num()) + "_" +
-           std::to_string(shape_->channel()) + "_" +
-           std::to_string(shape_->height()) + "_" +
-           std::to_string(shape_->width()) + ".txt";
+    return paddle::lite::to_string(shape_->num()) + "_" +
+           paddle::lite::to_string(shape_->channel()) + "_" +
+           paddle::lite::to_string(shape_->height()) + "_" +
+           paddle::lite::to_string(shape_->width()) + ".txt";
}
void saveToFile() { std::string path = dimsFileName(); }
......@@ -374,7 +374,7 @@ class Tensor {
invalidate();
std::ofstream ofs;
static int counter = 0;
-    std::string npath = std::to_string(counter) + "_" + path;
+    std::string npath = paddle::lite::to_string(counter) + "_" + path;
counter++;
save_file_with_name(npath);
}
......
......@@ -17,6 +17,7 @@
#include <string>
#include "glog/logging.h"
#include "lite/backends/x86/jit/gen/jitcode.h"
#include "lite/utils/string.h"
namespace paddle {
namespace lite {
......@@ -64,7 +65,7 @@ class VXXJitCode : public JitCode {
base += "_Vec";
}
base += (with_relu_ ? "_Relu" : "");
base += "_D" + std::to_string(num_);
base += "_D" + paddle::lite::to_string(num_);
return base;
}
void genCode() override;
......
......@@ -47,7 +47,7 @@ class EmbSeqPoolJitCode : public JitCode {
} else if (type_ == SeqPoolType::kSqrt) {
base += "_Sqrt";
}
base += ("_W" + std::to_string(tbl_w_));
base += ("_W" + paddle::lite::to_string(tbl_w_));
return base;
}
void genCode() override;
......
......@@ -38,8 +38,8 @@ class MatMulJitCode : public JitCode {
std::string name() const override {
std::string base = "MatMulJitCode";
base = base + "_M" + std::to_string(m_) + "_N" + std::to_string(n_) + "_K" +
std::to_string(k_);
base = base + "_M" + paddle::lite::to_string(m_) + "_N" +
paddle::lite::to_string(n_) + "_K" + paddle::lite::to_string(k_);
return base;
}
void genCode() override;
......
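These `name()` overrides build human-readable identifiers for the generated JIT kernels; only the integer-formatting call changes. For example, with `m_ = 3`, `n_ = 4`, `k_ = 5` the method above yields `MatMulJitCode_M3_N4_K5`. A standalone sketch of the same key construction (using `std::to_string` here purely so it compiles on its own; after this patch the real code goes through `paddle::lite::to_string`):

```cpp
#include <iostream>
#include <string>

// Mirrors MatMulJitCode::name(): concatenate the kernel tag with the
// matrix dimensions to get a unique, readable cache key.
std::string matmul_name(int m, int n, int k) {
  std::string base = "MatMulJitCode";
  base = base + "_M" + std::to_string(m) + "_N" + std::to_string(n) +
         "_K" + std::to_string(k);
  return base;
}

int main() {
  std::cout << matmul_name(3, 4, 5) << "\n";  // MatMulJitCode_M3_N4_K5
  return 0;
}
```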
......@@ -47,7 +47,7 @@ class SeqPoolJitCode : public JitCode {
} else if (type_ == SeqPoolType::kSqrt) {
base += "_Sqrt";
}
base += ("_W" + std::to_string(w_));
base += ("_W" + paddle::lite::to_string(w_));
return base;
}
void genCode() override;
......
......@@ -48,13 +48,16 @@ std::string Visualize(mir::SSAGraph* graph) {
auto attr_type = op_info->GetAttrType(attr_name);
switch (attr_type) {
case AttrType::INT:
os << ":int:" << std::to_string(op_info->GetAttr<int>(attr_name));
os << ":int:"
<< paddle::lite::to_string(op_info->GetAttr<int>(attr_name));
break;
case AttrType::FLOAT:
os << ":float:" << std::to_string(op_info->GetAttr<float>(attr_name));
os << ":float:"
<< paddle::lite::to_string(op_info->GetAttr<float>(attr_name));
break;
case AttrType::BOOLEAN:
os << ":int:" << std::to_string(op_info->GetAttr<bool>(attr_name));
os << ":int:"
<< paddle::lite::to_string(op_info->GetAttr<bool>(attr_name));
break;
case AttrType::STRING:
os << ":string: \""
......
......@@ -238,7 +238,7 @@ void MemoryOptimizePass::PerformReusePlan(
if (reuse_table.count(name) && reuse_table.at(name) != name) {
auto replace_name = reuse_table.at(name);
input_node->AsArg().name =
replace_name + "(" + std::to_string(node_append_idx) + ")";
replace_name + "(" + paddle::lite::to_string(node_append_idx) + ")";
node_append_idx++;
}
}
......@@ -262,7 +262,7 @@ void MemoryOptimizePass::PerformReusePlan(
if (reuse_table.count(name) && reuse_table.at(name) != name) {
auto replace_name = reuse_table.at(name);
out_node->AsArg().name =
replace_name + "(" + std::to_string(node_append_idx) + ")";
replace_name + "(" + paddle::lite::to_string(node_append_idx) + ")";
node_append_idx++;
}
}
......
......@@ -66,11 +66,11 @@ std::string SubgraphVisualizer::operator()() {
} else {
exists_ops[op_type]++;
}
-    auto op_name = op_type + std::to_string(exists_ops[op_type]);
+    auto op_name = op_type + paddle::lite::to_string(exists_ops[op_type]);
std::string op_color = "white";
if (subgraph_indices.count(node)) {
auto subgraph_idx = subgraph_indices[node];
op_name += "_subgraph_" + std::to_string(subgraph_idx);
op_name += "_subgraph_" + paddle::lite::to_string(subgraph_idx);
op_color = subgraph_colors[subgraph_idx % subgraph_colors.size()];
}
dot.AddNode(op_name,
......
......@@ -39,7 +39,7 @@ std::vector<std::string> AddFCDesc(
CHECK_EQ(input_var_names.size(), 1);
CHECK_EQ(wshape.size(), 2);
static int id = 0;
std::string prefix = "fc_" + std::to_string(id);
std::string prefix = "fc_" + paddle::lite::to_string(id);
auto* op_desc = block_desc->AddOp<cpp::OpDesc>();
auto* wgt = block_desc->AddVar<cpp::VarDesc>();
......@@ -76,7 +76,7 @@ std::vector<std::string> AddElementwiseAddDesc(
const std::vector<std::string>& input_Y_names) {
// CHECK_EQ(input_var_names.size(), 2);
static int id = 0;
std::string prefix = "elementwise_add_" + std::to_string(id);
std::string prefix = "elementwise_add_" + paddle::lite::to_string(id);
auto* op_desc = block_desc->AddOp<cpp::OpDesc>();
auto* out = block_desc->AddVar<cpp::VarDesc>();
......@@ -100,7 +100,7 @@ std::vector<std::string> AddFeedDesc(
const std::vector<std::string>& input_X_names) {
// CHECK_EQ(input_var_names.size(), 1);
static int id = 0;
std::string prefix = "feed_" + std::to_string(id);
std::string prefix = "feed_" + paddle::lite::to_string(id);
auto* op_desc = block_desc->AddOp<cpp::OpDesc>();
auto* out = block_desc->AddVar<cpp::VarDesc>();
......@@ -123,7 +123,7 @@ std::vector<std::string> AddFetchDesc(
const std::vector<std::string>& input_X_names) {
// CHECK_EQ(input_var_names.size(), 1);
static int id = 0;
std::string prefix = "fetch_" + std::to_string(id);
std::string prefix = "fetch_" + paddle::lite::to_string(id);
auto* op_desc = block_desc->AddOp<cpp::OpDesc>();
auto* out = block_desc->AddVar<cpp::VarDesc>();
......
......@@ -201,7 +201,8 @@ void PrecisionCastPass::AddCastInst(const Type& from,
CHECK(in->IsArg());
// auto node_id = [&] { return graph->nodes().size(); };
auto cast_op_output_name = in->AsArg().name + "/precision_trans";
// in->AsArg().name + "/precision_trans/" + std::to_string(node_id());
// in->AsArg().name + "/precision_trans/" +
// paddle::lite::to_string(node_id());
auto* cast_op_output_arg = graph->NewArgumentNode(cast_op_output_name);
cast_op_output_arg->AsArg().type =
LiteType::GetTensorTy(from.target(), to.precision(), from.layout());
......
......@@ -263,8 +263,8 @@ class PrecisionProfiler {
&mean,
&std_dev,
out_name);
-        mean_str = std::to_string(mean);
-        std_dev_str = std::to_string(std_dev);
+        mean_str = paddle::lite::to_string(mean);
+        std_dev_str = paddle::lite::to_string(std_dev);
}
std::string kernel_info = op_name + ":" + kernel_place;
std::string output_arg_info = out_name + ":" +
......@@ -294,8 +294,8 @@ class PrecisionProfiler {
&mean,
&std_dev,
out_name);
-        mean_str = std::to_string(mean);
-        std_dev_str = std::to_string(std_dev);
+        mean_str = paddle::lite::to_string(mean);
+        std_dev_str = paddle::lite::to_string(std_dev);
}
std::string kernel_info = op_name + ":" + kernel_place;
std::string output_arg_info = out_name + ":" +
......
......@@ -30,9 +30,9 @@ Program FakeProgram() {
auto add_fc = [&](int id, std::string x) {
// create variables
std::string w1 = "w" + std::to_string(id);
std::string b1 = "b" + std::to_string(id);
std::string out1 = "out" + std::to_string(id);
std::string w1 = "w" + paddle::lite::to_string(id);
std::string b1 = "b" + paddle::lite::to_string(id);
std::string out1 = "out" + paddle::lite::to_string(id);
auto w1v = program.scope()->Var(w1)->GetMutable<lite::Tensor>();
auto b1v = program.scope()->Var(b1)->GetMutable<lite::Tensor>();
auto out1v = program.scope()->Var(out1)->GetMutable<lite::Tensor>();
......
......@@ -53,9 +53,9 @@ static std::string version() {
static int64_t int_version(const std::string& version) {
const std::vector<std::string> vec = Split(version, ".");
if (vec.size() == 3) {
-    return std::stoi(vec[0]) * MAJOR_COEFF +
-           std::stoi(vec[1]) * MINOR_COEFF +
-           std::stoi(vec[2]) * PATCH_COEFF;
+    return atoi(vec[0].c_str()) * MAJOR_COEFF +
+           atoi(vec[1].c_str()) * MINOR_COEFF +
+           atoi(vec[2].c_str()) * PATCH_COEFF;
}
return -1;
}
......
......@@ -207,7 +207,8 @@ void RunModel(std::string det_model_file,
cv::Mat roi = crop_img(img, rec_clip, classify_w, classify_h);
// uncomment two lines below, save roi img to disk
// std::string roi_name = "roi_" + std::to_string(i) + ".jpg";
// std::string roi_name = "roi_" + paddle::lite::to_string(i)
// + ".jpg";
// imwrite(roi_name, roi);
// Do PreProcess
......@@ -240,7 +241,7 @@ void RunModel(std::string det_model_file,
roi_color = cv::Scalar(0, 0, 255);
prob = 1 - prob;
}
-    std::string prob_str = std::to_string(prob * 100);
+    std::string prob_str = paddle::lite::to_string(prob * 100);
int point_idx = prob_str.find_last_of(".");
text += prob_str.substr(0, point_idx + 3) + "%";
......
......@@ -32,7 +32,7 @@ int64_t ShapeProduction(const shape_t& shape) {
std::string ShapePrint(const shape_t& shape) {
std::string shape_str{""};
for (auto i : shape) {
-    shape_str += std::to_string(i) + " ";
+    shape_str += paddle::lite::to_string(i) + " ";
}
return shape_str;
}
......
......@@ -126,7 +126,7 @@ std::vector<Object> detect_object(const float* data,
if (w > 0 && h > 0 && obj.prob <= 1) {
rect_out.push_back(obj);
cv::rectangle(image, rec_clip, cv::Scalar(0, 0, 255), 2, cv::LINE_AA);
-      std::string str_prob = std::to_string(obj.prob);
+      std::string str_prob = paddle::lite::to_string(obj.prob);
std::string text = std::string(class_names[obj.class_id]) + ": " +
str_prob.substr(0, str_prob.find(".") + 4);
int font_face = cv::FONT_HERSHEY_COMPLEX_SMALL;
......
......@@ -146,7 +146,7 @@ std::vector<Object> detect_object(const float* data,
if (w > 0 && h > 0 && obj.prob <= 1) {
rect_out.push_back(obj);
cv::rectangle(image, rec_clip, cv::Scalar(0, 0, 255), 1, cv::LINE_AA);
-      std::string str_prob = std::to_string(obj.prob);
+      std::string str_prob = paddle::lite::to_string(obj.prob);
std::string text = std::string(class_names[obj.class_id]) + ": " +
str_prob.substr(0, str_prob.find(".") + 4);
int font_face = cv::FONT_HERSHEY_COMPLEX_SMALL;
......
......@@ -111,11 +111,11 @@ void Module::AddOpDescHelper(const std::string &op_id,
switch (type) {
case AttrType::INT:
-        return std::to_string(desc.GetAttr<int>(name));
+        return paddle::lite::to_string(desc.GetAttr<int>(name));
case AttrType::FLOAT:
-        return std::to_string(desc.GetAttr<float>(name));
+        return paddle::lite::to_string(desc.GetAttr<float>(name));
case AttrType::BOOLEAN:
-        return std::to_string(desc.GetAttr<bool>(name));
+        return paddle::lite::to_string(desc.GetAttr<bool>(name));
case AttrType::STRING:
return "\"" + desc.GetAttr<std::string>(name) + "\"";
case AttrType::FLOATS: {
......
......@@ -153,16 +153,16 @@ class Module {
private:
std::string WeightUniqueName() const {
return "w_" + std::to_string(weight_counter_++);
return "w_" + paddle::lite::to_string(weight_counter_++);
}
std::string TmpVarUniqueName() const {
return "tmp_" + std::to_string(tmp_var_counter_++);
return "tmp_" + paddle::lite::to_string(tmp_var_counter_++);
}
std::string OpUniqueName() const {
return "op_" + std::to_string(op_counter_++);
return "op_" + paddle::lite::to_string(op_counter_++);
}
std::string KernelUniqueName() const {
return "kernel_" + std::to_string(kernel_counter_++);
return "kernel_" + paddle::lite::to_string(kernel_counter_++);
}
std::string DataRepr(const std::string &raw_data, PrecisionType dtype);
......
......@@ -33,7 +33,7 @@ std::string UniqueName(const std::string& prefix) {
counter = ++(it->second);
}
return prefix + "_" + std::to_string(counter);
return prefix + "_" + paddle::lite::to_string(counter);
}
bool HasInputArg(const OpInfo* op_info,
......
......@@ -87,7 +87,8 @@ class Graph {
auto idx = Add(name, node);
CHECK_GE(idx, 1);
// Generate a unique name for the created HiAI IR
-    node->set_data(std::make_shared<T>(name + "__" + std::to_string(idx)));
+    node->set_data(
+        std::make_shared<T>(name + "__" + paddle::lite::to_string(idx)));
return node;
}
......
......@@ -64,10 +64,12 @@ int SplitConverter(void* ctx, OpLite* op, KernelBase* kernel) {
split_op->create_dynamic_output_y(out_names.size());
int idx = 1;
for (auto& out_name : out_names) {
-    auto zero_node = graph->Add(out_name + "/zero" + std::to_string(idx), 0);
+    auto zero_node =
+        graph->Add(out_name + "/zero" + paddle::lite::to_string(idx), 0);
auto add_node = graph->Add<ge::op::Add>(out_name);
auto add_op = add_node->data<ge::op::Add>();
-    add_op->set_input_x1(*split_node->data(), "y" + std::to_string(idx));
+    add_op->set_input_x1(*split_node->data(),
+                         "y" + paddle::lite::to_string(idx));
add_op->set_input_x2(*zero_node->data());
idx++;
}
......
......@@ -49,7 +49,7 @@ std::shared_ptr<Node> Graph::Add(const std::string& name,
CHECK_GE(idx, 1);
node->set_data(std::make_shared<xtcl::xExpr>(layer));
// Generate a unique name for the current XTCL layer
builder_.SetLayer(name + "__" + std::to_string(idx));
builder_.SetLayer(name + "__" + paddle::lite::to_string(idx));
return node;
}
......
......@@ -155,7 +155,7 @@ TEST(ListBuilder, basic) {
for (int i = 0; i < num_elems; i++) {
auto* elem = li.New();
elem->set("elem-" + std::to_string(i));
elem->set("elem-" + paddle::lite::to_string(i));
}
li.Save();
table.SaveToFile("2.bf");
......@@ -169,7 +169,7 @@ TEST(ListBuilder, basic) {
li1.Load();
for (int i = 0; i < num_elems; i++) {
-    ASSERT_EQ(li1.Get(i).data(), "elem-" + std::to_string(i));
+    ASSERT_EQ(li1.Get(i).data(), "elem-" + paddle::lite::to_string(i));
}
}
......
......@@ -128,7 +128,7 @@ class ConcateComputeTester : public arena::TestCase {
for (int i = 0; i < x_dims_.production(); i++) {
x_data[i] = static_cast<float>(i + n);
}
const std::string x_name = "x_tensor_" + std::to_string(n);
const std::string x_name = "x_tensor_" + paddle::lite::to_string(n);
x_vct_.push_back(x_name);
SetCommonTensor(x_name, x_dims_, x_data.data());
}
......
......@@ -52,7 +52,8 @@ class FillConstantComputeTester : public arena::TestCase {
is_use_shape_tensor_list_(is_use_shape_tensor_list) {
if (is_use_shape_tensor_list) {
for (int i = 0; i < shape.size(); i++) {
-        shape_tensor_list_.push_back(shape_tensor_ + std::to_string(i));
+        shape_tensor_list_.push_back(shape_tensor_ +
+                                     paddle::lite::to_string(i));
}
}
}
......
......@@ -45,7 +45,8 @@ class ReshapeComputeTester : public arena::TestCase {
: TestCase(place, alias), dims_(dims) {
if (is_shape_tensor_vct) {
for (size_t i = 0; i < shape.size(); i++) {
shape_tensor_vct_.emplace_back(op_type_ + "/shape" + std::to_string(i));
shape_tensor_vct_.emplace_back(op_type_ + "/shape" +
paddle::lite::to_string(i));
}
} else if (is_shape_tensor) {
shape_tensor_ = op_type_ + "/shape";
......
......@@ -168,8 +168,9 @@ class SliceComputeTester : public arena::TestCase {
std::vector<std::string> ends_tensor_list_;
for (int i = 0; i < starts_.size(); ++i) {
starts_tensor_list_.push_back("starts_tensor_list_" +
-                                    std::to_string(i));
-      ends_tensor_list_.push_back("ends_tensor_list_" + std::to_string(i));
+                                    paddle::lite::to_string(i));
+      ends_tensor_list_.push_back("ends_tensor_list_" +
+                                  paddle::lite::to_string(i));
}
op_desc->SetInput("StartsTensorList", {starts_tensor_list_});
op_desc->SetInput("EndsTensorList", {ends_tensor_list_});
......@@ -203,15 +204,15 @@ class SliceComputeTester : public arena::TestCase {
} else if (use_tensor_list_) {
Scope& scope_ = this->scope();
for (int i = 0; i < starts_.size(); ++i) {
-        auto* tensor =
-            scope_.NewTensor("starts_tensor_list_" + std::to_string(i));
+        auto* tensor = scope_.NewTensor("starts_tensor_list_" +
+                                        paddle::lite::to_string(i));
tensor->Resize(DDim({1}));
auto* d = tensor->mutable_data<int>();
d[0] = starts_[i];
}
for (int i = 0; i < ends_.size(); ++i) {
auto* tensor =
scope_.NewTensor("ends_tensor_list_" + std::to_string(i));
scope_.NewTensor("ends_tensor_list_" + paddle::lite::to_string(i));
tensor->Resize(DDim({1}));
auto* d = tensor->mutable_data<int>();
d[0] = ends_[i];
......
......@@ -123,7 +123,7 @@ class UnsqueezeComputeTester : public arena::TestCase {
} else if (input_axes_flag_ == 3) {
std::string name = "axes_tensor_";
for (size_t i = 0; i < axes_.size(); i++) {
-        name = name + std::to_string(i);
+        name = name + paddle::lite::to_string(i);
axes_tensor_list_.push_back(name);
SetCommonTensor(name, DDim({1}), &axes_[i]);
}
......
......@@ -29,6 +29,7 @@
#include <cstring>
#include <string>
#include "lite/utils/replace_stl/stream.h"
#include "lite/utils/string.h"
#ifdef LITE_WITH_ANDROID
#include <android/log.h>
......@@ -171,7 +172,7 @@ class VLogMessage {
if (GLOG_v_int < level_int) {
return;
}
-    const char* level = std::to_string(level_int).c_str();
+    const char* level = paddle::lite::to_string(level_int).c_str();
paddle::lite::gen_log(log_stream_, file, func, lineno, level);
}
......
......@@ -15,6 +15,7 @@
#include "lite/utils/replace_stl/stream.h"
#include <assert.h>
#include <stdio.h>
#include "lite/utils/string.h"
#ifdef LITE_ON_TINY_PUBLISH
......@@ -39,9 +40,9 @@ void ostream::pad(const std::string& text) {
#ifdef LITE_SHUTDOWN_LOG
#define ADD_DATA_AS_STRING(data_, obj_)
#else
-#define ADD_DATA_AS_STRING(data_, obj_) \
-  std::string text = std::to_string(obj_); \
-  pad(text); \
+#define ADD_DATA_AS_STRING(data_, obj_)             \
+  std::string text = paddle::lite::to_string(obj_); \
+  pad(text);                                        \
data_ = data_ + text;
#endif
......
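The hunk above changes the macro's stringify call and realigns its line-continuation backslashes; behaviorally, `ADD_DATA_AS_STRING` converts a value with the new helper, runs it through `pad`, and appends it to the stream's buffer. A function-shaped sketch of those three steps; `pad` is reduced to a placeholder because its body is not part of this hunk, and `std::ostringstream` again stands in for the project helper:

```cpp
#include <sstream>
#include <string>

// Placeholder for replace_stl's ostream::pad(const std::string&); the
// real implementation post-processes the text before it is appended.
static void pad(const std::string& /*text*/) {}

// Function form of ADD_DATA_AS_STRING(data_, obj_): stringify, pad,
// then append to the ostream's internal string buffer.
template <typename T>
void add_data_as_string(std::string& data, const T& obj) {
  std::ostringstream ss;
  ss << obj;
  std::string text = ss.str();
  pad(text);
  data = data + text;
}
```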
......@@ -48,7 +48,14 @@ template <typename T>
static std::string to_string_with_precision(const T& v, const int n = 6) {
STL::stringstream ss;
ss.precision(n);
// ss << std::fixed << v;
ss << v;
return ss.str();
}
+template <typename T>
+static std::string to_string(const T& v) {
+  STL::stringstream ss;
+  ss << v;
+  return ss.str();
+}
......
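One behavioral note on the new helpers: routing through a stringstream means any type with an `operator<<` can be formatted, but floating-point output follows stream defaults (6 significant digits) rather than `std::to_string`'s fixed 6 decimals, and with the `std::fixed` line commented out, `to_string_with_precision` caps significant digits, not decimal places. A small usage sketch, assuming `lite/utils/string.h` is on the include path:

```cpp
#include <iostream>
#include "lite/utils/string.h"  // provides paddle::lite::to_string

int main() {
  // Stream formatting: 6 significant digits by default.
  std::cout << paddle::lite::to_string(3.14159265) << "\n";   // 3.14159
  // std::to_string would instead print 3.141593 (fixed 6 decimals).

  // precision(3) without std::fixed limits significant digits.
  std::cout << paddle::lite::to_string_with_precision(3.14159265, 3)
            << "\n";                                          // 3.14
  return 0;
}
```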