Commit 51a1f0ff — authored by eclipsycn, committed via GitHub

Merge pull request #192 from Eclipsess/develop

fix #191 use LOG instead of cout
......@@ -17,7 +17,7 @@ SOFTWARE.
==============================================================================*/
#include <fstream>
#include <iostream>
//#include <iostream>
#include "common/log.h"
#include "framework/framework.pb.h"
......
......@@ -34,12 +34,12 @@ namespace paddle_mobile {
const std::vector<std::shared_ptr<BlockDesc>> blocks =
to_predict_program_->Blocks();
// std::cout << " **block size " << blocks.size() << std::endl;
// DLOG << " **block size " << blocks.size();
for (int i = 0; i < blocks.size(); ++i) {
std::shared_ptr<BlockDesc> block_desc = blocks[i];
std::vector<std::shared_ptr<OpDesc>> ops =
block_desc->Ops();
// std::cout << " ops " << ops.size() << std::endl;
// DLOG << " ops " << ops.size();
for (int j = 0; j < ops.size(); ++j) {
std::shared_ptr<OpDesc> op = ops[j];
// if (op->Type() ==
......@@ -47,35 +47,26 @@ namespace paddle_mobile {
// if
// (op->GetAttrMap().at("axis").Get<int>()
// != -1) {
// std::cout
// << "attr: axis = "
// <<
// op->GetAttrMap().at("axis").Get<int>()
// << std::endl;
// DLOG << "attr: axis =
// "
// <<
// op->GetAttrMap().at("axis").Get<int>();
// }
// }
// std::cout << "op:" << op->Type() << std::endl;
// DLOG << "op:" << op->Type();
if (op->Type() == "elementwise_add" &&
op->Input("X")[0] == "batch_norm_2.tmp_2") {
std::cout << " elementwise_add attr size: "
<< op->GetAttrMap().size() << std::endl;
std::cout
<< " inputs size: " << op->GetInputs().size()
<< std::endl;
std::cout
<< " outputs size: " << op->GetOutputs().size()
<< std::endl;
std::cout << " Input X is : " << op->Input("X")[0]
<< std::endl;
std::cout << " Input Y is : " << op->Input("Y")[0]
<< std::endl;
std::cout
<< " Output Out is : " << op->Output("Out")[0]
<< std::endl;
DLOG << " elementwise_add attr size: "
<< op->GetAttrMap().size();
DLOG << " inputs size: " << op->GetInputs().size();
DLOG << " outputs size: "
<< op->GetOutputs().size();
DLOG << " Input X is : " << op->Input("X")[0];
DLOG << " Input Y is : " << op->Input("Y")[0];
DLOG << " Output Out is : " << op->Output("Out")[0];
Attribute axis_attr = op->GetAttrMap().at("axis");
int axis = axis_attr.Get<int>();
std::cout << " Attr axis is : " << axis
<< std::endl;
DLOG << " Attr axis is : " << axis;
std::shared_ptr<
operators::ElementwiseAddOp<Dtype, float>>
......@@ -104,10 +95,8 @@ namespace paddle_mobile {
Variable *con_output = scope->Var("elementwise_add_0.tmp_0");
Tensor *output_tensor = con_output->GetMutable<Tensor>();
output_tensor->mutable_data<float>({1, 3, 224, 224});
// std::cout << typeid(output_tensor).name() << std::endl;
// std::cout << "output_tensor dims: " << output_tensor->dims()
// <<
// std::endl;
// DLOG << typeid(output_tensor).name();
// DLOG << "output_tensor dims: " << output_tensor->dims();
std::shared_ptr<Tensor> out_tensor =
std::make_shared<LoDTensor>();
......@@ -131,7 +120,7 @@ namespace paddle_mobile {
for (int j = 0;
j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
auto op = ops_of_block_[*to_predict_block.get()][j];
std::cout << "op -> run()" << std::endl;
DLOG << "op -> run()";
op->Run();
}
}
......@@ -142,8 +131,8 @@ namespace paddle_mobile {
namespace test {
void testElementwiseAdd() {
std::cout << "----------**********----------" << std::endl;
std::cout << "begin to run ElementAddOp Test" << std::endl;
DLOG << "----------**********----------";
DLOG << "begin to run ElementAddOp Test";
paddle_mobile::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(
std::string("../../test/models/"
......@@ -165,18 +154,16 @@ namespace paddle_mobile {
auto output_add = testElementwiseAddOp.predict_add(inputx, inputy);
float *output_add_ptr = output_add->data<float>();
for (int j = 0; j < output_add->numel(); ++j) {
// std::cout << "value of output: " << output_add_ptr[j] <<
// std::endl;
}
// for (int j = 0; j < output_add->numel(); ++j) {
// DLOG << "value of output: " << output_add_ptr[j];
// }
/// output (1,3,224,224)
std::cout << "output memory size : " << output_add->memory_size()
<< std::endl;
std::cout << "output numel : " << output_add->numel() << std::endl;
DLOG << "output memory size : " << output_add->memory_size();
DLOG << "output numel : " << output_add->numel();
std::cout << inputx_ptr[226] << " + " << inputy_ptr[2] << " = "
<< output_add_ptr[226] << std::endl;
DLOG << inputx_ptr[226] << " + " << inputy_ptr[2] << " = "
<< output_add_ptr[226];
}
} // namespace test
} // namespace paddle_mobile
......@@ -34,57 +34,47 @@ namespace paddle_mobile {
const std::vector<std::shared_ptr<BlockDesc>> blocks =
to_predict_program_->Blocks();
// std::cout << " **block size " << blocks.size() << std::endl;
// DLOG << " **block size " << blocks.size();
for (int i = 0; i < blocks.size(); ++i) {
std::shared_ptr<BlockDesc> block_desc = blocks[i];
std::vector<std::shared_ptr<OpDesc>> ops =
block_desc->Ops();
// std::cout << " ops " << ops.size() << std::endl;
// DLOG << " ops " << ops.size();
for (int j = 0; j < ops.size(); ++j) {
std::shared_ptr<OpDesc> op = ops[j];
if (op->Type() == "mul") {
std::cout << "x_num_col_dims : "
<< op->GetAttrMap()
.at("x_num_col_dims")
.Get<int>()
<< std::endl;
std::cout << "y_num_col_dims : "
<< op->GetAttrMap()
.at("y_num_col_dims")
.Get<int>()
<< std::endl;
std::cout << " Input X is : " << op->Input("X")[0]
<< std::endl;
}
// std::cout << "op:" << op->Type() << std::endl;
// if (op->Type() == "mul") {
// DLOG << "x_num_col_dims :
// "
// << op->GetAttrMap()
// .at("x_num_col_dims")
// .Get<int>();
// DLOG << "y_num_col_dims :
// "
// << op->GetAttrMap()
// .at("y_num_col_dims")
// .Get<int>();
// DLOG << " Input X is : "
// << op->Input("X")[0];
// }
// DLOG << "op:" << op->Type();
if (op->Type() == "mul" &&
op->Input("X")[0] == "pool2d_0.tmp_0") {
std::cout
<< " mul attr size: " << op->GetAttrMap().size()
<< std::endl;
std::cout
<< " inputs size: " << op->GetInputs().size()
<< std::endl;
std::cout
<< " outputs size: " << op->GetOutputs().size()
<< std::endl;
std::cout << " Input X is : " << op->Input("X")[0]
<< std::endl;
std::cout << " Input Y is : " << op->Input("Y")[0]
<< std::endl;
std::cout
<< " Output Out is : " << op->Output("Out")[0]
<< std::endl;
std::cout << "x_num_col_dims : "
<< op->GetAttrMap()
.at("x_num_col_dims")
.Get<int>()
<< std::endl;
std::cout << "y_num_col_dims : "
<< op->GetAttrMap()
.at("y_num_col_dims")
.Get<int>()
<< std::endl;
DLOG << " mul attr size: "
<< op->GetAttrMap().size();
DLOG << " inputs size: " << op->GetInputs().size();
DLOG << " outputs size: "
<< op->GetOutputs().size();
DLOG << " Input X is : " << op->Input("X")[0];
DLOG << " Input Y is : " << op->Input("Y")[0];
DLOG << " Output Out is : " << op->Output("Out")[0];
DLOG << "x_num_col_dims : "
<< op->GetAttrMap()
.at("x_num_col_dims")
.Get<int>();
DLOG << "y_num_col_dims : "
<< op->GetAttrMap()
.at("y_num_col_dims")
.Get<int>();
std::shared_ptr<operators::MulOp<Dtype, float>>
add = std::make_shared<
......@@ -112,9 +102,8 @@ namespace paddle_mobile {
Variable *con_output = scope->Var("fc_0.tmp_0");
Tensor *output_tensor = con_output->GetMutable<Tensor>();
output_tensor->mutable_data<float>({3, 3});
// std::cout << typeid(output_tensor).name() << std::endl;
// std::cout << "output_tensor dims: " << output_tensor->dims()
// << std::endl;
// DLOG << typeid(output_tensor).name();
// DLOG << "output_tensor dims: " << output_tensor->dims();
std::shared_ptr<Tensor> out_tensor =
std::make_shared<LoDTensor>();
......@@ -138,7 +127,7 @@ namespace paddle_mobile {
for (int j = 0;
j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
auto op = ops_of_block_[*to_predict_block.get()][j];
std::cout << "op -> run()" << std::endl;
DLOG << "op -> run()";
op->Run();
}
}
......@@ -149,8 +138,8 @@ namespace paddle_mobile {
namespace test {
void testMul() {
std::cout << "----------**********----------" << std::endl;
std::cout << "begin to run MulOp Test" << std::endl;
DLOG << "----------**********----------";
DLOG << "begin to run MulOp Test";
paddle_mobile::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(
std::string("../../test/models/"
......@@ -175,40 +164,39 @@ namespace paddle_mobile {
float *output_mul_ptr = output_mul->data<float>();
auto dimx_1 = inputx.numel() / inputx.dims()[0];
std::cout << "inputx : " << std::endl;
DLOG << " inputx : ";
for (int i = 0; i < inputx.dims()[0]; ++i) {
for (int j = 0; j < dimx_1; ++j) {
std::cout << inputx_ptr[i * dimx_1 + j] << " ";
DLOGF("%f ", inputx_ptr[i * dimx_1 + j]);
}
std::cout << std::endl;
DLOGF("\n");
}
auto dimy_1 = inputy.numel() / inputy.dims()[0];
std::cout << "inputy : " << std::endl;
DLOG << " inputy : ";
for (int i = 0; i < inputy.dims()[0]; ++i) {
for (int j = 0; j < dimy_1; ++j) {
std::cout << inputy_ptr[i * dimy_1 + j] << " ";
DLOGF("%f ", inputy_ptr[i * dimx_1 + j]);
}
std::cout << std::endl;
DLOGF("\n");
}
auto dim_output_1 = output_mul->numel() / output_mul->dims()[0];
std::cout << "output : " << std::endl;
DLOG << " output : ";
for (int i = 0; i < output_mul->dims()[0]; ++i) {
for (int j = 0; j < dim_output_1; ++j) {
std::cout << output_mul_ptr[i * dimy_1 + j] << " ";
DLOGF("%f ", output_mul_ptr[i * dimy_1 + j]);
}
std::cout << std::endl;
DLOGF("\n");
}
/// output (3,3)
std::cout << "output memory size : " << output_mul->memory_size()
<< std::endl;
std::cout << "output numel : " << output_mul->numel() << std::endl;
DLOG << "output memory size : " << output_mul->memory_size();
DLOG << "output numel : " << output_mul->numel();
std::cout << inputx_ptr[0] << " x " << inputy_ptr[0] << " + "
<< inputx_ptr[1] << " x " << inputy_ptr[0 + 3] << " = "
<< output_mul_ptr[0] << std::endl;
DLOG << inputx_ptr[0] << " x " << inputy_ptr[0] << " + "
<< inputx_ptr[1] << " x " << inputy_ptr[0 + 3] << " = "
<< output_mul_ptr[0];
}
} // namespace test
} // namespace paddle_mobile
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册