Commit f153342b authored by 朔-望

test tidy

---
Checks: 'clang-diagnostic-*,clang-analyzer-*'
WarningsAsErrors: ''
HeaderFilterRegex: ''
AnalyzeTemporaryDtors: false
FormatStyle: none
User: allonli
CheckOptions:
- key: google-readability-braces-around-statements.ShortStatementLines
value: '1'
- key: google-readability-function-size.StatementThreshold
value: '800'
- key: google-readability-namespace-comments.ShortNamespaceLines
value: '10'
- key: google-readability-namespace-comments.SpacesBeforeComments
value: '2'
- key: modernize-loop-convert.MaxCopySize
value: '16'
- key: modernize-loop-convert.MinConfidence
value: reasonable
- key: modernize-loop-convert.NamingStyle
value: CamelCase
- key: modernize-pass-by-value.IncludeStyle
value: llvm
- key: modernize-replace-auto-ptr.IncludeStyle
value: llvm
- key: modernize-use-nullptr.NullMacros
value: 'NULL'
Checks:
    'clang-diagnostic-*,clang-analyzer-*,-*,
    *,
    -android-*,
    -bugprone-bool-pointer-implicit-conversion,
    -cert-env33-c,
    -cert-dcl50-cpp,
    -cert-dcl59-cpp,
    -cppcoreguidelines-no-malloc,
    -cppcoreguidelines-owning-memory,
    -cppcoreguidelines-pro-bounds-array-to-pointer-decay,
    -cppcoreguidelines-pro-bounds-constant-array-index,
    -cppcoreguidelines-pro-bounds-pointer-arithmetic,
    -cppcoreguidelines-pro-type-const-cast,
    -cppcoreguidelines-pro-type-cstyle-cast,
    -cppcoreguidelines-pro-type-reinterpret-cast,
    -cppcoreguidelines-pro-type-union-access,
    -cppcoreguidelines-pro-type-vararg,
    -cppcoreguidelines-special-member-functions,
    -fuchsia-*,
    -google-*,
    google-default-arguments,
    google-explicit-constructor,
    google-runtime-member-string-references,
    google-runtime-operator,
    -hicpp-braces-around-statements,
    -hicpp-named-parameter,
    -hicpp-no-array-decay,
    -hicpp-no-assembler,
    -hicpp-no-malloc,
    -hicpp-function-size,
    -hicpp-special-member-functions,
    -hicpp-vararg,
    -llvm-*,
    -objc-*,
    -readability-else-after-return,
    -readability-implicit-bool-conversion,
    -readability-named-parameter,
    -readability-simplify-boolean-expr,
    -readability-braces-around-statements,
    -readability-identifier-naming,
    -readability-function-size,
    -readability-redundant-member-init,
    -misc-bool-pointer-implicit-conversion,
    -misc-definitions-in-headers,
    -misc-unused-alias-decls,
    -misc-unused-parameters,
    -misc-unused-using-decls,
    -modernize-use-using,
    -modernize-use-default-member-init,
    -clang-diagnostic-*,
    -clang-analyzer-*'
...
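The new Checks value first enables every check with '*', then opts out of whole families (android, fuchsia, llvm, objc, most of google-*) while re-adding four google checks; clang-tidy processes the globs in order, so later entries override earlier ones, and the CheckOptions above tune the survivors. A rough illustration of the resulting behavior, using a hypothetical example.cpp that is not part of the commit:

// example.cpp (hypothetical): what this configuration flags and ignores.
#include <cstddef>

int *make_buffer() {
  int *p = NULL; // flagged by modernize-use-nullptr, since NULL is listed in NullMacros
  return p;
}

void resize(int) {} // ignored: readability-named-parameter and
                    // hicpp-named-parameter are both disabled above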
@@ -45,7 +45,6 @@ script:
- |
function timeout() { perl -e 'alarm shift; exec @ARGV' "$@"; }
echo "========allonli======="
- |
timeout 600 .travis/${JOB}.sh # 10min timeout
RESULT=$?; if [ $RESULT -eq 0 ] || [ $RESULT -eq 142 ]; then true; else exit 1; fi;
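The perl one-liner is a stand-in for coreutils timeout on platforms where it is missing: alarm shift arms SIGALRM after the given number of seconds, and exec @ARGV replaces perl with the real job. A process killed by SIGALRM exits with status 128 + 14 = 142, which is exactly the code the next line whitelists, so a timed-out job is tolerated rather than failed. A minimal C++ sketch of the same mechanism, assuming a POSIX system:

// timeout.cpp (sketch): usage ./timeout SECONDS COMMAND [ARGS...]
#include <cstdlib>
#include <unistd.h>

int main(int argc, char *argv[]) {
  if (argc < 3) return 1;
  alarm(static_cast<unsigned>(std::atoi(argv[1]))); // SIGALRM fires after N seconds
  execvp(argv[2], &argv[2]); // replace this process; the pending alarm survives exec
  return 127;                // reached only if exec itself failed
}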
@@ -5,7 +5,8 @@ add_definitions(-DPADDLE_MOBILE_DEBUG="true")
set(CMAKE_BUILD_TYPE RelWithDebInfo)
set(CMAKE_VERBOSE_MAKEFILE on)
set(CMAKE_VERBOSE_MAKEFILE ON)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY build)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY build)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY build)
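CMAKE_EXPORT_COMPILE_COMMANDS makes CMake emit compile_commands.json in the build directory, presumably so the new .clang-tidy above has a compilation database to work from (pointing clang-tidy at the build directory with -p lets it replay each file's exact compile flags).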
@@ -27,66 +27,67 @@ SOFTWARE.
namespace paddle_mobile {
enum LogLevel {
kNO_LOG,
kLOG_ERROR,
kLOG_WARNING,
kLOG_INFO,
kLOG_DEBUG,
kLOG_DEBUG1,
kLOG_DEBUG2,
kLOG_DEBUG3,
kLOG_DEBUG4
};
// log level
static LogLevel log_level = kLOG_DEBUG4;
static std::vector<std::string> logs{"NO", "ERROR ", "WARNING",
"INFO ", "DEBUG ", "DEBUG1 ",
"DEBUG2 ", "DEBUG3 ", "DEBUG4 "};
struct ToLog;
struct Print {
friend struct ToLog;
template<typename T> Print &operator<<(T const &value) {
buffer_ << value;
return *this;
}
private:
void print(LogLevel level) {
buffer_ << std::endl;
if (level == kLOG_ERROR) {
std::cerr << buffer_.str();
} else {
std::cout << buffer_.str();
}
}
std::ostringstream buffer_;
};
struct ToLog {
ToLog(LogLevel level = kLOG_DEBUG, const std::string &info = "")
: level_(level) {
unsigned blanks =
(unsigned) (level > kLOG_DEBUG ? (level - kLOG_DEBUG) * 4 : 1);
printer_ << logs[level] << " " << info << ":"
<< std::string(blanks, ' ');
}
template<typename T> ToLog &operator<<(T const &value) {
printer_ << value;
return *this;
}
~ToLog() { printer_.print(level_); }
private:
LogLevel level_;
Print printer_;
};
enum LogLevel {
kNO_LOG,
kLOG_ERROR,
kLOG_WARNING,
kLOG_INFO,
kLOG_DEBUG,
kLOG_DEBUG1,
kLOG_DEBUG2,
kLOG_DEBUG3,
kLOG_DEBUG4
};
// log level
static LogLevel log_level = kLOG_DEBUG4;
static std::vector<std::string> logs{"NO", "ERROR ", "WARNING",
"INFO ", "DEBUG ", "DEBUG1 ",
"DEBUG2 ", "DEBUG3 ", "DEBUG4 "};
struct ToLog;
struct Print;
struct Print {
friend struct ToLog;
template <typename T> Print &operator<<(T const &value) {
buffer_ << value;
return *this;
}
private:
void print(LogLevel level) {
buffer_ << std::endl;
if (level == kLOG_ERROR) {
std::cerr << buffer_.str();
} else {
std::cout << buffer_.str();
}
}
std::ostringstream buffer_;
};
struct ToLog {
ToLog(LogLevel level = kLOG_DEBUG, const std::string &info = "")
: level_(level) {
unsigned blanks =
(unsigned)(level > kLOG_DEBUG ? (level - kLOG_DEBUG) * 4 : 1);
printer_ << logs[level] << " " << info << ":"
<< std::string(blanks, ' ');
}
template <typename T> ToLog &operator<<(T const &value) {
printer_ << value;
return *this;
}
~ToLog() { printer_.print(level_); }
private:
LogLevel level_;
Print printer_;
};
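Print and ToLog split the work: Print owns the buffer, while ToLog stamps the level and tag in its constructor, forwards every operator<<, and flushes through Print in its destructor. Since the temporary lives until the end of the full expression, an entire chain of << calls is emitted as one atomic line. A usage sketch, for illustration only (not from the commit):

// Inside some function: the temporary dies at the ';', so ~ToLog() prints
// the buffered "INFO     main:  step 1 finished" in a single write.
paddle_mobile::ToLog(paddle_mobile::kLOG_INFO, "main")
    << "step " << 1 << " finished";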
#define LOG(level) \
if (level > paddle_mobile::log_level) { \
@@ -100,6 +101,8 @@ private:
.str())
#define DLOG \
if (paddle_mobile::kLOG_DEBUG > paddle_mobile::log_level) { \
} else \
paddle_mobile::ToLog( \
paddle_mobile::kLOG_DEBUG, \
(std::stringstream() \
@@ -109,6 +112,16 @@ private:
.str())
}
#define LOGF(level, format, ...) \
if (level > paddle_mobile::log_level) { \
} else \
printf(format, ##__VA_ARGS__)
#define DLOGF(format, ...) \
if (paddle_mobile::kLOG_DEBUG > paddle_mobile::log_level) { \
} else \
printf(format, ##__VA_ARGS__)
#else
namespace paddle_mobile {
@@ -150,5 +163,10 @@ namespace paddle_mobile {
if (true) { \
} else \
paddle_mobile::ToLog(paddle_mobile::kLOG_DEBUG)
#define LOGF(level, format, ...)
#define DLOGF(format, ...)
}
#endif
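All of these macros rely on the same if (...) { } else shape: when the level is filtered out, the empty branch is taken and the streamed operands are never evaluated; in the release branch above the condition is the constant true, so the else arm still type-checks but is dead code the compiler can drop (LOGF and DLOGF simply expand to nothing there). The trailing else also lets a macro sit inside an unbraced if. A sketch of both properties, where ok and expensive_to_format() are hypothetical:

// 1. Arguments of a filtered-out statement are never evaluated:
LOG(paddle_mobile::kLOG_DEBUG4) << expensive_to_format();
// 2. The built-in else keeps nesting safe, with no dangling-else surprise:
if (ok)
  LOG(paddle_mobile::kLOG_INFO) << "ok";
else
  LOG(paddle_mobile::kLOG_ERROR) << "something went wrong";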
@@ -17,7 +17,7 @@ SOFTWARE.
==============================================================================*/
#include <fstream>
#include <iostream>
//#include <iostream>
#include "common/log.h"
#include "framework/framework.pb.h"
@@ -34,12 +34,12 @@ namespace paddle_mobile {
const std::vector<std::shared_ptr<BlockDesc>> blocks =
to_predict_program_->Blocks();
// std::cout << " **block size " << blocks.size() << std::endl;
// DLOG << " **block size " << blocks.size();
for (int i = 0; i < blocks.size(); ++i) {
std::shared_ptr<BlockDesc> block_desc = blocks[i];
std::vector<std::shared_ptr<OpDesc>> ops =
block_desc->Ops();
// std::cout << " ops " << ops.size() << std::endl;
// DLOG << " ops " << ops.size();
for (int j = 0; j < ops.size(); ++j) {
std::shared_ptr<OpDesc> op = ops[j];
// if (op->Type() ==
@@ -47,35 +47,26 @@ namespace paddle_mobile {
// if
// (op->GetAttrMap().at("axis").Get<int>()
// != -1) {
// std::cout
// << "attr: axis = "
// <<
// op->GetAttrMap().at("axis").Get<int>()
// << std::endl;
// DLOG << "attr: axis =
// "
// <<
// op->GetAttrMap().at("axis").Get<int>();
// }
// }
// std::cout << "op:" << op->Type() << std::endl;
// DLOG << "op:" << op->Type();
if (op->Type() == "elementwise_add" &&
op->Input("X")[0] == "batch_norm_2.tmp_2") {
std::cout << " elementwise_add attr size: "
<< op->GetAttrMap().size() << std::endl;
std::cout
<< " inputs size: " << op->GetInputs().size()
<< std::endl;
std::cout
<< " outputs size: " << op->GetOutputs().size()
<< std::endl;
std::cout << " Input X is : " << op->Input("X")[0]
<< std::endl;
std::cout << " Input Y is : " << op->Input("Y")[0]
<< std::endl;
std::cout
<< " Output Out is : " << op->Output("Out")[0]
<< std::endl;
DLOG << " elementwise_add attr size: "
<< op->GetAttrMap().size();
DLOG << " inputs size: " << op->GetInputs().size();
DLOG << " outputs size: "
<< op->GetOutputs().size();
DLOG << " Input X is : " << op->Input("X")[0];
DLOG << " Input Y is : " << op->Input("Y")[0];
DLOG << " Output Out is : " << op->Output("Out")[0];
Attribute axis_attr = op->GetAttrMap().at("axis");
int axis = axis_attr.Get<int>();
std::cout << " Attr axis is : " << axis
<< std::endl;
DLOG << " Attr axis is : " << axis;
std::shared_ptr<
operators::ElementwiseAddOp<Dtype, float>>
@@ -104,10 +95,8 @@ namespace paddle_mobile {
Variable *con_output = scope->Var("elementwise_add_0.tmp_0");
Tensor *output_tensor = con_output->GetMutable<Tensor>();
output_tensor->mutable_data<float>({1, 3, 224, 224});
// std::cout << typeid(output_tensor).name() << std::endl;
// std::cout << "output_tensor dims: " << output_tensor->dims()
// <<
// std::endl;
// DLOG << typeid(output_tensor).name();
// DLOG << "output_tensor dims: " << output_tensor->dims();
std::shared_ptr<Tensor> out_tensor =
std::make_shared<LoDTensor>();
@@ -131,7 +120,7 @@ namespace paddle_mobile {
for (int j = 0;
j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
auto op = ops_of_block_[*to_predict_block.get()][j];
std::cout << "op -> run()" << std::endl;
DLOG << "op -> run()";
op->Run();
}
}
@@ -142,8 +131,8 @@ namespace paddle_mobile {
namespace test {
void testElementwiseAdd() {
std::cout << "----------**********----------" << std::endl;
std::cout << "begin to run ElementAddOp Test" << std::endl;
DLOG << "----------**********----------";
DLOG << "begin to run ElementAddOp Test";
paddle_mobile::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(
std::string("../../test/models/"
@@ -165,18 +154,16 @@ namespace paddle_mobile {
auto output_add = testElementwiseAddOp.predict_add(inputx, inputy);
float *output_add_ptr = output_add->data<float>();
for (int j = 0; j < output_add->numel(); ++j) {
// std::cout << "value of output: " << output_add_ptr[j] <<
// std::endl;
}
// for (int j = 0; j < output_add->numel(); ++j) {
// DLOG << "value of output: " << output_add_ptr[j];
// }
/// output (1,3,224,224)
std::cout << "output memory size : " << output_add->memory_size()
<< std::endl;
std::cout << "output numel : " << output_add->numel() << std::endl;
DLOG << "output memory size : " << output_add->memory_size();
DLOG << "output numel : " << output_add->numel();
std::cout << inputx_ptr[226] << " + " << inputy_ptr[2] << " = "
<< output_add_ptr[226] << std::endl;
DLOG << inputx_ptr[226] << " + " << inputy_ptr[2] << " = "
<< output_add_ptr[226];
}
} // namespace test
} // namespace paddle_mobile
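The closing DLOG checks a single broadcast element instead of comparing whole tensors: X is (1, 3, 224, 224), so flat index 226 sits at row 1, column 2 of the first 224x224 plane, and 226 % 224 == 2 explains the pairing with inputy_ptr[2]. A sketch of that arithmetic, assuming Y is broadcast over the innermost dimension (Y's shape itself is not shown in this hunk):

#include <cstddef>
#include <vector>

// Elementwise add with the innermost axis broadcast (assumed semantics):
std::vector<float> broadcast_add(const std::vector<float> &x,
                                 const std::vector<float> &y) {
  std::vector<float> out(x.size());
  for (std::size_t i = 0; i < x.size(); ++i)
    out[i] = x[i] + y[i % y.size()]; // i == 226, y.size() == 224 -> y[2]
  return out;
}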
@@ -34,57 +34,47 @@ namespace paddle_mobile {
const std::vector<std::shared_ptr<BlockDesc>> blocks =
to_predict_program_->Blocks();
// std::cout << " **block size " << blocks.size() << std::endl;
// DLOG << " **block size " << blocks.size();
for (int i = 0; i < blocks.size(); ++i) {
std::shared_ptr<BlockDesc> block_desc = blocks[i];
std::vector<std::shared_ptr<OpDesc>> ops =
block_desc->Ops();
// std::cout << " ops " << ops.size() << std::endl;
// DLOG << " ops " << ops.size();
for (int j = 0; j < ops.size(); ++j) {
std::shared_ptr<OpDesc> op = ops[j];
if (op->Type() == "mul") {
std::cout << "x_num_col_dims : "
<< op->GetAttrMap()
.at("x_num_col_dims")
.Get<int>()
<< std::endl;
std::cout << "y_num_col_dims : "
<< op->GetAttrMap()
.at("y_num_col_dims")
.Get<int>()
<< std::endl;
std::cout << " Input X is : " << op->Input("X")[0]
<< std::endl;
}
// std::cout << "op:" << op->Type() << std::endl;
// if (op->Type() == "mul") {
// DLOG << "x_num_col_dims :
// "
// << op->GetAttrMap()
// .at("x_num_col_dims")
// .Get<int>();
// DLOG << "y_num_col_dims :
// "
// << op->GetAttrMap()
// .at("y_num_col_dims")
// .Get<int>();
// DLOG << " Input X is : "
// << op->Input("X")[0];
// }
// DLOG << "op:" << op->Type();
if (op->Type() == "mul" &&
op->Input("X")[0] == "pool2d_0.tmp_0") {
std::cout
<< " mul attr size: " << op->GetAttrMap().size()
<< std::endl;
std::cout
<< " inputs size: " << op->GetInputs().size()
<< std::endl;
std::cout
<< " outputs size: " << op->GetOutputs().size()
<< std::endl;
std::cout << " Input X is : " << op->Input("X")[0]
<< std::endl;
std::cout << " Input Y is : " << op->Input("Y")[0]
<< std::endl;
std::cout
<< " Output Out is : " << op->Output("Out")[0]
<< std::endl;
std::cout << "x_num_col_dims : "
<< op->GetAttrMap()
.at("x_num_col_dims")
.Get<int>()
<< std::endl;
std::cout << "y_num_col_dims : "
<< op->GetAttrMap()
.at("y_num_col_dims")
.Get<int>()
<< std::endl;
DLOG << " mul attr size: "
<< op->GetAttrMap().size();
DLOG << " inputs size: " << op->GetInputs().size();
DLOG << " outputs size: "
<< op->GetOutputs().size();
DLOG << " Input X is : " << op->Input("X")[0];
DLOG << " Input Y is : " << op->Input("Y")[0];
DLOG << " Output Out is : " << op->Output("Out")[0];
DLOG << "x_num_col_dims : "
<< op->GetAttrMap()
.at("x_num_col_dims")
.Get<int>();
DLOG << "y_num_col_dims : "
<< op->GetAttrMap()
.at("y_num_col_dims")
.Get<int>();
std::shared_ptr<operators::MulOp<Dtype, float>>
add = std::make_shared<
@@ -112,9 +102,8 @@ namespace paddle_mobile {
Variable *con_output = scope->Var("fc_0.tmp_0");
Tensor *output_tensor = con_output->GetMutable<Tensor>();
output_tensor->mutable_data<float>({3, 3});
// std::cout << typeid(output_tensor).name() << std::endl;
// std::cout << "output_tensor dims: " << output_tensor->dims()
// << std::endl;
// DLOG << typeid(output_tensor).name();
// DLOG << "output_tensor dims: " << output_tensor->dims();
std::shared_ptr<Tensor> out_tensor =
std::make_shared<LoDTensor>();
@@ -138,7 +127,7 @@ namespace paddle_mobile {
for (int j = 0;
j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
auto op = ops_of_block_[*to_predict_block.get()][j];
std::cout << "op -> run()" << std::endl;
DLOG << "op -> run()";
op->Run();
}
}
@@ -149,8 +138,8 @@ namespace paddle_mobile {
namespace test {
void testMul() {
std::cout << "----------**********----------" << std::endl;
std::cout << "begin to run MulOp Test" << std::endl;
DLOG << "----------**********----------";
DLOG << "begin to run MulOp Test";
paddle_mobile::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(
std::string("../../test/models/"
@@ -175,40 +164,39 @@ namespace paddle_mobile {
float *output_mul_ptr = output_mul->data<float>();
auto dimx_1 = inputx.numel() / inputx.dims()[0];
std::cout << "inputx : " << std::endl;
DLOG << " inputx : ";
for (int i = 0; i < inputx.dims()[0]; ++i) {
for (int j = 0; j < dimx_1; ++j) {
std::cout << inputx_ptr[i * dimx_1 + j] << " ";
DLOGF("%f ", inputx_ptr[i * dimx_1 + j]);
}
std::cout << std::endl;
DLOGF("\n");
}
auto dimy_1 = inputy.numel() / inputy.dims()[0];
std::cout << "inputy : " << std::endl;
DLOG << " inputy : ";
for (int i = 0; i < inputy.dims()[0]; ++i) {
for (int j = 0; j < dimy_1; ++j) {
std::cout << inputy_ptr[i * dimy_1 + j] << " ";
DLOGF("%f ", inputy_ptr[i * dimx_1 + j]);
}
std::cout << std::endl;
DLOGF("\n");
}
auto dim_output_1 = output_mul->numel() / output_mul->dims()[0];
std::cout << "output : " << std::endl;
DLOG << " output : ";
for (int i = 0; i < output_mul->dims()[0]; ++i) {
for (int j = 0; j < dim_output_1; ++j) {
std::cout << output_mul_ptr[i * dimy_1 + j] << " ";
DLOGF("%f ", output_mul_ptr[i * dimy_1 + j]);
}
std::cout << std::endl;
DLOGF("\n");
}
/// output (3,3)
std::cout << "output memory size : " << output_mul->memory_size()
<< std::endl;
std::cout << "output numel : " << output_mul->numel() << std::endl;
DLOG << "output memory size : " << output_mul->memory_size();
DLOG << "output numel : " << output_mul->numel();
std::cout << inputx_ptr[0] << " x " << inputy_ptr[0] << " + "
<< inputx_ptr[1] << " x " << inputy_ptr[0 + 3] << " = "
<< output_mul_ptr[0] << std::endl;
DLOG << inputx_ptr[0] << " x " << inputy_ptr[0] << " + "
<< inputx_ptr[1] << " x " << inputy_ptr[0 + 3] << " = "
<< output_mul_ptr[0];
}
} // namespace test
} // namespace paddle_mobile
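The final DLOG spells out one entry of a row-major matrix product: out[0] = x[0] * y[0] + x[1] * y[3] is consistent with X being 3x2 and Y being 2x3 (the output is (3, 3), and y[3] is row 1, column 0 of Y). A sketch of that indexing, under those assumed shapes:

// Row-major matmul: x is MxK, y is KxN, out is MxN (shapes inferred from the test).
void matmul(const float *x, const float *y, float *out, int M, int K, int N) {
  for (int m = 0; m < M; ++m)
    for (int n = 0; n < N; ++n) {
      float acc = 0.0f;
      for (int k = 0; k < K; ++k)
        acc += x[m * K + k] * y[k * N + n]; // out[0] = x[0]*y[0] + x[1]*y[3]
      out[m * N + n] = acc;
    }
}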
@@ -19,13 +19,19 @@ SOFTWARE.
#include "common/log.h"
int main() {
LOG(paddle_mobile::kLOG_DEBUG) << "test debug"
<< " next log";
DLOGF("DASJFDAFJ%d -- %f", 12345, 344.234);
LOG(paddle_mobile::kLOG_DEBUG) << "test debug" << " next log";
LOG(paddle_mobile::kLOG_DEBUG1) << "test debug1"
<< " next log";
LOG(paddle_mobile::kLOG_DEBUG2) << "test debug2"
<< " next log";
DLOG << "test DLOG";
LOG(paddle_mobile::kLOG_ERROR) << " error occur !";
return 0;
}
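Note that LOGF and DLOGF pass the format string straight to printf and append no newline, unlike the stream macros, which end every message with std::endl; that is why the matrix dumps above emit an explicit DLOGF("\n") after each row.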