提交 51094216 编写于 作者: 朔-望

/test contains many source files check style

上级 16995e56
......@@ -3,9 +3,9 @@ repos:
sha: v1.0.1
hooks:
- id: remove-crlf
files: (src).*\.(md|py|mm|swift|java|c|cc|cxx|cpp|cu|h|hpp|hxx)$
files: (src/).*\.(md|py|mm|swift|java|c|cc|cxx|cpp|cu|h|hpp|hxx)$
- id: remove-tabs
files: (src).*\.(md|py|mm|swift|java|c|cc|cxx|cpp|cu|h|hpp|hxx)$
files: (test/|src/).*\.(md|py|mm|swift|java|c|cc|cxx|cpp|cu|h|hpp|hxx)$
- repo: https://github.com/pre-commit/pre-commit-hooks
sha: 5bf6c09bfa1297d3692cadd621ef95f1284e33c0
......@@ -16,9 +16,9 @@ repos:
- id: detect-private-key
files: (?!.*tar.gz)^.*$
- id: end-of-file-fixer
files: (src).*\.(md|py|mm|swift|java|c|cc|cxx|cpp|h|hpp|hxx)$
files: (test/|src/).*\.(md|py|mm|swift|java|c|cc|cxx|cpp|h|hpp|hxx)$
- id: trailing-whitespace
files: (src).*\.(md|py|mm|swift|java|c|cc|cxx|cpp|h|hpp|hxx)$
files: (test/|src/).*\.(md|py|mm|swift|java|c|cc|cxx|cpp|h|hpp|hxx)$
- repo: local
hooks:
......@@ -26,7 +26,7 @@ repos:
name: copyright
entry: python ./tools/pre-commit.hooks/copyright.hook
language: system
files: (src).*\.(c|cc|cxx|cpp|h|hpp|hxx|py)$
files: (test/|src/).*\.(c|cc|cxx|cpp|h|hpp|hxx|py)$
exclude: (?!.*third_party)^.*$ | (?!.*book)^.*$
- repo: local
......@@ -36,17 +36,7 @@ repos:
description: Format files with ClangFormat.
entry: bash ./tools/pre-commit.hooks/clang-format.hook -i
language: system
files: (src).*\.(c|cc|cxx|cpp|h|hpp|hxx)$
#
#- repo: local
# hooks:
# - id: clang-tidy
# name: clang-tidy
# description: Check C++ code style using clang-tidy.
# entry: bash ./tools/pre-commit.hooks/.clang-tidy.hook -i
# language: system
# files: (src).*\.(c|cc|cxx|cpp|h|hpp|hxx)$
files: (test/|src/).*\.(c|cc|cxx|cpp|h|hpp|hxx)$
- repo: local
hooks:
......@@ -55,6 +45,16 @@ repos:
description: Check C++ code style using cpplint.
entry: bash ./tools/pre-commit.hooks/cpplint.hook
language: system
files: (test|src).*\.(c|cc|cxx|cpp|h|hpp|hxx)$
files: (test/|src/).*\.(c|cc|cxx|cpp|h|hpp|hxx)$
exclude: (?!.*third_party)^.*$ | (?!.*book)^.*$ | .*\.pb\.cpp
#
#- repo: local
# hooks:
# - id: clang-tidy
# name: clang-tidy
# description: Check C++ code style using clang-tidy.
# entry: bash ./tools/pre-commit.hooks/.clang-tidy.hook -i
# language: system
# files: (src).*\.(c|cc|cxx|cpp|h|hpp|hxx)$
......@@ -15,7 +15,6 @@ limitations under the License. */
#include "common/log.h"
int main() {
DLOGF("DASJFDAFJ%d -- %f", 12345, 344.234);
LOGF(paddle_mobile::kLOG_DEBUG, "DASJFDAFJ%d -- %f", 12345, 344.234);
......
......@@ -12,13 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "executor_for_test.h"
#include "framework/executor_for_test.h"
template <typename DeviceType, typename OpType>
Executor4Test<DeviceType, OpType>::Executor4Test(const Program<DeviceType> p,
std::string op_type)
: Executor<DeviceType>(p) {
if (this->program_.originProgram == nullptr) {
LOG(paddle_mobile::LogLevel::kLOG_ERROR)
<< "to_predict_program_ == nullptr";
......@@ -27,7 +26,7 @@ Executor4Test<DeviceType, OpType>::Executor4Test(const Program<DeviceType> p,
const std::vector<std::shared_ptr<BlockDesc>> blocks =
this->to_predict_program_->Blocks();
for (std::shared_ptr<BlockDesc> block_desc: blocks) {
for (std::shared_ptr<BlockDesc> block_desc : blocks) {
std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
for (std::shared_ptr<OpDesc> op : ops) {
if (op->Type() == op_type) {
......@@ -43,9 +42,8 @@ Executor4Test<DeviceType, OpType>::Executor4Test(const Program<DeviceType> p,
}
template <typename DeviceType, typename OpType>
std::shared_ptr<Tensor>
Executor4Test<DeviceType, OpType>::predict(const Tensor &t, std::string input,
std::string output, DDim &dDim) {
std::shared_ptr<Tensor> Executor4Test<DeviceType, OpType>::predict(
const Tensor &t, std::string input, std::string output, const DDim &dDim) {
auto scope = this->program_.scope;
Variable *g_feed_value = scope->Var(input);
auto tensor = g_feed_value->GetMutable<Tensor>();
......@@ -68,5 +66,5 @@ template class Executor4Test<
paddle_mobile::CPU,
paddle_mobile::operators::PoolOp<paddle_mobile::CPU, float>>;
template class Executor4Test<
paddle_mobile::CPU,
paddle_mobile::operators::SoftmaxOp<paddle_mobile::CPU, float>>;
paddle_mobile::CPU,
paddle_mobile::operators::SoftmaxOp<paddle_mobile::CPU, float>>;
......@@ -21,20 +21,20 @@ limitations under the License. */
#include "operators/pool_op.h"
#include "operators/softmax_op.h"
using paddle_mobile::framework::Tensor;
using paddle_mobile::framework::LoDTensor;
using std::string;
using paddle_mobile::framework::BlockDesc;
using paddle_mobile::framework::DDim;
using paddle_mobile::framework::Executor;
using paddle_mobile::framework::Program;
using paddle_mobile::framework::BlockDesc;
using paddle_mobile::framework::LoDTensor;
using paddle_mobile::framework::OpDesc;
using paddle_mobile::framework::Program;
using paddle_mobile::framework::Tensor;
using paddle_mobile::framework::Variable;
using std::string;
// Test-only wrapper around Executor that sets up and runs a single operator.
// NOTE(review): the captured span contained both the pre-change declaration
// (`DDim &dDim`) and the post-change one (`const DDim &dDim`), which is
// invalid C++; resolved to the const-ref form matching the definition in
// executor_for_test.cpp (`predict(const Tensor &, ..., const DDim &)`).
template <typename DeviceType, typename OpType>
class Executor4Test : public Executor<DeviceType> {
 public:
  // Builds an executor restricted to the single op named `op_type`
  // found in `p`'s program description.
  Executor4Test(Program<DeviceType> p, string op_type);

  // Feeds tensor `t` into variable `input`, runs the op, and returns the
  // tensor stored in variable `output`; `dDim` is the expected output shape.
  std::shared_ptr<Tensor> predict(const Tensor &t, string input, string output,
                                  const DDim &dDim);
};
......@@ -12,13 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "io.h"
#include "/io.h"
// Smoke test: loads the googlenet model description and exits.
// NOTE(review): the captured span interleaved old and new diff lines
// (duplicated comments, doubled closing brace with a "no newline at end of
// file" marker); resolved to the post-change version, which also restores
// the trailing newline.
int main() {
  paddle_mobile::Loader<paddle_mobile::CPU> loader;
  // Model paths used during development (relative to the build directory):
  // ../../../test/models/googlenet
  // ../../../test/models/mobilenet
  auto program = loader.Load(std::string("../models/googlenet"));
  return 0;
}
......@@ -12,23 +12,19 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "/io.h"
#include "framework/program/program-optimize/node.h"
#include "framework/program/program-optimize/program_optimize.h"
#include "io.h"
using namespace paddle_mobile;
using namespace paddle_mobile::framework;
int main() {
Loader<paddle_mobile::CPU> loader;
// "../../../test/models/googlenet"
auto program = loader.Load("../models/googlenet");
ProgramOptimize optimize;
// program.originProgram->Description("origin");
// program.originProgram->Description("origin");
auto optimize_program = optimize.FushionOptimize(program.originProgram);
if (optimize_program != nullptr) {
// optimize_program->Description("optimize");
// optimize_program->Description("optimize");
} else {
LOG(kLOG_ERROR) << "optimize_program is null";
}
......
......@@ -19,8 +19,9 @@ limitations under the License. */
namespace paddle_mobile {
namespace framework {
template <typename Dtype> class TestBatchNormOp {
public:
template <typename Dtype>
class TestBatchNormOp {
public:
explicit TestBatchNormOp(const Program<Dtype> p) : program_(p) {
if (use_optimize_) {
to_predict_program_ = program_.optimizeProgram;
......@@ -59,8 +60,9 @@ public:
}
}
std::shared_ptr<Tensor> predict_bn(Tensor &t1, Tensor &t2, Tensor &t3,
Tensor &t4, Tensor &t5) {
std::shared_ptr<Tensor> predict_bn(const Tensor &t1, const Tensor &t2,
const Tensor &t3, const Tensor &t4,
const Tensor &t5) {
// feed
auto scope = program_.scope;
Variable *x1_feed_value = scope->Var("conv2d_0.tmp_0");
......@@ -96,7 +98,7 @@ public:
return out_tensor;
}
private:
private:
const framework::Program<Dtype> program_;
std::shared_ptr<ProgramDesc> to_predict_program_;
std::map<framework::BlockDesc,
......@@ -117,8 +119,8 @@ private:
};
template class TestBatchNormOp<CPU>;
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
int main() {
DLOG << "----------**********----------";
......
......@@ -19,8 +19,9 @@ limitations under the License. */
namespace paddle_mobile {
namespace framework {
template <typename Dtype> class TestConcatOp {
public:
template <typename Dtype>
class TestConcatOp {
public:
explicit TestConcatOp(const Program<Dtype> p) : program_(p) {
if (use_optimize_) {
to_predict_program_ = program_.optimizeProgram;
......@@ -55,8 +56,8 @@ public:
}
}
std::shared_ptr<Tensor> predict_concat(Tensor &t1, Tensor &t2, Tensor &t3,
Tensor &t4) {
std::shared_ptr<Tensor> predict_concat(const Tensor &t1, const Tensor &t2,
const Tensor &t3, const Tensor &t4) {
// feed
auto scope = program_.scope;
Variable *x1_feed_value = scope->Var("conv2d_3.tmp_1");
......@@ -88,7 +89,7 @@ public:
return out_tensor;
}
private:
private:
const framework::Program<Dtype> program_;
std::shared_ptr<ProgramDesc> to_predict_program_;
std::map<framework::BlockDesc,
......@@ -109,8 +110,8 @@ private:
};
template class TestConcatOp<CPU>;
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
int main() {
DLOG << "----------**********----------";
......
......@@ -12,13 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "../framework/executor_for_test.h"
#include "../test_helper.h"
#include "./io.h"
#include <io>
#include "framework/executor_for_test.h"
#include "framework/test_helper.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
//../models/image_classification_resnet.inference.model
// ../models/image_classification_resnet.inference.model
auto program = loader.Load(std::string("../models/googlenet"));
if (program.originProgram == nullptr) {
DLOG << "program file read fail";
......@@ -32,8 +32,7 @@ int main() {
SetupTensor<float>(&input, {1, 3, 32, 32}, static_cast<float>(0),
static_cast<float>(1));
auto out_ddim = paddle_mobile::framework::make_ddim({1, 64, 56, 56});
auto output =
executor.predict(input, "data", "conv2d_0.tmp_0", out_ddim);
auto output = executor.predict(input, "data", "conv2d_0.tmp_0", out_ddim);
auto output_ptr = output->data<float>();
for (int j = 0; j < output->numel(); ++j) {
......
......@@ -19,8 +19,9 @@ limitations under the License. */
namespace paddle_mobile {
namespace framework {
template <typename Dtype> class TestElementwiseAddOp {
public:
template <typename Dtype>
class TestElementwiseAddOp {
public:
explicit TestElementwiseAddOp(const Program<Dtype> p) : program_(p) {
if (use_optimize_) {
to_predict_program_ = program_.optimizeProgram;
......@@ -59,7 +60,7 @@ public:
}
}
std::shared_ptr<Tensor> predict_add(Tensor &t1, Tensor &t2) {
std::shared_ptr<Tensor> predict_add(const Tensor &t1, const Tensor &t2) {
// feed
auto scope = program_.scope;
Variable *x_feed_value = scope->Var("batch_norm_2.tmp_2");
......@@ -83,7 +84,7 @@ public:
return out_tensor;
}
private:
private:
const framework::Program<Dtype> program_;
std::shared_ptr<ProgramDesc> to_predict_program_;
std::map<framework::BlockDesc,
......@@ -103,8 +104,8 @@ private:
};
template class TestElementwiseAddOp<CPU>;
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
int main() {
DLOG << "----------**********----------";
DLOG << "begin to run ElementAddOp Test";
......
......@@ -19,8 +19,9 @@ limitations under the License. */
namespace paddle_mobile {
namespace framework {
template <typename Dtype> class TestLrnOp {
public:
template <typename Dtype>
class TestLrnOp {
public:
explicit TestLrnOp(const Program<Dtype> p) : program_(p) {
if (use_optimize_) {
to_predict_program_ = program_.optimizeProgram;
......@@ -57,7 +58,7 @@ public:
}
}
std::shared_ptr<Tensor> predict_lrn(Tensor &t1) {
std::shared_ptr<Tensor> predict_lrn(const Tensor &t1) {
// feed
auto scope = program_.scope;
Variable *x1_feed_value = scope->Var("pool2d_0.tmp_0");
......@@ -77,7 +78,7 @@ public:
return out_tensor;
}
private:
private:
const framework::Program<Dtype> program_;
std::shared_ptr<ProgramDesc> to_predict_program_;
std::map<framework::BlockDesc,
......@@ -97,8 +98,8 @@ private:
};
template class TestLrnOp<CPU>;
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
int main() {
DLOG << "----------**********----------";
......
......@@ -19,8 +19,9 @@ limitations under the License. */
namespace paddle_mobile {
namespace framework {
template <typename Dtype> class TestMulOp {
public:
template <typename Dtype>
class TestMulOp {
public:
explicit TestMulOp(const Program<Dtype> p) : program_(p) {
if (use_optimize_) {
to_predict_program_ = program_.optimizeProgram;
......@@ -59,7 +60,7 @@ public:
}
}
std::shared_ptr<Tensor> predict_mul(Tensor &t1, Tensor &t2) {
std::shared_ptr<Tensor> predict_mul(const Tensor &t1, const Tensor &t2) {
// feed
auto scope = program_.scope;
Variable *x_feed_value = scope->Var("pool2d_0.tmp_0");
......@@ -83,7 +84,7 @@ public:
return out_tensor;
}
private:
private:
const framework::Program<Dtype> program_;
std::shared_ptr<ProgramDesc> to_predict_program_;
std::map<framework::BlockDesc,
......@@ -103,8 +104,8 @@ private:
};
template class TestMulOp<CPU>;
} // namespace framework
} // namespace paddle_mobile
} // namespace framework
} // namespace paddle_mobile
int main() {
DLOG << "----------**********----------";
......
......@@ -12,11 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifndef TEST_TEST_INCLUDE_H_
#define TEST_TEST_INCLUDE_H_
#pragma once
#include <random>
#include "common/log.h"
#include "framework/ddim.h"
#include "framework/tensor.h"
#include <random>
template <typename T>
void SetupTensor(paddle_mobile::framework::Tensor *input,
......@@ -30,3 +33,5 @@ void SetupTensor(paddle_mobile::framework::Tensor *input,
input_ptr[i] = static_cast<T>(uniform_dist(rng) * (upper - lower) + lower);
}
}
#endif
......@@ -12,18 +12,23 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifndef TEST_TEST_INCLUDE_H_
#define TEST_TEST_INCLUDE_H_
#include <map>
#include <string>
#include <vector>
#include "framework/program/block_desc.h"
#include "./test_helper.h"
#include "/io.h"
#include "framework/framework.pb.h"
#include "framework/lod_tensor.h"
#include "framework/operator.h"
#include "framework/program/block_desc.h"
#include "framework/program/program.h"
#include "framework/program/program_desc.h"
#include "framework/scope.h"
#include "framework/tensor.h"
#include "framework/variable.h"
#include "io.h"
#include "test_helper.h"
#endif // TEST_TEST_INCLUDE_H_
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册