提交 bd09d517 编写于 作者: X xingzhaolong

Merge branch 'xzl/incubate/lite' into 'incubate/lite'

Refine Conv compute strategy choose.

See merge request inference/paddlelite!80
@@ -34,6 +34,7 @@ double time_diff(Time t1, Time t2) {
void Run(const char* model_dir, int repeat) {
#ifdef LITE_WITH_ARM
  DeviceInfo::Init();
DeviceInfo::Global().SetRunMode(LITE_POWER_HIGH, 1);
#endif
  lite::Predictor predictor;
  std::vector<Place> valid_places({
@@ -52,6 +53,7 @@ void Run(const char* model_dir, int repeat) {
    data[i] = 1;
  }
for (int i = 0; i < 10; i++) predictor.Run();
  auto time1 = time();
  for (int i = 0; i < repeat; i++) predictor.Run();
  auto time2 = time();
@@ -60,10 +62,16 @@ void Run(const char* model_dir, int repeat) {
  auto* out = predictor.GetOutput(0);
  LOG(INFO) << out << " memory size " << out->data_size();
LOG(INFO) << "out " << out->data<float>()[0];
LOG(INFO) << "out " << out->data<float>()[1];
  LOG(INFO) << "dims " << out->dims();
  LOG(INFO) << "out data size: " << out->data_size();
/*
float sum = 0.;
for (int i = 0; i < out->data_size(); i++) {
LOG(INFO) << "out " << out->data<float>()[i];
sum += out->data<float>()[i];
}
LOG(INFO) << sum;
*/
}
}  // namespace lite
......
@@ -74,7 +74,7 @@ void ConvCompute::PrepareForRun() {
  } else if (param.groups == 1 && kw == 3 && stride == 2 && kps_equal &&
             no_dilation) {
    // direct conv impl
-   impl_ = new lite::arm::math::DirectConv<PRECISION(kFloat)>;
+   impl_ = new lite::arm::math::GemmLikeConv<PRECISION(kFloat)>;
    VLOG(3) << "invoking direct conv";
  } else {
    impl_ = new lite::arm::math::GemmLikeConv<PRECISION(kFloat)>;
@@ -123,8 +123,7 @@ void ConvComputeInt8<Ptype_out>::PrepareForRun() {
  // weight is int8 and bias is int32 so do not need trans
  if (param.groups == ic && ic == oc && kps_equal && no_dilation && flag_dw) {
-   // impl_ = new lite::arm::math::DepthwiseConvInt8<Ptype_out>;
-   impl_ = new lite::arm::math::GemmLikeConvInt8<Ptype_out>;
+   impl_ = new lite::arm::math::DepthwiseConvInt8<Ptype_out>;
    VLOG(3) << "Run DepthwiseConv Int8";
  } else if (param.groups == 1 && kw == 3 && (sw == 1 || sw == 2) &&
             kps_equal && no_dilation) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册