提交 6f7f8f5d 编写于 作者: S smilejames 提交者: GitHub

Merge pull request #283 from codeWorm2015/develop

fix #282: modify executor for tests
......@@ -40,7 +40,8 @@ endif ()
#add_dependencies(paddle-mobile openblas_proj)
# gen static
ADD_LIBRARY(paddle-mobile SHARED ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H} src/operators/feed_op.cpp src/operators/feed_op.h src/operators/fetch_op.cpp src/operators/fetch_op.h)
ADD_LIBRARY(paddle-mobile SHARED ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H})
if (ANDROID)
# openblas.a need log lib
target_link_libraries(paddle-mobile protobuf-lite)
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "executor.h"
#include "lod_tensor.h"
#include "operators/conv_op.h"
// Intentionally empty: executor.cc currently provides no out-of-line
// definitions; the namespaces are kept so the translation unit stays valid.
namespace paddle_mobile {
namespace framework {} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <map>
#include <string>
#include <vector>
#include "framework.pb.h"
#include "framework/program/block_desc.h"
#include "framework/program/program.h"
#include "framework/program/program_desc.h"
#include "operator.h"
#include "scope.h"
#include "tensor.h"
#include "variable.h"
namespace paddle_mobile {
namespace framework {} // namespace framework
} // namespace paddle_mobile
......@@ -143,7 +143,6 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
std::shared_ptr<framework::Scope> scope =
std::make_shared<framework::Scope>();
program.scope = scope;
originProgramDesc->Block(0);
for (const auto &block : originProgramDesc->Blocks()) {
......@@ -168,9 +167,6 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
for (const auto &block : program_desc_proto.blocks()) {
LOG(kLOG_DEBUG) << "block: " << block.idx();
for (int j = 0; j < block.ops().size(); ++j) {
// if (j == 2) {
// break;
// }
framework::proto::OpDesc op = block.ops()[j];
LOG(kLOG_DEBUG1) << "op: " << op.type();
for (int m = 0; m < op.inputs_size(); ++m) {
......@@ -440,8 +436,15 @@ void Executor<Dtype, P>::InitMemory() {
for (const auto &block : to_predict_program_->Blocks()) {
for (const auto &var_desc : block->Vars()) {
auto var = program_.scope->Var(var_desc->Name());
auto tensor = var->template GetMutable<framework::LoDTensor>();
LoadMemory(tensor, program_.model_path + "/" + var_desc->Name());
if (var_desc->Persistable()) {
auto tensor = var->template GetMutable<framework::LoDTensor>();
LoadMemory(tensor, program_.model_path + "/" + var_desc->Name());
} else {
if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
auto tensor = var->template GetMutable<framework::Tensor>();
tensor->template mutable_data<Ptype>();
}
}
}
}
}
......@@ -473,9 +476,9 @@ std::shared_ptr<framework::Tensor> Executor<Dtype, P>::predict(
template <typename Dtype, Precision P>
void Executor<Dtype, P>::predict(const framework::Tensor &t, int block_id) {
framework::Variable *g_feed_value = program_.scope->Var("feed");
auto feed_tensor = g_feed_value->GetMutable<framework::Tensor>();
feed_tensor->ShareDataWith(t);
// framework::Variable *g_feed_value = program_.scope->Var("feed");
// auto feed_tensor = g_feed_value->GetMutable<framework::Tensor>();
// feed_tensor->ShareDataWith(t);
std::shared_ptr<framework::BlockDesc> to_predict_block =
to_predict_program_->Block(block_id);
......
......@@ -53,7 +53,7 @@ class Executor {
protected:
void InitMemory();
void LoadMemory(framework::LoDTensor *tensor, const std::string &file_path);
const framework::Program<Dtype> program_;
framework::Program<Dtype> program_;
std::shared_ptr<framework::ProgramDesc> to_predict_program_;
void predict(const framework::Tensor &t, int block_id);
std::map<framework::BlockDesc,
......
......@@ -80,5 +80,5 @@ ADD_EXECUTABLE(test-enforce common/test_enforce.cpp)
target_link_libraries(test-enforce paddle-mobile)
# gen test
ADD_EXECUTABLE(test-googlenet net/test-googlenet.cpp test_helper.h test_include.h executor_for_test.h)
ADD_EXECUTABLE(test-googlenet net/test_googlenet.cpp test_helper.h test_include.h executor_for_test.h)
target_link_libraries(test-googlenet paddle-mobile)
......@@ -16,8 +16,8 @@ limitations under the License. */
#include <string>
#include <vector>
#include "common/log.h"
#include "framework/executor.h"
#include "io.h"
#include "operators/conv_op.h"
#include "operators/pool_op.h"
......@@ -38,7 +38,14 @@ template <typename DeviceType, typename OpType>
class Executor4Test : public Executor<DeviceType> {
public:
Executor4Test(Program<DeviceType> p, string op_type)
: Executor<DeviceType>(p) {
: Executor<DeviceType>() {
this->program_ = p;
if (this->use_optimize_) {
this->to_predict_program_ = this->program_.optimizeProgram;
} else {
this->to_predict_program_ = this->program_.originProgram;
}
if (this->program_.originProgram == nullptr) {
LOG(paddle_mobile::LogLevel::kLOG_ERROR)
<< "to_predict_program_ == nullptr";
......@@ -52,7 +59,6 @@ class Executor4Test : public Executor<DeviceType> {
std::shared_ptr<OpType> op_ptr = std::make_shared<OpType>(
op->Type(), op->GetInputs(), op->GetOutputs(), op->GetAttrMap(),
this->program_.scope);
this->ops_of_block_[*block_desc.get()].push_back(op_ptr);
break;
}
......@@ -73,7 +79,14 @@ class Executor4Test : public Executor<DeviceType> {
std::shared_ptr<Tensor> out_tensor = std::make_shared<LoDTensor>();
out_tensor.reset(output_tensor);
Executor<DeviceType>::predict(t, 0);
std::shared_ptr<paddle_mobile::framework::BlockDesc> to_predict_block =
this->to_predict_program_->Block(0);
for (int j = 0; j < this->ops_of_block_[*to_predict_block.get()].size();
++j) {
auto op = this->ops_of_block_[*to_predict_block.get()][j];
op->Run();
}
return out_tensor;
}
};
......@@ -16,7 +16,7 @@ limitations under the License. */
#include "../test_helper.h"
#include "../test_include.h"
#include "framework/executor.h"
#include "io.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
......
......@@ -13,25 +13,24 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "../executor_for_test.h"
#include "../test_helper.h"
#include "io.h"
#include "../test_include.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
// ../models/image_classification_resnet.inference.model
auto program = loader.Load(std::string("../models/googlenet"));
if (program.originProgram == nullptr) {
DLOG << "program file read fail";
}
auto program = loader.Load(g_googlenet);
PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
"program file read fail");
Executor4Test<paddle_mobile::CPU,
paddle_mobile::operators::ConvOp<paddle_mobile::CPU, float>>
executor(program, "conv2d");
paddle_mobile::framework::Tensor input;
SetupTensor<float>(&input, {1, 3, 32, 32}, static_cast<float>(0),
static_cast<float>(1));
auto out_ddim = paddle_mobile::framework::make_ddim({1, 64, 56, 56});
GetInput<float>(g_test_image_1x3x224x224, &input, {1, 3, 224, 224});
auto out_ddim = paddle_mobile::framework::make_ddim({1, 64, 112, 112});
auto output = executor.predict(input, "data", "conv2d_0.tmp_0", out_ddim);
auto output_ptr = output->data<float>();
......
......@@ -21,7 +21,7 @@ limitations under the License. */
#include "framework/ddim.h"
#include "framework/tensor.h"
static const std::string g_google = "../models/googlenet";
static const std::string g_googlenet = "../models/googlenet";
static const std::string g_mobilenet = "../models/mobilenet";
static const std::string g_mobilenet_ssd = "../models/mobilenet";
static const std::string g_squeezenet = "../models/squeezenet";
......@@ -60,3 +60,14 @@ void GetInput(const std::string &input_name, std::vector<T> *input,
}
free(input_ptr);
}
/// Fills `input` with raw binary data read from the file `input_name`.
///
/// The tensor is first resized/allocated to `dims` via mutable_data<T>(),
/// then exactly numel() * sizeof(T) bytes are read into its buffer.
///
/// @param input_name path to a raw little-endian dump of T values
/// @param input      destination tensor; its buffer is (re)allocated here
/// @param dims       shape the tensor is resized to before reading
///
/// NOTE(review): there is no endianness or file-size validation — the file
/// is assumed to have been written by the same platform/toolchain. Verify
/// against the model-dump producer.
template <typename T>
void GetInput(const std::string &input_name,
              paddle_mobile::framework::Tensor *input,
              paddle_mobile::framework::DDim dims) {
  // Allocate (or reuse) the backing buffer for the requested shape.
  T *input_ptr = input->mutable_data<T>(dims);
  std::ifstream in(input_name, std::ios::in | std::ios::binary);
  if (!in.is_open()) {
    // Previously a missing file silently left the buffer uninitialized;
    // bail out explicitly so the failure point is at least debuggable.
    return;
  }
  // reinterpret_cast instead of a C-style cast: the only legal reading of
  // this conversion, and greppable in review.
  in.read(reinterpret_cast<char *>(input_ptr), input->numel() * sizeof(T));
  // Stream closes automatically via RAII when `in` goes out of scope.
}
......@@ -19,6 +19,7 @@ limitations under the License. */
#include <vector>
#include "./test_helper.h"
#include "common/enforce.h"
#include "common/log.h"
#include "framework/framework.pb.h"
#include "framework/lod_tensor.h"
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册