Commit b21005df authored by eclipsess

test relu with Executor4Test

Parent: 6f7f8f5d
@@ -21,6 +21,7 @@ limitations under the License. */
 #include "io.h"
 #include "operators/conv_op.h"
 #include "operators/pool_op.h"
+#include "operators/relu_op.h"
 #include "operators/reshape_op.h"
 #include "operators/softmax_op.h"
 #include "operators/transpose_op.h"
@@ -56,6 +57,7 @@ class Executor4Test : public Executor<DeviceType> {
     std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
     for (std::shared_ptr<OpDesc> op : ops) {
       if (op->Type() == op_type) {
+        /// test the first op of this type found in the program
         std::shared_ptr<OpType> op_ptr = std::make_shared<OpType>(
             op->Type(), op->GetInputs(), op->GetOutputs(), op->GetAttrMap(),
             this->program_.scope);
...
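The comment added above documents an important property of Executor4Test: it instantiates only the first op in the program whose type matches the requested op_type, so the feed/fetch variable names later passed to predict must belong to that first instance. A minimal usage sketch restating the pattern from the relu test in this commit (all identifiers are taken from this diff; `program`, `input`, and `out_ddim` are assumed to be set up as in that test):

```cpp
// Executor4Test<Device, OpType> binds to the FIRST "relu" op it finds, so the
// variable names below must be that op's actual input/output in the program.
Executor4Test<paddle_mobile::CPU,
              paddle_mobile::operators::ReluOp<paddle_mobile::CPU, float>>
    executor(program, "relu");
auto output = executor.predict(input, /*input var*/ "batch_norm_0.tmp_2",
                               /*output var*/ "batch_norm_0.tmp_3", out_ddim);
```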
@@ -29,6 +29,9 @@ int main() {
   paddle_mobile::framework::Tensor input;
   GetInput<float>(g_test_image_1x3x224x224, &input, {1, 3, 224, 224});
+  // use SetupTensor if there is no local input image:
+  // SetupTensor<float>(&input, {1, 3, 224, 224}, static_cast<float>(0),
+  //                    static_cast<float>(1));
   auto out_ddim = paddle_mobile::framework::make_ddim({1, 64, 112, 112});
   auto output = executor.predict(input, "data", "conv2d_0.tmp_0", out_ddim);
...
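The commented-out lines above spell out the fallback for machines that don't have the dumped test image: GetInput reads real data from disk, while SetupTensor fills the tensor with random values in the given range. A sketch of the intended either/or, written as a fragment inside the test's main() with ../test_include.h already included (the `have_local_image` flag is a hypothetical stand-in for an actual file check):

```cpp
paddle_mobile::framework::Tensor input;
bool have_local_image = false;  // hypothetical: set true if the dump file exists
if (have_local_image) {
  // read a real 1x3x224x224 image dump from disk
  GetInput<float>(g_test_image_1x3x224x224, &input, {1, 3, 224, 224});
} else {
  // fill the tensor with random values in [0, 1)
  SetupTensor<float>(&input, {1, 3, 224, 224}, static_cast<float>(0),
                     static_cast<float>(1));
}
```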
@@ -111,7 +111,7 @@ int main() {
   DLOG << "begin to run ElementAddOp Test";
   paddle_mobile::Loader<paddle_mobile::CPU> loader;
   auto program =
-      loader.Load(std::string("../../test/models/"
+      loader.Load(std::string("../models/"
                               "image_classification_resnet.inference.model"));
   /// input x (1,3,224,224)
...
@@ -12,108 +12,32 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#pragma once
+#include "../executor_for_test.h"
 #include "../test_include.h"
-#include "operators/relu_op.h"
-
-namespace paddle_mobile {
-namespace framework {
-
-template <typename Dtype>
-class TestReluOp {
- public:
-  explicit TestReluOp(const Program<Dtype> p) : program_(p) {
-    if (use_optimize_) {
-      to_predict_program_ = program_.optimizeProgram;
-    } else {
-      to_predict_program_ = program_.originProgram;
-    }
-
-    const std::vector<std::shared_ptr<BlockDesc>> blocks =
-        to_predict_program_->Blocks();
-    //  DLOG << " **block size " << blocks.size();
-    for (auto block_desc : blocks) {
-      std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
-      //  DLOG << " ops " << ops.size();
-      for (auto op : ops) {
-        if (op->Type() == "relu" &&
-            op->Input("X")[0] == "batch_norm_34.tmp_2") {
-          DLOG << "in";
-          std::shared_ptr<operators::ReluOp<Dtype, float>> test_op =
-              std::make_shared<operators::ReluOp<Dtype, float>>(
-                  op->Type(), op->GetInputs(), op->GetOutputs(),
-                  op->GetAttrMap(), program_.scope);
-          ops_of_block_[*block_desc.get()].push_back(test_op);
-        }
-      }
-    }
-  }
-
-  std::shared_ptr<Tensor> predict(const Tensor &t1) {
-    // feed
-    auto scope = program_.scope;
-    Variable *x1_feed_value = scope->Var("batch_norm_34.tmp_2");
-    auto tensor_x1 = x1_feed_value->GetMutable<Tensor>();
-    tensor_x1->ShareDataWith(t1);
-
-    Variable *output = scope->Var("batch_norm_34.tmp_3");
-    auto *output_tensor = output->GetMutable<Tensor>();
-    output_tensor->mutable_data<float>({1, 2, 3, 4});
-    //  DLOG << typeid(output_tensor).name();
-    //  DLOG << "output_tensor dims: " << output_tensor->dims();
-
-    std::shared_ptr<Tensor> out_tensor = std::make_shared<LoDTensor>();
-    out_tensor.reset(output_tensor);
-
-    predict(t1, 0);
-    return out_tensor;
-    //  return outvars_tensor;
-  }
-
- private:
-  const framework::Program<Dtype> program_;
-  std::shared_ptr<ProgramDesc> to_predict_program_;
-  std::map<framework::BlockDesc,
-           std::vector<std::shared_ptr<OperatorBase<Dtype>>>>
-      ops_of_block_;
-  bool use_optimize_ = false;
-
-  void predict(const Tensor &t1, int block_id) {
-    std::shared_ptr<BlockDesc> to_predict_block =
-        to_predict_program_->Block(block_id);
-    for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
-      auto op = ops_of_block_[*to_predict_block.get()][j];
-      DLOG << "op -> run()";
-      op->Run();
-    }
-  }
-};
-
-template class TestReluOp<CPU>;
-}  // namespace framework
-}  // namespace paddle_mobile
-
 int main() {
-  DLOG << "----------**********----------";
-  DLOG << "begin to run Relu Test";
   paddle_mobile::Loader<paddle_mobile::CPU> loader;
-  auto program = loader.Load(std::string("../../test/models/mobilenet+ssd"));
-
-  /// input x (1,3,300,300)
-  paddle_mobile::framework::Tensor inputx1;
-  SetupTensor<float>(&inputx1, {1, 2, 3, 4}, static_cast<float>(-1),
-                     static_cast<float>(1));
-  auto *inputx1_ptr = inputx1.data<float>();
-
-  paddle_mobile::framework::TestReluOp<paddle_mobile::CPU> testReluOp(program);
-
-  auto output = testReluOp.predict(inputx1);
-  auto *output_ptr = output->data<float>();
-
-  for (int i = 0; i < output->numel(); i++) {
-    DLOG << output_ptr[i];
+  // ../models/image_classification_resnet.inference.model
+  auto program = loader.Load(g_mobilenet_ssd);
+  PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
+                        "program file read fail");
+
+  Executor4Test<paddle_mobile::CPU,
+                paddle_mobile::operators::ReluOp<paddle_mobile::CPU, float>>
+      executor(program, "relu");
+
+  paddle_mobile::framework::Tensor input;
+  SetupTensor<float>(&input, {1, 2, 3, 4}, static_cast<float>(-1),
+                     static_cast<float>(1));
+
+  auto out_ddim = paddle_mobile::framework::make_ddim({1, 2, 3, 4});
+  auto output = executor.predict(input, "batch_norm_0.tmp_2",
+                                 "batch_norm_0.tmp_3", out_ddim);
+
+  auto output_ptr = output->data<float>();
+  for (int j = 0; j < output->numel(); ++j) {
+    DLOG << " value of output: " << output_ptr[j];
   }
   return 0;
 }
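The new test only logs the operator's output. For eyeballing those logs: relu computes max(0, x) element-wise, so with inputs drawn from [-1, 1] every printed value should be non-negative. A tiny self-contained reference check of that semantics (plain C++, independent of paddle-mobile; the sample values are illustrative):

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

// Reference semantics for the op under test: relu(x) = max(0, x).
int main() {
  std::vector<float> xs = {-1.0f, -0.25f, 0.0f, 0.5f, 1.0f};
  std::vector<float> expected = {0.0f, 0.0f, 0.0f, 0.5f, 1.0f};
  for (size_t i = 0; i < xs.size(); ++i) {
    float y = std::max(0.0f, xs[i]);
    assert(y == expected[i]);  // negative inputs clamp to zero
  }
  return 0;
}
```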
@@ -23,7 +23,7 @@ limitations under the License. */
 static const std::string g_googlenet = "../models/googlenet";
 static const std::string g_mobilenet = "../models/mobilenet";
-static const std::string g_mobilenet_ssd = "../models/mobilenet";
+static const std::string g_mobilenet_ssd = "../models/mobilenet+ssd";
 static const std::string g_squeezenet = "../models/squeezenet";
 static const std::string g_resnet =
     "../models/image_classification_resnet.inference.model";
...