diff --git a/src/framework/tensor.h b/src/framework/tensor.h
index 7fdb52c435c64e3365d028e843969f7a648ba604..b6a7c724ad13d3757101ebaf48d089cc4e7f957e 100644
--- a/src/framework/tensor.h
+++ b/src/framework/tensor.h
@@ -219,7 +219,8 @@ class Tensor {
   inline void check_memory_size() const {
     PADDLE_MOBILE_ENFORCE(
-        holder_, "Tensor holds no memory. Call Tensor::mutable_data first.");
+        holder_ != nullptr,
+        "Tensor holds no memory. Call Tensor::mutable_data first.");
     PADDLE_MOBILE_ENFORCE(
         numel() * SizeOfType(type()) <= memory_size(),
         "Tensor's dims_ is out of bound. CallTensor::mutable_data "
diff --git a/src/framework/variable.h b/src/framework/variable.h
index 3d8dd5158046f58dd4d206427328867140e95344..07cb6377e0c9ca89f828eded887b8d1da2d8aae6 100644
--- a/src/framework/variable.h
+++ b/src/framework/variable.h
@@ -45,8 +45,6 @@ class Variable : public PaddleMobileObject {
   bool IsInitialized() const { return holder_ != nullptr; }

-  const std::string Name() { return name_; }
-
   template <typename T>
   T *GetMutable() {
     if (!IsType<T>()) {
@@ -64,8 +62,6 @@ class Variable : public PaddleMobileObject {
   std::type_index Type() const { return holder_->Type(); }

-  void SetName(const string name) { name_ = name; }
-
  private:
   struct Placeholder {
     Placeholder() = default;
diff --git a/src/io.cpp b/src/io.cpp
index e0df6e732ec3c15aa1b5ff9228f1e50a5b1c01cd..1c5e97bbb7eaa0257bb2f81ef131b8c6bc48547f 100644
--- a/src/io.cpp
+++ b/src/io.cpp
@@ -221,7 +221,7 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
     }
   }

-  //  originProgramDesc->Description("program: ");
+  originProgramDesc->Description("program: ");

   paddle_mobile__framework__proto__program_desc__free_unpacked(c_program, NULL);
   return program;
@@ -381,7 +381,6 @@ void Executor<Dtype, P>::InitMemory() {
     } else {
       if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
         auto tensor = var->template GetMutable<framework::LoDTensor>();
-        tensor->template mutable_data<Ptype>();
       }
     }
   }
@@ -392,7 +391,8 @@ template <typename Dtype, Precision P>
 void Executor<Dtype, P>::predict(const framework::Tensor &t, int block_id) {
   framework::Variable *g_feed_value = program_.scope->Var("feed");
-  auto feed_tensor = g_feed_value->GetMutable<framework::Tensor>();
+  framework::Tensor *feed_tensor =
+      g_feed_value->GetMutable<framework::LoDTensor>();
   feed_tensor->Resize(t.dims());
   feed_tensor->ShareDataWith(t);
   std::shared_ptr<framework::BlockDesc> to_predict_block =
@@ -408,7 +408,7 @@ std::vector<typename Executor<Dtype, P>::Ptype> Executor<Dtype, P>::predict(
     const std::vector<Ptype> &input, const std::vector<int64_t> &dims) {
   DLOG << "start predict: ";
-  framework::Tensor tensor;
+  framework::LoDTensor tensor;
   auto ddim = framework::make_ddim(dims);

   auto input_ptr = tensor.mutable_data<Ptype>(ddim);
diff --git a/src/operators/kernel/arm/conv_kernel.cpp b/src/operators/kernel/arm/conv_kernel.cpp
index 35ee19503291843d6529261b8d7cb32db6c44f9c..1e2572b984734dcd88be7c1c750fc0f07448e66d 100644
--- a/src/operators/kernel/arm/conv_kernel.cpp
+++ b/src/operators/kernel/arm/conv_kernel.cpp
@@ -38,7 +38,6 @@ void ConvKernel<CPU, float>::Compute(const ConvParam &param) const {
   Tensor filter = *param.Filter();
   Tensor *output = param.Output();
   output->mutable_data<float>();
-  int groups = param.Groups();
   std::vector<int> strides = param.Strides();
   std::vector<int> paddings = param.Paddings();
@@ -78,6 +77,7 @@ void ConvKernel<CPU, float>::Compute(const ConvParam &param) const {
   framework::DDim filter_matrix_shape = {filter.dims()[0],
                                          filter.numel() / filter.dims()[0]};
   filter.Resize(filter_matrix_shape);
+  DLOG << " filter.dims() = " << filter.dims();
   framework::DDim output_matrix_shape = {
       output->dims()[1],
       output->numel() / (output->dims()[0] * output->dims()[1])};
diff --git a/src/operators/op_param.h b/src/operators/op_param.h
index 5ac6fc67af584331a1c28a8ce9a5578f4eb55cd4..02bda7147aa77648cf6a159bdb11d2f3e42ee304 100644
--- a/src/operators/op_param.h
+++ b/src/operators/op_param.h
@@ -207,7 +207,7 @@ class ConvParam : OpParam {
   const Tensor *Input() const { return input_; }

-  const LoDTensor *Filter() const { return filter_; }
+  const Tensor *Filter() const { return filter_; }

   Tensor *Output() const { return output_; }

@@ -222,7 +222,7 @@ class ConvParam : OpParam {
  private:
   Tensor *input_;
   Tensor *output_;
-  LoDTensor *filter_;
+  Tensor *filter_;
   vector<int> strides_;
   vector<int> paddings_;
   vector<int> dilations_;
@@ -717,10 +717,10 @@ class FushionFcParam : public OpParam {
  public:
   FushionFcParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
                  const AttributeMap &attrs, const Scope &scope) {
-    input_x_ = InputXFrom<framework::Tensor>(inputs, scope);
-    input_y_ = InputYFrom<framework::Tensor>(inputs, scope);
-    input_z_ = InputZFrom<framework::Tensor>(inputs, scope);
-    out_ = OutFrom<framework::Tensor>(outputs, scope);
+    input_x_ = InputXFrom<framework::LoDTensor>(inputs, scope);
+    input_y_ = InputYFrom<framework::LoDTensor>(inputs, scope);
+    input_z_ = InputZFrom<framework::LoDTensor>(inputs, scope);
+    out_ = OutFrom<framework::LoDTensor>(outputs, scope);
     x_num_col_dims_ = GetAttr<int>("x_num_col_dims", attrs);
     y_num_col_dims_ = GetAttr<int>("y_num_col_dims", attrs);
     axis_ = GetAttr<int>("axis", attrs);
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 20d6cfe7a780b36c9be4845519ee2730c049cfb2..f464c3bd94f92e8cbec1509c4e82df18658a7b1f 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -11,11 +11,11 @@ ADD_EXECUTABLE(test-mul-op operators/test_mul_op.cpp test_helper.h test_includ
 target_link_libraries(test-mul-op paddle-mobile)

 # gen test
-ADD_EXECUTABLE(test-elementwiseadd-op operators/test_elementwise_add_op.cpp test_helper.h test_include.h)
+ADD_EXECUTABLE(test-elementwiseadd-op operators/test_elementwise_add_op.cpp test_helper.h test_include.h)
 target_link_libraries(test-elementwiseadd-op paddle-mobile)

 # gen test
-ADD_EXECUTABLE(test-concat-op operators/test_concat_op.cpp test_helper.h test_include.h)
+ADD_EXECUTABLE(test-concat-op operators/test_concat_op.cpp test_helper.h test_include.h)
 target_link_libraries(test-concat-op paddle-mobile)

 # gen test
diff --git a/test/executor_for_test.h b/test/executor_for_test.h
index 045658cbfc8f8cf9a8b4d1693f0292bcab8f8722..a54a8bb191ad53bf5581a246f8a0ead633f84102 100644
--- a/test/executor_for_test.h
+++ b/test/executor_for_test.h
@@ -21,6 +21,7 @@ limitations under the License. */
 #include "common/log.h"
 #include "framework/op_registry.h"
 #include "operators/conv_op.h"
+#include "operators/elementwise_add_op.h"
 #include "operators/pool_op.h"
 #include "operators/relu_op.h"
 #include "operators/reshape_op.h"
@@ -37,6 +38,7 @@ using paddle_mobile::framework::Program;
 using paddle_mobile::framework::Tensor;
 using paddle_mobile::framework::Variable;
 using std::string;
+using std::vector;

 template <typename DeviceType, typename OpType>
 class Executor4Test : public Executor<DeviceType> {
  public:
@@ -73,18 +75,34 @@ class Executor4Test : public Executor<DeviceType> {
     }
   }

-  std::shared_ptr<Tensor> predict(const Tensor &t, string input, string output,
-                                  const DDim &dDim) {
+  template <typename T = LoDTensor>
+  vector<std::shared_ptr<Tensor>> predict(const vector<Tensor> &ts,
+                                          const vector<string> &input_names,
+                                          const vector<string> &output_names,
+                                          const vector<DDim> &ddims) {
     auto scope = this->program_.scope;
-    Variable *g_feed_value = scope->Var(input);
-    auto tensor = g_feed_value->GetMutable<Tensor>();
-    tensor->ShareDataWith(t);
+    size_t input_size = input_names.size();
+    size_t out_size = output_names.size();

-    Variable *con_output = scope->Var(output);
-    auto *output_tensor = con_output->GetMutable<Tensor>();
-    output_tensor->mutable_data<float>(dDim);
-    std::shared_ptr<Tensor> out_tensor = std::make_shared<Tensor>();
-    out_tensor.reset(output_tensor);
+    vector<Variable *> input_vars(input_size);
+    vector<T *> input_tensors(input_size);
+    for (int i = 0; i < input_size; i++) {
+      input_vars[i] = scope->Var(input_names[i]);
+      input_tensors[i] = input_vars[i]->GetMutable<T>();
+      input_tensors[i]->ShareDataWith(ts[i]);
+    }
+
+    vector<Variable *> output_vars(out_size);
+    vector<T *> output_tensors(out_size);
+    vector<std::shared_ptr<Tensor>> output_tensor_sptrs(out_size);
+
+    for (int i = 0; i < out_size; i++) {
+      output_vars[i] = scope->Var(output_names[i]);
+      output_tensors[i] = output_vars[i]->GetMutable<T>();
+      output_tensors[i]->mutable_data<float>(ddims[i]);
+      output_tensor_sptrs[i] = std::make_shared<Tensor>();
+      output_tensor_sptrs[i].reset(output_tensors[i]);
+    }

     std::shared_ptr<BlockDesc> to_predict_block =
         this->to_predict_program_->Block(0);
@@ -94,6 +112,6 @@ class Executor4Test : public Executor<DeviceType> {
       op->Run();
     }

-    return out_tensor;
+    return output_tensor_sptrs;
   }
 };
diff --git a/test/operators/test_batchnorm_op.cpp b/test/operators/test_batchnorm_op.cpp
index 385617317df1c22158ed7fa9fd6b792118f884e3..ba2e06b80b418b62d2dc445fe87119ed84bfe4f6 100644
--- a/test/operators/test_batchnorm_op.cpp
+++ b/test/operators/test_batchnorm_op.cpp
@@ -68,27 +68,27 @@ class TestBatchNormOp {
     // feed
     auto scope = program_.scope;
     Variable *x1_feed_value = scope->Var("conv2d_0.tmp_0");
-    auto tensor_x1 = x1_feed_value->GetMutable<Tensor>();
+    auto tensor_x1 = x1_feed_value->GetMutable<LoDTensor>();
     tensor_x1->ShareDataWith(t1);

     Variable *mean_feed_value = scope->Var("batch_norm_0.w_1");
-    auto tensor_mean = mean_feed_value->GetMutable<Tensor>();
+    auto tensor_mean = mean_feed_value->GetMutable<LoDTensor>();
     tensor_mean->ShareDataWith(t2);

     Variable *scale_feed_value = scope->Var("batch_norm_0.w_0");
-    auto tensor_scale = scale_feed_value->GetMutable<Tensor>();
+    auto tensor_scale = scale_feed_value->GetMutable<LoDTensor>();
     tensor_scale->ShareDataWith(t3);

     Variable *variance_feed_value = scope->Var("batch_norm_0.w_2");
-    auto tensor_variance = variance_feed_value->GetMutable<Tensor>();
+    auto tensor_variance = variance_feed_value->GetMutable<LoDTensor>();
     tensor_variance->ShareDataWith(t4);

     Variable *bias_feed_value = scope->Var("batch_norm_0.b_0");
-    auto tensor_bias = bias_feed_value->GetMutable<Tensor>();
+    auto tensor_bias = bias_feed_value->GetMutable<LoDTensor>();
     tensor_bias->ShareDataWith(t5);

     Variable *output = scope->Var("batch_norm_0.tmp_2");
-    auto *output_tensor = output->GetMutable<Tensor>();
+    auto *output_tensor = output->GetMutable<LoDTensor>();
     output_tensor->mutable_data<float>({4, 10, 2, 2});
     //  DLOG << typeid(output_tensor).name();
     //  DLOG << "output_tensor dims: " << output_tensor->dims();
diff --git a/test/operators/test_box_coder_op.cpp b/test/operators/test_box_coder_op.cpp
index dea59e8bf2c0e1a23cf9f64d9b9d6485707c789e..b7695c91dfb394645adfddcf1e11b96dd45a3c94 100644
--- a/test/operators/test_box_coder_op.cpp
+++ b/test/operators/test_box_coder_op.cpp
@@ -62,19 +62,19 @@ class TestBoxCoderOp {
     // feed
     auto scope = program_.scope;
     Variable *prior_box = scope->Var("concat_0.tmp_0");
-    auto tensor_x1 = prior_box->GetMutable<Tensor>();
+    auto tensor_x1 = prior_box->GetMutable<LoDTensor>();
     tensor_x1->ShareDataWith(t1);

     Variable *prior_box_var = scope->Var("concat_1.tmp_0");
-    auto tensor_x2 = prior_box_var->GetMutable<Tensor>();
+    auto tensor_x2 = prior_box_var->GetMutable<LoDTensor>();
     tensor_x2->ShareDataWith(t2);

     Variable *target_box = scope->Var("concat_2.tmp_0");
-    auto tensor_x3 = target_box->GetMutable<Tensor>();
+    auto tensor_x3 = target_box->GetMutable<LoDTensor>();
     tensor_x3->ShareDataWith(t3);

     Variable *boxes_output = scope->Var("box_coder_0.tmp_0");
-    auto *boxes_output_tensor = boxes_output->GetMutable<Tensor>();
+    auto *boxes_output_tensor = boxes_output->GetMutable<LoDTensor>();
     boxes_output_tensor->mutable_data<float>({1, 1917, 4});

     //  DLOG << typeid(output_tensor).name();
diff --git a/test/operators/test_concat_op.cpp b/test/operators/test_concat_op.cpp
index 205274ea7ab240f2a9917960029b3efd40ee9205..a9bb072f1e941d15a058825b14fb007507f4d610 100644
--- a/test/operators/test_concat_op.cpp
+++ b/test/operators/test_concat_op.cpp
@@ -12,148 +12,64 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#pragma once
+#include "../executor_for_test.h"
 #include "../test_include.h"
 #include "operators/concat_op.h"

-namespace paddle_mobile {
-namespace framework {
-
-template <typename Dtype>
-class TestConcatOp {
- public:
-  explicit TestConcatOp(const Program<Dtype> p) : program_(p) {
-    if (use_optimize_) {
-      to_predict_program_ = program_.optimizeProgram;
-    } else {
-      to_predict_program_ = program_.originProgram;
-    }
-
-    const std::vector<std::shared_ptr<BlockDesc>> blocks =
-        to_predict_program_->Blocks();
-    //  DLOG << " **block size " << blocks.size();
-    for (int i = 0; i < blocks.size(); ++i) {
-      std::shared_ptr<BlockDesc> block_desc = blocks[i];
-      std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
-      //  DLOG << " ops " << ops.size();
-      for (int j = 0; j < ops.size(); ++j) {
-        std::shared_ptr<OpDesc> op = ops[j];
-        if (op->Type() == "concat" && op->Input("X")[0] == "conv2d_3.tmp_1") {
-          DLOG << " mul attr size: " << op->GetAttrMap().size();
-          DLOG << " inputs size: " << op->GetInputs().size();
-          DLOG << " outputs size: " << op->GetOutputs().size();
-          DLOG << " Input X is : " << op->Input("X")[0];
-          DLOG << " Output Out is : " << op->Output("Out")[0];
-          DLOG << " axis : " << op->GetAttrMap().at("axis").Get<int>();
-
-          std::shared_ptr<operators::ConcatOp<Dtype, float>> concat =
-              std::make_shared<operators::ConcatOp<Dtype, float>>(
-                  op->Type(), op->GetInputs(), op->GetOutputs(),
-                  op->GetAttrMap(), program_.scope);
-          ops_of_block_[*block_desc.get()].push_back(concat);
-        }
-      }
-    }
-  }
-
-  std::shared_ptr<Tensor> predict_concat(const Tensor &t1, const Tensor &t2,
-                                         const Tensor &t3, const Tensor &t4) {
-    // feed
-    auto scope = program_.scope;
-    Variable *x1_feed_value = scope->Var("conv2d_3.tmp_1");
-    auto tensor_x1 = x1_feed_value->GetMutable<Tensor>();
-    tensor_x1->ShareDataWith(t1);
-
-    Variable *x2_feed_value = scope->Var("conv2d_5.tmp_1");
-    auto tensor_x2 = x2_feed_value->GetMutable<Tensor>();
-    tensor_x2->ShareDataWith(t2);
-
scope->Var("conv2d_7.tmp_1"); - auto tensor_x3 = x3_feed_value->GetMutable(); - tensor_x3->ShareDataWith(t3); - - Variable *x4_feed_value = scope->Var("conv2d_8.tmp_1"); - auto tensor_x4 = x4_feed_value->GetMutable(); - tensor_x4->ShareDataWith(t4); - - Variable *con_output = scope->Var("concat_0.tmp_0"); - auto *output_tensor = con_output->GetMutable(); - output_tensor->mutable_data({4, 100, 2, 2}); - // DLOG << typeid(output_tensor).name(); - // DLOG << "output_tensor dims: " << output_tensor->dims(); - - std::shared_ptr out_tensor = std::make_shared(); - out_tensor.reset(output_tensor); - - predict_concat(t1, t2, t3, t4, 0); - return out_tensor; - } - - private: - const framework::Program program_; - std::shared_ptr to_predict_program_; - std::map>>> - ops_of_block_; - bool use_optimize_ = false; - - void predict_concat(const Tensor &t1, const Tensor &t2, const Tensor &t3, - const Tensor &t4, int block_id) { - std::shared_ptr to_predict_block = - to_predict_program_->Block(block_id); - for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) { - auto op = ops_of_block_[*to_predict_block.get()][j]; - DLOG << "op -> run()"; - op->Run(); - } - } -}; - -template class TestConcatOp; -} // namespace framework -} // namespace paddle_mobile - int main() { - DLOG << "----------**********----------"; - DLOG << "begin to run ConcatOp Test"; paddle_mobile::Loader loader; - auto program = loader.Load(std::string("../../test/models/googlenet")); - - /// input x (4,10,2,2) - paddle_mobile::framework::Tensor inputx1; - SetupTensor(&inputx1, {4, 10, 2, 2}, static_cast(0), - static_cast(1)); - auto *inputx1_ptr = inputx1.data(); - /// input x (4,20,2,2) - paddle_mobile::framework::Tensor inputx2; - SetupTensor(&inputx2, {4, 20, 2, 2}, static_cast(0), - static_cast(1)); - auto *inputx2_ptr = inputx2.data(); - /// input x (4,30,2,2) - paddle_mobile::framework::Tensor inputx3; - SetupTensor(&inputx3, {4, 30, 2, 2}, static_cast(0), - static_cast(1)); - auto *inputx3_ptr = inputx3.data(); - /// input x (4,40,2,2) - paddle_mobile::framework::Tensor inputx4; - SetupTensor(&inputx4, {4, 40, 2, 2}, static_cast(0), - static_cast(1)); - auto *inputx4_ptr = inputx4.data(); - - paddle_mobile::framework::TestConcatOp testConcatOp( - program); - - auto output_concat = - testConcatOp.predict_concat(inputx1, inputx2, inputx3, inputx4); - auto *output_concat_ptr = output_concat->data(); - + auto program = loader.Load(g_googlenet); + PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr, + "program file read fail"); + + Executor4Test> + executor(program, "concat"); + + // 1. input_tensors; + vector input_tensors; + + Tensor input1; + auto input1_data = CreateInput(&input1, {4, 10, 2, 2}, 0, 1); + input_tensors.push_back(input1); + Tensor input2; + auto input2_data = CreateInput(&input2, {4, 20, 2, 2}, 0, 1); + input_tensors.push_back(input2); + Tensor input3; + auto input3_data = CreateInput(&input3, {4, 30, 2, 2}, 0, 1); + input_tensors.push_back(input3); + Tensor input4; + auto input4_data = CreateInput(&input4, {4, 40, 2, 2}, 0, 1); + input_tensors.push_back(input4); + // 2. input_names + vector input_names({ + "conv2d_3.tmp_1", + "conv2d_5.tmp_1", + "conv2d_7.tmp_1", + "conv2d_8.tmp_1", + }); + + // 3. output_names + vector output_names({"concat_0.tmp_0"}); + + // 4. 
+  vector<DDim> out_ddims;
+  auto out_ddim = paddle_mobile::framework::make_ddim({3, 100, 2, 2});
+  out_ddims.push_back(out_ddim);
+
+  auto output = executor.predict(input_tensors, input_names,
+                                 output_names, out_ddims);
+
+  auto output0_data = output[0]->data<float>();
+
+  // 5. test one example.
   int input_n = 1;
   int input_c = 2;
   int input_h = 0;
   int input_w = 1;

-  int stride0 = inputx3.numel() / inputx3.dims()[0];
-  int stride1 = inputx3.numel() / inputx3.dims()[0] / inputx3.dims()[1];
-  int stride2 = inputx3.dims()[3];
+  int stride0 = input3.numel() / input3.dims()[0];
+  int stride1 = input3.numel() / input3.dims()[0] / input3.dims()[1];
+  int stride2 = input3.dims()[3];
   /// inputx1 (4,10,2,2),
   /// inputx2 (4,20,2,2),
   /// inputx3 (4,30,2,2),
@@ -163,10 +79,10 @@ int main() {
   int input_index =
       input_n * stride0 + input_c * stride1 + input_h * stride2 + input_w;
   int output_index = input_n * 100 * 2 * 2 +
-                     (input_c + inputx1.dims()[1] + inputx2.dims()[1]) * 2 * 2 +
+                     (input_c + input1.dims()[1] + input2.dims()[1]) * 2 * 2 +
                      input_h * 2 + input_w;

-  DLOG << " inputx3[1,2,0,1] = " << inputx3_ptr[input_index];
-  DLOG << " output[1,12,0,1] = " << output_concat_ptr[output_index];
+  DLOG << " input3 [1, 2,0,1] = " << input3_data[input_index];
+  DLOG << " output [1,32,0,1] = " << output0_data[output_index];
   return 0;
 }
diff --git a/test/operators/test_elementwise_add_op.cpp b/test/operators/test_elementwise_add_op.cpp
index eeb642a3f486c81a93452b8a3a26354793c8eff1..1b4bf457a2ca7d4207ce3f9f0b20d68ee3f463e0 100644
--- a/test/operators/test_elementwise_add_op.cpp
+++ b/test/operators/test_elementwise_add_op.cpp
@@ -12,133 +12,52 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#pragma once
+#include "../executor_for_test.h"
 #include "../test_include.h"
-#include "operators/elementwise_add_op.h"

-namespace paddle_mobile {
-namespace framework {
-
-template <typename Dtype>
-class TestElementwiseAddOp {
- public:
-  explicit TestElementwiseAddOp(const Program<Dtype> p) : program_(p) {
-    if (use_optimize_) {
-      to_predict_program_ = program_.optimizeProgram;
-    } else {
-      to_predict_program_ = program_.originProgram;
-    }
-
-    const std::vector<std::shared_ptr<BlockDesc>> blocks =
-        to_predict_program_->Blocks();
-    //  DLOG << " **block size " << blocks.size();
-    for (int i = 0; i < blocks.size(); ++i) {
-      std::shared_ptr<BlockDesc> block_desc = blocks[i];
-      std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
-      //  DLOG << " ops " << ops.size();
-      for (int j = 0; j < ops.size(); ++j) {
-        std::shared_ptr<OpDesc> op = ops[j];
-        if (op->Type() == "elementwise_add" &&
-            op->Input("X")[0] == "batch_norm_2.tmp_2") {
-          DLOG << " elementwise_add attr size: " << op->GetAttrMap().size();
-          DLOG << " inputs size: " << op->GetInputs().size();
-          DLOG << " outputs size: " << op->GetOutputs().size();
-          DLOG << " Input X is : " << op->Input("X")[0];
-          DLOG << " Input Y is : " << op->Input("Y")[0];
-          DLOG << " Output Out is : " << op->Output("Out")[0];
-          Attribute axis_attr = op->GetAttrMap().at("axis");
-          int axis = axis_attr.Get<int>();
-          DLOG << " Attr axis is : " << axis;
-
-          std::shared_ptr<operators::ElementwiseAddOp<Dtype, float>> add =
-              std::make_shared<operators::ElementwiseAddOp<Dtype, float>>(
-                  op->Type(), op->GetInputs(), op->GetOutputs(),
-                  op->GetAttrMap(), program_.scope);
-          ops_of_block_[*block_desc.get()].push_back(add);
-        }
-      }
-    }
-  }
-
-  std::shared_ptr<Tensor> predict_add(const Tensor &t1, const Tensor &t2) {
-    // feed
-    auto scope = program_.scope;
-    Variable *x_feed_value = scope->Var("batch_norm_2.tmp_2");
-    auto tensor_x = x_feed_value->GetMutable<Tensor>();
-    tensor_x->ShareDataWith(t1);
-
-    Variable *y_feed_value = scope->Var("batch_norm_0.tmp_3");
-    auto tensor_y = y_feed_value->GetMutable<Tensor>();
-    tensor_y->ShareDataWith(t2);
-
-    Variable *con_output = scope->Var("elementwise_add_0.tmp_0");
-    auto *output_tensor = con_output->GetMutable<Tensor>();
-    output_tensor->mutable_data<float>({1, 3, 224, 224});
-    //  DLOG << typeid(output_tensor).name();
-    //  DLOG << "output_tensor dims: " << output_tensor->dims();
+int main() {
+  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  auto program = loader.Load(g_resnet);
+  PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
+                        "program file read fail");

-    std::shared_ptr<Tensor> out_tensor = std::make_shared<Tensor>();
-    out_tensor.reset(output_tensor);
+  Executor4Test<paddle_mobile::CPU,
+                paddle_mobile::operators::ElementwiseAddOp<paddle_mobile::CPU,
+                                                           float>>
+      executor(program, "elementwise_add");

-    predict_add(t1, t2, 0);
-    return out_tensor;
-  }
+  // 1. input_tensors;
+  vector<Tensor> input_tensors;

- private:
-  const framework::Program<Dtype> program_;
-  std::shared_ptr<ProgramDesc> to_predict_program_;
-  std::map<framework::BlockDesc,
-           std::vector<std::shared_ptr<OperatorBase<Dtype>>>>
-      ops_of_block_;
-  bool use_optimize_ = false;
+  Tensor input1;
+  auto input1_data = CreateInput<float>(&input1, {1, 3, 224, 224}, 0, 1);
+  input_tensors.push_back(input1);

-  void predict_add(const Tensor &t1, const Tensor &t2, int block_id) {
-    std::shared_ptr<BlockDesc> to_predict_block =
-        to_predict_program_->Block(block_id);
-    for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
-      auto op = ops_of_block_[*to_predict_block.get()][j];
-      DLOG << "op -> run()";
-      op->Run();
-    }
-  }
-};
+  Tensor input2;
+  auto input2_data = CreateInput<float>(&input2, {224}, 0, 1);
+  input_tensors.push_back(input2);

-template class TestElementwiseAddOp<CPU>;
-}  // namespace framework
-}  // namespace paddle_mobile
-int main() {
-  DLOG << "----------**********----------";
-  DLOG << "begin to run ElementAddOp Test";
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
-  auto program =
-      loader.Load(std::string("../models/"
-                              "image_classification_resnet.inference.model"));
+  // 2. input_names
+  vector<string> input_names({
+      "batch_norm_2.tmp_2",
+      "batch_norm_0.tmp_3",
+  });

-  /// input x (1,3,224,224)
-  paddle_mobile::framework::Tensor inputx;
-  SetupTensor<float>(&inputx, {1, 3, 224, 224}, static_cast<float>(0),
-                     static_cast<float>(1));
-  auto *inputx_ptr = inputx.data<float>();
-  /// input y (224,)
-  paddle_mobile::framework::Tensor inputy;
-  SetupTensor<float>(&inputy, {224}, static_cast<float>(0),
-                     static_cast<float>(1));
-  auto *inputy_ptr = inputy.data<float>();
+  // 3. output_names
+  vector<string> output_names({"elementwise_add_0.tmp_0"});

-  paddle_mobile::framework::TestElementwiseAddOp<paddle_mobile::CPU>
-      testElementwiseAddOp(program);
+  // 4. out_dims;
+  vector<DDim> out_ddims;
+  auto out_ddim = paddle_mobile::framework::make_ddim({1, 3, 224, 224});
+  out_ddims.push_back(out_ddim);

-  auto output_add = testElementwiseAddOp.predict_add(inputx, inputy);
-  auto *output_add_ptr = output_add->data<float>();
-  //  for (int j = 0; j < output_add->numel(); ++j) {
-  //    DLOG << "value of output: " << output_add_ptr[j];
-  //  }
+  auto output = executor.predict(input_tensors, input_names,
+                                 output_names, out_ddims);
+  auto output0_data = output[0]->data<float>();

   /// output (1,3,224,224)
-  DLOG << "output memory size : " << output_add->memory_size();
-  DLOG << "output numel : " << output_add->numel();
+  DLOG << "output memory size : " << output[0]->memory_size();
+  DLOG << "output numel : " << output[0]->numel();

-  DLOG << inputx_ptr[226] << " + " << inputy_ptr[2] << " = "
-       << output_add_ptr[226];
-  return 0;
+  DLOG << input1_data[226] << " + " << input2_data[2] << " = "
+       << output0_data[226];
 }
diff --git a/test/operators/test_fushion_fc_op.cpp b/test/operators/test_fushion_fc_op.cpp
index b52989b2e8b3f25a6994de7e630a6360ac8504d9..6063772d85a32af7cac166c9682a5c1e2d8ad1de 100644
--- a/test/operators/test_fushion_fc_op.cpp
+++ b/test/operators/test_fushion_fc_op.cpp
@@ -64,24 +64,24 @@ class TestFcOp {
     // feed
     auto scope = program_.scope;
     Variable *x_feed_value = scope->Var("pool2d_13.tmp_0");
-    auto tensor_x = x_feed_value->GetMutable<Tensor>();
+    auto tensor_x = x_feed_value->GetMutable<LoDTensor>();
     tensor_x->ShareDataWith(t1);

     Variable *y_feed_value = scope->Var("loss3_classifier-loc_weights");
-    auto tensor_y = y_feed_value->GetMutable<Tensor>();
+    auto tensor_y = y_feed_value->GetMutable<LoDTensor>();
     tensor_y->ShareDataWith(t2);

     Variable *z_feed_value = scope->Var("loss3_classifier-loc_biases");
-    auto tensor_z = z_feed_value->GetMutable<Tensor>();
+    auto tensor_z = z_feed_value->GetMutable<LoDTensor>();
     tensor_z->ShareDataWith(t3);

scope->Var("loss3_classifier-loc.tmp_1"); - auto *output_tensor = con_output->GetMutable(); + auto *output_tensor = con_output->GetMutable(); output_tensor->mutable_data({3, 10}); // DLOG << typeid(output_tensor).name(); // DLOG << "output_tensor dims: " << output_tensor->dims(); - std::shared_ptr out_tensor = std::make_shared(); + std::shared_ptr out_tensor = std::make_shared(); out_tensor.reset(output_tensor); predict(t1, t2, t3, 0); @@ -130,17 +130,17 @@ int main() { } /// input x (1,3,224,224) - paddle_mobile::framework::Tensor inputx; + paddle_mobile::framework::LoDTensor inputx; SetupTensor(&inputx, {3, 64, 1, 1}, static_cast(1), static_cast(1)); auto *inputx_ptr = inputx.data(); /// input y (224,) - paddle_mobile::framework::Tensor inputy; + paddle_mobile::framework::LoDTensor inputy; SetupTensor(&inputy, {64, 10}, static_cast(1.5), static_cast(1.5)); auto *inputy_ptr = inputy.data(); - paddle_mobile::framework::Tensor inputz; + paddle_mobile::framework::LoDTensor inputz; SetupTensor(&inputz, {10}, static_cast(0), static_cast(1)); auto *inputz_ptr = inputz.data(); diff --git a/test/operators/test_lrn_op.cpp b/test/operators/test_lrn_op.cpp index 2284b38abc378d38e8441d2d7462cc345678fef4..ba35639fb71668eef8d6b7bae454af5a9120a015 100644 --- a/test/operators/test_lrn_op.cpp +++ b/test/operators/test_lrn_op.cpp @@ -12,118 +12,51 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#pragma once +#include "../executor_for_test.h" #include "../test_include.h" #include "operators/lrn_op.h" -namespace paddle_mobile { -namespace framework { - -template -class TestLrnOp { - public: - explicit TestLrnOp(const Program p) : program_(p) { - if (use_optimize_) { - to_predict_program_ = program_.optimizeProgram; - } else { - to_predict_program_ = program_.originProgram; - } - - const std::vector> blocks = - to_predict_program_->Blocks(); - // DLOG << " **block size " << blocks.size(); - for (int i = 0; i < blocks.size(); ++i) { - std::shared_ptr block_desc = blocks[i]; - std::vector> ops = block_desc->Ops(); - // DLOG << " ops " << ops.size(); - for (int j = 0; j < ops.size(); ++j) { - std::shared_ptr op = ops[j]; - if (op->Type() == "lrn" && op->Input("X")[0] == "pool2d_0.tmp_0") { - DLOG << " mul attr size: " << op->GetAttrMap().size(); - DLOG << " inputs size: " << op->GetInputs().size(); - DLOG << " outputs size: " << op->GetOutputs().size(); - DLOG << " Input X is : " << op->Input("X")[0]; - DLOG << " Output Out is : " << op->Output("Out")[0]; - DLOG << " n : " << op->GetAttrMap().at("n").Get(); - DLOG << " alpha : " << op->GetAttrMap().at("alpha").Get(); - DLOG << " beta : " << op->GetAttrMap().at("beta").Get(); - DLOG << " k : " << op->GetAttrMap().at("k").Get(); - std::shared_ptr> lrn = - std::make_shared>( - op->Type(), op->GetInputs(), op->GetOutputs(), - op->GetAttrMap(), program_.scope); - ops_of_block_[*block_desc.get()].push_back(lrn); - } - } - } - } - - std::shared_ptr predict_lrn(const Tensor &t1) { - // feed - auto scope = program_.scope; - Variable *x1_feed_value = scope->Var("pool2d_0.tmp_0"); - auto tensor_x1 = x1_feed_value->GetMutable(); - tensor_x1->ShareDataWith(t1); - - Variable *con_output = scope->Var("pool1_norm1.tmp_1"); - auto *output_tensor = con_output->GetMutable(); - output_tensor->mutable_data({3, 4, 2, 2}); - // DLOG << typeid(output_tensor).name(); - // DLOG << "output_tensor dims: " << output_tensor->dims(); - - std::shared_ptr out_tensor 
-    std::shared_ptr<Tensor> out_tensor = std::make_shared<Tensor>();
-    out_tensor.reset(output_tensor);
+int main() {
+  paddle_mobile::Loader<paddle_mobile::CPU> loader;
+  auto program = loader.Load(g_googlenet);
+  PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
+                        "program file read fail");

-    predict_lrn(t1, 0);
-    return out_tensor;
-  }
+  Executor4Test<paddle_mobile::CPU,
+                paddle_mobile::operators::LrnOp<paddle_mobile::CPU, float>>
+      executor(program, "lrn");

- private:
-  const framework::Program<Dtype> program_;
-  std::shared_ptr<ProgramDesc> to_predict_program_;
-  std::map<framework::BlockDesc,
-           std::vector<std::shared_ptr<OperatorBase<Dtype>>>>
-      ops_of_block_;
-  bool use_optimize_ = false;
+  // 1. input_tensors;
+  vector<Tensor> input_tensors;

-  void predict_lrn(const Tensor &t1, int block_id) {
-    std::shared_ptr<BlockDesc> to_predict_block =
-        to_predict_program_->Block(block_id);
-    for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
-      auto op = ops_of_block_[*to_predict_block.get()][j];
-      DLOG << "op -> run()";
-      op->Run();
-    }
-  }
-};
+  Tensor input1;
+  auto input1_data = CreateInput<float>(&input1, {3, 4, 2, 2}, 0, 1);
+  input_tensors.push_back(input1);

-template class TestLrnOp<CPU>;
-}  // namespace framework
-}  // namespace paddle_mobile
+  // 2. input_names
+  vector<string> input_names({
+      "pool2d_0.tmp_0",
+  });

-int main() {
-  DLOG << "----------**********----------";
-  DLOG << "begin to run LrnOp Test";
-  paddle_mobile::Loader<paddle_mobile::CPU> loader;
-  auto program = loader.Load(std::string("../../test/models/googlenet"));
+  // 3. output_names
+  vector<string> output_names({"pool1_norm1.tmp_1"});

-  /// input x (3,4,2,2)
-  paddle_mobile::framework::Tensor inputx1;
-  SetupTensor<float>(&inputx1, {3, 4, 2, 2}, static_cast<float>(0),
-                     static_cast<float>(1));
-  auto *inputx1_ptr = inputx1.data<float>();
+  // 4. out_dims;
+  vector<DDim> out_ddims;
+  auto out_ddim = paddle_mobile::framework::make_ddim({3, 4, 2, 2});
+  out_ddims.push_back(out_ddim);

-  paddle_mobile::framework::TestLrnOp<paddle_mobile::CPU> testLrnOp(program);
+  auto output = executor.predict(input_tensors, input_names,
+                                 output_names, out_ddims);

-  auto output_lrn = testLrnOp.predict_lrn(inputx1);
-  auto *output_lrn_ptr = output_lrn->data<float>();
+  auto output0_data = output[0]->data<float>();

   DLOG << " LrnOp input: ";
   for (int i = 0; i < 3; i++) {
     for (int j = 0; j < 4; j++) {
       for (int c = 0; c < 2; c++) {
         for (int d = 0; d < 2; d++) {
-          DLOGF("%f ", inputx1_ptr[i * 16 + j * 4 + c * 2 + d]);
+          DLOGF("%f ", input1_data[i * 16 + j * 4 + c * 2 + d]);
         }
         DLOGF("\n");
       }
@@ -136,7 +69,7 @@ int main() {
     for (int j = 0; j < 4; j++) {
       for (int c = 0; c < 2; c++) {
         for (int d = 0; d < 2; d++) {
-          DLOGF("%f ", output_lrn_ptr[i * 16 + j * 4 + c * 2 + d]);
+          DLOGF("%f ", output0_data[i * 16 + j * 4 + c * 2 + d]);
         }
         DLOGF("\n");
       }
@@ -144,8 +77,8 @@ int main() {
     }
     DLOGF("\n");
   }
-  DLOG << inputx1_ptr[0] << " / ((1 + 0.00002 * ( " << inputx1_ptr[0] << "^2 + "
-       << inputx1_ptr[4] << "^2 + " << inputx1_ptr[8] << "^2 ))^0.75) = ";
-  DLOG << output_lrn_ptr[0];
+  DLOG << input1_data[0] << " / ((1 + 0.00002 * ( " << input1_data[0] << "^2 + "
+       << input1_data[4] << "^2 + " << input1_data[8] << "^2 ))^0.75) = ";
+  DLOG << output0_data[0];
   return 0;
 }
diff --git a/test/operators/test_mul_op.cpp b/test/operators/test_mul_op.cpp
index 061a942ed85737b41fbf0d5658134757ffbea68a..8acd4a99470b494df3a8931cfb3d140fdc39c4f0 100644
--- a/test/operators/test_mul_op.cpp
+++ b/test/operators/test_mul_op.cpp
@@ -12,158 +12,81 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#pragma once
+#include "../executor_for_test.h"
 #include "../test_include.h"
 #include "operators/mul_op.h"

-namespace paddle_mobile {
-namespace framework {
-
-template <typename Dtype>
-class TestMulOp {
- public:
-  explicit TestMulOp(const Program<Dtype> p) : program_(p) {
-    if (use_optimize_) {
-      to_predict_program_ = program_.optimizeProgram;
-    } else {
-      to_predict_program_ = program_.originProgram;
-    }
-
-    const std::vector<std::shared_ptr<BlockDesc>> blocks =
-        to_predict_program_->Blocks();
-    //  DLOG << " **block size " << blocks.size();
-    for (int i = 0; i < blocks.size(); ++i) {
-      std::shared_ptr<BlockDesc> block_desc = blocks[i];
-      std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
-      //  DLOG << " ops " << ops.size();
-      for (int j = 0; j < ops.size(); ++j) {
-        std::shared_ptr<OpDesc> op = ops[j];
-        if (op->Type() == "mul" && op->Input("X")[0] == "pool2d_0.tmp_0") {
-          DLOG << " mul attr size: " << op->GetAttrMap().size();
-          DLOG << " inputs size: " << op->GetInputs().size();
-          DLOG << " outputs size: " << op->GetOutputs().size();
-          DLOG << " Input X is : " << op->Input("X")[0];
-          DLOG << " Input Y is : " << op->Input("Y")[0];
-          DLOG << " Output Out is : " << op->Output("Out")[0];
-          DLOG << "x_num_col_dims : "
-               << op->GetAttrMap().at("x_num_col_dims").Get<int>();
-          DLOG << "y_num_col_dims : "
-               << op->GetAttrMap().at("y_num_col_dims").Get<int>();
-
-          std::shared_ptr<operators::MulOp<Dtype, float>> mul =
-              std::make_shared<operators::MulOp<Dtype, float>>(
-                  op->Type(), op->GetInputs(), op->GetOutputs(),
-                  op->GetAttrMap(), program_.scope);
-          ops_of_block_[*block_desc.get()].push_back(mul);
-        }
-      }
-    }
-  }
-
-  std::shared_ptr<Tensor> predict_mul(const Tensor &t1, const Tensor &t2) {
-    // feed
-    auto scope = program_.scope;
-    Variable *x_feed_value = scope->Var("pool2d_0.tmp_0");
-    auto tensor_x = x_feed_value->GetMutable<Tensor>();
-    tensor_x->ShareDataWith(t1);
-
-    Variable *y_feed_value = scope->Var("fc_0.w_0");
-    auto tensor_y = y_feed_value->GetMutable<Tensor>();
-    tensor_y->ShareDataWith(t2);
-
-    Variable *con_output = scope->Var("fc_0.tmp_0");
-    auto *output_tensor = con_output->GetMutable<Tensor>();
-    output_tensor->mutable_data<float>({3, 3});
-    //  DLOG << typeid(output_tensor).name();
-    //  DLOG << "output_tensor dims: " << output_tensor->dims();
-
-    std::shared_ptr<Tensor> out_tensor = std::make_shared<Tensor>();
-    out_tensor.reset(output_tensor);
-
-    predict_mul(t1, t2, 0);
-    return out_tensor;
-  }
-
- private:
-  const framework::Program<Dtype> program_;
-  std::shared_ptr<ProgramDesc> to_predict_program_;
-  std::map<framework::BlockDesc,
-           std::vector<std::shared_ptr<OperatorBase<Dtype>>>>
-      ops_of_block_;
-  bool use_optimize_ = false;
-
-  void predict_mul(const Tensor &t1, const Tensor &t2, int block_id) {
-    std::shared_ptr<BlockDesc> to_predict_block =
-        to_predict_program_->Block(block_id);
-    for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
-      auto op = ops_of_block_[*to_predict_block.get()][j];
-      DLOG << "op -> run()";
-      op->Run();
-    }
-  }
-};
-
-template class TestMulOp<CPU>;
-}  // namespace framework
-}  // namespace paddle_mobile
-
 int main() {
-  DLOG << "----------**********----------";
-  DLOG << "begin to run MulOp Test";
   paddle_mobile::Loader<paddle_mobile::CPU> loader;
-  auto program =
-      loader.Load(std::string("../../test/models/"
-                              "image_classification_resnet.inference.model"));
-
-  /// input x (3,2,1,1)
-  paddle_mobile::framework::Tensor inputx;
-  SetupTensor<float>(&inputx, {3, 2, 1, 1}, static_cast<float>(0),
-                     static_cast<float>(1));
-  auto *inputx_ptr = inputx.data<float>();
-
-  /// input y (2,3)
-  paddle_mobile::framework::Tensor inputy;
-  SetupTensor<float>(&inputy, {2, 3}, static_cast<float>(0),
-                     static_cast<float>(1));
-  auto *inputy_ptr = inputy.data<float>();
-
-  paddle_mobile::framework::TestMulOp<paddle_mobile::CPU> testMulOp(program);
-
-  auto output_mul = testMulOp.predict_mul(inputx, inputy);
-  auto *output_mul_ptr = output_mul->data<float>();
-
-  auto dimx_1 = inputx.numel() / inputx.dims()[0];
-  DLOG << " inputx : ";
-  for (int i = 0; i < inputx.dims()[0]; ++i) {
-    for (int j = 0; j < dimx_1; ++j) {
-      DLOGF("%f ", inputx_ptr[i * dimx_1 + j]);
+  auto program = loader.Load(g_resnet);
+  PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
+                        "program file read fail");
+
+  Executor4Test<paddle_mobile::CPU,
+                paddle_mobile::operators::MulOp<paddle_mobile::CPU, float>>
+      executor(program, "mul");
+
+  // 1. input_tensors;
+  vector<Tensor> input_tensors;
+
+  Tensor input1;
+  auto input1_data = CreateInput<float>(&input1, {3, 2, 1, 1}, 0, 1);
+  input_tensors.push_back(input1);
+  Tensor input2;
+  auto input2_data = CreateInput<float>(&input2, {2, 3}, 0, 1);
+  input_tensors.push_back(input2);
+
+  // 2. input_names
+  vector<string> input_names({
+      "pool2d_0.tmp_0",
+      "fc_0.w_0",
+  });
+
+  // 3. output_names
+  vector<string> output_names({"fc_0.tmp_0"});
+
+  // 4. out_dims;
+  vector<DDim> out_ddims;
+  auto out_ddim = paddle_mobile::framework::make_ddim({3, 3});
+  out_ddims.push_back(out_ddim);
+
+  auto output = executor.predict(input_tensors, input_names,
+                                 output_names, out_ddims);
+
+  auto output0_data = output[0]->data<float>();
+
+  auto dim_1 = input1.numel() / input1.dims()[0];
+  DLOG << " input1 : ";
+  for (int i = 0; i < input1.dims()[0]; ++i) {
+    for (int j = 0; j < dim_1; ++j) {
+      DLOGF("%f ", input1_data[i * dim_1 + j]);
     }
     DLOGF("\n");
   }

-  auto dimy_1 = inputy.numel() / inputy.dims()[0];
-  DLOG << " inputy : ";
-  for (int i = 0; i < inputy.dims()[0]; ++i) {
-    for (int j = 0; j < dimy_1; ++j) {
-      DLOGF("%f ", inputy_ptr[i * dimx_1 + j]);
+  auto dim_2 = input2.numel() / input2.dims()[0];
+  DLOG << " input2 : ";
+  for (int i = 0; i < input2.dims()[0]; ++i) {
+    for (int j = 0; j < dim_2; ++j) {
+      DLOGF("%f ", input2_data[i * dim_2 + j]);
     }
     DLOGF("\n");
   }

-  auto dim_output_1 = output_mul->numel() / output_mul->dims()[0];
+  auto dim_output0 = output[0]->numel() / output[0]->dims()[0];
   DLOG << " output : ";
-  for (int i = 0; i < output_mul->dims()[0]; ++i) {
-    for (int j = 0; j < dim_output_1; ++j) {
-      DLOGF("%f ", output_mul_ptr[i * dimy_1 + j]);
+  for (int i = 0; i < output[0]->dims()[0]; ++i) {
+    for (int j = 0; j < dim_output0; ++j) {
+      DLOGF("%f ", output0_data[i * dim_2 + j]);
     }
     DLOGF("\n");
   }

   /// output (3,3)
-  DLOG << "output memory size : " << output_mul->memory_size();
-  DLOG << "output numel : " << output_mul->numel();
+  DLOG << "output memory size : " << output[0]->memory_size();
+  DLOG << "output numel : " << output[0]->numel();

-  DLOG << inputx_ptr[0] << " x " << inputy_ptr[0] << " + " << inputx_ptr[1]
-       << " x " << inputy_ptr[0 + 3] << " = " << output_mul_ptr[0];
+  DLOG << input1_data[0] << " x " << input2_data[0] << " + " << input1_data[1]
+       << " x " << input2_data[0 + 3] << " = " << output0_data[0];
   return 0;
 }
diff --git a/test/operators/test_multiclass_nms_op.cpp b/test/operators/test_multiclass_nms_op.cpp
index 01ad72b9bbc93ebd09710d2ad7edc449cd2facaf..e6c41bd4b3bb241964a23accf4633e65818465be 100644
--- a/test/operators/test_multiclass_nms_op.cpp
+++ b/test/operators/test_multiclass_nms_op.cpp
@@ -77,15 +77,15 @@ class TestMultiClassNMSOp {
     // feed
     auto scope = program_.scope;
     Variable *x1_feed_value = scope->Var("box_coder_0.tmp_0");
-    auto tensor_x1 = x1_feed_value->GetMutable<Tensor>();
+    auto tensor_x1 = x1_feed_value->GetMutable<LoDTensor>();
     tensor_x1->ShareDataWith(t1);

     Variable *x2_feed_value = scope->Var("transpose_12.tmp_0");
-    auto tensor_x2 = x2_feed_value->GetMutable<Tensor>();
+    auto tensor_x2 = x2_feed_value->GetMutable<LoDTensor>();
     tensor_x2->ShareDataWith(t2);

     Variable *output = scope->Var("detection_output_0.tmp_0");
-    auto *output_tensor = output->GetMutable<Tensor>();
+    auto *output_tensor = output->GetMutable<LoDTensor>();
     output_tensor->mutable_data<float>({1917, 6});

     //  DLOG << typeid(output_tensor).name();
diff --git a/test/operators/test_prior_box_op.cpp b/test/operators/test_prior_box_op.cpp
index e365c4ed851f5c39324453edee3b37a03fbc97f1..80ede944936cb5ae31e2ed7e1e70c1257746149a 100644
--- a/test/operators/test_prior_box_op.cpp
+++ b/test/operators/test_prior_box_op.cpp
@@ -72,19 +72,19 @@ class TestPriorBoxOp {
     // feed
     auto scope = program_.scope;
     Variable *x1_feed_value = scope->Var("image");
-    auto tensor_x1 = x1_feed_value->GetMutable<Tensor>();
+    auto tensor_x1 = x1_feed_value->GetMutable<LoDTensor>();
     tensor_x1->ShareDataWith(t1);

     Variable *x2_feed_value = scope->Var("batch_norm_26.tmp_3");
-    auto tensor_x2 = x2_feed_value->GetMutable<Tensor>();
+    auto tensor_x2 = x2_feed_value->GetMutable<LoDTensor>();
     tensor_x2->ShareDataWith(t2);

     Variable *boxes_output = scope->Var("prior_box_1.tmp_0");
-    auto *boxes_output_tensor = boxes_output->GetMutable<Tensor>();
+    auto *boxes_output_tensor = boxes_output->GetMutable<LoDTensor>();
     boxes_output_tensor->mutable_data<float>({10, 10, 6, 4});

     Variable *variances_output = scope->Var("prior_box_1.tmp_1");
-    auto *variances_output_tesnor = variances_output->GetMutable<Tensor>();
+    auto *variances_output_tesnor = variances_output->GetMutable<LoDTensor>();
     variances_output_tesnor->mutable_data<float>({10, 10, 6, 4});
     //  DLOG << typeid(output_tensor).name();
     //  DLOG << "output_tensor dims: " << output_tensor->dims();
diff --git a/test/operators/test_relu_op.cpp b/test/operators/test_relu_op.cpp
index 6fefb0368bef48c5ad699b530deabff961e9c5d0..fb68b9211136e4272f6774a423f93f8f1087b6e7 100644
--- a/test/operators/test_relu_op.cpp
+++ b/test/operators/test_relu_op.cpp
@@ -14,12 +14,11 @@ limitations under the License. */

 #include "../executor_for_test.h"
 #include "../test_include.h"
+#include "operators/relu_op.h"

 int main() {
   paddle_mobile::Loader<paddle_mobile::CPU> loader;
-  //  ../models/image_classification_resnet.inference.model
-  auto program = loader.Load(g_mobilenet_ssd);
-
+  auto program = loader.Load(g_resnet);
   PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                         "program file read fail");

@@ -27,17 +26,33 @@ int main() {
                 paddle_mobile::operators::ReluOp<paddle_mobile::CPU, float>>
       executor(program, "relu");

-  paddle_mobile::framework::Tensor input;
-  SetupTensor<float>(&input, {1, 2, 3, 4}, static_cast<float>(-1),
-                     static_cast<float>(1));
+  // 1. input_tensors;
+  vector<Tensor> input_tensors;
+
+  Tensor input1;
+  auto input1_data = CreateInput<float>(&input1, {1, 2, 3, 4}, -1, 1);
+  input_tensors.push_back(input1);
+
+  // 2. input_names
+  vector<string> input_names({
+      "batch_norm_0.tmp_2",
+  });
+  // 3. output_names
+  vector<string> output_names({"batch_norm_0.tmp_3"});
+
+  // 4. out_dims;
+  vector<DDim> out_ddims;
   auto out_ddim = paddle_mobile::framework::make_ddim({1, 2, 3, 4});
-  auto output = executor.predict(input, "batch_norm_0.tmp_2",
-                                 "batch_norm_0.tmp_3", out_ddim);
+  out_ddims.push_back(out_ddim);
+
+  auto output = executor.predict(input_tensors, input_names,
+                                 output_names, out_ddims);
+
+  auto output0_data = output[0]->data<float>();

-  auto output_ptr = output->data<float>();
-  for (int j = 0; j < output->numel(); ++j) {
-    DLOG << " value of output: " << output_ptr[j];
+  for (int j = 0; j < output[0]->numel(); ++j) {
+    DLOG << " value of output: " << output0_data[j];
   }
   return 0;
 }
diff --git a/test/test_helper.h b/test/test_helper.h
index e2d6a183cb7b4caf812a11e5e6b7ada8dbb3e747..029ed9742f6d59702718a0ca03a2b2ba62da514c 100644
--- a/test/test_helper.h
+++ b/test/test_helper.h
@@ -29,7 +29,8 @@ static const std::string g_resnet =
     "../models/image_classification_resnet.inference.model";
 static const std::string g_test_image_1x3x224x224 =
     "../images/test_image_1x3x224x224_float";
-
+using paddle_mobile::framework::DDim;
+using paddle_mobile::framework::Tensor;
 template <typename T>
 void SetupTensor(paddle_mobile::framework::Tensor *input,
                  paddle_mobile::framework::DDim dims, T lower, T upper) {
@@ -43,6 +44,12 @@ void SetupTensor(paddle_mobile::framework::Tensor *input,
   }
 }

+template <typename T>
+T *CreateInput(Tensor *input, DDim dims, T low, T up) {
+  SetupTensor<T>(input, dims, static_cast<T>(low), static_cast<T>(up));
+  return input->data<T>();
+}
+
 template <typename T>
 void GetInput(const std::string &input_name, std::vector<T> *input,
               const std::vector<int64_t> &dims) {