diff --git a/src/framework/op_registry.h b/src/framework/op_registry.h index 233de642be76297706b497a35fa871fd45ca5dfa..62398dcb15dc61ef2f778b738da0afd073b37908 100644 --- a/src/framework/op_registry.h +++ b/src/framework/op_registry.h @@ -90,14 +90,6 @@ class OpRegistry { const std::string& type, const VariableNameMap& inputs, const VariableNameMap& outputs, const AttributeMap attrs, std::shared_ptr scope) { - LOG(paddle_mobile::kLOG_DEBUG1) << " type: " << type; - LOG(paddle_mobile::kLOG_DEBUG1) << " input size: " << inputs.size(); - LOG(paddle_mobile::kLOG_DEBUG1) << " output size: " << outputs.size(); - LOG(paddle_mobile::kLOG_DEBUG1) << " attr size: " << attrs.size(); - LOG(paddle_mobile::kLOG_DEBUG1) - << " OpInfoMap size: " << OpInfoMap::Instance()->map().size(); - LOG(paddle_mobile::kLOG_DEBUG1) << " has type: " << type << " " - << OpInfoMap::Instance()->Has(type); auto& info = OpInfoMap::Instance()->Get(type); auto op = info.Creator()(type, inputs, outputs, attrs, scope); return std::shared_ptr>(op); diff --git a/src/framework/variable.h b/src/framework/variable.h index 3d8dd5158046f58dd4d206427328867140e95344..07cb6377e0c9ca89f828eded887b8d1da2d8aae6 100644 --- a/src/framework/variable.h +++ b/src/framework/variable.h @@ -45,8 +45,6 @@ class Variable : public PaddleMobileObject { bool IsInitialized() const { return holder_ != nullptr; } - const std::string Name() { return name_; } - template T *GetMutable() { if (!IsType()) { @@ -64,8 +62,6 @@ class Variable : public PaddleMobileObject { std::type_index Type() const { return holder_->Type(); } - void SetName(const string name) { name_ = name; } - private: struct Placeholder { Placeholder() = default; diff --git a/src/io.cpp b/src/io.cpp index 271a3190ae0953525b25160e4f961c212b9b944c..1c5e97bbb7eaa0257bb2f81ef131b8c6bc48547f 100644 --- a/src/io.cpp +++ b/src/io.cpp @@ -45,7 +45,7 @@ static size_t ReadBuffer(const char *file_name, uint8_t **out) { printf("%s \n", file_name); FILE *fp; fp = fopen(file_name, 
"rb"); - PADDLE_MOBILE_ENFORCE(fp != NULL, "open failed !"); + PADDLE_MOBILE_ENFORCE(fp != NULL, " %s open failed !", file_name); fseek(fp, 0, SEEK_END); size_t size = ftell(fp); @@ -388,40 +388,13 @@ void Executor::InitMemory() { } } -template -std::shared_ptr Executor::predict( - framework::Tensor &t) { - // feed - auto scope = program_.scope; - framework::Variable *g_feed_value = scope->Var("pixel"); - auto tensor = g_feed_value->GetMutable(); - tensor->ShareDataWith(t); - - framework::Variable *con_output = scope->Var("conv2d_0.tmp_0"); - framework::Tensor *output_tensor = - con_output->GetMutable(); - output_tensor->mutable_data({1, 16, 32, 32}); - // std::cout << typeid(output_tensor).name() << std::endl; - // std::cout << "output_tensor dims: " << output_tensor->dims() << - // std::endl; - - std::shared_ptr out_tensor = - std::make_shared(); - out_tensor.reset(output_tensor); - - predict(t, 0); - return out_tensor; -} - template void Executor::predict(const framework::Tensor &t, int block_id) { framework::Variable *g_feed_value = program_.scope->Var("feed"); framework::Tensor *feed_tensor = g_feed_value->GetMutable(); feed_tensor->Resize(t.dims()); - feed_tensor->ShareDataWith(t); - std::shared_ptr to_predict_block = to_predict_program_->Block(block_id); for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) { diff --git a/src/operators/kernel/arm/conv_kernel.cpp b/src/operators/kernel/arm/conv_kernel.cpp index 7a566b6ac8f57394eb0b1dd9bc81ea67d33cfe38..1e2572b984734dcd88be7c1c750fc0f07448e66d 100644 --- a/src/operators/kernel/arm/conv_kernel.cpp +++ b/src/operators/kernel/arm/conv_kernel.cpp @@ -43,13 +43,13 @@ void ConvKernel::Compute(const ConvParam &param) const { std::vector paddings = param.Paddings(); std::vector dilations = param.Dilations(); - DLOG << " compute end get Attrs " << strides[0]; + // DLOG << " compute end get Attrs " << strides[0]; const int batch_size = static_cast(input->dims()[0]); std::vector
filter_shape_vec(framework::vectorize(filter.dims())); - std::vector output_shape_vec(framework::vectorize(output->dims())); + std::vector output_shape_vec(framework::vectorize(output->dims())); size_t data_dim = filter_shape_vec.size() - 2; std::vector col_shape_vec(1 + 2 * data_dim); col_shape_vec[0] = input->dims()[1] / groups; @@ -70,8 +70,6 @@ void ConvKernel::Compute(const ConvParam &param) const { col_matrix.ShareDataWith(col); col_matrix.Resize(col_matrix_shape); } - DLOG << " col_shape = " << col_shape; - DLOG << " col_matrix_shape = " << col_matrix_shape; framework::DDim input_shape = framework::slice_ddim( input->dims(), 1, static_cast(input->dims().size())); @@ -80,7 +78,6 @@ void ConvKernel::Compute(const ConvParam &param) const { filter.numel() / filter.dims()[0]}; filter.Resize(filter_matrix_shape); DLOG << " filter.dims() = " << filter.dims(); - framework::DDim output_matrix_shape = { output->dims()[1], output->numel() / (output->dims()[0] * output->dims()[1])}; @@ -117,9 +114,6 @@ void ConvKernel::Compute(const ConvParam &param) const { // gemm Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step); Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step); - DLOG << " out_slice " << out_slice.dims(); - DLOG << " filter_slice " << filter_slice.dims(); - DLOG << " col_matrix " << col_matrix.dims(); math::matmul(filter_slice, false, col_matrix, false, static_cast(1), &out_slice, static_cast(0)); diff --git a/test/operators/test_sigmoid_op.cpp b/test/operators/test_sigmoid_op.cpp index e053ca1e904db2fdd9642eeaaaefd590d3c5624a..adf0376132773989c7ee728f4a38c561760254b5 100644 --- a/test/operators/test_sigmoid_op.cpp +++ b/test/operators/test_sigmoid_op.cpp @@ -19,16 +19,12 @@ limitations under the License.
*/ int main() { paddle_mobile::framework::Tensor input; paddle_mobile::framework::Tensor output; - DLOG << 1; SetupTensor(&input, {1, 4, 60, 60}, static_cast(0), static_cast(1)); - DLOG << 2; auto out_ddim = paddle_mobile::framework::make_ddim({1, 4, 60, 60}); output.Resize(out_ddim); - DLOG << 3; paddle_mobile::operators::sigmoid(&input, &output); - DLOG << 4; auto *output_ptr = output.data(); for (int j = 0; j < output.numel(); ++j) { DLOG << " value of output: " << output_ptr[j]; diff --git a/test/operators/test_softmax_op.cpp b/test/operators/test_softmax_op.cpp index 5dd42e83e3c14e2a15b7f201b46fe7beb2e1e8e6..ed5a1a49f5583e7fe8108675accdc2fc71a6a086 100644 --- a/test/operators/test_softmax_op.cpp +++ b/test/operators/test_softmax_op.cpp @@ -18,7 +18,7 @@ limitations under the License. */ int main() { paddle_mobile::Loader loader; - auto program = loader.Load(std::string("models/mobilenet")); + auto program = loader.Load(std::string("../models/mobilenet")); if (program.originProgram == nullptr) { DLOG << "program read file"; } diff --git a/test/test_helper.h b/test/test_helper.h index c0c301840faed5b350cf814d220e1f95a4c9d3bc..6b5d8335db0a4e986a9a3512c59b9fb711751b47 100644 --- a/test/test_helper.h +++ b/test/test_helper.h @@ -29,7 +29,8 @@ static const std::string g_resnet = "../models/image_classification_resnet.inference.model"; static const std::string g_test_image_1x3x224x224 = "../images/test_image_1x3x224x224_float"; - +using paddle_mobile::framework::Tensor; +using paddle_mobile::framework::DDim; template void SetupTensor(paddle_mobile::framework::Tensor *input, paddle_mobile::framework::DDim dims, T lower, T upper) {