diff --git a/deploy/cpp_infer/include/ocr_cls.h b/deploy/cpp_infer/include/ocr_cls.h
new file mode 100644
index 0000000000000000000000000000000000000000..38a37cff3c035eafe3617d83b2cc15ca47f30186
--- /dev/null
+++ b/deploy/cpp_infer/include/ocr_cls.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "opencv2/core.hpp"
+#include "opencv2/imgcodecs.hpp"
+#include "opencv2/imgproc.hpp"
+#include "paddle_api.h"
+#include "paddle_inference_api.h"
+#include <chrono>
+#include <iomanip>
+#include <iostream>
+#include <ostream>
+#include <vector>
+
+#include <cstring>
+#include <fstream>
+#include <numeric>
+
+#include <include/preprocess_op.h>
+#include <include/utility.h>
+
+namespace PaddleOCR {
+
+class Classifier {
+public:
+  explicit Classifier(const std::string &model_dir, const bool &use_gpu,
+                      const int &gpu_id, const int &gpu_mem,
+                      const int &cpu_math_library_num_threads,
+                      const bool &use_mkldnn, const bool &use_zero_copy_run,
+                      const double &cls_thresh) {
+    this->use_gpu_ = use_gpu;
+    this->gpu_id_ = gpu_id;
+    this->gpu_mem_ = gpu_mem;
+    this->cpu_math_library_num_threads_ = cpu_math_library_num_threads;
+    this->use_mkldnn_ = use_mkldnn;
+    this->use_zero_copy_run_ = use_zero_copy_run;
+
+    this->cls_thresh = cls_thresh;
+
+    LoadModel(model_dir);
+  }
+
+  // Load Paddle inference model
+  void LoadModel(const std::string &model_dir);
+
+  cv::Mat Run(cv::Mat &img);
+
+private:
+  std::shared_ptr<PaddlePredictor> predictor_;
+
+  bool use_gpu_ = false;
+  int gpu_id_ = 0;
+  int gpu_mem_ = 4000;
+  int cpu_math_library_num_threads_ = 4;
+  bool use_mkldnn_ = false;
+  bool use_zero_copy_run_ = false;
+  double cls_thresh = 0.5;
+
+  std::vector<float> mean_ = {0.5f, 0.5f, 0.5f};
+  std::vector<float> scale_ = {1 / 0.5f, 1 / 0.5f, 1 / 0.5f};
+  bool is_scale_ = true;
+
+  // pre-process
+  ClsResizeImg resize_op_;
+  Normalize normalize_op_;
+  Permute permute_op_;
+
+}; // class Classifier
+
+} // namespace PaddleOCR
diff --git a/deploy/cpp_infer/src/ocr_cls.cpp b/deploy/cpp_infer/src/ocr_cls.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..40debaa7835d3174627f8b0528abba673c6e3d86
--- /dev/null
+++ b/deploy/cpp_infer/src/ocr_cls.cpp
@@ -0,0 +1,108 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <include/ocr_cls.h>
+
+namespace PaddleOCR {
+
+cv::Mat Classifier::Run(cv::Mat &img) {
+  cv::Mat src_img;
+  img.copyTo(src_img);
+  cv::Mat resize_img;
+
+  std::vector<int> cls_image_shape = {3, 48, 192};
+  int index = 0;
+  float wh_ratio = float(img.cols) / float(img.rows);
+
+  this->resize_op_.Run(img, resize_img, cls_image_shape);
+
+  this->normalize_op_.Run(&resize_img, this->mean_, this->scale_,
+                          this->is_scale_);
+
+  std::vector<float> input(1 * 3 * resize_img.rows * resize_img.cols, 0.0f);
+
+  this->permute_op_.Run(&resize_img, input.data());
+
+  // Inference.
+  if (this->use_zero_copy_run_) {
+    auto input_names = this->predictor_->GetInputNames();
+    auto input_t = this->predictor_->GetInputTensor(input_names[0]);
+    input_t->Reshape({1, 3, resize_img.rows, resize_img.cols});
+    input_t->copy_from_cpu(input.data());
+    this->predictor_->ZeroCopyRun();
+  } else {
+    paddle::PaddleTensor input_t;
+    input_t.shape = {1, 3, resize_img.rows, resize_img.cols};
+    input_t.data =
+        paddle::PaddleBuf(input.data(), input.size() * sizeof(float));
+    input_t.dtype = PaddleDType::FLOAT32;
+    std::vector<paddle::PaddleTensor> outputs;
+    this->predictor_->Run({input_t}, &outputs, 1);
+  }
+
+  std::vector<float> softmax_out;
+  std::vector<int64_t> label_out;
+  auto output_names = this->predictor_->GetOutputNames();
+  auto softmax_out_t = this->predictor_->GetOutputTensor(output_names[0]);
+  auto softmax_shape_out = softmax_out_t->shape();
+
+  int softmax_out_num =
+      std::accumulate(softmax_shape_out.begin(), softmax_shape_out.end(), 1,
+                      std::multiplies<int>());
+
+  softmax_out.resize(softmax_out_num);
+
+  softmax_out_t->copy_to_cpu(softmax_out.data());
+
+  float score = 0;
+  int label = 0;
+  for (int i = 0; i < softmax_out_num; i++) {
+    if (softmax_out[i] > score) {
+      score = softmax_out[i];
+      label = i;
+    }
+  }
+  if (label % 2 == 1 && score > this->cls_thresh) {
+    cv::rotate(src_img, src_img, 1);
+  }
+  return src_img;
+}
+
+void Classifier::LoadModel(const std::string &model_dir) {
+  AnalysisConfig config;
+  config.SetModel(model_dir + "/model", model_dir + "/params");
+
+  if (this->use_gpu_) {
+    config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
+  } else {
+    config.DisableGpu();
+    if (this->use_mkldnn_) {
+      config.EnableMKLDNN();
+    }
+    config.SetCpuMathLibraryNumThreads(this->cpu_math_library_num_threads_);
+  }
+
+  // false for zero copy tensor
+  config.SwitchUseFeedFetchOps(!this->use_zero_copy_run_);
+  // true for multiple input
+  config.SwitchSpecifyInputNames(true);
+
+  config.SwitchIrOptim(true);
+
+  config.EnableMemoryOptim();
+  config.DisableGlogInfo();
+
+  this->predictor_ = CreatePaddlePredictor<AnalysisConfig>(config);
+}
+} // namespace PaddleOCR
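For context, below is a minimal, hypothetical sketch of how the new `Classifier` could be driven on a single cropped text image. The `Classifier` constructor signature and `Run()` come from the diff above; the model directory, image paths, flag values, and the standalone `main()` are placeholders and not part of this change.

```cpp
#include <include/ocr_cls.h>
#include <opencv2/imgcodecs.hpp>

int main() {
  // Arguments mirror the constructor added in ocr_cls.h:
  // model_dir, use_gpu, gpu_id, gpu_mem, cpu_math_library_num_threads,
  // use_mkldnn, use_zero_copy_run, cls_thresh.
  // "./inference/cls" and the threshold 0.9 are example values only.
  PaddleOCR::Classifier cls("./inference/cls", /*use_gpu=*/false,
                            /*gpu_id=*/0, /*gpu_mem=*/4000,
                            /*cpu_math_library_num_threads=*/4,
                            /*use_mkldnn=*/false, /*use_zero_copy_run=*/false,
                            /*cls_thresh=*/0.9);

  // Hypothetical input crop produced by the detection stage.
  cv::Mat crop = cv::imread("./text_crop.jpg", cv::IMREAD_COLOR);

  // Run() returns the crop rotated 180 degrees when the softmax score of an
  // odd ("reversed") label exceeds cls_thresh; otherwise the crop unchanged.
  cv::Mat upright = cls.Run(crop);
  cv::imwrite("./text_crop_cls.jpg", upright);
  return 0;
}
```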