/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <glog/logging.h>
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/inference/api/details/reset_tensor_array.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/io.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {

class NativePaddlePredictor : public PaddlePredictor {
 public:
  explicit NativePaddlePredictor(const NativeConfig &config)
      : config_(config) {}

  // Creates a sub scope only when a global (parent) scope is provided.
  bool Init(std::shared_ptr<framework::Scope> parent_scope);

  bool Run(const std::vector<PaddleTensor> &inputs,
           std::vector<PaddleTensor> *output_data,
           int batch_size = -1) override;

  std::unique_ptr<PaddlePredictor> Clone() override;

  ~NativePaddlePredictor() override;

  framework::Scope *scope() { return sub_scope_ ? sub_scope_ : scope_.get(); }

 protected:
  // Copies the user-supplied inputs into the feed variables of `scope`.
  bool SetFeed(const std::vector<PaddleTensor> &input_datas,
               framework::Scope *scope);
  // Copies the fetch variables of `scope` back into `output_data`.
  bool GetFetch(std::vector<PaddleTensor> *output_data,
                framework::Scope *scope);
  template <typename T>
  void GetFetchOne(const framework::LoDTensor &fetchs,
                   PaddleTensor *output_data);
  void PrepareFeedFetch();

  NativeConfig config_;
  platform::Place place_;
  std::unique_ptr<framework::Executor> executor_;
  std::shared_ptr<framework::Scope> scope_;
  std::unique_ptr<framework::ExecutorPrepareContext> ctx_;
  std::unique_ptr<framework::ProgramDesc> inference_program_;
  std::vector<framework::OpDesc *> feeds_;
  std::map<std::string, size_t> feed_names_;
  std::vector<framework::OpDesc *> fetchs_;
  // Memory buffer for feed inputs: creating temporary LoDTensors on every
  // Run causes serious concurrency problems (wrong results and memory
  // leaks), so they are cached here.
  std::vector<framework::LoDTensor> feed_tensors_;
  // Not a unique_ptr: the parent scope owns and deletes the sub scope.
  framework::Scope *sub_scope_{nullptr};
  details::TensorArrayBatchCleaner tensor_array_batch_cleaner_;
};

}  // namespace paddle
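
// Example usage (a minimal sketch, not part of this header's API surface;
// the model directory and input variable name below are hypothetical).
// The CreatePaddlePredictor factory and the NativeConfig / PaddleTensor
// types are declared in paddle_inference_api.h:
//
//   NativeConfig config;
//   config.model_dir = "./mobilenet";  // hypothetical model directory
//   config.use_gpu = false;
//   auto predictor = CreatePaddlePredictor<NativeConfig>(config);
//
//   PaddleTensor input;
//   input.name = "image";  // hypothetical input variable name
//   input.shape = {1, 3, 224, 224};
//   input.dtype = PaddleDType::FLOAT32;
//   input.data.Resize(1 * 3 * 224 * 224 * sizeof(float));
//   // ... copy image data into input.data.data() ...
//
//   std::vector<PaddleTensor> outputs;
//   predictor->Run({input}, &outputs);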