// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/api/cxx_api.h"
#include <memory>
#include "lite/api/paddle_api.h"
#include "lite/core/device_info.h"
#include "lite/core/version.h"

namespace paddle {
namespace lite {

class CxxPaddleApiImpl : public lite_api::PaddlePredictor {
 public:
  CxxPaddleApiImpl();

  /// Create a new predictor from a config.
  void Init(const lite_api::CxxConfig &config);

  std::unique_ptr<lite_api::Tensor> GetInput(int i) override;

  std::unique_ptr<const lite_api::Tensor> GetOutput(int i) const override;

  void Run() override;

  std::string GetVersion() const override;

  // Get input names and output names.
  const std::vector<std::string> &GetInputNames() override;
  const std::vector<std::string> &GetOutputNames() override;

  std::unique_ptr<const lite_api::Tensor> GetTensor(
      const std::string &name) const override;

  // Get input tensor by name.
  std::unique_ptr<lite_api::Tensor> GetInputByName(
      const std::string &name) override;

  void SaveOptimizedModel(const std::string &model_dir,
                          lite_api::LiteModelType model_type =
                              lite_api::LiteModelType::kProtobuf) override;

 private:
  // The underlying full-featured predictor that this class wraps.
  Predictor raw_predictor_;
};

CxxPaddleApiImpl::CxxPaddleApiImpl() {}

void CxxPaddleApiImpl::Init(const lite_api::CxxConfig &config) {
#ifdef LITE_WITH_CUDA
  Env<TARGET(kCUDA)>::Init();
#endif
  auto places = config.valid_places();
  raw_predictor_.Build(config, places);
}

std::unique_ptr<lite_api::Tensor> CxxPaddleApiImpl::GetInput(int i) {
  auto *x = raw_predictor_.GetInput(i);
  return std::unique_ptr<lite_api::Tensor>(new lite_api::Tensor(x));
}

std::unique_ptr<const lite_api::Tensor> CxxPaddleApiImpl::GetOutput(
    int i) const {
  const auto *x = raw_predictor_.GetOutput(i);
  return std::unique_ptr<const lite_api::Tensor>(new lite_api::Tensor(x));
}

const std::vector<std::string> &CxxPaddleApiImpl::GetInputNames() {
  return raw_predictor_.GetInputNames();
}

const std::vector<std::string> &CxxPaddleApiImpl::GetOutputNames() {
  return raw_predictor_.GetOutputNames();
}

void CxxPaddleApiImpl::Run() { raw_predictor_.Run(); }

std::string CxxPaddleApiImpl::GetVersion() const { return version(); }

std::unique_ptr<const lite_api::Tensor> CxxPaddleApiImpl::GetTensor(
    const std::string &name) const {
  auto *x = raw_predictor_.GetTensor(name);
  return std::unique_ptr<const lite_api::Tensor>(new lite_api::Tensor(x));
}

std::unique_ptr<lite_api::Tensor> CxxPaddleApiImpl::GetInputByName(
    const std::string &name) {
  return std::unique_ptr<lite_api::Tensor>(
      new lite_api::Tensor(raw_predictor_.GetInputByName(name)));
}

void CxxPaddleApiImpl::SaveOptimizedModel(const std::string &model_dir,
                                          lite_api::LiteModelType model_type) {
  raw_predictor_.SaveModel(model_dir, model_type);
}

}  // namespace lite

namespace lite_api {

// Specialization of the CreatePaddlePredictor factory for CxxConfig: builds
// the full-featured predictor defined above.
template <>
std::shared_ptr<PaddlePredictor> CreatePaddlePredictor(
    const CxxConfig &config) {
  auto x = std::make_shared<lite::CxxPaddleApiImpl>();
  x->Init(config);
  return x;
}

}  // namespace lite_api
}  // namespace paddle
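
// Usage sketch (illustrative only, kept in a comment so it is not part of
// this translation unit). It shows how a caller would drive the predictor
// implemented above via the public lite_api surface. The model directory
// "./mobilenet_v1", the kX86 place, and the 1x3x224x224 input shape are
// placeholder assumptions, not values prescribed by this file:
//
//   #include "lite/api/paddle_api.h"
//
//   int main() {
//     paddle::lite_api::CxxConfig config;
//     config.set_model_dir("./mobilenet_v1");  // placeholder model path
//     config.set_valid_places(
//         {paddle::lite_api::Place{TARGET(kX86), PRECISION(kFloat)}});
//
//     // Dispatches to the CreatePaddlePredictor<CxxConfig> specialization
//     // defined at the bottom of this file.
//     auto predictor =
//         paddle::lite_api::CreatePaddlePredictor<paddle::lite_api::CxxConfig>(
//             config);
//
//     auto input = predictor->GetInput(0);
//     input->Resize({1, 3, 224, 224});          // placeholder input shape
//     float *data = input->mutable_data<float>();
//     // ... fill `data` with preprocessed input values ...
//
//     predictor->Run();
//
//     auto output = predictor->GetOutput(0);
//     // ... read results via output->data<float>() and output->shape() ...
//     return 0;
//   }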