// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/api/light_api.h"
#include <memory>
#include "lite/api/paddle_api.h"
#include "lite/core/version.h"
#include "lite/model_parser/model_parser.h"
#ifndef LITE_ON_TINY_PUBLISH
#include "lite/api/paddle_use_kernels.h"
#include "lite/api/paddle_use_ops.h"
#endif

namespace paddle {
namespace lite {

void LightPredictorImpl::Init(const lite_api::MobileConfig& config) {
  // LightPredictor only supports the NaiveBuffer backend in the publish lib.
  if (config.lite_model_file().empty()) {
    raw_predictor_.reset(
        new LightPredictor(config.model_dir(),
                           config.model_buffer(),
                           config.param_buffer(),
                           config.model_from_memory(),
                           lite_api::LiteModelType::kNaiveBuffer));
  } else {
    raw_predictor_.reset(new LightPredictor(config.lite_model_file(),
                                            config.model_from_memory()));
  }
  mode_ = config.power_mode();
  threads_ = config.threads();

#ifdef LITE_WITH_NPU
  // Store the model-level configuration into scope for kernels, and use
  // exe_scope to store the execution-level configuration.
  Context<TargetType::kNPU>::SetSubgraphModelCacheDir(
      raw_predictor_->scope(), config.subgraph_model_cache_dir());
#endif
#ifdef LITE_WITH_HUAWEI_ASCEND_NPU
  Context<TargetType::kHuaweiAscendNPU>::SetHuaweiAscendDeviceID(
      config.get_device_id());
  Context<TargetType::kHuaweiAscendNPU>::SetSubgraphModelCacheDir(
      config.subgraph_model_cache_dir());
#endif
}

std::unique_ptr<lite_api::Tensor> LightPredictorImpl::GetInput(int i) {
  return std::unique_ptr<lite_api::Tensor>(
      new lite_api::Tensor(raw_predictor_->GetInput(i)));
}

std::unique_ptr<const lite_api::Tensor> LightPredictorImpl::GetOutput(
    int i) const {
  return std::unique_ptr<const lite_api::Tensor>(
      new lite_api::Tensor(raw_predictor_->GetOutput(i)));
}

void LightPredictorImpl::Run() {
#ifdef LITE_WITH_ARM
  lite::DeviceInfo::Global().SetRunMode(mode_, threads_);
#endif
  raw_predictor_->Run();
}

std::shared_ptr<lite_api::PaddlePredictor> LightPredictorImpl::Clone() {
  LOG(FATAL) << "The Clone API is not supported in LightPredictor";
  return nullptr;
}

std::shared_ptr<lite_api::PaddlePredictor> LightPredictorImpl::Clone(
    const std::vector<std::string>& var_names) {
  LOG(FATAL) << "The Clone API is not supported in LightPredictor";
  return nullptr;
}

std::string LightPredictorImpl::GetVersion() const { return lite::version(); }

std::unique_ptr<const lite_api::Tensor> LightPredictorImpl::GetTensor(
    const std::string& name) const {
  return std::unique_ptr<const lite_api::Tensor>(
      new lite_api::Tensor(raw_predictor_->GetTensor(name)));
}

std::unique_ptr<lite_api::Tensor> LightPredictorImpl::GetInputByName(
    const std::string& name) {
  return std::unique_ptr<lite_api::Tensor>(
      new lite_api::Tensor(raw_predictor_->GetInputByName(name)));
}

std::vector<std::string> LightPredictorImpl::GetInputNames() {
  return raw_predictor_->GetInputNames();
}

std::vector<std::string> LightPredictorImpl::GetOutputNames() {
  return raw_predictor_->GetOutputNames();
}

}  // namespace lite

namespace lite_api {

template <>
std::shared_ptr<PaddlePredictor> CreatePaddlePredictor(
    const MobileConfig& config) {
  auto x = std::make_shared<lite::LightPredictorImpl>();
  x->Init(config);
  return x;
}

}  // namespace lite_api
}  // namespace paddle
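
// Example usage (illustrative sketch only, not part of this translation unit).
// It assumes a NaiveBuffer model ("model.nb", hypothetical path) produced by
// the opt tool, and the public lite_api::MobileConfig setters
// (set_model_from_file, set_power_mode, set_threads); exact names may differ
// across Paddle-Lite versions.
//
//   paddle::lite_api::MobileConfig config;
//   config.set_model_from_file("model.nb");
//   config.set_power_mode(paddle::lite_api::PowerMode::LITE_POWER_HIGH);
//   config.set_threads(2);
//   auto predictor = paddle::lite_api::CreatePaddlePredictor<
//       paddle::lite_api::MobileConfig>(config);
//   auto input = predictor->GetInput(0);
//   input->Resize({1, 3, 224, 224});
//   // ... fill input->mutable_data<float>() with image data ...
//   predictor->Run();
//   auto output = predictor->GetOutput(0);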