diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 6091c5a62594f939515fbd47325adf24280ad96d..f70c12dc87bb1238708b5e03f8af8fbf68673fae 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -304,7 +304,6 @@ bool AnalysisPredictor::GetFetch(std::vector<PaddleTensor> *outputs,
 
 // NOTE All the members in AnalysisConfig should be copied to Argument.
 void AnalysisPredictor::OptimizeInferenceProgram() {
-  LOG(INFO) << "optimization program";
   status_program_optimized_ = true;
 
   argument_.SetUseGPU(config_.use_gpu);
@@ -313,11 +312,13 @@ void AnalysisPredictor::OptimizeInferenceProgram() {
   // Analyze inference_program
   if (!config_.model_dir.empty()) {
     argument_.SetModelDir(config_.model_dir);
-  } else if (!config_.param_file.empty() && !config_.prog_file.empty()) {
+  } else {
+    PADDLE_ENFORCE(
+        !config_.param_file.empty(),
+        "Either model_dir or (param_file, prog_file) should be set.");
+    PADDLE_ENFORCE(!config_.prog_file.empty());
     argument_.SetModelProgramPath(config_.prog_file);
     argument_.SetModelParamsPath(config_.param_file);
-  } else {
-    PADDLE_THROW("Either model_dir or (param_file, prog_file) should be set.");
   }
 
   if (config_.use_gpu && config_.use_tensorrt_) {
diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc
index 053c516f882cf0679611deca98000e1c4742daa2..060d6a89d465844e9a04e567968b5f40a48bb625 100644
--- a/paddle/fluid/inference/io.cc
+++ b/paddle/fluid/inference/io.cc
@@ -21,7 +21,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/feed_fetch_type.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/version.h"
-#include "paddle/fluid/operators/impl/load_combine.h"
 #include "paddle/fluid/platform/cpu_helper.h"
 #include "paddle/fluid/pybind/pybind.h"
 
diff --git a/paddle/fluid/operators/impl/CMakeLists.txt b/paddle/fluid/operators/impl/CMakeLists.txt
deleted file mode 100644
index 1c0ff7c3d9de8608f67f45433a34b12297992277..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/impl/CMakeLists.txt
+++ /dev/null
@@ -1 +0,0 @@
-cc_library(load_combine_impl SRCS load_combine.cc DEPS scope lod_tensor device_context op_registry data_type_transform)
diff --git a/paddle/fluid/operators/impl/load_combine.cc b/paddle/fluid/operators/impl/load_combine.cc
deleted file mode 100644
index 454d583a102021a39ef5b66059f4a8fe50eb4d28..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/impl/load_combine.cc
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -#include "paddle/fluid/operators/impl/load_combine.h" - -namespace paddle { -namespace operators { -namespace impl { - -void LoadParamsFromStream(const std::vector &out_var_names, - const paddle::platform::Place &place, - bool load_as_fp16, std::istream *buffer, - const paddle::framework::Scope *scope) { - auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place); - for (size_t i = 0; i < out_var_names.size(); i++) { - auto *out_var = scope->FindVar(out_var_names[i]); - - PADDLE_ENFORCE(out_var != nullptr, "Output variable %s cannot be found", - out_var_names[i]); - - auto *tensor = out_var->GetMutable(); - - // Get data from fin to tensor - DeserializeFromStream(*buffer, tensor, *dev_ctx); - - auto in_dtype = framework::ToDataType(tensor->type()); - auto out_dtype = load_as_fp16 ? framework::proto::VarType::FP16 : in_dtype; - - if (in_dtype != out_dtype) { - // convert to float16 tensor - auto in_kernel_type = framework::OpKernelType(in_dtype, place); - auto out_kernel_type = framework::OpKernelType(out_dtype, place); - framework::LoDTensor fp16_tensor; - // copy LoD info to the new tensor - fp16_tensor.set_lod(tensor->lod()); - framework::TransDataType(in_kernel_type, out_kernel_type, *tensor, - &fp16_tensor); - - // reset output tensor - out_var->Clear(); - tensor = out_var->GetMutable(); - tensor->set_lod(fp16_tensor.lod()); - tensor->ShareDataWith(fp16_tensor); - } - } -} - -} // namespace impl -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/impl/load_combine.h b/paddle/fluid/operators/impl/load_combine.h deleted file mode 100644 index 53ffcaf43a9d9753af72412af858f0a6024758c3..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/impl/load_combine.h +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once -#include -#include -#include "paddle/fluid/framework/data_type_transform.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/platform/device_context.h" - -namespace paddle { -namespace operators { -namespace impl { - -// Load parameters from a single stream. -void LoadParamsFromStream(const std::vector &out_var_names, - const platform::Place &place, bool load_as_fp16, - std::istream *buffer, const framework::Scope *scope); - -} // namespace impl -} // namespace operators -} // namespace paddle