Commit a6aa9569 authored by chenguowei01

Merge branch 'develop' of https://github.com/PaddlePaddle/PaddleSeg into develop

@@ -195,8 +195,8 @@ endif(NOT WIN32)
 if(WITH_GPU)
   if(NOT WIN32)
     if (USE_TENSORRT)
-      set(DEPS ${DEPS} ${PADDLE_DIR}/third_party/install/tensorrt/lib/libnvinfer${CMAKE_STATIC_LIBRARY_SUFFIX})
-      set(DEPS ${DEPS} ${PADDLE_DIR}/third_party/install/tensorrt/lib/libnvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX})
+      set(DEPS ${DEPS} ${PADDLE_DIR}/third_party/install/tensorrt/lib/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX})
+      set(DEPS ${DEPS} ${PADDLE_DIR}/third_party/install/tensorrt/lib/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
     endif()
     set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
     set(DEPS ${DEPS} ${CUDNN_LIB}/libcudnn${CMAKE_SHARED_LIBRARY_SUFFIX})
......
@@ -36,7 +36,6 @@ namespace PaddleSolution {
         const auto& model_dir = _model_config._model_path;
         const auto& model_filename = _model_config._model_file_name;
         const auto& params_filename = _model_config._param_file_name;
-
         // load paddle model file
         if (_model_config._predictor_mode == "NATIVE") {
             paddle::NativeConfig config;
@@ -52,6 +51,12 @@ namespace PaddleSolution {
             paddle::AnalysisConfig config;
             if (use_gpu) {
                 config.EnableUseGpu(100, 0);
+                if (TRT_MAP.find(_model_config._trt_mode) != TRT_MAP.end()) {
+                    auto precision = TRT_MAP[_model_config._trt_mode];
+                    bool use_cab = (precision == paddle::AnalysisConfig::Precision::kInt8);
+                    config.EnableTensorRtEngine(1 << 30, _model_config._batch_size, 40,
+                                                precision, false, use_cab);
+                }
             }
             auto prog_file = utils::path_join(model_dir, model_filename);
             auto param_file = utils::path_join(model_dir, params_filename);
......
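Note: the block added above enables TensorRT only when DEPLOY.TRT_MODE matches one of the keys of TRT_MAP (declared in the predictor header below), and switches on the INT8 calibration path when that precision is selected. A minimal standalone sketch of the same decision logic, assuming the paddle_inference_api.h header and the EnableTensorRtEngine signature used in this patch; the helper name maybe_enable_trt and the kTrtMap variable are illustrative only:

    // Illustrative sketch, not part of the patch: map the configured mode string
    // to a TensorRT precision and enable the TensorRT subgraph engine.
    #include <map>
    #include <string>
    #include "paddle_inference_api.h"  // assumed Paddle inference header

    static const std::map<std::string, paddle::AnalysisConfig::Precision> kTrtMap = {
        {"FP32", paddle::AnalysisConfig::Precision::kFloat32},
        {"FP16", paddle::AnalysisConfig::Precision::kHalf},
        {"INT8", paddle::AnalysisConfig::Precision::kInt8}};

    void maybe_enable_trt(paddle::AnalysisConfig* config,
                          const std::string& trt_mode, int batch_size) {
        auto it = kTrtMap.find(trt_mode);
        if (it == kTrtMap.end()) {
            return;  // unknown or empty TRT_MODE: keep plain GPU inference
        }
        // INT8 requires the calibration path, hence use_calib_mode = true
        bool use_calib = (it->second == paddle::AnalysisConfig::Precision::kInt8);
        // 1 GB workspace, configured batch size, minimum subgraph size 40, use_static = false
        config->EnableTensorRtEngine(1 << 30, batch_size, 40, it->second, false, use_calib);
    }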
@@ -55,5 +55,10 @@ class Predictor {
     PaddleSolution::PaddleSegModelConfigPaser _model_config;
     std::shared_ptr<PaddleSolution::ImagePreProcessor> _preprocessor;
     std::unique_ptr<paddle::PaddlePredictor> _main_predictor;
+    std::map<std::string, paddle::AnalysisConfig::Precision> TRT_MAP = {
+        {"FP32", paddle::AnalysisConfig::Precision::kFloat32},
+        {"FP16", paddle::AnalysisConfig::Precision::kHalf},
+        {"INT8", paddle::AnalysisConfig::Precision::kInt8}
+    };
 };
 } // namespace PaddleSolution
@@ -46,6 +46,7 @@ class PaddleSegModelConfigPaser {
         _model_file_name.clear();
         _model_path.clear();
         _param_file_name.clear();
+        _trt_mode.clear();
     }

     std::string process_parenthesis(const std::string& str) {
@@ -180,6 +181,12 @@ class PaddleSegModelConfigPaser {
         } else {
             _use_pr = 0;
         }
+        // 16. trt_mode
+        if (config["DEPLOY"]["TRT_MODE"].IsDefined()) {
+            _trt_mode = config["DEPLOY"]["TRT_MODE"].as<std::string>();
+        } else {
+            _trt_mode = "";
+        }

         return true;
     }
@@ -246,8 +253,10 @@ class PaddleSegModelConfigPaser {
     std::string _predictor_mode;
     // DEPLOY.BATCH_SIZE
     int _batch_size;
-    // USE_PR: OP Optimized model
+    // DEPLOY.USE_PR: OP Optimized model
     int _use_pr;
+    // DEPLOY.TRT_MODE: TRT Precision
+    std::string _trt_mode;
 };

} // namespace PaddleSolution
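Note: with this change the deploy YAML gains an optional DEPLOY.TRT_MODE key ("FP32", "FP16" or "INT8"); when it is absent, _trt_mode stays empty and TensorRT is never enabled in the predictor. A self-contained sketch of the same fallback, assuming yaml-cpp as already used by the parser above; the embedded YAML fragment is illustrative only:

    // Illustrative sketch, not part of the patch: read DEPLOY.TRT_MODE with a
    // default of "" when the key is missing, mirroring the parser change above.
    #include <iostream>
    #include <string>
    #include "yaml-cpp/yaml.h"  // assumed dependency of the config parser

    int main() {
        // hypothetical deploy config fragment
        YAML::Node config = YAML::Load(
            "DEPLOY:\n"
            "  PREDICTOR_MODE: ANALYSIS\n"
            "  BATCH_SIZE: 1\n"
            "  TRT_MODE: FP16\n");

        std::string trt_mode;
        if (config["DEPLOY"]["TRT_MODE"].IsDefined()) {
            trt_mode = config["DEPLOY"]["TRT_MODE"].as<std::string>();
        } else {
            trt_mode = "";  // TensorRT stays disabled in the predictor
        }
        std::cout << "TRT_MODE = " << trt_mode << std::endl;
        return 0;
    }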
@@ -13,9 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import sys
 import struct
-import importlib

 import paddle.fluid as fluid
 import numpy as np
@@ -26,6 +24,7 @@ from utils.config import cfg
 from loss import multi_softmax_with_loss
 from loss import multi_dice_loss
 from loss import multi_bce_loss
+from models.modeling import deeplab, unet, icnet, pspnet, hrnet


 class ModelPhase(object):
@@ -70,40 +69,23 @@ class ModelPhase(object):
         return False


-def map_model_name(model_name):
-    name_dict = {
-        "unet": "unet.unet",
-        "deeplabv3p": "deeplab.deeplabv3p",
-        "icnet": "icnet.icnet",
-        "pspnet": "pspnet.pspnet",
-        "hrnet": "hrnet.hrnet"
-    }
-    if model_name in name_dict.keys():
-        return name_dict[model_name]
-    else:
-        raise Exception(
-            "unknow model name, only support unet, deeplabv3p, icnet")
-
-
-def get_func(func_name):
-    """Helper to return a function object by name. func_name must identify a
-    function in this module or the path to a function relative to the base
-    'modeling' module.
-    """
-    if func_name == '':
-        return None
-    try:
-        parts = func_name.split('.')
-        # Refers to a function in this module
-        if len(parts) == 1:
-            return globals()[parts[0]]
-        # Otherwise, assume we're referencing a module under modeling
-        module_name = 'models.' + '.'.join(parts[:-1])
-        module = importlib.import_module(module_name)
-        return getattr(module, parts[-1])
-    except Exception:
-        print('Failed to find function: {}'.format(func_name))
-    return module
+def seg_model(image, class_num):
+    model_name = cfg.MODEL.MODEL_NAME
+    if model_name == 'unet':
+        logits = unet.unet(image, class_num)
+    elif model_name == 'deeplabv3p':
+        logits = deeplab.deeplabv3p(image, class_num)
+    elif model_name == 'icnet':
+        logits = icnet.icnet(image, class_num)
+    elif model_name == 'pspnet':
+        logits = pspnet.pspnet(image, class_num)
+    elif model_name == 'hrnet':
+        logits = hrnet.hrnet(image, class_num)
+    else:
+        raise Exception(
+            "unknown model name, only support unet, deeplabv3p, icnet, pspnet, hrnet"
+        )
+    return logits


 def softmax(logit):
@@ -124,6 +106,7 @@ def sigmoid_to_softmax(logit):
         logit = fluid.layers.transpose(logit, [0, 3, 1, 2])
     return logit

+
 def export_preprocess(image):
     """Preprocessing pipeline of the exported model"""
@@ -135,10 +118,7 @@ def export_preprocess(image):
         h_fix = cfg.AUG.FIX_RESIZE_SIZE[1]
         w_fix = cfg.AUG.FIX_RESIZE_SIZE[0]
         image = fluid.layers.resize_bilinear(
-            image,
-            out_shape=[h_fix, w_fix],
-            align_corners=False,
-            align_mode=0)
+            image, out_shape=[h_fix, w_fix], align_corners=False, align_mode=0)
     elif cfg.AUG.AUG_METHOD == 'rangescaling':
         size = cfg.AUG.INF_RESIZE_VALUE
         value = fluid.layers.reduce_max(origin_shape)
@@ -160,8 +140,7 @@ def export_preprocess(image):
         right = pad_target[1] - valid_shape[1]
         paddings = fluid.layers.concat([up, down, left, right])
         paddings = fluid.layers.cast(paddings, 'int32')
-        image = fluid.layers.pad2d(
-            image, paddings=paddings, pad_value=127.5)
+        image = fluid.layers.pad2d(image, paddings=paddings, pad_value=127.5)

     # normalize
     mean = np.array(cfg.MEAN).reshape(1, len(cfg.MEAN), 1, 1)
@@ -199,7 +178,8 @@ def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN):
                 shape=[-1, -1, -1, cfg.DATASET.DATA_DIM],
                 dtype='float32',
                 append_batch_size=False)
-            image, valid_shape, origin_shape = export_preprocess(origin_image)
+            image, valid_shape, origin_shape = export_preprocess(
+                origin_image)

         else:
             image = fluid.layers.data(
@@ -217,9 +197,6 @@ def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN):
             iterable=False,
             use_double_buffer=True)

-        model_name = map_model_name(cfg.MODEL.MODEL_NAME)
-        model_func = get_func("modeling." + model_name)
-
         loss_type = cfg.SOLVER.LOSS
         if not isinstance(loss_type, list):
             loss_type = list(loss_type)
@@ -238,7 +215,7 @@ def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN):
                 raise Exception(
                     "softmax loss can not combine with dice loss or bce loss"
                 )
-        logits = model_func(image, class_num)
+        logits = seg_model(image, class_num)

         # compute the corresponding loss according to the selected loss functions
         if ModelPhase.is_train(phase) or ModelPhase.is_eval(phase):
......