Commit a6aa9569 authored by chenguowei01

Merge branch 'develop' of https://github.com/PaddlePaddle/PaddleSeg into develop

......@@ -195,8 +195,8 @@ endif(NOT WIN32)
if(WITH_GPU)
if(NOT WIN32)
if (USE_TENSORRT)
set(DEPS ${DEPS} ${PADDLE_DIR}/third_party/install/tensorrt/lib/libnvinfer${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${PADDLE_DIR}/third_party/install/tensorrt/lib/libnvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${PADDLE_DIR}/third_party/install/tensorrt/lib/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${PADDLE_DIR}/third_party/install/tensorrt/lib/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()
set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${CUDNN_LIB}/libcudnn${CMAKE_SHARED_LIBRARY_SUFFIX})
......
......@@ -36,7 +36,6 @@ namespace PaddleSolution {
const auto& model_dir = _model_config._model_path;
const auto& model_filename = _model_config._model_file_name;
const auto& params_filename = _model_config._param_file_name;
// load paddle model file
if (_model_config._predictor_mode == "NATIVE") {
paddle::NativeConfig config;
......@@ -52,6 +51,12 @@ namespace PaddleSolution {
paddle::AnalysisConfig config;
if (use_gpu) {
config.EnableUseGpu(100, 0);
if (TRT_MAP.find(_model_config._trt_mode) != TRT_MAP.end()) {
auto precision = TRT_MAP[_model_config._trt_mode];
bool use_cab = (precision == paddle::AnalysisConfig::Precision::kInt8);
config.EnableTensorRtEngine(1 << 30, _model_config._batch_size, 40,
precision, false, use_cab);
}
}
auto prog_file = utils::path_join(model_dir, model_filename);
auto param_file = utils::path_join(model_dir, params_filename);
......
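For readers following the new TensorRT path, a minimal, self-contained sketch (not part of the patch) of how an AnalysisConfig-based predictor is built with a chosen precision; the workspace size, max batch size and min_subgraph_size mirror the hunk above, while the header name, model paths and the FP16 choice are placeholders:

#include <memory>
#include "paddle_inference_api.h"  // assumption: Paddle inference C++ header

std::unique_ptr<paddle::PaddlePredictor> make_trt_predictor() {
    paddle::AnalysisConfig config;
    config.SetModel("/path/to/__model__", "/path/to/__params__");  // placeholder paths
    config.EnableUseGpu(100, 0);  // 100 MB initial GPU memory pool on device 0
    auto precision = paddle::AnalysisConfig::Precision::kHalf;     // e.g. TRT_MODE == "FP16"
    bool use_calib = (precision == paddle::AnalysisConfig::Precision::kInt8);
    // 1 GB workspace, batch size 1, subgraphs of >= 40 ops offloaded to TensorRT,
    // no static engine serialization, INT8 calibration only when needed
    config.EnableTensorRtEngine(1 << 30, 1, 40, precision, false, use_calib);
    return paddle::CreatePaddlePredictor(config);
}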
......@@ -55,5 +55,10 @@ class Predictor {
PaddleSolution::PaddleSegModelConfigPaser _model_config;
std::shared_ptr<PaddleSolution::ImagePreProcessor> _preprocessor;
std::unique_ptr<paddle::PaddlePredictor> _main_predictor;
std::map<std::string, paddle::AnalysisConfig::Precision> TRT_MAP = {
{"FP32", paddle::AnalysisConfig::Precision::kFloat32},
{"FP16", paddle::AnalysisConfig::Precision::kHalf},
{"INT8", paddle::AnalysisConfig::Precision::kInt8}
};
};
} // namespace PaddleSolution
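The header now carries a small string-to-precision table. A short sketch of how such a table can be consulted safely (the resolve_precision helper is illustrative, not part of the patch): find() does not insert a default entry for an unknown or empty TRT_MODE, so the caller can simply skip EnableTensorRtEngine in that case.

#include <map>
#include <string>
#include "paddle_inference_api.h"  // assumption: declares paddle::AnalysisConfig

// Illustrative helper: returns true only when `mode` names a supported precision.
static bool resolve_precision(const std::string& mode,
                              paddle::AnalysisConfig::Precision* out) {
    static const std::map<std::string, paddle::AnalysisConfig::Precision> table = {
        {"FP32", paddle::AnalysisConfig::Precision::kFloat32},
        {"FP16", paddle::AnalysisConfig::Precision::kHalf},
        {"INT8", paddle::AnalysisConfig::Precision::kInt8}};
    auto it = table.find(mode);           // no default insertion, unlike operator[]
    if (it == table.end()) return false;  // unknown or empty mode: leave TensorRT off
    *out = it->second;
    return true;
}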
......@@ -46,6 +46,7 @@ class PaddleSegModelConfigPaser {
_model_file_name.clear();
_model_path.clear();
_param_file_name.clear();
_trt_mode.clear();
}
std::string process_parenthesis(const std::string& str) {
......@@ -180,6 +181,12 @@ class PaddleSegModelConfigPaser {
} else {
_use_pr = 0;
}
// 16. trt_mode
if (config["DEPLOY"]["TRT_MODE"].IsDefined()) {
_trt_mode = config["DEPLOY"]["TRT_MODE"].as<std::string>();
} else {
_trt_mode = "";
}
return true;
}
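A minimal sketch of the matching config round trip, assuming a yaml-cpp document with a DEPLOY section such as `DEPLOY: { TRT_MODE: FP16 }`; the helper name and file path are illustrative, and the empty-string default matches the parser above (TensorRT stays disabled when the key is absent):

#include <string>
#include <yaml-cpp/yaml.h>

// Illustrative helper: read DEPLOY.TRT_MODE, defaulting to "" when missing.
static std::string read_trt_mode(const std::string& conf_path) {
    YAML::Node config = YAML::LoadFile(conf_path);  // e.g. "deploy.yaml" (placeholder)
    if (config["DEPLOY"] && config["DEPLOY"]["TRT_MODE"].IsDefined()) {
        return config["DEPLOY"]["TRT_MODE"].as<std::string>();  // "FP32" / "FP16" / "INT8"
    }
    return "";
}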
......@@ -246,8 +253,10 @@ class PaddleSegModelConfigPaser {
std::string _predictor_mode;
// DEPLOY.BATCH_SIZE
int _batch_size;
// USE_PR: OP Optimized model
// DEPLOY.USE_PR: OP Optimized model
int _use_pr;
// DEPLOY.TRT_MODE: TRT Precision
std::string _trt_mode;
};
} // namespace PaddleSolution
......@@ -13,9 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import struct
import importlib
import paddle.fluid as fluid
import numpy as np
......@@ -26,6 +24,7 @@ from utils.config import cfg
from loss import multi_softmax_with_loss
from loss import multi_dice_loss
from loss import multi_bce_loss
from models.modeling import deeplab, unet, icnet, pspnet, hrnet
class ModelPhase(object):
......@@ -70,40 +69,23 @@ class ModelPhase(object):
return False
def map_model_name(model_name):
    name_dict = {
        "unet": "unet.unet",
        "deeplabv3p": "deeplab.deeplabv3p",
        "icnet": "icnet.icnet",
        "pspnet": "pspnet.pspnet",
        "hrnet": "hrnet.hrnet"
    }
    if model_name in name_dict.keys():
        return name_dict[model_name]
    else:
        raise Exception(
            "unknow model name, only support unet, deeplabv3p, icnet")

def get_func(func_name):
    """Helper to return a function object by name. func_name must identify a
    function in this module or the path to a function relative to the base
    'modeling' module.
    """
    if func_name == '':
        return None
    try:
        parts = func_name.split('.')
        # Refers to a function in this module
        if len(parts) == 1:
            return globals()[parts[0]]
        # Otherwise, assume we're referencing a module under modeling
        module_name = 'models.' + '.'.join(parts[:-1])
        module = importlib.import_module(module_name)
        return getattr(module, parts[-1])
    except Exception:
        print('Failed to find function: {}'.format(func_name))
        return module

def seg_model(image, class_num):
    model_name = cfg.MODEL.MODEL_NAME
    if model_name == 'unet':
        logits = unet.unet(image, class_num)
    elif model_name == 'deeplabv3p':
        logits = deeplab.deeplabv3p(image, class_num)
    elif model_name == 'icnet':
        logits = icnet.icnet(image, class_num)
    elif model_name == 'pspnet':
        logits = pspnet.pspnet(image, class_num)
    elif model_name == 'hrnet':
        logits = hrnet.hrnet(image, class_num)
    else:
        raise Exception(
            "unknow model name, only support unet, deeplabv3p, icnet, pspnet, hrnet"
        )
    return logits
def softmax(logit):
......@@ -124,6 +106,7 @@ def sigmoid_to_softmax(logit):
logit = fluid.layers.transpose(logit, [0, 3, 1, 2])
return logit
def export_preprocess(image):
"""导出模型的预处理流程"""
......@@ -135,10 +118,7 @@ def export_preprocess(image):
h_fix = cfg.AUG.FIX_RESIZE_SIZE[1]
w_fix = cfg.AUG.FIX_RESIZE_SIZE[0]
image = fluid.layers.resize_bilinear(
image,
out_shape=[h_fix, w_fix],
align_corners=False,
align_mode=0)
image, out_shape=[h_fix, w_fix], align_corners=False, align_mode=0)
elif cfg.AUG.AUG_METHOD == 'rangescaling':
size = cfg.AUG.INF_RESIZE_VALUE
value = fluid.layers.reduce_max(origin_shape)
......@@ -160,8 +140,7 @@ def export_preprocess(image):
right = pad_target[1] - valid_shape[1]
paddings = fluid.layers.concat([up, down, left, right])
paddings = fluid.layers.cast(paddings, 'int32')
image = fluid.layers.pad2d(
image, paddings=paddings, pad_value=127.5)
image = fluid.layers.pad2d(image, paddings=paddings, pad_value=127.5)
# normalize
mean = np.array(cfg.MEAN).reshape(1, len(cfg.MEAN), 1, 1)
......@@ -199,7 +178,8 @@ def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN):
shape=[-1, -1, -1, cfg.DATASET.DATA_DIM],
dtype='float32',
append_batch_size=False)
image, valid_shape, origin_shape = export_preprocess(origin_image)
image, valid_shape, origin_shape = export_preprocess(
origin_image)
else:
image = fluid.layers.data(
......@@ -217,9 +197,6 @@ def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN):
iterable=False,
use_double_buffer=True)
model_name = map_model_name(cfg.MODEL.MODEL_NAME)
model_func = get_func("modeling." + model_name)
loss_type = cfg.SOLVER.LOSS
if not isinstance(loss_type, list):
loss_type = list(loss_type)
......@@ -238,7 +215,7 @@ def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN):
raise Exception(
"softmax loss can not combine with dice loss or bce loss"
)
logits = model_func(image, class_num)
logits = seg_model(image, class_num)
# Compute the corresponding loss according to the selected loss function
if ModelPhase.is_train(phase) or ModelPhase.is_eval(phase):
......