提交 359c4e69 编写于 作者: W wuzewu

Fix incorrect references

上级 3f787a82
...@@ -50,10 +50,10 @@ class NLPBaseModule(RunModule): ...@@ -50,10 +50,10 @@ class NLPBaseModule(RunModule):
class NLPPredictionModule(NLPBaseModule): class NLPPredictionModule(NLPBaseModule):
def _set_config(self): def _set_config(self):
'''predictor config setting''' '''predictor config setting'''
cpu_config = paddle.device.core.AnalysisConfig(self.pretrained_model_path) cpu_config = paddle.fluid.core.AnalysisConfig(self.pretrained_model_path)
cpu_config.disable_glog_info() cpu_config.disable_glog_info()
cpu_config.disable_gpu() cpu_config.disable_gpu()
self.cpu_predictor = paddle.device.core.create_paddle_predictor(cpu_config) self.cpu_predictor = paddle.fluid.core.create_paddle_predictor(cpu_config)
try: try:
_places = os.environ['CUDA_VISIBLE_DEVICES'] _places = os.environ['CUDA_VISIBLE_DEVICES']
...@@ -62,10 +62,10 @@ class NLPPredictionModule(NLPBaseModule): ...@@ -62,10 +62,10 @@ class NLPPredictionModule(NLPBaseModule):
except: except:
use_gpu = False use_gpu = False
if use_gpu: if use_gpu:
gpu_config = paddle.device.core.AnalysisConfig(self.pretrained_model_path) gpu_config = paddle.fluid.core.AnalysisConfig(self.pretrained_model_path)
gpu_config.disable_glog_info() gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(memory_pool_init_size_mb=500, device_id=0) gpu_config.enable_use_gpu(memory_pool_init_size_mb=500, device_id=0)
self.gpu_predictor = paddle.device.core.create_paddle_predictor(gpu_config) self.gpu_predictor = paddle.fluid.core.create_paddle_predictor(gpu_config)
def texts2tensor(self, texts: List[dict]) -> paddle.Tensor: def texts2tensor(self, texts: List[dict]) -> paddle.Tensor:
''' '''
...@@ -81,7 +81,7 @@ class NLPPredictionModule(NLPBaseModule): ...@@ -81,7 +81,7 @@ class NLPPredictionModule(NLPBaseModule):
for i, text in enumerate(texts): for i, text in enumerate(texts):
data += text['processed'] data += text['processed']
lod.append(len(text['processed']) + lod[i]) lod.append(len(text['processed']) + lod[i])
tensor = paddle.device.core.PaddleTensor(np.array(data).astype('int64')) tensor = paddle.fluid.core.PaddleTensor(np.array(data).astype('int64'))
tensor.name = 'words' tensor.name = 'words'
tensor.lod = [lod] tensor.lod = [lod]
tensor.shape = [lod[-1], 1] tensor.shape = [lod[-1], 1]
......
...@@ -21,20 +21,20 @@ import paddle ...@@ -21,20 +21,20 @@ import paddle
from paddlehub.utils.utils import Version from paddlehub.utils.utils import Version
# Maps Paddle variable dtypes (paddle.fluid.core.VarDesc.VarType) to the
# string names used elsewhere in PaddleHub.
# NOTE: the original literal listed the INT16 key twice with the same value;
# the duplicate is removed here (a dict literal silently keeps only one).
dtype_map = {
    paddle.fluid.core.VarDesc.VarType.FP32: "float32",
    paddle.fluid.core.VarDesc.VarType.FP64: "float64",
    paddle.fluid.core.VarDesc.VarType.FP16: "float16",
    paddle.fluid.core.VarDesc.VarType.INT32: "int32",
    paddle.fluid.core.VarDesc.VarType.INT16: "int16",
    paddle.fluid.core.VarDesc.VarType.INT64: "int64",
    paddle.fluid.core.VarDesc.VarType.BOOL: "bool",
    paddle.fluid.core.VarDesc.VarType.UINT8: "uint8",
    paddle.fluid.core.VarDesc.VarType.INT8: "int8",
}
def convert_dtype_to_string(dtype: 'paddle.fluid.core.VarDesc.VarType') -> str:
    '''
    Convert a Paddle variable dtype to its string name (e.g. "float32").

    Args:
        dtype: a ``paddle.fluid.core.VarDesc.VarType`` value used as a key of
            the module-level ``dtype_map``. (The original annotations were
            inverted — they declared ``dtype: str`` and a ``VarType`` return,
            the opposite of what the lookup actually does.)

    Returns:
        The string name associated with ``dtype`` in ``dtype_map``.

    Raises:
        TypeError: if ``dtype`` is not a key of ``dtype_map``.
    '''
    if dtype in dtype_map:
        return dtype_map[dtype]
    # Fixed typo in the original message: "shoule in" -> "should be in".
    raise TypeError("dtype should be in %s" % list(dtype_map.keys()))
......
...@@ -621,7 +621,7 @@ class BaseTask(object): ...@@ -621,7 +621,7 @@ class BaseTask(object):
self._eval_end_event(run_states) self._eval_end_event(run_states)
return run_states return run_states
def _create_predictor(self) -> paddle.device.core.PaddlePredictor: def _create_predictor(self) -> paddle.fluid.core.PaddlePredictor:
''' '''
create high-performance predictor for predict. create high-performance predictor for predict.
Returns: Returns:
...@@ -629,7 +629,7 @@ class BaseTask(object): ...@@ -629,7 +629,7 @@ class BaseTask(object):
''' '''
with generate_tempdir() as _dir: with generate_tempdir() as _dir:
self.save_inference_model(dirname=_dir) self.save_inference_model(dirname=_dir)
predictor_config = paddle.device.core.AnalysisConfig(_dir) predictor_config = paddle.fluid.core.AnalysisConfig(_dir)
predictor_config.disable_glog_info() predictor_config.disable_glog_info()
if self.config.use_cuda: if self.config.use_cuda:
...@@ -638,7 +638,7 @@ class BaseTask(object): ...@@ -638,7 +638,7 @@ class BaseTask(object):
else: else:
predictor_config.disable_gpu() predictor_config.disable_gpu()
predictor_config.enable_memory_optim() predictor_config.enable_memory_optim()
return paddle.device.core.create_paddle_predictor(predictor_config) return paddle.fluid.core.create_paddle_predictor(predictor_config)
def _run_with_predictor(self) -> List[RunState]: def _run_with_predictor(self) -> List[RunState]:
''' '''
...@@ -671,7 +671,7 @@ class BaseTask(object): ...@@ -671,7 +671,7 @@ class BaseTask(object):
tensor_batch = [[] for i in range(len(self.feed_list))] tensor_batch = [[] for i in range(len(self.feed_list))]
for i in range(len(processed_batch)): for i in range(len(processed_batch)):
processed_batch[i] = np.array(processed_batch[i]).reshape(feed_var_shape[i]).astype(feed_var_type[i]) processed_batch[i] = np.array(processed_batch[i]).reshape(feed_var_shape[i]).astype(feed_var_type[i])
tensor_batch[i] = paddle.device.core.PaddleTensor(processed_batch[i]) tensor_batch[i] = paddle.fluid.core.PaddleTensor(processed_batch[i])
fetch_result = self._predictor.run(tensor_batch) fetch_result = self._predictor.run(tensor_batch)
for index, result in enumerate(fetch_result): for index, result in enumerate(fetch_result):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册