机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit dbf07982
Commit dbf07982 (unverified)
Authored 6 years ago by Tao Luo, committed via GitHub 6 years ago

Merge pull request #13498 from luotao1/for_test

use clone(for_test=True) replace get_inference_program

Parents: fc44087d 618944a2
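In short, code that previously derived an evaluation/inference program with fluid.io.get_inference_program now clones the training program instead. A minimal before/after sketch of the migration (variable names are illustrative, not taken from the diff):

```python
import paddle.fluid as fluid

# Before (removed by this commit): prune the main program around the
# target variables to obtain an inference program.
#     test_program = fluid.io.get_inference_program([avg_cost])

# After: clone the main program for evaluation. clone(for_test=True)
# drops the backward/optimization ops and switches ops such as dropout
# and batch_norm to test mode.
test_program = fluid.default_main_program().clone(for_test=True)
```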
Showing 4 changed files with 10 additions and 32 deletions (+10 -32)
paddle/fluid/API.spec                                      +0 -1
python/paddle/fluid/__init__.py                            +1 -1
python/paddle/fluid/io.py                                  +1 -19
python/paddle/fluid/tests/unittests/dist_transformer.py    +8 -11
paddle/fluid/API.spec
@@ -73,7 +73,6 @@ paddle.fluid.io.load_params ArgSpec(args=['executor', 'dirname', 'main_program',
 paddle.fluid.io.load_persistables ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None))
 paddle.fluid.io.save_inference_model ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True))
 paddle.fluid.io.load_inference_model ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.io.get_inference_program ArgSpec(args=['target_vars', 'main_program'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.initializer.ConstantInitializer.__init__ ArgSpec(args=['self', 'value', 'force_cpu'], varargs=None, keywords=None, defaults=(0.0, False))
 paddle.fluid.initializer.UniformInitializer.__init__ ArgSpec(args=['self', 'low', 'high', 'seed'], varargs=None, keywords=None, defaults=(-1.0, 1.0, 0))
 paddle.fluid.initializer.NormalInitializer.__init__ ArgSpec(args=['self', 'loc', 'scale', 'seed'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0))
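API.spec records the argument signatures (ArgSpec) of the public fluid API; since fluid.io.get_inference_program is deleted in this commit, its entry is removed here as well.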
python/paddle/fluid/__init__.py
@@ -46,7 +46,7 @@ from . import transpiler
 from .param_attr import ParamAttr, WeightNormParamAttr
 from .data_feeder import DataFeeder
 from .core import LoDTensor, LoDTensorArray, CPUPlace, CUDAPlace, CUDAPinnedPlace, Scope
-from .transpiler import DistributeTranspiler, InferenceTranspiler, \
+from .transpiler import DistributeTranspiler, \
     memory_optimize, release_memory, DistributeTranspilerConfig
 from .lod_tensor import create_lod_tensor, create_random_int_lodtensor
 from . import clip
python/paddle/fluid/io.py
@@ -27,8 +27,7 @@ from . import core
 __all__ = [
     'save_vars', 'save_params', 'save_persistables', 'load_vars', 'load_params',
-    'load_persistables', 'save_inference_model', 'load_inference_model',
-    'get_inference_program'
+    'load_persistables', 'save_inference_model', 'load_inference_model'
 ]

@@ -504,23 +503,6 @@ def load_persistables(executor, dirname, main_program=None, filename=None):
         filename=filename)


-def get_inference_program(target_vars, main_program=None):
-    if main_program is None:
-        main_program = default_main_program()
-    if not isinstance(target_vars, list):
-        target_vars = [target_vars]
-    vars = []
-    for var in target_vars:
-        if isinstance(var, Evaluator):
-            vars.extend(var.states)
-            vars.extend(var.metrics)
-        else:
-            vars.append(var)
-    pruned_program = main_program._prune(targets=vars)
-    inference_program = pruned_program._inference_optimize()
-    return inference_program
-
-
 def prepend_feed_ops(inference_program,
                      feed_target_names,
                      feed_holder_name='feed'):
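The deleted helper pruned main_program around the requested targets and then applied _inference_optimize(); after this commit, callers clone a training program instead. A minimal, self-contained sketch of the replacement pattern, mirroring what the updated get_model() in dist_transformer.py does (the network below is made up for illustration):

```python
import paddle.fluid as fluid

main_prog = fluid.Program()
startup_prog = fluid.Program()

with fluid.program_guard(main_prog, startup_prog):
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    pred = fluid.layers.fc(input=x, size=1)
    avg_cost = fluid.layers.mean(
        fluid.layers.square_error_cost(input=pred, label=y))

    # Clone for evaluation before appending optimizer ops, as the
    # updated get_model() does with fluid.default_main_program().
    test_prog = main_prog.clone(for_test=True)

    fluid.optimizer.SGD(learning_rate=0.01).minimize(avg_cost)
```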
python/paddle/fluid/tests/unittests/dist_transformer.py
@@ -437,13 +437,8 @@ def split_data(data, num_part):
     ]


-def test_context(train_progm, avg_cost, train_exe, dev_count, data_input_names,
+def test_context(test_program, avg_cost, train_exe, dev_count, data_input_names,
                  sum_cost, token_num):
-    # Context to do validation.
-    test_program = train_progm.clone()
-    with fluid.program_guard(test_program):
-        test_program = fluid.io.get_inference_program([avg_cost])
-
     val_data = DataReader(
         src_vocab_fpath=TrainTaskConfig.src_vocab_fpath,
         trg_vocab_fpath=TrainTaskConfig.trg_vocab_fpath,

@@ -505,7 +500,7 @@ def test_context(train_progm, avg_cost, train_exe, dev_count, data_input_names,

 def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler,
-               token_num, predict):
+               token_num, predict, test_program):
     # Initialize the parameters.
     if TrainTaskConfig.ckpt_path:
         lr_scheduler.current_steps = TrainTaskConfig.start_step

@@ -554,7 +549,7 @@ def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler,
                             -1] + label_data_input_fields

     if TrainTaskConfig.val_file_pattern is not None:
-        test = test_context(train_progm, avg_cost, train_exe, dev_count,
+        test = test_context(test_program, avg_cost, train_exe, dev_count,
                             data_input_names, sum_cost, token_num)

     # the best cross-entropy value with label smoothing

@@ -1647,6 +1642,8 @@ def get_model(is_dist, is_async):
     local_lr_scheduler = LearningRateScheduler(ModelHyperParams.d_model,
                                                TrainTaskConfig.warmup_steps,
                                                TrainTaskConfig.learning_rate)
+    # Context to do validation.
+    test_program = fluid.default_main_program().clone(for_test=True)

     if not is_dist:
         optimizer = fluid.optimizer.Adam(

@@ -1671,7 +1668,7 @@ def get_model(is_dist, is_async):
             epsilon=TrainTaskConfig.eps)
         optimizer.minimize(sum_cost)

-    return sum_cost, avg_cost, predict, token_num, local_lr_scheduler
+    return sum_cost, avg_cost, predict, token_num, local_lr_scheduler, test_program


 def update_args():

@@ -1705,7 +1702,7 @@ class DistTransformer2x2(TestDistRunnerBase):
     def run_trainer(self, use_cuda, args):
         place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
         TrainTaskConfig.use_gpu = use_cuda
-        sum_cost, avg_cost, predict, token_num, local_lr_scheduler = get_model(
+        sum_cost, avg_cost, predict, token_num, local_lr_scheduler, test_program = get_model(
             args.is_dist, not args.sync_mode)

         if args.is_dist:

@@ -1726,7 +1723,7 @@ class DistTransformer2x2(TestDistRunnerBase):
         TrainTaskConfig.local = not args.is_dist
         train_loop(startup_exe, trainer_prog, 1, sum_cost, avg_cost,
-                   local_lr_scheduler, token_num, predict)
+                   local_lr_scheduler, token_num, predict, test_program)

 if __name__ == "__main__":
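With the helper gone, test_program is created once in get_model() and threaded through run_trainer() → train_loop() → test_context(); train_exe is passed alongside so the evaluation executor can reuse the trained parameters. A hedged sketch of how such a test executor is typically built in fluid (illustrative only, not copied from the file):

```python
import paddle.fluid as fluid

def make_test_exe(test_program, train_exe, use_cuda):
    # Run the cloned test program while sharing variables (parameters)
    # with the training ParallelExecutor.
    return fluid.ParallelExecutor(
        use_cuda=use_cuda,
        main_program=test_program,
        share_vars_from=train_exe)
```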