Unverified commit fea7f0de

Merge pull request #15667 from sneaxiy/fix_decorator_signature

Fix decorator signature error

Authored by Zeng Jinle on Feb 13, 2019; committed via GitHub on Feb 13, 2019.
Parents: acfe28d5, 7e399b06

Showing 16 changed files with 76 additions and 45 deletions (+76 -45):
paddle/fluid/API.spec                                          +12 -12
python/paddle/fluid/contrib/decoder/beam_search_decoder.py    +3  -3
python/paddle/fluid/contrib/inferencer.py                     +2  -2
python/paddle/fluid/contrib/trainer.py                        +2  -2
python/paddle/fluid/executor.py                               +2  -2
python/paddle/fluid/framework.py                              +7  -7
python/paddle/fluid/imperative/base.py                        +2  -2
python/paddle/fluid/initializer.py                            +2  -2
python/paddle/fluid/layers/control_flow.py                    +2  -2
python/paddle/fluid/layers/io.py                              +2  -2
python/paddle/fluid/optimizer.py                              +2  -2
python/paddle/fluid/profiler.py                               +3  -3
python/paddle/fluid/recordio_writer.py                        +2  -2
python/paddle/fluid/unique_name.py                            +2  -2
python/paddle/fluid/wrapped_decorator.py (new file)           +30 -0
python/requirements.txt                                       +1  -0
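
Background (added for context, not part of the original page): the guards listed above were previously decorated directly with contextlib.contextmanager, whose wrapper takes (*args, **kwds) and whose functools.wraps call only copies metadata such as __name__ and __doc__, not the argument list of the code object. Signature introspection therefore reported args=[], varargs='args', keywords='kwds' for every such guard, which is exactly what the removed entries in the paddle/fluid/API.spec diff below record. The new wrapped_decorator.py builds signature_safe_contextmanager on top of the decorator package, which compiles a wrapper carrying the original parameter list. A minimal sketch of the difference, with hypothetical names (wraps_based, decorator_based, example):

import functools

import decorator  # the dependency this PR adds to python/requirements.txt


def wraps_based(func):
    # Standard-library style wrapping: metadata is copied onto the wrapper,
    # but the wrapper's own code object still takes (*args, **kwargs).
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)

    return wrapper


@decorator.decorator
def decorator_based(func, *args, **kwargs):
    # The decorator package compiles a new wrapper whose parameter list matches func's.
    return func(*args, **kwargs)


def example(main_program, startup_program=None):  # guard-like signature, for illustration only
    return main_program, startup_program


a = wraps_based(example)
b = decorator_based(example)

# The wraps-based wrapper exposes no positional parameters of its own,
# while the decorator-based one keeps the original parameter list.
print(a.__code__.co_varnames[:a.__code__.co_argcount])  # ()
print(b.__code__.co_varnames[:b.__code__.co_argcount])  # ('main_program', 'startup_program')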

paddle/fluid/API.spec
@@ -8,13 +8,13 @@ paddle.fluid.Program.parse_from_string ArgSpec(args=['binary_str'], varargs=None
 paddle.fluid.Program.to_string ArgSpec(args=['self', 'throw_on_error', 'with_details'], varargs=None, keywords=None, defaults=(False,))
 paddle.fluid.default_startup_program ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
 paddle.fluid.default_main_program ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
-paddle.fluid.program_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
-paddle.fluid.name_scope ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.program_guard ArgSpec(args=['main_program', 'startup_program'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.name_scope ArgSpec(args=['prefix'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.Executor.__init__ ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Executor.close ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Executor.run ArgSpec(args=['self', 'program', 'feed', 'fetch_list', 'feed_var_name', 'fetch_var_name', 'scope', 'return_numpy', 'use_program_cache'], varargs=None, keywords=None, defaults=(None, None, None, 'feed', 'fetch', None, True, False))
 paddle.fluid.global_scope ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
-paddle.fluid.scope_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.scope_guard ArgSpec(args=['scope'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.DistributeTranspiler.__init__ ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.DistributeTranspiler.get_pserver_program ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.DistributeTranspiler.get_pserver_programs ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None)
@@ -66,7 +66,7 @@ paddle.fluid.initializer.XavierInitializer.__init__ ArgSpec(args=['self', 'unifo
 paddle.fluid.initializer.BilinearInitializer.__init__ ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.initializer.MSRAInitializer.__init__ ArgSpec(args=['self', 'uniform', 'fan_in', 'seed'], varargs=None, keywords=None, defaults=(True, None, 0))
 paddle.fluid.initializer.force_init_on_cpu ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
-paddle.fluid.initializer.init_on_cpu ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.initializer.init_on_cpu ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
 paddle.fluid.initializer.NumpyArrayInitializer.__init__ ArgSpec(args=['self', 'value'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.fc ArgSpec(args=['input', 'size', 'num_flatten_dims', 'param_attr', 'bias_attr', 'act', 'is_test', 'name'], varargs=None, keywords=None, defaults=(1, None, None, None, False, None))
 paddle.fluid.layers.embedding ArgSpec(args=['input', 'size', 'is_sparse', 'is_distributed', 'padding_idx', 'param_attr', 'dtype'], varargs=None, keywords=None, defaults=(False, False, None, None, 'float32'))
@@ -229,7 +229,7 @@ paddle.fluid.layers.random_data_generator ArgSpec(args=['low', 'high', 'shapes',
 paddle.fluid.layers.py_reader ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True))
 paddle.fluid.layers.create_py_reader_by_data ArgSpec(args=['capacity', 'feed_list', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, True))
 paddle.fluid.layers.Preprocessor.__init__ ArgSpec(args=['self', 'reader', 'name'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.layers.Preprocessor.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.layers.Preprocessor.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.Preprocessor.inputs ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.Preprocessor.outputs ArgSpec(args=['self'], varargs='outs', keywords=None, defaults=None)
 paddle.fluid.layers.load ArgSpec(args=['out', 'file_path', 'load_as_fp16'], varargs=None, keywords=None, defaults=(None,))
@@ -270,7 +270,7 @@ paddle.fluid.layers.IfElse.input ArgSpec(args=['self', 'x'], varargs=None, keywo
 paddle.fluid.layers.IfElse.output ArgSpec(args=['self'], varargs='outs', keywords=None, defaults=None)
 paddle.fluid.layers.IfElse.true_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.DynamicRNN.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.layers.DynamicRNN.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.layers.DynamicRNN.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.DynamicRNN.memory ArgSpec(args=['self', 'init', 'shape', 'value', 'need_reorder', 'dtype'], varargs=None, keywords=None, defaults=(None, None, 0.0, False, 'float32'))
 paddle.fluid.layers.DynamicRNN.output ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None)
 paddle.fluid.layers.DynamicRNN.static_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
@@ -346,12 +346,12 @@ paddle.fluid.contrib.StateCell.set_state ArgSpec(args=['self', 'state_name', 'st
 paddle.fluid.contrib.StateCell.state_updater ArgSpec(args=['self', 'updater'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.contrib.StateCell.update_states ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.contrib.TrainingDecoder.__init__ ArgSpec(args=['self', 'state_cell', 'name'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.contrib.TrainingDecoder.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.contrib.TrainingDecoder.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.contrib.TrainingDecoder.output ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None)
 paddle.fluid.contrib.TrainingDecoder.static_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.contrib.TrainingDecoder.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.contrib.BeamSearchDecoder.__init__ ArgSpec(args=['self', 'state_cell', 'init_ids', 'init_scores', 'target_dict_dim', 'word_dim', 'input_var_dict', 'topk_size', 'sparse_emb', 'max_len', 'beam_size', 'end_id', 'name'], varargs=None, keywords=None, defaults=({}, 50, True, 100, 1, 1, None))
-paddle.fluid.contrib.BeamSearchDecoder.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.contrib.BeamSearchDecoder.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.contrib.BeamSearchDecoder.decode ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.contrib.BeamSearchDecoder.early_stop ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.contrib.BeamSearchDecoder.read_array ArgSpec(args=['self', 'init', 'is_ids', 'is_scores'], varargs=None, keywords=None, defaults=(False, False))
@@ -456,7 +456,7 @@ paddle.fluid.optimizer.AdadeltaOptimizer.apply_gradients ArgSpec(args=['self', '
 paddle.fluid.optimizer.AdadeltaOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
 paddle.fluid.optimizer.AdadeltaOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
 paddle.fluid.optimizer.ModelAverage.__init__ ArgSpec(args=['self', 'average_window_rate', 'min_average_window', 'max_average_window', 'regularization', 'name'], varargs=None, keywords=None, defaults=(10000, 10000, None, None))
-paddle.fluid.optimizer.ModelAverage.apply ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.optimizer.ModelAverage.apply ArgSpec(args=['self', 'executor', 'need_restore'], varargs=None, keywords=None, defaults=(True,))
 paddle.fluid.optimizer.ModelAverage.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.ModelAverage.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
 paddle.fluid.optimizer.ModelAverage.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
@@ -491,14 +491,14 @@ paddle.fluid.clip.ErrorClipByValue.__init__ ArgSpec(args=['self', 'max', 'min'],
 paddle.fluid.clip.GradientClipByValue.__init__ ArgSpec(args=['self', 'max', 'min'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.clip.GradientClipByNorm.__init__ ArgSpec(args=['self', 'clip_norm'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.clip.GradientClipByGlobalNorm.__init__ ArgSpec(args=['self', 'clip_norm', 'group_name'], varargs=None, keywords=None, defaults=('default_group',))
-paddle.fluid.profiler.cuda_profiler ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.profiler.cuda_profiler ArgSpec(args=['output_file', 'output_mode', 'config'], varargs=None, keywords=None, defaults=(None, None))
 paddle.fluid.profiler.reset_profiler ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
-paddle.fluid.profiler.profiler ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.profiler.profiler ArgSpec(args=['state', 'sorted_key', 'profile_path'], varargs=None, keywords=None, defaults=(None, '/tmp/profile'))
 paddle.fluid.profiler.start_profiler ArgSpec(args=['state'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.profiler.stop_profiler ArgSpec(args=['sorted_key', 'profile_path'], varargs=None, keywords=None, defaults=(None, '/tmp/profile'))
 paddle.fluid.unique_name.generate ArgSpec(args=['key'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.unique_name.switch ArgSpec(args=['new_generator'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.unique_name.guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.unique_name.guard ArgSpec(args=['new_generator'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.recordio_writer.convert_reader_to_recordio_file ArgSpec(args=['filename', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None))
 paddle.fluid.recordio_writer.convert_reader_to_recordio_files ArgSpec(args=['filename', 'batch_per_file', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None))
 paddle.fluid.Scope Scope() -> paddle.fluid.core._Scope
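
Every changed pair above follows the same pattern: the removed entry is the generic signature of contextlib's internal helper, the added entry is the guard's real parameter list (for example scope_guard(scope) and program_guard(main_program, startup_program=None)). A hedged reproduction of the removed entries under Python 2.7, whose ArgSpec field names match the format used in this file; the scope_guard below is a local stand-in, not imported from Paddle:

import contextlib
import inspect


@contextlib.contextmanager
def scope_guard(scope):  # stand-in with the same name and parameters as the real guard
    yield scope


# On Python 2.7 this prints ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None),
# i.e. the generic helper signature recorded by the removed lines above.
print(inspect.getargspec(scope_guard))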

python/paddle/fluid/contrib/decoder/beam_search_decoder.py
@@ -22,7 +22,7 @@ This API is still under active development and may change drastically.
 from __future__ import print_function
-import contextlib
+from ...wrapped_decorator import signature_safe_contextmanager
 import numpy as np
 import six
@@ -419,7 +419,7 @@ class TrainingDecoder(object):
         self._state_cell = state_cell
         self._state_cell._enter_decoder(self)
-    @contextlib.contextmanager
+    @signature_safe_contextmanager
     def block(self):
         """
         Define the behavior of the decoder for each RNN time step.
@@ -613,7 +613,7 @@ class BeamSearchDecoder(object):
         self._word_dim = word_dim
         self._input_var_dict = input_var_dict
-    @contextlib.contextmanager
+    @signature_safe_contextmanager
     def block(self):
         """
         Define the behavior of the decoder for each RNN time step.

python/paddle/fluid/contrib/inferencer.py
@@ -14,7 +14,7 @@
 from __future__ import print_function
-import contextlib
+from ..wrapped_decorator import signature_safe_contextmanager
 from .. import core
@@ -105,7 +105,7 @@ class Inferencer(object):
         return results
-    @contextlib.contextmanager
+    @signature_safe_contextmanager
     def _prog_and_scope_guard(self):
         with framework.program_guard(main_program=self.inference_program):
             with executor.scope_guard(self.scope):

python/paddle/fluid/contrib/trainer.py
@@ -14,7 +14,7 @@
 from __future__ import print_function
-import contextlib
+from ..wrapped_decorator import signature_safe_contextmanager
 import os
 import errno
 import shutil
@@ -453,7 +453,7 @@ class Trainer(object):
         io.save_inference_model(param_path, feeded_var_names, target_vars, exe)
-    @contextlib.contextmanager
+    @signature_safe_contextmanager
     def _prog_and_scope_guard(self):
         with framework.program_guard(main_program=self.train_program,

python/paddle/fluid/executor.py
@@ -17,7 +17,7 @@ from __future__ import print_function
 import os
 import multiprocessing
 import numpy as np
-import contextlib
+from .wrapped_decorator import signature_safe_contextmanager
 import six
 from .framework import Program, default_main_program, Variable
 from . import core
@@ -49,7 +49,7 @@ def _switch_scope(scope):
     return ex
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def scope_guard(scope):
     """
     Change the global/default scope instance by Python `with` statement. All

python/paddle/fluid/framework.py
@@ -16,7 +16,7 @@ from __future__ import print_function
 import collections
 from collections import defaultdict
-import contextlib
+from .wrapped_decorator import signature_safe_contextmanager
 import os
 import re
 import traceback
@@ -111,7 +111,7 @@ class NameScope(object):
 _name_scope = NameScope()
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def name_scope(prefix=None):
     """
     Generate hierarchical name prefix for the operators.
@@ -1775,7 +1775,7 @@ class Program(object):
     def set_op_role_var(self, var_name):
         self._op_role_var = [var_name]
-    @contextlib.contextmanager
+    @signature_safe_contextmanager
     def _optimized_guard(self, param_and_grads):
         """
         A with guard to set :code:`Optimization` :code:`OpRole` and
@@ -1805,7 +1805,7 @@ class Program(object):
         self._op_role_var = tmp_var
         self._current_role = tmp_role
-    @contextlib.contextmanager
+    @signature_safe_contextmanager
     def _lr_schedule_guard(self, is_with_opt=False):
         """
         A with guard to set :code:`LRSched` :code:`OpRole` and
@@ -2459,7 +2459,7 @@ def switch_startup_program(program):
     return prev_program
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def program_guard(main_program, startup_program=None):
     """
     Change the global main program and startup program with `with` statement.
@@ -2524,7 +2524,7 @@ def _get_var(name, program=None):
     return program.global_block().var(name)
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def _imperative_guard(tracer):
     global _imperative_tracer_
     tmp_trace = _imperative_tracer_
@@ -2535,7 +2535,7 @@ def _imperative_guard(tracer):
     _imperative_tracer_ = tmp_trace
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def _imperative_place_guard(place):
     global _imperative_current_expected_place_
     tmp_place = _imperative_current_expected_place_

python/paddle/fluid/imperative/base.py
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import contextlib
+from ..wrapped_decorator import signature_safe_contextmanager
 import numpy as np
 from paddle.fluid import core
@@ -24,7 +24,7 @@ def enabled():
     return framework._in_imperative_mode()
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def guard(place=None):
     train = framework.Program()
     startup = framework.Program()

python/paddle/fluid/initializer.py
@@ -16,7 +16,7 @@ from __future__ import print_function
 from . import framework
 import numpy as np
-import contextlib
+from .wrapped_decorator import signature_safe_contextmanager
 from .core import VarDesc
 from . import unique_name
@@ -49,7 +49,7 @@ def force_init_on_cpu():
     return _force_init_on_cpu_
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def init_on_cpu():
     """
     Force the variable to be inited on CPU.

python/paddle/fluid/layers/control_flow.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 from __future__ import print_function
-import contextlib
+from ..wrapped_decorator import signature_safe_contextmanager
 from .layer_function_generator import autodoc, templatedoc
 from .tensor import assign, fill_constant
@@ -1532,7 +1532,7 @@ class DynamicRNN(object):
             outputs={'Out': [x_reordered]})
         return shrink_memory(x_reordered, self.step_idx, self.lod_rank_table)
-    @contextlib.contextmanager
+    @signature_safe_contextmanager
     def block(self):
         """
         The block for user to define operators in RNN. See the class docstring

python/paddle/fluid/layers/io.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 from __future__ import print_function
-import contextlib
+from ..wrapped_decorator import signature_safe_contextmanager
 import multiprocessing
 import os
 import six
@@ -1116,7 +1116,7 @@ class Preprocessor(object):
     def _is_completed(self):
         return self.sub_block and self.source_var_names and self.sink_var_names
-    @contextlib.contextmanager
+    @signature_safe_contextmanager
     def block(self):
         self.status = Preprocessor.IN_SUB_BLOCK
         self.sub_block = self.main_prog._create_block()

python/paddle/fluid/optimizer.py
@@ -15,7 +15,7 @@
 from __future__ import print_function
 from collections import defaultdict
-from contextlib import contextmanager
+from .wrapped_decorator import signature_safe_contextmanager
 from paddle.fluid.framework import Program, Variable, name_scope, default_main_program
 from paddle.fluid.distribute_lookup_table import find_distributed_lookup_table
@@ -1610,7 +1610,7 @@ class ModelAverage(Optimizer):
             },
             stop_gradient=True)
-    @contextmanager
+    @signature_safe_contextmanager
     def apply(self, executor, need_restore=True):
         """Apply average values to parameters of current model.
         """

python/paddle/fluid/profiler.py
@@ -15,7 +15,7 @@
 from __future__ import print_function
 from . import core
-from contextlib import contextmanager
+from .wrapped_decorator import signature_safe_contextmanager
 import os
 import six
@@ -35,7 +35,7 @@ NVPROF_CONFIG = [
 ]
-@contextmanager
+@signature_safe_contextmanager
 def cuda_profiler(output_file, output_mode=None, config=None):
     """The CUDA profiler.
     This fuctions is used to profile CUDA program by CUDA runtime application
@@ -217,7 +217,7 @@ def stop_profiler(sorted_key=None, profile_path='/tmp/profile'):
     core.disable_profiler(key_map[sorted_key], profile_path)
-@contextmanager
+@signature_safe_contextmanager
 def profiler(state, sorted_key=None, profile_path='/tmp/profile'):
     """The profiler interface.
     Different from cuda_profiler, this profiler can be used to profile both CPU

python/paddle/fluid/recordio_writer.py
@@ -15,14 +15,14 @@
 from __future__ import print_function
 import os
-import contextlib
+from .wrapped_decorator import signature_safe_contextmanager
 from . import core
 __all__ = [
     'convert_reader_to_recordio_file', 'convert_reader_to_recordio_files'
 ]
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def create_recordio_writer(filename,
                            compressor=core.RecordIOWriter.Compressor.Snappy,
                            max_num_records=1000):

python/paddle/fluid/unique_name.py
@@ -15,7 +15,7 @@
 from __future__ import print_function
 import collections
-import contextlib
+from .wrapped_decorator import signature_safe_contextmanager
 import six
 import sys
@@ -68,7 +68,7 @@ def switch(new_generator=None):
     return old
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def guard(new_generator=None):
     if isinstance(new_generator, six.string_types):
         new_generator = UniqueNameGenerator(new_generator)

python/paddle/fluid/wrapped_decorator.py (new file, mode 100644)
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import decorator
import contextlib

__all__ = ['wrap_decorator', 'signature_safe_contextmanager']


def wrap_decorator(decorator_func):
    @decorator.decorator
    def __impl__(func, *args, **kwargs):
        wrapped_func = decorator_func(func)
        return wrapped_func(*args, **kwargs)

    return __impl__


signature_safe_contextmanager = wrap_decorator(contextlib.contextmanager)
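
The helper above is the whole mechanism behind replacing @contextlib.contextmanager with @signature_safe_contextmanager throughout this diff: wrap_decorator turns any function-transforming decorator into a signature-preserving one via the decorator package, and signature_safe_contextmanager is that treatment applied to contextlib.contextmanager. A small usage sketch; my_guard is an illustrative guard, not part of this change, and the import assumes a Paddle checkout containing this new module (otherwise paste the wrap_decorator definition above):

import inspect

from paddle.fluid.wrapped_decorator import signature_safe_contextmanager


@signature_safe_contextmanager
def my_guard(place=None):  # hypothetical guard, mirroring how the Paddle guards are written
    # setup would go here
    yield place
    # teardown would go here


# Behaves like a normal context manager...
with my_guard('cpu') as p:
    print(p)  # cpu

# ...while introspection still sees the real parameter list, so a spec generator
# can record args=['place'] rather than the generic (*args, **kwds).
print(inspect.getfullargspec(my_guard).args)  # ['place']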

python/requirements.txt
@@ -11,3 +11,4 @@ graphviz
 six
 funcsigs
 pyyaml
+decorator