BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 7f4bdb1e
Authored Feb 18, 2019 by dzhwinter

Merge branch 'release/1.3' into test/picked
test=release/1.3

Parents: 8f3bf905, 745f88b9
Showing 33 changed files with 117 additions and 187 deletions (+117 -187)
paddle/fluid/API.spec (+12 -12)
paddle/fluid/inference/analysis/ir_passes/subgraph_detector.cc (+0 -71)
paddle/fluid/inference/analysis/ir_passes/subgraph_detector.h (+1 -26)
paddle/fluid/operators/fake_quantize_op.cc (+6 -15)
paddle/fluid/operators/jit/gen/act.h (+2 -3)
paddle/fluid/operators/jit/gen/blas.h (+2 -2)
paddle/fluid/operators/jit/gen/gru.h (+2 -2)
paddle/fluid/operators/jit/gen/hopv.h (+2 -2)
paddle/fluid/operators/jit/gen/jitcode.h (+2 -2)
paddle/fluid/operators/jit/gen/lstm.h (+2 -2)
paddle/fluid/operators/jit/gen/matmul.h (+2 -2)
paddle/fluid/operators/jit/gen/seqpool.h (+2 -2)
paddle/fluid/operators/jit/gen_base.h (+2 -1)
paddle/fluid/operators/row_conv_op.cc (+5 -5)
python/paddle/fluid/contrib/decoder/beam_search_decoder.py (+3 -3)
python/paddle/fluid/contrib/inferencer.py (+2 -2)
python/paddle/fluid/contrib/int8_inference/README.md (+2 -2)
python/paddle/fluid/contrib/tests/CMakeLists.txt (+5 -1)
python/paddle/fluid/contrib/tests/test_calibration.py (+0 -4)
python/paddle/fluid/contrib/trainer.py (+2 -2)
python/paddle/fluid/executor.py (+2 -2)
python/paddle/fluid/framework.py (+7 -7)
python/paddle/fluid/imperative/base.py (+2 -2)
python/paddle/fluid/initializer.py (+2 -2)
python/paddle/fluid/layers/control_flow.py (+2 -2)
python/paddle/fluid/layers/io.py (+2 -2)
python/paddle/fluid/optimizer.py (+2 -2)
python/paddle/fluid/profiler.py (+3 -3)
python/paddle/fluid/recordio_writer.py (+2 -2)
python/paddle/fluid/transpiler/memory_optimization_transpiler.py (+4 -0)
python/paddle/fluid/unique_name.py (+2 -2)
python/paddle/fluid/wrapped_decorator.py (+30 -0, new file)
python/requirements.txt (+1 -0)
paddle/fluid/API.spec

@@ -8,13 +8,13 @@ paddle.fluid.Program.parse_from_string ArgSpec(args=['binary_str'], varargs=None
 paddle.fluid.Program.to_string ArgSpec(args=['self', 'throw_on_error', 'with_details'], varargs=None, keywords=None, defaults=(False,))
 paddle.fluid.default_startup_program ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
 paddle.fluid.default_main_program ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
-paddle.fluid.program_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.program_guard ArgSpec(args=['main_program', 'startup_program'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.name_scope ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.name_scope ArgSpec(args=['prefix'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.Executor.__init__ ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Executor.close ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Executor.run ArgSpec(args=['self', 'program', 'feed', 'fetch_list', 'feed_var_name', 'fetch_var_name', 'scope', 'return_numpy', 'use_program_cache'], varargs=None, keywords=None, defaults=(None, None, None, 'feed', 'fetch', None, True, False))
 paddle.fluid.global_scope ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
-paddle.fluid.scope_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.scope_guard ArgSpec(args=['scope'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.DistributeTranspiler.__init__ ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.DistributeTranspiler.get_pserver_program ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.DistributeTranspiler.get_pserver_programs ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None)

@@ -66,7 +66,7 @@ paddle.fluid.initializer.XavierInitializer.__init__ ArgSpec(args=['self', 'unifo
 paddle.fluid.initializer.BilinearInitializer.__init__ ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.initializer.MSRAInitializer.__init__ ArgSpec(args=['self', 'uniform', 'fan_in', 'seed'], varargs=None, keywords=None, defaults=(True, None, 0))
 paddle.fluid.initializer.force_init_on_cpu ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
-paddle.fluid.initializer.init_on_cpu ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.initializer.init_on_cpu ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
 paddle.fluid.initializer.NumpyArrayInitializer.__init__ ArgSpec(args=['self', 'value'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.fc ArgSpec(args=['input', 'size', 'num_flatten_dims', 'param_attr', 'bias_attr', 'act', 'is_test', 'name'], varargs=None, keywords=None, defaults=(1, None, None, None, False, None))
 paddle.fluid.layers.embedding ArgSpec(args=['input', 'size', 'is_sparse', 'is_distributed', 'padding_idx', 'param_attr', 'dtype'], varargs=None, keywords=None, defaults=(False, False, None, None, 'float32'))

@@ -229,7 +229,7 @@ paddle.fluid.layers.random_data_generator ArgSpec(args=['low', 'high', 'shapes',
 paddle.fluid.layers.py_reader ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True))
 paddle.fluid.layers.create_py_reader_by_data ArgSpec(args=['capacity', 'feed_list', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, True))
 paddle.fluid.layers.Preprocessor.__init__ ArgSpec(args=['self', 'reader', 'name'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.layers.Preprocessor.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.layers.Preprocessor.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.Preprocessor.inputs ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.Preprocessor.outputs ArgSpec(args=['self'], varargs='outs', keywords=None, defaults=None)
 paddle.fluid.layers.load ArgSpec(args=['out', 'file_path', 'load_as_fp16'], varargs=None, keywords=None, defaults=(None,))

@@ -270,7 +270,7 @@ paddle.fluid.layers.IfElse.input ArgSpec(args=['self', 'x'], varargs=None, keywo
 paddle.fluid.layers.IfElse.output ArgSpec(args=['self'], varargs='outs', keywords=None, defaults=None)
 paddle.fluid.layers.IfElse.true_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.DynamicRNN.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.layers.DynamicRNN.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.layers.DynamicRNN.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.DynamicRNN.memory ArgSpec(args=['self', 'init', 'shape', 'value', 'need_reorder', 'dtype'], varargs=None, keywords=None, defaults=(None, None, 0.0, False, 'float32'))
 paddle.fluid.layers.DynamicRNN.output ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None)
 paddle.fluid.layers.DynamicRNN.static_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)

@@ -346,12 +346,12 @@ paddle.fluid.contrib.StateCell.set_state ArgSpec(args=['self', 'state_name', 'st
 paddle.fluid.contrib.StateCell.state_updater ArgSpec(args=['self', 'updater'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.contrib.StateCell.update_states ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.contrib.TrainingDecoder.__init__ ArgSpec(args=['self', 'state_cell', 'name'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.contrib.TrainingDecoder.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.contrib.TrainingDecoder.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.contrib.TrainingDecoder.output ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None)
 paddle.fluid.contrib.TrainingDecoder.static_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.contrib.TrainingDecoder.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.contrib.BeamSearchDecoder.__init__ ArgSpec(args=['self', 'state_cell', 'init_ids', 'init_scores', 'target_dict_dim', 'word_dim', 'input_var_dict', 'topk_size', 'sparse_emb', 'max_len', 'beam_size', 'end_id', 'name'], varargs=None, keywords=None, defaults=({}, 50, True, 100, 1, 1, None))
-paddle.fluid.contrib.BeamSearchDecoder.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.contrib.BeamSearchDecoder.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.contrib.BeamSearchDecoder.decode ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.contrib.BeamSearchDecoder.early_stop ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.contrib.BeamSearchDecoder.read_array ArgSpec(args=['self', 'init', 'is_ids', 'is_scores'], varargs=None, keywords=None, defaults=(False, False))

@@ -456,7 +456,7 @@ paddle.fluid.optimizer.AdadeltaOptimizer.apply_gradients ArgSpec(args=['self', '
 paddle.fluid.optimizer.AdadeltaOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
 paddle.fluid.optimizer.AdadeltaOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
 paddle.fluid.optimizer.ModelAverage.__init__ ArgSpec(args=['self', 'average_window_rate', 'min_average_window', 'max_average_window', 'regularization', 'name'], varargs=None, keywords=None, defaults=(10000, 10000, None, None))
-paddle.fluid.optimizer.ModelAverage.apply ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.optimizer.ModelAverage.apply ArgSpec(args=['self', 'executor', 'need_restore'], varargs=None, keywords=None, defaults=(True,))
 paddle.fluid.optimizer.ModelAverage.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.ModelAverage.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
 paddle.fluid.optimizer.ModelAverage.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))

@@ -491,14 +491,14 @@ paddle.fluid.clip.ErrorClipByValue.__init__ ArgSpec(args=['self', 'max', 'min'],
 paddle.fluid.clip.GradientClipByValue.__init__ ArgSpec(args=['self', 'max', 'min'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.clip.GradientClipByNorm.__init__ ArgSpec(args=['self', 'clip_norm'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.clip.GradientClipByGlobalNorm.__init__ ArgSpec(args=['self', 'clip_norm', 'group_name'], varargs=None, keywords=None, defaults=('default_group',))
-paddle.fluid.profiler.cuda_profiler ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.profiler.cuda_profiler ArgSpec(args=['output_file', 'output_mode', 'config'], varargs=None, keywords=None, defaults=(None, None))
 paddle.fluid.profiler.reset_profiler ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
-paddle.fluid.profiler.profiler ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.profiler.profiler ArgSpec(args=['state', 'sorted_key', 'profile_path'], varargs=None, keywords=None, defaults=(None, '/tmp/profile'))
 paddle.fluid.profiler.start_profiler ArgSpec(args=['state'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.profiler.stop_profiler ArgSpec(args=['sorted_key', 'profile_path'], varargs=None, keywords=None, defaults=(None, '/tmp/profile'))
 paddle.fluid.unique_name.generate ArgSpec(args=['key'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.unique_name.switch ArgSpec(args=['new_generator'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.unique_name.guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
+paddle.fluid.unique_name.guard ArgSpec(args=['new_generator'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.recordio_writer.convert_reader_to_recordio_file ArgSpec(args=['filename', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None))
 paddle.fluid.recordio_writer.convert_reader_to_recordio_files ArgSpec(args=['filename', 'batch_per_file', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None))
 paddle.fluid.Scope Scope() -> paddle.fluid.core._Scope
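All of the ArgSpec changes in this file follow one pattern: APIs that previously reported the opaque (*args, **kwds) signature of a contextlib.contextmanager wrapper now report their real parameter names, which is the visible effect of the signature_safe_contextmanager helper added later in this commit (python/paddle/fluid/wrapped_decorator.py). A minimal, self-contained sketch of the effect being fixed; the function name is illustrative and the exact output formatting depends on the Python version (the keywords='kwds' field in the spec entries suggests the generator used the legacy inspect.getargspec):

```python
import contextlib
import inspect


@contextlib.contextmanager
def scope_guard(scope):  # illustrative guard, not the Paddle implementation
    yield scope


# functools.wraps (used inside contextlib.contextmanager) copies the name and
# docstring but not the argument list, so argspec-based tools see the wrapper's
# own (*args, **kwds) signature instead of the decorated function's.
print(inspect.getfullargspec(scope_guard))
# -> args=[], varargs='args', varkw='kwds'   (the old API.spec entries above)
```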
paddle/fluid/inference/analysis/ir_passes/subgraph_detector.cc

@@ -460,77 +460,6 @@ inline bool CheckNodeIndegreeEquals(const Node &node, size_t n) {
   return node.inputs.size() == n;
 }
 
-NodesTSIterator::NodesTSIterator(const std::vector<Node *> &source) {
-  PADDLE_ENFORCE(!source.empty(),
-                 "Start points of topological sorting should not be empty!");
-  // CHECK all the inputs' in-degree is 0
-  for (auto *node : source) {
-    PADDLE_ENFORCE(CheckNodeIndegreeEquals(*node, 0));
-  }
-
-  std::unordered_set<Node *> visited;
-  std::unordered_set<Node *> to_visit{source.begin(), source.end()};
-
-  std::vector<Node *> inlink_visited;
-  while (!to_visit.empty()) {
-    std::vector<Node *> queue(to_visit.begin(), to_visit.end());
-    for (auto *p : queue) {
-      if (Agent(p).deleted()) {
-        visited.insert(p);
-        to_visit.erase(p);
-      }
-
-      inlink_visited.clear();
-
-      std::copy_if(p->inputs.begin(), p->inputs.end(),
-                   std::back_inserter(inlink_visited),
-                   [&](Node *x) -> bool { return visited.count(x) != 0; });
-
-      if (inlink_visited.size() == p->inputs.size()) {
-        sorted_.push_back(p);
-        for (auto *_ : p->outputs) {
-          if (!visited.count(_)) {
-            to_visit.insert(_);
-          }
-        }
-
-        to_visit.erase(p);
-        visited.insert(p);
-      }
-    }
-  }
-}
-
-NodesTSIterator::NodesTSIterator(const NodesTSIterator &other)
-    : sorted_(other.sorted_), cursor_(other.cursor_) {}
-
-Node &NodesTSIterator::operator*() {
-  PADDLE_ENFORCE_LT(cursor_, sorted_.size());
-  return *sorted_[cursor_];
-}
-
-NodesTSIterator &NodesTSIterator::operator++() {
-  if (++cursor_ >= sorted_.size()) {
-    sorted_.clear();
-    cursor_ = 0;
-  }
-  return *this;
-}
-
-NodesTSIterator &NodesTSIterator::operator=(const NodesTSIterator &other) {
-  cursor_ = other.cursor_;
-  sorted_ = other.sorted_;
-  return *this;
-}
-
-bool NodesTSIterator::operator==(const NodesTSIterator &other) {
-  return sorted_ == other.sorted_ && cursor_ == other.cursor_;
-}
-
-Node *NodesTSIterator::operator->() {
-  PADDLE_ENFORCE_LT(cursor_, sorted_.size());
-  return sorted_[cursor_];
-}
-
 }  // namespace analysis
 }  // namespace inference
 }  // namespace paddle
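The block removed here is the local definition of NodesTSIterator, a topological-sort iterator over graph nodes; the header change that follows switches the subgraph detector to the copy living in framework::ir instead of keeping a duplicate. Purely as an orientation aid (not code from this commit), the traversal it implements looks roughly like this in Python; the Node class and names are illustrative, and an acyclic graph is assumed, as the C++ enforces via the in-degree checks on the start points:

```python
from dataclasses import dataclass, field


@dataclass(eq=False)  # identity-based hashing, like raw Node* pointers
class Node:
    name: str
    inputs: list = field(default_factory=list)
    outputs: list = field(default_factory=list)


def topological_order(source_nodes):
    """Emit a node once all of its inputs have been emitted (Kahn-style sweep)."""
    visited, order = set(), []
    to_visit = set(source_nodes)  # start points are the in-degree-0 nodes
    while to_visit:
        for node in list(to_visit):
            if all(inp in visited for inp in node.inputs):
                order.append(node)
                visited.add(node)
                to_visit.discard(node)
                to_visit.update(o for o in node.outputs if o not in visited)
    return order


# a -> b -> c
a, b, c = Node("a"), Node("b"), Node("c")
a.outputs, b.inputs, b.outputs, c.inputs = [b], [a], [c], [b]
print([n.name for n in topological_order([a])])  # ['a', 'b', 'c']
```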
paddle/fluid/inference/analysis/ir_passes/subgraph_detector.h

@@ -30,6 +30,7 @@ namespace inference {
 namespace analysis {
 
 using framework::ir::Graph;
+using framework::ir::NodesTSIterator;
 
 const char kIsFunctionNode[] = "__is_function_node__";
 const char kFunctionNodeSubGraph[] = "__function_node_sub_graph__";

@@ -132,32 +133,6 @@ struct Agent {
   framework::ir::Node *x_;
 };
 
-// Topological sorting iterator on nodes.
-struct NodesTSIterator
-    : public std::iterator<std::forward_iterator_tag, framework::ir::Node *> {
-  NodesTSIterator() = default;
-  explicit NodesTSIterator(const std::vector<framework::ir::Node *> &source);
-  NodesTSIterator(NodesTSIterator &&other)
-      : sorted_(std::move(other.sorted_)), cursor_(other.cursor_) {
-    other.cursor_ = 0;
-  }
-  NodesTSIterator(const NodesTSIterator &other);
-
-  framework::ir::Node &operator*();
-  NodesTSIterator &operator++();
-  // TODO(Superjomn) current implementation just compare the first
-  // element, need to compare the graph and all the elements in the queue and
-  // set.
-  NodesTSIterator &operator=(const NodesTSIterator &other);
-  bool operator==(const NodesTSIterator &other);
-  bool operator!=(const NodesTSIterator &other) { return !(*this == other); }
-  framework::ir::Node *operator->();
-
- private:
-  std::vector<framework::ir::Node *> sorted_;
-  size_t cursor_{0};
-};
-
 // The nodes those have no input will be treated as start points.
 static std::vector<framework::ir::Node *> ExtractStartPoints(const Graph &g) {
   std::vector<framework::ir::Node *> result;
paddle/fluid/operators/fake_quantize_op.cc

@@ -21,26 +21,17 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-template <typename T, int MajorType = Eigen::RowMajor,
-          typename IndexType = Eigen::DenseIndex>
-using EigenVectorArrayMap =
-    Eigen::TensorMap<Eigen::Tensor<T, 1, MajorType, IndexType>>;
-
-template <typename T, int MajorType = Eigen::RowMajor,
-          typename IndexType = Eigen::DenseIndex>
-using ConstEigenVectorArrayMap =
-    Eigen::TensorMap<const Eigen::Tensor<T, 1, MajorType, IndexType>>;
+template <typename T>
+struct Compare {
+ public:
+  bool operator()(const T a, const T b) { return (std::abs(a) < std::abs(b)); }
+};
 
 template <typename T>
 struct FindAbsMaxFunctor<platform::CPUDeviceContext, T> {
   void operator()(const platform::CPUDeviceContext& ctx, const T* in,
                   const int num, T* out) {
-    Eigen::DSizes<Eigen::DenseIndex, 1> idim(num);
-    Eigen::DSizes<Eigen::DenseIndex, 1> odim(1);
-    Eigen::TensorMap<Eigen::Tensor<const T, 1, Eigen::RowMajor>> in_e(in, idim);
-    Eigen::TensorMap<Eigen::Tensor<T, 1, Eigen::RowMajor>> out_e(out, odim);
-    out_e = in_e.abs().maximum();
+    *out = *(std::max_element(in + 0, in + num, Compare<T>()));
  }
 };
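The rewritten CPU FindAbsMaxFunctor scans the buffer for the entry with the largest absolute value via std::max_element and the Compare functor above, replacing the Eigen tensor-map reduction. A rough NumPy analogue, for illustration only (note that, like the C++ above, it returns the element itself, so the sign is kept):

```python
import numpy as np


def find_abs_max(x):
    """Return the element of x with the largest absolute value."""
    x = np.asarray(x).ravel()
    return x[np.argmax(np.abs(x))]


print(find_abs_max([0.5, -3.0, 2.0]))  # -3.0
```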
paddle/fluid/operators/jit/gen/act.h

@@ -63,7 +63,6 @@ class VActFunc : public JitCode {
  public:
   explicit VActFunc(size_t code_size, void* code_ptr)
       : JitCode(code_size, code_ptr) {}
-  virtual const char* name() const = 0;
   virtual void genCode() = 0;
 
  protected:

@@ -269,7 +268,7 @@ class VActJitCode : public VActFunc {
     this->genCode();
   }
 
-  const char* name() const override {
+  std::string name() const override {
     std::string base = "VActJitCode";
     switch (type_) {
       case operand_type::RELU:

@@ -293,7 +292,7 @@ class VActJitCode : public VActFunc {
       default:
         break;
     }
-    return base.c_str();
+    return base;
   }
   void genCode() override;
paddle/fluid/operators/jit/gen/blas.h

@@ -41,7 +41,7 @@ class VXXJitCode : public JitCode {
     this->genCode();
   }
 
-  virtual const char* name() const {
+  std::string name() const override {
     std::string base = "VXXJitCode";
     if (scalar_index_ == 1) {
       base += "_Scalar";

@@ -62,7 +62,7 @@ class VXXJitCode : public JitCode {
     }
     base += (with_relu_ ? "_Relu" : "");
     base += "_D" + std::to_string(num_);
-    return base.c_str();
+    return base;
   }
   void genCode() override;
paddle/fluid/operators/jit/gen/gru.h

@@ -49,7 +49,7 @@ class GRUJitCode : public VActFunc {
     this->genCode();
   }
 
-  const char* name() const override {
+  std::string name() const override {
     std::string base = "GRUJitCode";
     if (id_ == 0) {
       base += "_H1";

@@ -81,7 +81,7 @@ class GRUJitCode : public VActFunc {
     };
     AddTypeStr(act_gate_);
     AddTypeStr(act_cand_);
-    return base.c_str();
+    return base;
   }
   void genCode() override;
paddle/fluid/operators/jit/gen/hopv.h

@@ -35,14 +35,14 @@ class HOPVJitCode : public JitCode {
     this->genCode();
   }
 
-  virtual const char* name() const {
+  std::string name() const override {
     std::string base = "VXXJitCode";
     if (type_ == operand_type::MAX) {
       base += "_MAX";
     } else {
       base += "_SUM";
     }
-    return base.c_str();
+    return base;
   }
   void genCode() override;
paddle/fluid/operators/jit/gen/jitcode.h

@@ -14,6 +14,7 @@
 #pragma once
 
+#include <string>
 #include <type_traits>
 #include "paddle/fluid/operators/jit/gen_base.h"
 #include "paddle/fluid/platform/cpu_info.h"

@@ -59,7 +60,7 @@ typedef enum {
 } operand_type;
 
 #define DECLARE_JIT_CODE(codename) \
-  const char* name() const override { return #codename; }
+  std::string name() const override { return #codename; }
 
 class JitCode : public GenBase, public Xbyak::CodeGenerator {
  public:

@@ -68,7 +69,6 @@ class JitCode : public GenBase, public Xbyak::CodeGenerator {
             (code_size % 4096 != 0 ? (code_size / 4096 + 1) * 4096 : code_size),
             code_ptr) {}
-  virtual const char* name() const = 0;
   virtual void genCode() = 0;
 
   size_t getSize() const override { return CodeGenerator::getSize(); }
paddle/fluid/operators/jit/gen/lstm.h

@@ -53,7 +53,7 @@ class LSTMJitCode : public VActFunc {
     this->genCode();
   }
 
-  const char* name() const override {
+  std::string name() const override {
     std::string base = "LSTMJitCode";
     if (use_peephole_) {
       base += "_Peephole";

@@ -85,7 +85,7 @@ class LSTMJitCode : public VActFunc {
     AddTypeStr(act_gate_);
     AddTypeStr(act_cand_);
     AddTypeStr(act_cell_);
-    return base.c_str();
+    return base;
   }
   void genCode() override;
paddle/fluid/operators/jit/gen/matmul.h

@@ -36,11 +36,11 @@ class MatMulJitCode : public JitCode {
     this->genCode();
   }
 
-  virtual const char* name() const {
+  std::string name() const override {
     std::string base = "MatMulJitCode";
     base = base + "_M" + std::to_string(m_) + "_N" + std::to_string(n_) + "_K" +
            std::to_string(k_);
-    return base.c_str();
+    return base;
   }
   void genCode() override;
paddle/fluid/operators/jit/gen/seqpool.h

@@ -38,7 +38,7 @@ class SeqPoolJitCode : public JitCode {
     this->genCode();
   }
 
-  virtual const char* name() const {
+  std::string name() const override {
     std::string base = "SeqPoolJitCode";
     if (type_ == SeqPoolType::kSum) {
       base += "_Sum";

@@ -48,7 +48,7 @@ class SeqPoolJitCode : public JitCode {
       base += "_Sqrt";
     }
     base += ("_W" + std::to_string(w_));
-    return base.c_str();
+    return base;
   }
   void genCode() override;
paddle/fluid/operators/jit/gen_base.h

@@ -16,6 +16,7 @@
 #include <gflags/gflags.h>
 #include <memory>  // for unique_ptr
+#include <string>
 #include <vector>
 #include "paddle/fluid/operators/jit/kernel_base.h"

@@ -28,7 +29,7 @@ namespace jit {
 class GenBase : public Kernel {
  public:
   virtual ~GenBase() = default;
-  virtual const char* name() const = 0;
+  virtual std::string name() const = 0;
   virtual size_t getSize() const = 0;
   virtual const unsigned char* getCodeInternal() = 0;
 
   template <typename Func>
paddle/fluid/operators/row_conv_op.cc

@@ -109,23 +109,23 @@ from future subsequences in a computationally efficient manner to improve
 unidirectional recurrent neural networks. The row convolution operator is
 different from the 1D sequence convolution, and is computed as follows:
 
-Given an input sequence $in$ of length $t$ and input dimension $d$,
-and a filter ($W$) of size $context \times d$,
+Given an input sequence $X$ of length $t$ and input dimension $D$,
+and a filter ($W$) of size $context \times D$,
 the output sequence is convolved as:
 
 $$
-out_{i, :} = \\sum_{j=i}^{i + context} in_{j,:} \\cdot W_{i-j, :}
+out_{i} = \\sum_{j=i}^{i + context - 1} X_{j} \\cdot W_{j-i}
 $$
 
 In the above equation:
 
 * $Out_{i}$: The i-th row of output variable with shape [1, D].
 
-* $\\tau$: Future context size.
+* $context$: Future context size.
 
 * $X_{j}$: The j-th row of input variable with shape [1, D].
 
-* $W_{i-j}$: The (i-j)-th row of parameters with shape [1, D].
+* $W_{j-i}$: The (j-i)-th row of parameters with shape [1, D].
 
 More details about row_conv please refer to
 the design document
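The corrected docstring now matches what the operator computes: each output row is a sum over a forward-looking context window of input rows, each weighted row-wise by a filter row, out_i = sum over j from i to i+context-1 of X_j * W_{j-i}. A small NumPy sketch of that formula, illustrative only and clipped at the sequence end:

```python
import numpy as np


def row_conv(x, w):
    """x: [t, D] input sequence, w: [context, D] filter; returns [t, D]."""
    t, _ = x.shape
    context = w.shape[0]
    out = np.zeros_like(x)
    for i in range(t):
        for j in range(i, min(i + context, t)):
            out[i] += x[j] * w[j - i]  # row-wise (element-wise) product
    return out


x = np.random.rand(5, 3)
w = np.random.rand(2, 3)
print(row_conv(x, w).shape)  # (5, 3)
```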
python/paddle/fluid/contrib/decoder/beam_search_decoder.py

@@ -22,7 +22,7 @@ This API is still under active development and may change drastically.
 
 from __future__ import print_function
 
-import contextlib
+from ...wrapped_decorator import signature_safe_contextmanager
 import numpy as np
 import six

@@ -419,7 +419,7 @@ class TrainingDecoder(object):
         self._state_cell = state_cell
         self._state_cell._enter_decoder(self)
 
-    @contextlib.contextmanager
+    @signature_safe_contextmanager
     def block(self):
         """
         Define the behavior of the decoder for each RNN time step.

@@ -613,7 +613,7 @@ class BeamSearchDecoder(object):
         self._word_dim = word_dim
         self._input_var_dict = input_var_dict
 
-    @contextlib.contextmanager
+    @signature_safe_contextmanager
     def block(self):
         """
         Define the behavior of the decoder for each RNN time step.
python/paddle/fluid/contrib/inferencer.py

@@ -14,7 +14,7 @@
 
 from __future__ import print_function
 
-import contextlib
+from ..wrapped_decorator import signature_safe_contextmanager
 
 from .. import core

@@ -105,7 +105,7 @@ class Inferencer(object):
 
         return results
 
-    @contextlib.contextmanager
+    @signature_safe_contextmanager
     def _prog_and_scope_guard(self):
         with framework.program_guard(main_program=self.inference_program):
             with executor.scope_guard(self.scope):
python/paddle/fluid/contrib/int8_inference/README.md

@@ -63,10 +63,10 @@ Notes:
 ## 4. How to reproduce the results
 * Small dataset
 ```bash
-python python/paddle/fluid/contrib/tests/test_calibration.py
+FLAGS_use_mkldnn=true python python/paddle/fluid/contrib/tests/test_calibration.py
 ```
 
 * Full dataset
 ```bash
-DATASET=full python python/paddle/fluid/contrib/tests/test_calibration.py
+FLAGS_use_mkldnn=true DATASET=full python python/paddle/fluid/contrib/tests/test_calibration.py
 ```
python/paddle/fluid/contrib/tests/CMakeLists.txt

@@ -6,5 +6,9 @@ if(APPLE OR WIN32 OR NOT WITH_MKL)
 endif()
 
 foreach(src ${TEST_OPS})
-    py_test(${src} SRCS ${src}.py)
+    if(src MATCHES "test_calibration")
+        py_test(${src} SRCS ${src}.py ENVS FLAGS_use_mkldnn=true)
+    else()
+        py_test(${src} SRCS ${src}.py)
+    endif()
 endforeach()
python/paddle/fluid/contrib/tests/test_calibration.py

@@ -199,7 +199,6 @@ class TestCalibrationForResnet50(unittest.TestCase):
     def run_program(self, model_path, generate_int8=False, algo='direct'):
         image_shape = [3, 224, 224]
-        os.environ['FLAGS_use_mkldnn'] = 'True'
 
         fluid.memory_optimize(fluid.default_main_program())

@@ -241,9 +240,6 @@ class TestCalibrationForResnet50(unittest.TestCase):
             label = label.reshape([-1, 1])
 
             running_program = calibrator.sampling_program.clone(
             ) if generate_int8 else infer_program.clone()
-            for op in running_program.current_block().ops:
-                if op.has_attr("use_mkldnn"):
-                    op._set_attr("use_mkldnn", True)
 
             t1 = time.time()
             _, acc1, _ = exe.run(
python/paddle/fluid/contrib/trainer.py

@@ -14,7 +14,7 @@
 
 from __future__ import print_function
 
-import contextlib
+from ..wrapped_decorator import signature_safe_contextmanager
 import os
 import errno
 import shutil

@@ -453,7 +453,7 @@ class Trainer(object):
             io.save_inference_model(param_path, feeded_var_names, target_vars,
                                     exe)
 
-    @contextlib.contextmanager
+    @signature_safe_contextmanager
     def _prog_and_scope_guard(self):
         with framework.program_guard(
                 main_program=self.train_program,
python/paddle/fluid/executor.py

@@ -17,7 +17,7 @@ from __future__ import print_function
 
 import os
 import multiprocessing
 import numpy as np
-import contextlib
+from .wrapped_decorator import signature_safe_contextmanager
 import six
 from .framework import Program, default_main_program, Variable
 from . import core

@@ -49,7 +49,7 @@ def _switch_scope(scope):
     return ex
 
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def scope_guard(scope):
     """
     Change the global/default scope instance by Python `with` statement. All
python/paddle/fluid/framework.py

@@ -16,7 +16,7 @@ from __future__ import print_function
 
 import collections
 from collections import defaultdict
-import contextlib
+from .wrapped_decorator import signature_safe_contextmanager
 import os
 import re
 import traceback

@@ -111,7 +111,7 @@ class NameScope(object):
 _name_scope = NameScope()
 
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def name_scope(prefix=None):
     """
     Generate hierarchical name prefix for the operators.

@@ -1775,7 +1775,7 @@ class Program(object):
     def set_op_role_var(self, var_name):
         self._op_role_var = [var_name]
 
-    @contextlib.contextmanager
+    @signature_safe_contextmanager
     def _optimized_guard(self, param_and_grads):
         """
         A with guard to set :code:`Optimization` :code:`OpRole` and

@@ -1805,7 +1805,7 @@ class Program(object):
         self._op_role_var = tmp_var
         self._current_role = tmp_role
 
-    @contextlib.contextmanager
+    @signature_safe_contextmanager
     def _lr_schedule_guard(self, is_with_opt=False):
         """
         A with guard to set :code:`LRSched` :code:`OpRole` and

@@ -2459,7 +2459,7 @@ def switch_startup_program(program):
     return prev_program
 
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def program_guard(main_program, startup_program=None):
     """
     Change the global main program and startup program with `with` statement.

@@ -2524,7 +2524,7 @@ def _get_var(name, program=None):
     return program.global_block().var(name)
 
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def _imperative_guard(tracer):
     global _imperative_tracer_
     tmp_trace = _imperative_tracer_

@@ -2535,7 +2535,7 @@ def _imperative_guard(tracer):
     _imperative_tracer_ = tmp_trace
 
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def _imperative_place_guard(place):
     global _imperative_current_expected_place_
     tmp_place = _imperative_current_expected_place_
python/paddle/fluid/imperative/base.py

@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import contextlib
+from ..wrapped_decorator import signature_safe_contextmanager
 import numpy as np
 
 from paddle.fluid import core

@@ -24,7 +24,7 @@ def enabled():
     return framework._in_imperative_mode()
 
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def guard(place=None):
     train = framework.Program()
     startup = framework.Program()
python/paddle/fluid/initializer.py

@@ -16,7 +16,7 @@ from __future__ import print_function
 
 from . import framework
 import numpy as np
-import contextlib
+from .wrapped_decorator import signature_safe_contextmanager
 from .core import VarDesc
 from . import unique_name

@@ -49,7 +49,7 @@ def force_init_on_cpu():
     return _force_init_on_cpu_
 
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def init_on_cpu():
     """
     Force the variable to be inited on CPU.
python/paddle/fluid/layers/control_flow.py

@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from __future__ import print_function
-import contextlib
+from ..wrapped_decorator import signature_safe_contextmanager
 
 from .layer_function_generator import autodoc, templatedoc
 from .tensor import assign, fill_constant

@@ -1532,7 +1532,7 @@ class DynamicRNN(object):
             outputs={'Out': [x_reordered]})
         return shrink_memory(x_reordered, self.step_idx, self.lod_rank_table)
 
-    @contextlib.contextmanager
+    @signature_safe_contextmanager
     def block(self):
         """
         The block for user to define operators in RNN. See the class docstring
python/paddle/fluid/layers/io.py

@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from __future__ import print_function
-import contextlib
+from ..wrapped_decorator import signature_safe_contextmanager
 import multiprocessing
 import os
 import six

@@ -1116,7 +1116,7 @@ class Preprocessor(object):
     def _is_completed(self):
         return self.sub_block and self.source_var_names and self.sink_var_names
 
-    @contextlib.contextmanager
+    @signature_safe_contextmanager
     def block(self):
         self.status = Preprocessor.IN_SUB_BLOCK
         self.sub_block = self.main_prog._create_block()
python/paddle/fluid/optimizer.py

@@ -15,7 +15,7 @@
 from __future__ import print_function
 
 from collections import defaultdict
-from contextlib import contextmanager
+from .wrapped_decorator import signature_safe_contextmanager
 
 from paddle.fluid.framework import Program, Variable, name_scope, default_main_program
 from paddle.fluid.distribute_lookup_table import find_distributed_lookup_table

@@ -1610,7 +1610,7 @@ class ModelAverage(Optimizer):
             },
             stop_gradient=True)
 
-    @contextmanager
+    @signature_safe_contextmanager
     def apply(self, executor, need_restore=True):
         """Apply average values to parameters of current model.
         """
python/paddle/fluid/profiler.py

@@ -15,7 +15,7 @@
 from __future__ import print_function
 
 from . import core
-from contextlib import contextmanager
+from .wrapped_decorator import signature_safe_contextmanager
 import os
 import six

@@ -35,7 +35,7 @@ NVPROF_CONFIG = [
 ]
 
-@contextmanager
+@signature_safe_contextmanager
 def cuda_profiler(output_file, output_mode=None, config=None):
     """The CUDA profiler.
     This fuctions is used to profile CUDA program by CUDA runtime application

@@ -217,7 +217,7 @@ def stop_profiler(sorted_key=None, profile_path='/tmp/profile'):
     core.disable_profiler(key_map[sorted_key], profile_path)
 
-@contextmanager
+@signature_safe_contextmanager
 def profiler(state, sorted_key=None, profile_path='/tmp/profile'):
     """The profiler interface.
     Different from cuda_profiler, this profiler can be used to profile both CPU
python/paddle/fluid/recordio_writer.py

@@ -15,14 +15,14 @@
 from __future__ import print_function
 
 import os
-import contextlib
+from .wrapped_decorator import signature_safe_contextmanager
 from . import core
 
 __all__ = [
     'convert_reader_to_recordio_file', 'convert_reader_to_recordio_files'
 ]
 
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def create_recordio_writer(filename,
                            compressor=core.RecordIOWriter.Compressor.Snappy,
                            max_num_records=1000):
python/paddle/fluid/transpiler/memory_optimization_transpiler.py

@@ -355,6 +355,10 @@ class ControlFlowGraph(object):
                                          is_forward).dtype()
                     cache_dtype = self._find_var(block_desc, cache_var,
                                                  is_forward).dtype()
+                    if x_dtype != cache_dtype:
+                        if PRINT_LOG:
+                            print("x_dtype and cache_dtype are different!")
+                        continue
 
                     if not compare_shape(x_shape, cache_shape, level):
                         continue
python/paddle/fluid/unique_name.py

@@ -15,7 +15,7 @@
 from __future__ import print_function
 
 import collections
-import contextlib
+from .wrapped_decorator import signature_safe_contextmanager
 import six
 import sys

@@ -68,7 +68,7 @@ def switch(new_generator=None):
     return old
 
-@contextlib.contextmanager
+@signature_safe_contextmanager
 def guard(new_generator=None):
     if isinstance(new_generator, six.string_types):
         new_generator = UniqueNameGenerator(new_generator)
python/paddle/fluid/wrapped_decorator.py (new file, mode 100644)

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import decorator
import contextlib

__all__ = ['wrap_decorator', 'signature_safe_contextmanager']


def wrap_decorator(decorator_func):
    @decorator.decorator
    def __impl__(func, *args, **kwargs):
        wrapped_func = decorator_func(func)
        return wrapped_func(*args, **kwargs)

    return __impl__


signature_safe_contextmanager = wrap_decorator(contextlib.contextmanager)
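For orientation, a hedged usage sketch of the new helper (not part of the commit): a generator function decorated with signature_safe_contextmanager behaves like one decorated with contextlib.contextmanager, but the wrapper built by the third-party decorator package keeps the original argument list visible to inspect, which is why the API.spec entries above now show real parameter names. The guard below is illustrative only, and the import assumes this commit's paddle.fluid is importable; otherwise the module body above can be copied verbatim.

```python
import inspect

from paddle.fluid.wrapped_decorator import signature_safe_contextmanager


@signature_safe_contextmanager
def demo_guard(new_generator=None):  # illustrative function, not a Paddle API
    print("enter", new_generator)
    yield
    print("exit")


with demo_guard("g"):
    pass

# The decorator-built wrapper preserves the decorated function's signature:
print(inspect.getfullargspec(demo_guard))
# -> args=['new_generator'], varargs=None, varkw=None, defaults=(None,)
```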
python/requirements.txt

@@ -11,3 +11,4 @@ graphviz
 six
 funcsigs
 pyyaml
+decorator