机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit d8a939d8
Authored Mar 04, 2019 by Liu Yiqun

Merge branch 'develop' into core_opt_choose_kernel

Parents: d4674dab, cae6614c
Showing 38 changed files with 911 additions and 863 deletions (+911 -863)
paddle/fluid/API.spec  +500  -500
paddle/fluid/framework/CMakeLists.txt  +2  -2
paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc  +3  -1
paddle/fluid/framework/tensor_util.cc  +7  -0
paddle/fluid/memory/CMakeLists.txt  +1  -1
paddle/fluid/memory/memcpy.cc  +20  -0
paddle/fluid/operators/ngraph/ngraph_bridge.cc  +1  -0
paddle/fluid/operators/ngraph/ngraph_bridge.h  +1  -0
paddle/fluid/operators/ngraph/ops/accuracy_op.h  +2  -0
paddle/fluid/operators/ngraph/ops/activation_op.h  +2  -0
paddle/fluid/operators/ngraph/ops/batch_norm_op.h  +2  -0
paddle/fluid/operators/ngraph/ops/binary_unary_op.h  +2  -0
paddle/fluid/operators/ngraph/ops/conv2d_op.h  +2  -0
paddle/fluid/operators/ngraph/ops/cross_entropy_op.h  +2  -0
paddle/fluid/operators/ngraph/ops/elementwise_add_op.h  +2  -0
paddle/fluid/operators/ngraph/ops/fill_constant_op.h  +2  -0
paddle/fluid/operators/ngraph/ops/mean_op.h  +2  -0
paddle/fluid/operators/ngraph/ops/momentum_op.h  +2  -0
paddle/fluid/operators/ngraph/ops/mul_op.h  +2  -0
paddle/fluid/operators/ngraph/ops/pool2d_op.h  +2  -0
paddle/fluid/operators/ngraph/ops/scale_op.h  +2  -0
paddle/fluid/operators/ngraph/ops/softmax_op.h  +2  -0
paddle/fluid/operators/ngraph/ops/top_k_op.h  +2  -0
paddle/fluid/operators/reader/buffered_reader.cc  +14  -9
paddle/fluid/platform/device_tracer.cc  +54  -9
paddle/fluid/platform/device_tracer.h  +12  -1
paddle/scripts/paddle_build.sh  +14  -17
python/paddle/fluid/compiler.py  +43  -29
python/paddle/fluid/executor.py  +34  -34
python/paddle/fluid/framework.py  +0  -9
python/paddle/fluid/io.py  +2  -1
python/paddle/fluid/parallel_executor.py  +19  -140
python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py  +118  -23
python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_mkldnn_op.py  +18  -0
tools/check_doc_approval.py  +0  -85
tools/diff_api.py  +6  -0
tools/print_signatures.py  +11  -1
tools/timeline.py  +1  -1
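Most of the churn is in paddle/fluid/API.spec (+500 -500): in the hunk that follows, every exported API entry now pairs its ArgSpec with a ('document', <32-character hex>) tuple, which looks like an MD5 digest of the API's docstring, and the surrounding tooling moves with it (tools/print_signatures.py and tools/diff_api.py updated, tools/check_doc_approval.py removed). A minimal sketch of how one such line could be produced, assuming the digest is an MD5 of the docstring; the helper name and the handling of missing docstrings are assumptions, and the real generator is tools/print_signatures.py:

    import hashlib
    import inspect

    def api_spec_entry(api_name, func):
        """Hypothetical helper: render one new-style API.spec line."""
        # inspect.getargspec is the Python-2-era API this 2019 codebase targets;
        # the namedtuple's repr is exactly the "ArgSpec(...)" text in the file.
        spec = inspect.getargspec(func)
        # Assumption: the repeated 6adf97f83acf6453d4a6a4b1070f3754 value suggests
        # undocumented APIs hash a placeholder string instead of a real docstring.
        doc = func.__doc__ or 'None'
        digest = hashlib.md5(doc.encode('utf-8')).hexdigest()
        return "{} ({}, ('document', '{}'))".format(api_name, repr(spec), digest)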
paddle/fluid/API.spec @ d8a939d8
paddle.fluid.Program.__init__ ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.Program.block ArgSpec(args=['self', 'index'], varargs=None, keywords=None, defaults=None)
paddle.fluid.Program.clone ArgSpec(args=['self', 'for_test'], varargs=None, keywords=None, defaults=(False,))
paddle.fluid.Program.current_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.Program.global_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.Program.list_vars ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.Program.parse_from_string ArgSpec(args=['binary_str'], varargs=None, keywords=None, defaults=None)
paddle.fluid.Program.to_string ArgSpec(args=['self', 'throw_on_error', 'with_details'], varargs=None, keywords=None, defaults=(False,))
paddle.fluid.default_startup_program ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
paddle.fluid.default_main_program ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
paddle.fluid.program_guard ArgSpec(args=['main_program', 'startup_program'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.name_scope ArgSpec(args=['prefix'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.Executor.__init__ ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None)
paddle.fluid.Executor.close ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.Executor.run ArgSpec(args=['self', 'program', 'feed', 'fetch_list', 'feed_var_name', 'fetch_var_name', 'scope', 'return_numpy', 'use_program_cache'], varargs=None, keywords=None, defaults=(None, None, None, 'feed', 'fetch', None, True, False))
paddle.fluid.global_scope ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
paddle.fluid.scope_guard ArgSpec(args=['scope'], varargs=None, keywords=None, defaults=None)
paddle.fluid.DistributeTranspiler.__init__ ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.DistributeTranspiler.get_pserver_program ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None)
paddle.fluid.DistributeTranspiler.get_pserver_programs ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None)
paddle.fluid.DistributeTranspiler.get_startup_program ArgSpec(args=['self', 'endpoint', 'pserver_program', 'startup_program'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.DistributeTranspiler.get_trainer_program ArgSpec(args=['self', 'wait_port'], varargs=None, keywords=None, defaults=(True,))
paddle.fluid.DistributeTranspiler.transpile ArgSpec(args=['self', 'trainer_id', 'program', 'pservers', 'trainers', 'sync_mode', 'startup_program', 'current_endpoint'], varargs=None, keywords=None, defaults=(None, '127.0.0.1:6174', 1, True, None, '127.0.0.1:6174'))
paddle.fluid.memory_optimize ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level', 'skip_grads'], varargs=None, keywords=None, defaults=(None, False, 0, False))
paddle.fluid.release_memory ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.Program.__init__ (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.Program.block (ArgSpec(args=['self', 'index'], varargs=None, keywords=None, defaults=None), ('document', 'af5346376065ff4cf6832a8ac0ae0945'))
paddle.fluid.Program.clone (ArgSpec(args=['self', 'for_test'], varargs=None, keywords=None, defaults=(False,)), ('document', 'ebb7765b2962bd2be041d19720e49d0f'))
paddle.fluid.Program.current_block (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '5e162d3bf8dd625703463d9e4be36adb'))
paddle.fluid.Program.global_block (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'cfb7e05a002b2e64650778cabde7301c'))
paddle.fluid.Program.list_vars (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '1c8647b14fe57c7824b1c9562394dd3c'))
paddle.fluid.Program.parse_from_string (ArgSpec(args=['binary_str'], varargs=None, keywords=None, defaults=None), ('document', 'b6a7ffb239a30bf2ce58cfaca8d8b8d5'))
paddle.fluid.Program.to_string (ArgSpec(args=['self', 'throw_on_error', 'with_details'], varargs=None, keywords=None, defaults=(False,)), ('document', 'faec17e5a04af28e3776160e34504d15'))
paddle.fluid.default_startup_program (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '99e5d53d92d82797093332719c9e3ccd'))
paddle.fluid.default_main_program (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '5430f54ab4895f9f47db6bebbaf71659'))
paddle.fluid.program_guard (ArgSpec(args=['main_program', 'startup_program'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b54f403e57825a1592aece03afe3afb6'))
paddle.fluid.name_scope (ArgSpec(args=['prefix'], varargs=None, keywords=None, defaults=(None,)), ('document', '0ef753f5cec69fef9ae6ad8b867b33a2'))
paddle.fluid.Executor.__init__ (ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.Executor.close (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '78e512cabeda9c7f42cb7c7e88967ae7'))
paddle.fluid.Executor.run (ArgSpec(args=['self', 'program', 'feed', 'fetch_list', 'feed_var_name', 'fetch_var_name', 'scope', 'return_numpy', 'use_program_cache'], varargs=None, keywords=None, defaults=(None, None, None, 'feed', 'fetch', None, True, False)), ('document', 'aba8093edebf2d5c869b735b92811e45'))
paddle.fluid.global_scope (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', 'e148d3ab1ed8edf3e928212a375959c0'))
paddle.fluid.scope_guard (ArgSpec(args=['scope'], varargs=None, keywords=None, defaults=None), ('document', 'b94d1f6bcc29c4fb58fc0058561250c2'))
paddle.fluid.DistributeTranspiler.__init__ (ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.DistributeTranspiler.get_pserver_program (ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None), ('document', '292ab72977afbe58e6a3bde175452680'))
paddle.fluid.DistributeTranspiler.get_pserver_programs (ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None), ('document', '78f4949aedf317666a89ca74b3748ba8'))
paddle.fluid.DistributeTranspiler.get_startup_program (ArgSpec(args=['self', 'endpoint', 'pserver_program', 'startup_program'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'd796fc0c8d51503b556fcf6dc15c4f0c'))
paddle.fluid.DistributeTranspiler.get_trainer_program (ArgSpec(args=['self', 'wait_port'], varargs=None, keywords=None, defaults=(True,)), ('document', '736330e31a7a54abccc0c7fd9119d9ff'))
paddle.fluid.DistributeTranspiler.transpile (ArgSpec(args=['self', 'trainer_id', 'program', 'pservers', 'trainers', 'sync_mode', 'startup_program', 'current_endpoint'], varargs=None, keywords=None, defaults=(None, '127.0.0.1:6174', 1, True, None, '127.0.0.1:6174')), ('document', '06ce55338dfe96311ad1078235ab3bf4'))
paddle.fluid.memory_optimize (ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level', 'skip_grads'], varargs=None, keywords=None, defaults=(None, False, 0, False)), ('document', 'eda17d0f1639bc6ca215cecf87f588a4'))
paddle.fluid.release_memory (ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,)), ('document', 'ac4114d3df16264f1946deb3a8434a6f'))
paddle.fluid.DistributeTranspilerConfig.__init__
paddle.fluid.ParallelExecutor.__init__ ArgSpec(args=['self', 'use_cuda', 'loss_name', 'main_program', 'share_vars_from', 'exec_strategy', 'build_strategy', 'num_trainers', 'trainer_id', 'scope'], varargs=None, keywords=None, defaults=(None, None, None, None, None, 1, 0, None))
paddle.fluid.ParallelExecutor.run ArgSpec(args=['self', 'fetch_list', 'feed', 'feed_dict', 'return_numpy'], varargs=None, keywords=None, defaults=(None, None, True))
paddle.fluid.create_lod_tensor ArgSpec(args=['data', 'recursive_seq_lens', 'place'], varargs=None, keywords=None, defaults=None)
paddle.fluid.create_random_int_lodtensor ArgSpec(args=['recursive_seq_lens', 'base_shape', 'place', 'low', 'high'], varargs=None, keywords=None, defaults=None)
paddle.fluid.DataFeedDesc.__init__ ArgSpec(args=['self', 'proto_file'], varargs=None, keywords=None, defaults=None)
paddle.fluid.DataFeedDesc.desc ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.DataFeedDesc.set_batch_size ArgSpec(args=['self', 'batch_size'], varargs=None, keywords=None, defaults=None)
paddle.fluid.DataFeedDesc.set_dense_slots ArgSpec(args=['self', 'dense_slots_name'], varargs=None, keywords=None, defaults=None)
paddle.fluid.DataFeedDesc.set_use_slots ArgSpec(args=['self', 'use_slots_name'], varargs=None, keywords=None, defaults=None)
paddle.fluid.AsyncExecutor.__init__ ArgSpec(args=['self', 'place', 'run_mode'], varargs=None, keywords=None, defaults=(None, ''))
paddle.fluid.AsyncExecutor.config_distributed_nodes ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.AsyncExecutor.download_data ArgSpec(args=['self', 'afs_path', 'local_path', 'fs_default_name', 'ugi', 'file_cnt', 'hadoop_home', 'process_num'], varargs=None, keywords=None, defaults=('$HADOOP_HOME', 12))
paddle.fluid.AsyncExecutor.get_instance ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.AsyncExecutor.init_model ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.AsyncExecutor.init_server ArgSpec(args=['self', 'dist_desc'], varargs=None, keywords=None, defaults=None)
paddle.fluid.AsyncExecutor.init_worker ArgSpec(args=['self', 'dist_desc', 'startup_program'], varargs=None, keywords=None, defaults=None)
paddle.fluid.AsyncExecutor.run ArgSpec(args=['self', 'program', 'data_feed', 'filelist', 'thread_num', 'fetch', 'mode', 'debug'], varargs=None, keywords=None, defaults=('', False))
paddle.fluid.AsyncExecutor.save_model ArgSpec(args=['self', 'save_path'], varargs=None, keywords=None, defaults=None)
paddle.fluid.AsyncExecutor.stop ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.CompiledProgram.__init__ ArgSpec(args=['self', 'program_or_graph'], varargs=None, keywords=None, defaults=None)
paddle.fluid.CompiledProgram.with_data_parallel ArgSpec(args=['self', 'loss_name', 'build_strategy', 'exec_strategy', 'share_vars_from'], varargs=None, keywords=None, defaults=(None, None, None, None))
paddle.fluid.CompiledProgram.with_inference_optimize ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=None)
paddle.fluid.ParallelExecutor.__init__ (ArgSpec(args=['self', 'use_cuda', 'loss_name', 'main_program', 'share_vars_from', 'exec_strategy', 'build_strategy', 'num_trainers', 'trainer_id', 'scope'], varargs=None, keywords=None, defaults=(None, None, None, None, None, 1, 0, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.ParallelExecutor.run (ArgSpec(args=['self', 'fetch_list', 'feed', 'feed_dict', 'return_numpy'], varargs=None, keywords=None, defaults=(None, None, True)), ('document', '2cb4bd74481861345c70228a0f57620c'))
paddle.fluid.create_lod_tensor (ArgSpec(args=['data', 'recursive_seq_lens', 'place'], varargs=None, keywords=None, defaults=None), ('document', '8e7bb21e83ff4604f5b379672e285b94'))
paddle.fluid.create_random_int_lodtensor (ArgSpec(args=['recursive_seq_lens', 'base_shape', 'place', 'low', 'high'], varargs=None, keywords=None, defaults=None), ('document', '368f638b99f1dfe59e9b02aa6f077752'))
paddle.fluid.DataFeedDesc.__init__ (ArgSpec(args=['self', 'proto_file'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.DataFeedDesc.desc (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '4294493e31c4bc9fc4bd48753044235f'))
paddle.fluid.DataFeedDesc.set_batch_size (ArgSpec(args=['self', 'batch_size'], varargs=None, keywords=None, defaults=None), ('document', '8d9f44601e0a99dd431f14fd9250cd21'))
paddle.fluid.DataFeedDesc.set_dense_slots (ArgSpec(args=['self', 'dense_slots_name'], varargs=None, keywords=None, defaults=None), ('document', 'eb894b464bbcd1b4bc8038398954f766'))
paddle.fluid.DataFeedDesc.set_use_slots (ArgSpec(args=['self', 'use_slots_name'], varargs=None, keywords=None, defaults=None), ('document', '415c56600ce4e198c071cad01409a690'))
paddle.fluid.AsyncExecutor.__init__ (ArgSpec(args=['self', 'place', 'run_mode'], varargs=None, keywords=None, defaults=(None, '')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.AsyncExecutor.config_distributed_nodes (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '4810dbe1870452f16b3c60b6c5fd1459'))
paddle.fluid.AsyncExecutor.download_data (ArgSpec(args=['self', 'afs_path', 'local_path', 'fs_default_name', 'ugi', 'file_cnt', 'hadoop_home', 'process_num'], varargs=None, keywords=None, defaults=('$HADOOP_HOME', 12)), ('document', '799a2066cc26819f1ed31f47c15ad083'))
paddle.fluid.AsyncExecutor.get_instance (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'f8688f76a2db1243c7097a60c507b182'))
paddle.fluid.AsyncExecutor.init_model (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '504f39be2007404a17e5cabea1256c7d'))
paddle.fluid.AsyncExecutor.init_server (ArgSpec(args=['self', 'dist_desc'], varargs=None, keywords=None, defaults=None), ('document', 'c403ab46c5d3ef25c0f7e94ae75dcb68'))
paddle.fluid.AsyncExecutor.init_worker (ArgSpec(args=['self', 'dist_desc', 'startup_program'], varargs=None, keywords=None, defaults=None), ('document', 'dcf08f4bf2f3282acf11391f5d39c536'))
paddle.fluid.AsyncExecutor.run (ArgSpec(args=['self', 'program', 'data_feed', 'filelist', 'thread_num', 'fetch', 'mode', 'debug'], varargs=None, keywords=None, defaults=('', False)), ('document', '848fc53484e8326f6325feea87fe955c'))
paddle.fluid.AsyncExecutor.save_model (ArgSpec(args=['self', 'save_path'], varargs=None, keywords=None, defaults=None), ('document', 'c8ac0dfcb3b187aba25d03af7fea56b2'))
paddle.fluid.AsyncExecutor.stop (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '5f23d043607bb5d55e466ec3f578e093'))
paddle.fluid.CompiledProgram.__init__ (ArgSpec(args=['self', 'program_or_graph'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.CompiledProgram.with_data_parallel (ArgSpec(args=['self', 'loss_name', 'build_strategy', 'exec_strategy', 'share_vars_from'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'e1af7fd53cf868554f312779fc803864'))
paddle.fluid.CompiledProgram.with_inference_optimize (ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=None), ('document', '9e5b009d850191a010e859189c127fd8'))
paddle.fluid.ExecutionStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.ExecutionStrategy) -> None
paddle.fluid.BuildStrategy.GradientScaleStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.BuildStrategy.GradientScaleStrategy, arg0: int) -> None
paddle.fluid.BuildStrategy.ReduceStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.BuildStrategy.ReduceStrategy, arg0: int) -> None
paddle.fluid.BuildStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.BuildStrategy) -> None
paddle.fluid.io.save_vars ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None))
paddle.fluid.io.save_params ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.io.save_persistables ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.io.load_vars ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None))
paddle.fluid.io.load_params ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.io.load_persistables ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.io.save_inference_model ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True))
paddle.fluid.io.load_inference_model ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None))
paddle.fluid.initializer.ConstantInitializer.__init__ ArgSpec(args=['self', 'value', 'force_cpu'], varargs=None, keywords=None, defaults=(0.0, False))
paddle.fluid.initializer.UniformInitializer.__init__ ArgSpec(args=['self', 'low', 'high', 'seed'], varargs=None, keywords=None, defaults=(-1.0, 1.0, 0))
paddle.fluid.initializer.NormalInitializer.__init__ ArgSpec(args=['self', 'loc', 'scale', 'seed'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0))
paddle.fluid.initializer.TruncatedNormalInitializer.__init__ ArgSpec(args=['self', 'loc', 'scale', 'seed'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0))
paddle.fluid.initializer.XavierInitializer.__init__ ArgSpec(args=['self', 'uniform', 'fan_in', 'fan_out', 'seed'], varargs=None, keywords=None, defaults=(True, None, None, 0))
paddle.fluid.initializer.BilinearInitializer.__init__ ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.initializer.MSRAInitializer.__init__ ArgSpec(args=['self', 'uniform', 'fan_in', 'seed'], varargs=None, keywords=None, defaults=(True, None, 0))
paddle.fluid.initializer.force_init_on_cpu ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
paddle.fluid.initializer.init_on_cpu ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
paddle.fluid.initializer.NumpyArrayInitializer.__init__ ArgSpec(args=['self', 'value'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.fc ArgSpec(args=['input', 'size', 'num_flatten_dims', 'param_attr', 'bias_attr', 'act', 'is_test', 'name'], varargs=None, keywords=None, defaults=(1, None, None, None, False, None))
paddle.fluid.layers.embedding ArgSpec(args=['input', 'size', 'is_sparse', 'is_distributed', 'padding_idx', 'param_attr', 'dtype'], varargs=None, keywords=None, defaults=(False, False, None, None, 'float32'))
paddle.fluid.layers.dynamic_lstm ArgSpec(args=['input', 'size', 'h_0', 'c_0', 'param_attr', 'bias_attr', 'use_peepholes', 'is_reverse', 'gate_activation', 'cell_activation', 'candidate_activation', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, None, None, None, True, False, 'sigmoid', 'tanh', 'tanh', 'float32', None))
paddle.fluid.layers.dynamic_lstmp ArgSpec(args=['input', 'size', 'proj_size', 'param_attr', 'bias_attr', 'use_peepholes', 'is_reverse', 'gate_activation', 'cell_activation', 'candidate_activation', 'proj_activation', 'dtype', 'name', 'h_0', 'c_0', 'cell_clip', 'proj_clip'], varargs=None, keywords=None, defaults=(None, None, True, False, 'sigmoid', 'tanh', 'tanh', 'tanh', 'float32', None, None, None, None, None))
paddle.fluid.layers.dynamic_gru ArgSpec(args=['input', 'size', 'param_attr', 'bias_attr', 'is_reverse', 'gate_activation', 'candidate_activation', 'h_0', 'origin_mode'], varargs=None, keywords=None, defaults=(None, None, False, 'sigmoid', 'tanh', None, False))
paddle.fluid.layers.gru_unit ArgSpec(args=['input', 'hidden', 'size', 'param_attr', 'bias_attr', 'activation', 'gate_activation', 'origin_mode'], varargs=None, keywords=None, defaults=(None, None, 'tanh', 'sigmoid', False))
paddle.fluid.layers.linear_chain_crf ArgSpec(args=['input', 'label', 'param_attr'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.crf_decoding ArgSpec(args=['input', 'param_attr', 'label'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.cos_sim ArgSpec(args=['X', 'Y'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.cross_entropy ArgSpec(args=['input', 'label', 'soft_label', 'ignore_index'], varargs=None, keywords=None, defaults=(False, -100))
paddle.fluid.layers.bpr_loss ArgSpec(args=['input', 'label', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.square_error_cost ArgSpec(args=['input', 'label'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.chunk_eval ArgSpec(args=['input', 'label', 'chunk_scheme', 'num_chunk_types', 'excluded_chunk_types'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.sequence_conv ArgSpec(args=['input', 'num_filters', 'filter_size', 'filter_stride', 'padding', 'bias_attr', 'param_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(3, 1, None, None, None, None, None))
paddle.fluid.layers.conv2d ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None))
paddle.fluid.layers.conv3d ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None))
paddle.fluid.layers.sequence_pool ArgSpec(args=['input', 'pool_type', 'is_test'], varargs=None, keywords=None, defaults=(False,))
paddle.fluid.layers.sequence_softmax ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None))
paddle.fluid.layers.softmax ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None))
paddle.fluid.layers.pool2d ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True))
paddle.fluid.layers.pool3d ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True))
paddle.fluid.layers.adaptive_pool2d ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None))
paddle.fluid.layers.adaptive_pool3d ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None))
paddle.fluid.layers.batch_norm ArgSpec(args=['input', 'act', 'is_test', 'momentum', 'epsilon', 'param_attr', 'bias_attr', 'data_layout', 'in_place', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var', 'fuse_with_relu', 'use_global_stats'], varargs=None, keywords=None, defaults=(None, False, 0.9, 1e-05, None, None, 'NCHW', False, None, None, None, False, False, False))
paddle.fluid.layers.data_norm ArgSpec(args=['input', 'act', 'epsilon', 'param_attr', 'data_layout', 'in_place', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var'], varargs=None, keywords=None, defaults=(None, 1e-05, None, 'NCHW', False, None, None, None, False))
paddle.fluid.layers.beam_search_decode ArgSpec(args=['ids', 'scores', 'beam_size', 'end_id', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.conv2d_transpose ArgSpec(args=['input', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None))
paddle.fluid.layers.conv3d_transpose ArgSpec(args=['input', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None))
paddle.fluid.layers.sequence_expand ArgSpec(args=['x', 'y', 'ref_level', 'name'], varargs=None, keywords=None, defaults=(-1, None))
paddle.fluid.layers.sequence_expand_as ArgSpec(args=['x', 'y', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.sequence_pad ArgSpec(args=['x', 'pad_value', 'maxlen', 'name'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.layers.sequence_unpad ArgSpec(args=['x', 'length', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.lstm_unit ArgSpec(args=['x_t', 'hidden_t_prev', 'cell_t_prev', 'forget_bias', 'param_attr', 'bias_attr', 'name'], varargs=None, keywords=None, defaults=(0.0, None, None, None))
paddle.fluid.layers.reduce_sum ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None))
paddle.fluid.layers.reduce_mean ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None))
paddle.fluid.layers.reduce_max ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None))
paddle.fluid.layers.reduce_min ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None))
paddle.fluid.layers.reduce_prod ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None))
paddle.fluid.layers.sequence_first_step ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.sequence_last_step ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.sequence_slice ArgSpec(args=['input', 'offset', 'length', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.dropout ArgSpec(args=['x', 'dropout_prob', 'is_test', 'seed', 'name', 'dropout_implementation'], varargs=None, keywords=None, defaults=(False, None, None, 'downgrade_in_infer'))
paddle.fluid.layers.split ArgSpec(args=['input', 'num_or_sections', 'dim', 'name'], varargs=None, keywords=None, defaults=(-1, None))
paddle.fluid.layers.ctc_greedy_decoder ArgSpec(args=['input', 'blank', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.edit_distance ArgSpec(args=['input', 'label', 'normalized', 'ignored_tokens'], varargs=None, keywords=None, defaults=(True, None))
paddle.fluid.layers.l2_normalize ArgSpec(args=['x', 'axis', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(1e-12, None))
paddle.fluid.layers.matmul ArgSpec(args=['x', 'y', 'transpose_x', 'transpose_y', 'alpha', 'name'], varargs=None, keywords=None, defaults=(False, False, 1.0, None))
paddle.fluid.layers.topk ArgSpec(args=['input', 'k', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.warpctc ArgSpec(args=['input', 'label', 'blank', 'norm_by_times', 'use_cudnn'], varargs=None, keywords=None, defaults=(0, False, False))
paddle.fluid.layers.sequence_reshape ArgSpec(args=['input', 'new_dim'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.transpose ArgSpec(args=['x', 'perm', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.im2sequence ArgSpec(args=['input', 'filter_size', 'stride', 'padding', 'input_image_size', 'out_stride', 'name'], varargs=None, keywords=None, defaults=(1, 1, 0, None, 1, None))
paddle.fluid.layers.nce ArgSpec(args=['input', 'label', 'num_total_classes', 'sample_weight', 'param_attr', 'bias_attr', 'num_neg_samples', 'name', 'sampler', 'custom_dist', 'seed', 'is_sparse'], varargs=None, keywords=None, defaults=(None, None, None, None, None, 'uniform', None, 0, False))
paddle.fluid.layers.sampled_softmax_with_cross_entropy ArgSpec(args=['logits', 'label', 'num_samples', 'num_true', 'remove_accidental_hits', 'use_customized_samples', 'customized_samples', 'customized_probabilities', 'seed'], varargs=None, keywords=None, defaults=(1, True, False, None, None, 0))
paddle.fluid.layers.hsigmoid ArgSpec(args=['input', 'label', 'num_classes', 'param_attr', 'bias_attr', 'name', 'path_table', 'path_code', 'is_custom', 'is_sparse'], varargs=None, keywords=None, defaults=(None, None, None, None, None, False, False))
paddle.fluid.layers.beam_search ArgSpec(args=['pre_ids', 'pre_scores', 'ids', 'scores', 'beam_size', 'end_id', 'level', 'is_accumulated', 'name', 'return_parent_idx'], varargs=None, keywords=None, defaults=(0, True, None, False))
paddle.fluid.layers.row_conv ArgSpec(args=['input', 'future_context_size', 'param_attr', 'act'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.layers.multiplex ArgSpec(args=['inputs', 'index'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.layer_norm ArgSpec(args=['input', 'scale', 'shift', 'begin_norm_axis', 'epsilon', 'param_attr', 'bias_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(True, True, 1, 1e-05, None, None, None, None))
paddle.fluid.layers.group_norm ArgSpec(args=['input', 'groups', 'epsilon', 'param_attr', 'bias_attr', 'act', 'data_layout', 'name'], varargs=None, keywords=None, defaults=(1e-05, None, None, None, 'NCHW', None))
paddle.fluid.layers.softmax_with_cross_entropy ArgSpec(args=['logits', 'label', 'soft_label', 'ignore_index', 'numeric_stable_mode', 'return_softmax'], varargs=None, keywords=None, defaults=(False, -100, True, False))
paddle.fluid.layers.smooth_l1 ArgSpec(args=['x', 'y', 'inside_weight', 'outside_weight', 'sigma'], varargs=None, keywords=None, defaults=(None, None, None))
paddle.fluid.layers.one_hot ArgSpec(args=['input', 'depth'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.autoincreased_step_counter ArgSpec(args=['counter_name', 'begin', 'step'], varargs=None, keywords=None, defaults=(None, 1, 1))
paddle.fluid.layers.reshape ArgSpec(args=['x', 'shape', 'actual_shape', 'act', 'inplace', 'name'], varargs=None, keywords=None, defaults=(None, None, False, None))
paddle.fluid.layers.squeeze ArgSpec(args=['input', 'axes', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.unsqueeze ArgSpec(args=['input', 'axes', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.lod_reset ArgSpec(args=['x', 'y', 'target_lod'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.layers.lrn ArgSpec(args=['input', 'n', 'k', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(5, 1.0, 0.0001, 0.75, None))
paddle.fluid.layers.pad ArgSpec(args=['x', 'paddings', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None))
paddle.fluid.layers.pad_constant_like ArgSpec(args=['x', 'y', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None))
paddle.fluid.layers.label_smooth ArgSpec(args=['label', 'prior_dist', 'epsilon', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, 0.1, 'float32', None))
paddle.fluid.layers.roi_pool ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale'], varargs=None, keywords=None, defaults=(1, 1, 1.0))
paddle.fluid.layers.roi_align ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale', 'sampling_ratio', 'name'], varargs=None, keywords=None, defaults=(1, 1, 1.0, -1, None))
paddle.fluid.layers.dice_loss ArgSpec(args=['input', 'label', 'epsilon'], varargs=None, keywords=None, defaults=(1e-05,))
paddle.fluid.layers.image_resize ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'resample', 'actual_shape', 'align_corners', 'align_mode'], varargs=None, keywords=None, defaults=(None, None, None, 'BILINEAR', None, True, 1))
paddle.fluid.layers.image_resize_short ArgSpec(args=['input', 'out_short_len', 'resample'], varargs=None, keywords=None, defaults=('BILINEAR',))
paddle.fluid.layers.resize_bilinear ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners', 'align_mode'], varargs=None, keywords=None, defaults=(None, None, None, None, True, 1))
paddle.fluid.layers.resize_nearest ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners'], varargs=None, keywords=None, defaults=(None, None, None, None, True))
paddle.fluid.layers.gather ArgSpec(args=['input', 'index'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.scatter ArgSpec(args=['input', 'index', 'updates', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.sequence_scatter ArgSpec(args=['input', 'index', 'updates', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.random_crop ArgSpec(args=['x', 'shape', 'seed'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.mean_iou ArgSpec(args=['input', 'label', 'num_classes'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.relu ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.selu ArgSpec(args=['x', 'scale', 'alpha', 'name'], varargs=None, keywords=None, defaults=(None, None, None))
paddle.fluid.layers.log ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.crop ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None))
paddle.fluid.layers.rank_loss ArgSpec(args=['label', 'left', 'right', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.margin_rank_loss ArgSpec(args=['label', 'left', 'right', 'margin', 'name'], varargs=None, keywords=None, defaults=(0.1, None))
paddle.fluid.layers.elu ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(1.0, None))
paddle.fluid.layers.relu6 ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(6.0, None))
paddle.fluid.layers.pow ArgSpec(args=['x', 'factor', 'name'], varargs=None, keywords=None, defaults=(1.0, None))
paddle.fluid.layers.stanh ArgSpec(args=['x', 'scale_a', 'scale_b', 'name'], varargs=None, keywords=None, defaults=(0.6666666666666666, 1.7159, None))
paddle.fluid.layers.hard_sigmoid ArgSpec(args=['x', 'slope', 'offset', 'name'], varargs=None, keywords=None, defaults=(0.2, 0.5, None))
paddle.fluid.layers.swish ArgSpec(args=['x', 'beta', 'name'], varargs=None, keywords=None, defaults=(1.0, None))
paddle.fluid.layers.prelu ArgSpec(args=['x', 'mode', 'param_attr', 'name'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.layers.brelu ArgSpec(args=['x', 't_min', 't_max', 'name'], varargs=None, keywords=None, defaults=(0.0, 24.0, None))
paddle.fluid.layers.leaky_relu ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(0.02, None))
paddle.fluid.layers.soft_relu ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(40.0, None))
paddle.fluid.layers.flatten ArgSpec(args=['x', 'axis', 'name'], varargs=None, keywords=None, defaults=(1, None))
paddle.fluid.layers.sequence_mask ArgSpec(args=['x', 'maxlen', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, 'int64', None))
paddle.fluid.layers.stack ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,))
paddle.fluid.layers.pad2d ArgSpec(args=['input', 'paddings', 'mode', 'pad_value', 'data_format', 'name'], varargs=None, keywords=None, defaults=([0, 0, 0, 0], 'constant', 0.0, 'NCHW', None))
paddle.fluid.layers.unstack ArgSpec(args=['x', 'axis', 'num'], varargs=None, keywords=None, defaults=(0, None))
paddle.fluid.layers.sequence_enumerate ArgSpec(args=['input', 'win_size', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0, None))
paddle.fluid.layers.expand ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.sequence_concat ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.scale ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None))
paddle.fluid.layers.elementwise_add ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None))
paddle.fluid.layers.elementwise_div ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None))
paddle.fluid.layers.elementwise_sub ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None))
paddle.fluid.layers.elementwise_mul ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None))
paddle.fluid.layers.elementwise_max ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None))
paddle.fluid.layers.elementwise_min ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None))
paddle.fluid.layers.elementwise_pow ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None))
paddle.fluid.layers.uniform_random_batch_size_like ArgSpec(args=['input', 'shape', 'dtype', 'input_dim_idx', 'output_dim_idx', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', 0, 0, -1.0, 1.0, 0))
paddle.fluid.layers.gaussian_random ArgSpec(args=['shape', 'mean', 'std', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32'))
paddle.fluid.layers.sampling_id ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32'))
paddle.fluid.layers.gaussian_random_batch_size_like ArgSpec(args=['input', 'shape', 'input_dim_idx', 'output_dim_idx', 'mean', 'std', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0, 0, 0.0, 1.0, 0, 'float32'))
paddle.fluid.layers.sum ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.slice ArgSpec(args=['input', 'axes', 'starts', 'ends'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.shape ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.logical_and ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.layers.logical_or ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.layers.logical_xor ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.layers.logical_not ArgSpec(args=['x', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.layers.clip ArgSpec(args=['x', 'min', 'max', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.clip_by_norm ArgSpec(args=['x', 'max_norm', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.mean ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.mul ArgSpec(args=['x', 'y', 'x_num_col_dims', 'y_num_col_dims', 'name'], varargs=None, keywords=None, defaults=(1, 1, None))
paddle.fluid.layers.sigmoid_cross_entropy_with_logits ArgSpec(args=['x', 'label', 'ignore_index', 'name', 'normalize'], varargs=None, keywords=None, defaults=(-100, None, False))
paddle.fluid.layers.maxout ArgSpec(args=['x', 'groups', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.space_to_depth ArgSpec(args=['x', 'blocksize', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.affine_grid ArgSpec(args=['theta', 'out_shape', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.sequence_reverse ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.affine_channel ArgSpec(args=['x', 'scale', 'bias', 'data_layout', 'name'], varargs=None, keywords=None, defaults=(None, None, 'NCHW', None))
paddle.fluid.layers.similarity_focus ArgSpec(args=['input', 'axis', 'indexes', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.hash ArgSpec(args=['input', 'hash_size', 'num_hash', 'name'], varargs=None, keywords=None, defaults=(1, None))
paddle.fluid.layers.grid_sampler ArgSpec(args=['x', 'grid', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.log_loss ArgSpec(args=['input', 'label', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(0.0001, None))
paddle.fluid.layers.add_position_encoding ArgSpec(args=['input', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.bilinear_tensor_product ArgSpec(args=['x', 'y', 'size', 'act', 'name', 'param_attr', 'bias_attr'], varargs=None, keywords=None, defaults=(None, None, None, None))
paddle.fluid.layers.merge_selected_rows ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.get_tensor_from_selected_rows ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.lstm ArgSpec(args=['input', 'init_h', 'init_c', 'max_len', 'hidden_size', 'num_layers', 'dropout_prob', 'is_bidirec', 'is_test', 'name', 'default_initializer', 'seed'], varargs=None, keywords=None, defaults=(0.0, False, False, None, None, -1))
paddle.fluid.layers.shuffle_channel ArgSpec(args=['x', 'group', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.py_func ArgSpec(args=['func', 'x', 'out', 'backward_func', 'skip_vars_in_backward_input'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.layers.psroi_pool ArgSpec(args=['input', 'rois', 'output_channels', 'spatial_scale', 'pooled_height', 'pooled_width', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.teacher_student_sigmoid_loss ArgSpec(args=['input', 'label', 'soft_max_up_bound', 'soft_max_lower_bound'], varargs=None, keywords=None, defaults=(15.0, -15.0))
paddle.fluid.layers.huber_loss ArgSpec(args=['input', 'label', 'delta'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.tree_conv ArgSpec(args=['nodes_vector', 'edge_set', 'output_size', 'num_filters', 'max_depth', 'act', 'param_attr', 'bias_attr', 'name'], varargs=None, keywords=None, defaults=(1, 2, 'tanh', None, None, None))
paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None))
paddle.fluid.layers.read_file ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.shuffle ArgSpec(args=['reader', 'buffer_size'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.batch ArgSpec(args=['reader', 'batch_size'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.double_buffer ArgSpec(args=['reader', 'place', 'name'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.layers.random_data_generator ArgSpec(args=['low', 'high', 'shapes', 'lod_levels', 'for_parallel'], varargs=None, keywords=None, defaults=(True,))
paddle.fluid.layers.py_reader ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True))
paddle.fluid.layers.create_py_reader_by_data ArgSpec(args=['capacity', 'feed_list', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, True))
paddle.fluid.layers.Preprocessor.__init__ ArgSpec(args=['self', 'reader', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.Preprocessor.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.Preprocessor.inputs ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.Preprocessor.outputs ArgSpec(args=['self'], varargs='outs', keywords=None, defaults=None)
paddle.fluid.layers.load ArgSpec(args=['out', 'file_path', 'load_as_fp16'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.create_tensor ArgSpec(args=['dtype', 'name', 'persistable'], varargs=None, keywords=None, defaults=(None, False))
paddle.fluid.layers.create_parameter ArgSpec(args=['shape', 'dtype', 'name', 'attr', 'is_bias', 'default_initializer'], varargs=None, keywords=None, defaults=(None, None, False, None))
paddle.fluid.layers.create_global_var ArgSpec(args=['shape', 'value', 'dtype', 'persistable', 'force_cpu', 'name'], varargs=None, keywords=None, defaults=(False, False, None))
paddle.fluid.layers.cast ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.tensor_array_to_tensor ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(1, None))
paddle.fluid.layers.concat ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(0, None))
paddle.fluid.layers.sums ArgSpec(args=['input', 'out'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.assign ArgSpec(args=['input', 'output'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.fill_constant_batch_size_like ArgSpec(args=['input', 'shape', 'dtype', 'value', 'input_dim_idx', 'output_dim_idx'], varargs=None, keywords=None, defaults=(0, 0))
paddle.fluid.layers.fill_constant ArgSpec(args=['shape', 'dtype', 'value', 'force_cpu', 'out'], varargs=None, keywords=None, defaults=(False, None))
paddle.fluid.layers.argmin ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,))
paddle.fluid.layers.argmax ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,))
paddle.fluid.layers.argsort ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(-1, None))
paddle.fluid.layers.ones ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=None, keywords=None, defaults=(False,))
paddle.fluid.layers.zeros ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=None, keywords=None, defaults=(False,))
paddle.fluid.layers.reverse ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.has_inf ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.has_nan ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.isfinite ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.While.__init__ ArgSpec(args=['self', 'cond', 'is_test', 'name'], varargs=None, keywords=None, defaults=(False, None))
paddle.fluid.layers.While.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.Switch.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.Switch.case ArgSpec(args=['self', 'condition'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.Switch.default ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.increment ArgSpec(args=['x', 'value', 'in_place'], varargs=None, keywords=None, defaults=(1.0, True))
paddle.fluid.layers.array_write ArgSpec(args=['x', 'i', 'array'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.create_array ArgSpec(args=['dtype'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.less_than ArgSpec(args=['x', 'y', 'force_cpu', 'cond'], varargs=None, keywords='ignored', defaults=(None, None))
paddle.fluid.layers.equal ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.array_read ArgSpec(args=['array', 'i'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.array_length ArgSpec(args=['array'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.IfElse.__init__ ArgSpec(args=['self', 'cond', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.IfElse.false_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.IfElse.input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.IfElse.output ArgSpec(args=['self'], varargs='outs', keywords=None, defaults=None)
paddle.fluid.layers.IfElse.true_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.DynamicRNN.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.DynamicRNN.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.DynamicRNN.memory ArgSpec(args=['self', 'init', 'shape', 'value', 'need_reorder', 'dtype'], varargs=None, keywords=None, defaults=(None, None, 0.0, False, 'float32'))
paddle.fluid.layers.DynamicRNN.output ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None)
paddle.fluid.layers.DynamicRNN.static_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.DynamicRNN.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.DynamicRNN.update_memory ArgSpec(args=['self', 'ex_mem', 'new_mem'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.StaticRNN.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.StaticRNN.memory ArgSpec(args=['self', 'init', 'shape', 'batch_ref', 'init_value', 'init_batch_dim_idx', 'ref_batch_dim_idx'], varargs=None, keywords=None, defaults=(None, None, None, 0.0, 0, 1))
paddle.fluid.layers.StaticRNN.output ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None)
paddle.fluid.layers.StaticRNN.step ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.StaticRNN.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.StaticRNN.step_output ArgSpec(args=['self', 'o'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.StaticRNN.update_memory ArgSpec(args=['self', 'mem', 'var'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.reorder_lod_tensor_by_rank ArgSpec(args=['x', 'rank_table'], varargs=None, keywords=None, defaults=None)
paddle.fluid.layers.Print ArgSpec(args=['input', 'first_n', 'message', 'summarize', 'print_tensor_name', 'print_tensor_type', 'print_tensor_shape', 'print_tensor_lod', 'print_phase'], varargs=None, keywords=None, defaults=(-1, None, -1, True, True, True, True, 'both'))
paddle.fluid.layers.is_empty ArgSpec(args=['x', 'cond'], varargs=None, keywords='ignored', defaults=(None,))
paddle.fluid.layers.sigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.logsigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.exp ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.tanh ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.tanh_shrink ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.softshrink ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.sqrt ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.abs ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.ceil ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.floor ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.cos ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.sin ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.round ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.reciprocal ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.square ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.softplus ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.softsign ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.uniform_random ArgSpec(args=['shape', 'dtype', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', -1.0, 1.0, 0))
paddle.fluid.layers.hard_shrink ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.cumsum ArgSpec(args=['x', 'axis', 'exclusive', 'reverse'], varargs=None, keywords=None, defaults=(None, None, None))
paddle.fluid.layers.thresholded_relu ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.prior_box ArgSpec(args=['input', 'image', 'min_sizes', 'max_sizes', 'aspect_ratios', 'variance', 'flip', 'clip', 'steps', 'offset', 'name', 'min_max_aspect_ratios_order'], varargs=None, keywords=None, defaults=(None, [1.0], [0.1, 0.1, 0.2, 0.2], False, False, [0.0, 0.0], 0.5, None, False))
paddle.fluid.layers.density_prior_box ArgSpec(args=['input', 'image', 'densities', 'fixed_sizes', 'fixed_ratios', 'variance', 'clip', 'steps', 'offset', 'flatten_to_2d', 'name'], varargs=None, keywords=None, defaults=(None, None, None, [0.1, 0.1, 0.2, 0.2], False, [0.0, 0.0], 0.5, False, None))
paddle.fluid.layers.multi_box_head ArgSpec(args=['inputs', 'image', 'base_size', 'num_classes', 'aspect_ratios', 'min_ratio', 'max_ratio', 'min_sizes', 'max_sizes', 'steps', 'step_w', 'step_h', 'offset', 'variance', 'flip', 'clip', 'kernel_size', 'pad', 'stride', 'name', 'min_max_aspect_ratios_order'], varargs=None, keywords=None, defaults=(None, None, None, None, None, None, None, 0.5, [0.1, 0.1, 0.2, 0.2], True, False, 1, 0, 1, None, False))
paddle.fluid.layers.bipartite_match ArgSpec(args=['dist_matrix', 'match_type', 'dist_threshold', 'name'], varargs=None, keywords=None, defaults=(None, None, None))
paddle.fluid.layers.target_assign ArgSpec(args=['input', 'matched_indices', 'negative_indices', 'mismatch_value', 'name'], varargs=None, keywords=None, defaults=(None, None, None))
paddle.fluid.layers.detection_output ArgSpec(args=['loc', 'scores', 'prior_box', 'prior_box_var', 'background_label', 'nms_threshold', 'nms_top_k', 'keep_top_k', 'score_threshold', 'nms_eta'], varargs=None, keywords=None, defaults=(0, 0.3, 400, 200, 0.01, 1.0))
paddle.fluid.layers.ssd_loss ArgSpec(args=['location', 'confidence', 'gt_box', 'gt_label', 'prior_box', 'prior_box_var', 'background_label', 'overlap_threshold', 'neg_pos_ratio', 'neg_overlap', 'loc_loss_weight', 'conf_loss_weight', 'match_type', 'mining_type', 'normalize', 'sample_size'], varargs=None, keywords=None, defaults=(None, 0, 0.5, 3.0, 0.5, 1.0, 1.0, 'per_prediction', 'max_negative', True, None))
paddle.fluid.layers.detection_map ArgSpec(args=['detect_res', 'label', 'class_num', 'background_label', 'overlap_threshold', 'evaluate_difficult', 'has_state', 'input_states', 'out_states', 'ap_version'], varargs=None, keywords=None, defaults=(0, 0.3, True, None, None, None, 'integral'))
paddle.fluid.layers.rpn_target_assign ArgSpec(args=['bbox_pred', 'cls_logits', 'anchor_box', 'anchor_var', 'gt_boxes', 'is_crowd', 'im_info', 'rpn_batch_size_per_im', 'rpn_straddle_thresh', 'rpn_fg_fraction', 'rpn_positive_overlap', 'rpn_negative_overlap', 'use_random'], varargs=None, keywords=None, defaults=(256, 0.0, 0.5, 0.7, 0.3, True))
paddle.fluid.layers.anchor_generator
ArgSpec(args=['input', 'anchor_sizes', 'aspect_ratios', 'variance', 'stride', 'offset', 'name'], varargs=None, keywords=None, defaults=(None, None, [0.1, 0.1, 0.2, 0.2], None, 0.5, None
))
paddle.fluid.layers.roi_perspective_transform
ArgSpec(args=['input', 'rois', 'transformed_height', 'transformed_width', 'spatial_scale'], varargs=None, keywords=None, defaults=(1.0,
))
paddle.fluid.layers.generate_proposal_labels
ArgSpec(args=['rpn_rois', 'gt_classes', 'is_crowd', 'gt_boxes', 'im_info', 'batch_size_per_im', 'fg_fraction', 'fg_thresh', 'bg_thresh_hi', 'bg_thresh_lo', 'bbox_reg_weights', 'class_nums', 'use_random'], varargs=None, keywords=None, defaults=(256, 0.25, 0.25, 0.5, 0.0, [0.1, 0.1, 0.2, 0.2], None, True
))
paddle.fluid.layers.generate_proposals
ArgSpec(args=['scores', 'bbox_deltas', 'im_info', 'anchors', 'variances', 'pre_nms_top_n', 'post_nms_top_n', 'nms_thresh', 'min_size', 'eta', 'name'], varargs=None, keywords=None, defaults=(6000, 1000, 0.5, 0.1, 1.0, None
))
paddle.fluid.layers.generate_mask_labels
ArgSpec(args=['im_info', 'gt_classes', 'is_crowd', 'gt_segms', 'rois', 'labels_int32', 'num_classes', 'resolution'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.layers.iou_similarity
ArgSpec(args=['x', 'y', 'name'], varargs=None, keywords=None, defaults=(None,
))
paddle.fluid.layers.box_coder
ArgSpec(args=['prior_box', 'prior_box_var', 'target_box', 'code_type', 'box_normalized', 'name', 'axis'], varargs=None, keywords=None, defaults=('encode_center_size', True, None, 0
))
paddle.fluid.layers.polygon_box_transform
ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,
))
paddle.fluid.layers.yolov3_loss
ArgSpec(args=['x', 'gtbox', 'gtlabel', 'anchors', 'anchor_mask', 'class_num', 'ignore_thresh', 'downsample_ratio', 'name'], varargs=None, keywords=None, defaults=(None,
))
paddle.fluid.layers.box_clip
ArgSpec(args=['input', 'im_info', 'name'], varargs=None, keywords=None, defaults=(None,
))
paddle.fluid.layers.multiclass_nms
ArgSpec(args=['bboxes', 'scores', 'score_threshold', 'nms_top_k', 'keep_top_k', 'nms_threshold', 'normalized', 'nms_eta', 'background_label', 'name'], varargs=None, keywords=None, defaults=(0.3, True, 1.0, 0, None
))
paddle.fluid.layers.accuracy
ArgSpec(args=['input', 'label', 'k', 'correct', 'total'], varargs=None, keywords=None, defaults=(1, None, None
))
paddle.fluid.layers.auc
ArgSpec(args=['input', 'label', 'curve', 'num_thresholds', 'topk', 'slide_steps'], varargs=None, keywords=None, defaults=('ROC', 4095, 1, 1
))
paddle.fluid.layers.exponential_decay
ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,
))
paddle.fluid.layers.natural_exp_decay
ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,
))
paddle.fluid.layers.inverse_time_decay
ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,
))
paddle.fluid.layers.polynomial_decay
ArgSpec(args=['learning_rate', 'decay_steps', 'end_learning_rate', 'power', 'cycle'], varargs=None, keywords=None, defaults=(0.0001, 1.0, False
))
paddle.fluid.layers.piecewise_decay
ArgSpec(args=['boundaries', 'values'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.layers.noam_decay
ArgSpec(args=['d_model', 'warmup_steps'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.layers.append_LARS
ArgSpec(args=['params_grads', 'learning_rate', 'weight_decay'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.layers.cosine_decay
ArgSpec(args=['learning_rate', 'step_each_epoch', 'epochs'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.InitState.__init__
ArgSpec(args=['self', 'init', 'shape', 'value', 'init_boot', 'need_reorder', 'dtype'], varargs=None, keywords=None, defaults=(None, None, 0.0, None, False, 'float32
'))
paddle.fluid.contrib.StateCell.__init__
ArgSpec(args=['self', 'inputs', 'states', 'out_state', 'name'], varargs=None, keywords=None, defaults=(None,
))
paddle.fluid.contrib.StateCell.compute_state
ArgSpec(args=['self', 'inputs'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.StateCell.get_input
ArgSpec(args=['self', 'input_name'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.StateCell.get_state
ArgSpec(args=['self', 'state_name'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.StateCell.out_state
ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.StateCell.set_state
ArgSpec(args=['self', 'state_name', 'state_value'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.StateCell.state_updater
ArgSpec(args=['self', 'updater'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.StateCell.update_states
ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.TrainingDecoder.__init__
ArgSpec(args=['self', 'state_cell', 'name'], varargs=None, keywords=None, defaults=(None,
))
paddle.fluid.contrib.TrainingDecoder.block
ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.TrainingDecoder.output
ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None
)
paddle.fluid.contrib.TrainingDecoder.static_input
ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.TrainingDecoder.step_input
ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.BeamSearchDecoder.__init__
ArgSpec(args=['self', 'state_cell', 'init_ids', 'init_scores', 'target_dict_dim', 'word_dim', 'input_var_dict', 'topk_size', 'sparse_emb', 'max_len', 'beam_size', 'end_id', 'name'], varargs=None, keywords=None, defaults=({}, 50, True, 100, 1, 1, None
))
paddle.fluid.contrib.BeamSearchDecoder.block
ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.BeamSearchDecoder.decode
ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.BeamSearchDecoder.early_stop
ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.BeamSearchDecoder.read_array
ArgSpec(args=['self', 'init', 'is_ids', 'is_scores'], varargs=None, keywords=None, defaults=(False, False
))
paddle.fluid.contrib.BeamSearchDecoder.update_array
ArgSpec(args=['self', 'array', 'value'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.memory_usage
ArgSpec(args=['program', 'batch_size'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.op_freq_statistic
ArgSpec(args=['program'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.QuantizeTranspiler.__init__
ArgSpec(args=['self', 'weight_bits', 'activation_bits', 'activation_quantize_type', 'weight_quantize_type', 'window_size'], varargs=None, keywords=None, defaults=(8, 8, 'abs_max', 'abs_max', 10000
))
paddle.fluid.contrib.QuantizeTranspiler.convert_to_int8
ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=(None,
))
paddle.fluid.contrib.QuantizeTranspiler.freeze_program
ArgSpec(args=['self', 'program', 'place', 'fuse_bn', 'scope'], varargs=None, keywords=None, defaults=(False, None
))
paddle.fluid.contrib.QuantizeTranspiler.training_transpile
ArgSpec(args=['self', 'program', 'startup_program'], varargs=None, keywords=None, defaults=(None, None
))
paddle.fluid.contrib.Calibrator.__init__
ArgSpec(args=['self'], varargs='args', keywords='kwargs', defaults=None
)
paddle.fluid.contrib.Calibrator.sample_data
ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.Calibrator.save_int8_model
ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.reader.ctr_reader.ctr_reader
ArgSpec(args=['feed_dict', 'file_type', 'file_format', 'dense_slot_index', 'sparse_slot_index', 'capacity', 'thread_num', 'batch_size', 'file_list', 'slots', 'name'], varargs=None, keywords=None, defaults=(None,
))
paddle.fluid.contrib.build_compressor
ArgSpec(args=['place', 'data_reader', 'data_feeder', 'scope', 'metrics', 'epoch', 'config'], varargs=None, keywords=None, defaults=(None, None, None, None, None, None, None
))
paddle.fluid.contrib.CompressPass.__init__
ArgSpec(args=['self', 'place', 'data_reader', 'data_feeder', 'scope', 'metrics', 'epoch', 'program_exe'], varargs=None, keywords=None, defaults=(None, None, None, None, None, None, None
))
paddle.fluid.contrib.CompressPass.add_strategy
ArgSpec(args=['self', 'strategy'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.CompressPass.apply
ArgSpec(args=['self', 'graph'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.ImitationGraph.__init__
ArgSpec(args=['self', 'program'], varargs=None, keywords=None, defaults=(None,
))
paddle.fluid.contrib.ImitationGraph.all_parameters
ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.SensitivePruneStrategy.__init__
ArgSpec(args=['self', 'pruner', 'start_epoch', 'end_epoch', 'delta_rate', 'acc_loss_threshold', 'sensitivities'], varargs=None, keywords=None, defaults=(None, 0, 10, 0.2, 0.2, None
))
paddle.fluid.contrib.SensitivePruneStrategy.on_batch_begin
ArgSpec(args=['self', 'context'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.SensitivePruneStrategy.on_batch_end
ArgSpec(args=['self', 'context'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.SensitivePruneStrategy.on_compress_begin
ArgSpec(args=['self', 'context'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.SensitivePruneStrategy.on_compress_end
ArgSpec(args=['self', 'context'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.SensitivePruneStrategy.on_epoch_begin
ArgSpec(args=['self', 'context'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.SensitivePruneStrategy.on_epoch_end
ArgSpec(args=['self', 'context'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.MagnitudePruner.__init__
ArgSpec(args=['self', 'threshold'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.MagnitudePruner.prune
ArgSpec(args=['self', 'param', 'threshold'], varargs=None, keywords=None, defaults=(None,
))
paddle.fluid.contrib.RatioPruner.__init__
ArgSpec(args=['self', 'ratios'], varargs=None, keywords=None, defaults=(None,
))
paddle.fluid.contrib.RatioPruner.prune
ArgSpec(args=['self', 'param', 'ratio'], varargs=None, keywords=None, defaults=(None,
))
paddle.fluid.contrib.load_persistables_for_increment
ArgSpec(args=['dirname', 'executor', 'program', 'lookup_table_var', 'lookup_table_var_path'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.load_persistables_for_inference
ArgSpec(args=['dirname', 'executor', 'program', 'lookup_table_var_name'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.convert_dist_to_sparse_program
ArgSpec(args=['program'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.HDFSClient.__init__
ArgSpec(args=['self', 'hadoop_home', 'configs'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.HDFSClient.delete
ArgSpec(args=['self', 'hdfs_path'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.HDFSClient.download
ArgSpec(args=['self', 'hdfs_path', 'local_path', 'overwrite', 'unzip'], varargs=None, keywords=None, defaults=(False, False
))
paddle.fluid.contrib.HDFSClient.is_dir
ArgSpec(args=['self', 'hdfs_path'], varargs=None, keywords=None, defaults=(None,
))
paddle.fluid.contrib.HDFSClient.is_exist
ArgSpec(args=['self', 'hdfs_path'], varargs=None, keywords=None, defaults=(None,
))
paddle.fluid.contrib.HDFSClient.ls
ArgSpec(args=['self', 'hdfs_path'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.HDFSClient.lsr
ArgSpec(args=['self', 'hdfs_path', 'only_file', 'sort'], varargs=None, keywords=None, defaults=(True, True
))
paddle.fluid.contrib.HDFSClient.make_local_dirs
ArgSpec(args=['local_path'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.HDFSClient.makedirs
ArgSpec(args=['self', 'hdfs_path'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.contrib.HDFSClient.rename
ArgSpec(args=['self', 'hdfs_src_path', 'hdfs_dst_path', 'overwrite'], varargs=None, keywords=None, defaults=(False,
))
paddle.fluid.contrib.HDFSClient.upload
ArgSpec(args=['self', 'hdfs_path', 'local_path', 'overwrite', 'retry_times'], varargs=None, keywords=None, defaults=(False, 5
))
paddle.fluid.contrib.multi_download
ArgSpec(args=['client', 'hdfs_path', 'local_path', 'trainer_id', 'trainers', 'multi_processes'], varargs=None, keywords=None, defaults=(5,
))
paddle.fluid.contrib.multi_upload
ArgSpec(args=['client', 'hdfs_path', 'local_path', 'multi_processes', 'overwrite', 'sync'], varargs=None, keywords=None, defaults=(5, False, True
))
paddle.fluid.transpiler.DistributeTranspiler.__init__
ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=(None,
))
paddle.fluid.transpiler.DistributeTranspiler.get_pserver_program
ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.transpiler.DistributeTranspiler.get_pserver_programs
ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.transpiler.DistributeTranspiler.get_startup_program
ArgSpec(args=['self', 'endpoint', 'pserver_program', 'startup_program'], varargs=None, keywords=None, defaults=(None, None
))
paddle.fluid.transpiler.DistributeTranspiler.get_trainer_program
ArgSpec(args=['self', 'wait_port'], varargs=None, keywords=None, defaults=(True,
))
paddle.fluid.transpiler.DistributeTranspiler.transpile
ArgSpec(args=['self', 'trainer_id', 'program', 'pservers', 'trainers', 'sync_mode', 'startup_program', 'current_endpoint'], varargs=None, keywords=None, defaults=(None, '127.0.0.1:6174', 1, True, None, '127.0.0.1:617
4'))
paddle.fluid.transpiler.memory_optimize
ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level', 'skip_grads'], varargs=None, keywords=None, defaults=(None, False, 0, False
))
paddle.fluid.transpiler.release_memory
ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,
))
paddle.fluid.transpiler.HashName.__init__
ArgSpec(args=['self', 'pserver_endpoints'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.transpiler.HashName.dispatch
ArgSpec(args=['self', 'varlist'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.transpiler.HashName.reset
ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.transpiler.RoundRobin.__init__
ArgSpec(args=['self', 'pserver_endpoints'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.transpiler.RoundRobin.dispatch
ArgSpec(args=['self', 'varlist'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.transpiler.RoundRobin.reset
ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.io.save_vars
(ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'b55d6193a1d4198d45b013fc5779e1f2'
))
paddle.fluid.io.save_params
(ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '3a7a99abac3e1bf898871fe609354218'
))
paddle.fluid.io.save_persistables
(ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '9141bb5f32caf7975eb3fd88c8a1b2da'
))
paddle.fluid.io.load_vars
(ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '0a5308f496632ab1ec3ba1f1377e6f95'
))
paddle.fluid.io.load_params
(ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '41779819cef32f2246e83aebc5a002e2'
))
paddle.fluid.io.load_persistables
(ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '28df5bfe26ca7a077f91156abb0fe6d2'
))
paddle.fluid.io.save_inference_model
(ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True)), ('document', '582d87b8df75a5a639a107db8ff86f9c'
))
paddle.fluid.io.load_inference_model
(ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '7a5255386075dac3c75b7058254fcdcb'
))
paddle.fluid.initializer.ConstantInitializer.__init__
(ArgSpec(args=['self', 'value', 'force_cpu'], varargs=None, keywords=None, defaults=(0.0, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.initializer.UniformInitializer.__init__
(ArgSpec(args=['self', 'low', 'high', 'seed'], varargs=None, keywords=None, defaults=(-1.0, 1.0, 0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.initializer.NormalInitializer.__init__
(ArgSpec(args=['self', 'loc', 'scale', 'seed'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.initializer.TruncatedNormalInitializer.__init__
(ArgSpec(args=['self', 'loc', 'scale', 'seed'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.initializer.XavierInitializer.__init__
(ArgSpec(args=['self', 'uniform', 'fan_in', 'fan_out', 'seed'], varargs=None, keywords=None, defaults=(True, None, None, 0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.initializer.BilinearInitializer.__init__
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'd389912dc079cbef432335a00017cec0')
)
paddle.fluid.initializer.MSRAInitializer.__init__
(ArgSpec(args=['self', 'uniform', 'fan_in', 'seed'], varargs=None, keywords=None, defaults=(True, None, 0)), ('document', '53c757bed9345f2ad3361902531e7cf5'
))
paddle.fluid.initializer.force_init_on_cpu
(ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '6d0f3e22c90d9d500d36ff57daf056ee')
)
paddle.fluid.initializer.init_on_cpu
(ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', 'a6d7011ca3d8c0d454dac3a56eae0c29')
)
paddle.fluid.initializer.NumpyArrayInitializer.__init__
(ArgSpec(args=['self', 'value'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.layers.fc
(ArgSpec(args=['input', 'size', 'num_flatten_dims', 'param_attr', 'bias_attr', 'act', 'is_test', 'name'], varargs=None, keywords=None, defaults=(1, None, None, None, False, None)), ('document', '1929058262994f212620599c63aea6bd'
))
paddle.fluid.layers.embedding
(ArgSpec(args=['input', 'size', 'is_sparse', 'is_distributed', 'padding_idx', 'param_attr', 'dtype'], varargs=None, keywords=None, defaults=(False, False, None, None, 'float32')), ('document', '89c2c55a0b0656b106064048e068e77a
'))
paddle.fluid.layers.dynamic_lstm
(ArgSpec(args=['input', 'size', 'h_0', 'c_0', 'param_attr', 'bias_attr', 'use_peepholes', 'is_reverse', 'gate_activation', 'cell_activation', 'candidate_activation', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, None, None, None, True, False, 'sigmoid', 'tanh', 'tanh', 'float32', None)), ('document', 'dfbb624f85015df29e994ca6999e8ff6'
))
paddle.fluid.layers.dynamic_lstmp
(ArgSpec(args=['input', 'size', 'proj_size', 'param_attr', 'bias_attr', 'use_peepholes', 'is_reverse', 'gate_activation', 'cell_activation', 'candidate_activation', 'proj_activation', 'dtype', 'name', 'h_0', 'c_0', 'cell_clip', 'proj_clip'], varargs=None, keywords=None, defaults=(None, None, True, False, 'sigmoid', 'tanh', 'tanh', 'tanh', 'float32', None, None, None, None, None)), ('document', 'b4b608b986eb9617aa0525e1be21d32d'
))
paddle.fluid.layers.dynamic_gru
(ArgSpec(args=['input', 'size', 'param_attr', 'bias_attr', 'is_reverse', 'gate_activation', 'candidate_activation', 'h_0', 'origin_mode'], varargs=None, keywords=None, defaults=(None, None, False, 'sigmoid', 'tanh', None, False)), ('document', '4ec4845fd7d991bcac822f8b0dfc101f'
))
paddle.fluid.layers.gru_unit
(ArgSpec(args=['input', 'hidden', 'size', 'param_attr', 'bias_attr', 'activation', 'gate_activation', 'origin_mode'], varargs=None, keywords=None, defaults=(None, None, 'tanh', 'sigmoid', False)), ('document', 'e0e2439f7af069b57badca18a6ba60b8'
))
paddle.fluid.layers.linear_chain_crf
(ArgSpec(args=['input', 'label', 'param_attr'], varargs=None, keywords=None, defaults=(None,)), ('document', '7c49ef4bbf0adfd4b9a1d98e2e5f3fea'
))
paddle.fluid.layers.crf_decoding
(ArgSpec(args=['input', 'param_attr', 'label'], varargs=None, keywords=None, defaults=(None,)), ('document', '7642373ab65d3fc3b96d16d10fef1538'
))
paddle.fluid.layers.cos_sim
(ArgSpec(args=['X', 'Y'], varargs=None, keywords=None, defaults=None), ('document', 'd740824aa7316b807c4b4a3c6c8c0bbe')
)
paddle.fluid.layers.cross_entropy
(ArgSpec(args=['input', 'label', 'soft_label', 'ignore_index'], varargs=None, keywords=None, defaults=(False, -100)), ('document', '025b364dafb4b7975c801eb33e7831a1'
))
paddle.fluid.layers.bpr_loss
(ArgSpec(args=['input', 'label', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '30add751a0f99347a6257634c03ff254'
))
paddle.fluid.layers.square_error_cost
(ArgSpec(args=['input', 'label'], varargs=None, keywords=None, defaults=None), ('document', '44b6eef4a0f2bc15f7d9745782406736')
)
paddle.fluid.layers.chunk_eval
(ArgSpec(args=['input', 'label', 'chunk_scheme', 'num_chunk_types', 'excluded_chunk_types'], varargs=None, keywords=None, defaults=(None,)), ('document', 'ee152a7ba3036e7b9ede9184545179b4'
))
paddle.fluid.layers.sequence_conv
(ArgSpec(args=['input', 'num_filters', 'filter_size', 'filter_stride', 'padding', 'bias_attr', 'param_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(3, 1, None, None, None, None, None)), ('document', 'b6543768e1afaa2ecb869709d6e9c7e2'
))
paddle.fluid.layers.conv2d
(ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None)), ('document', '8ca6121acd6d23cd8806a93f493c2e17'
))
paddle.fluid.layers.conv3d
(ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None)), ('document', '37042620f9bd3a2da6e5d3138b2f724b'
))
paddle.fluid.layers.sequence_pool
(ArgSpec(args=['input', 'pool_type', 'is_test'], varargs=None, keywords=None, defaults=(False,)), ('document', 'a194fb80614023f543df3949fbd0d0b8'
))
paddle.fluid.layers.sequence_softmax
(ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None)), ('document', '19ef6f9cdd27feac8a1ae060f19c10b4'
))
paddle.fluid.layers.softmax
(ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None)), ('document', 'f19dd380864e61134ce3814e4be0de4b'
))
paddle.fluid.layers.pool2d
(ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True)), ('document', 'bbd84e855e660cd1084bb71a2fd0cdaa'
))
paddle.fluid.layers.pool3d
(ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True)), ('document', '043de7333b79ee0ac55053c14ed81625'
))
paddle.fluid.layers.adaptive_pool2d
(ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None)), ('document', '859b887174d06f361658f69cb7c06d95'
))
paddle.fluid.layers.adaptive_pool3d
(ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None)), ('document', '120f4323a3d7ed9c0916f15a59f0e497'
))
paddle.fluid.layers.batch_norm
(ArgSpec(args=['input', 'act', 'is_test', 'momentum', 'epsilon', 'param_attr', 'bias_attr', 'data_layout', 'in_place', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var', 'fuse_with_relu', 'use_global_stats'], varargs=None, keywords=None, defaults=(None, False, 0.9, 1e-05, None, None, 'NCHW', False, None, None, None, False, False, False)), ('document', 'c527b71b8a4c60dca8df8a745c2b598d'
))
paddle.fluid.layers.data_norm
(ArgSpec(args=['input', 'act', 'epsilon', 'param_attr', 'data_layout', 'in_place', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var'], varargs=None, keywords=None, defaults=(None, 1e-05, None, 'NCHW', False, None, None, None, False)), ('document', 'e45e09e65a2658e07cad987222f0d9ab'
))
paddle.fluid.layers.beam_search_decode
(ArgSpec(args=['ids', 'scores', 'beam_size', 'end_id', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b0b8d53821716cd50c42e09b593f3feb'
))
paddle.fluid.layers.conv2d_transpose
(ArgSpec(args=['input', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None)), ('document', '03993955ab1e6d3044c44e6f17fc85e9'
))
paddle.fluid.layers.conv3d_transpose
(ArgSpec(args=['input', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None)), ('document', 'ec113c6a3686ac94f8fccd1a7953d445'
))
paddle.fluid.layers.sequence_expand
(ArgSpec(args=['x', 'y', 'ref_level', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '79c375214fa427faac504043d162dae9'
))
paddle.fluid.layers.sequence_expand_as
(ArgSpec(args=['x', 'y', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '9d2611f84ab364c5da545e6a82f1770a'
))
paddle.fluid.layers.sequence_pad
(ArgSpec(args=['x', 'pad_value', 'maxlen', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '6a1adf3067b20f6e4bcb354d71c19184'
))
paddle.fluid.layers.sequence_unpad
(ArgSpec(args=['x', 'length', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'd12803c903c99aa36ec03aaac5f0cc5b'
))
paddle.fluid.layers.lstm_unit
(ArgSpec(args=['x_t', 'hidden_t_prev', 'cell_t_prev', 'forget_bias', 'param_attr', 'bias_attr', 'name'], varargs=None, keywords=None, defaults=(0.0, None, None, None)), ('document', '027723966f3ef0d7bc598f22287a96cc'
))
paddle.fluid.layers.reduce_sum
(ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'b69998ce3ff4980fb21da0df05565f1b'
))
paddle.fluid.layers.reduce_mean
(ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'd4d80dd98a1a5839f41eeb3a0f85f370'
))
paddle.fluid.layers.reduce_max
(ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', '66a622db727551761ce4eb73eaa7f6a4'
))
paddle.fluid.layers.reduce_min
(ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'd50ac552b5d131468ed466d08bb2d38c'
))
paddle.fluid.layers.reduce_prod
(ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'fcd8301a0ce15f219c7a4bcd0c1e8eca'
))
paddle.fluid.layers.sequence_first_step
(ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', '2b290d3d77882bfe9bb8d331cac8cdd3')
)
paddle.fluid.layers.sequence_last_step
(ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', 'c16a892f44f7fe71bfa5afc32d3f34ce')
)
paddle.fluid.layers.sequence_slice
(ArgSpec(args=['input', 'offset', 'length', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'fdcea0e8b5bc7d8d4b1b072c521014e6'
))
paddle.fluid.layers.dropout
(ArgSpec(args=['x', 'dropout_prob', 'is_test', 'seed', 'name', 'dropout_implementation'], varargs=None, keywords=None, defaults=(False, None, None, 'downgrade_in_infer')), ('document', 'dc7042734c6d8b8ce97321f017f01d6f
'))
paddle.fluid.layers.split
(ArgSpec(args=['input', 'num_or_sections', 'dim', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '652625345c2acb900029c78cc75f8aa6'
))
paddle.fluid.layers.ctc_greedy_decoder
(ArgSpec(args=['input', 'blank', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'ebbf2adbd79683dc93db03454dfa18c2'
))
paddle.fluid.layers.edit_distance
(ArgSpec(args=['input', 'label', 'normalized', 'ignored_tokens'], varargs=None, keywords=None, defaults=(True, None)), ('document', '97f0262f97602644c83142789d784571'
))
paddle.fluid.layers.l2_normalize
(ArgSpec(args=['x', 'axis', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(1e-12, None)), ('document', '6e428384ce6a77207fa2c70d9f011990'
))
paddle.fluid.layers.matmul
(ArgSpec(args=['x', 'y', 'transpose_x', 'transpose_y', 'alpha', 'name'], varargs=None, keywords=None, defaults=(False, False, 1.0, None)), ('document', 'b4cbe1ac451005df6dad12e9ffdccca9'
))
paddle.fluid.layers.topk
(ArgSpec(args=['input', 'k', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'd3570c02f71bcd78e60b3f31dc8f5b32'
))
paddle.fluid.layers.warpctc
(ArgSpec(args=['input', 'label', 'blank', 'norm_by_times', 'use_cudnn'], varargs=None, keywords=None, defaults=(0, False, False)), ('document', 'aaba49c038ba927f0a8e45c0c9a686ab'
))
paddle.fluid.layers.sequence_reshape
(ArgSpec(args=['input', 'new_dim'], varargs=None, keywords=None, defaults=None), ('document', 'a10ab9bf88d4a7e328882d411abb6fd1')
)
paddle.fluid.layers.transpose
(ArgSpec(args=['x', 'perm', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a1feac48b843d679db82312dc85885f4'
))
paddle.fluid.layers.im2sequence
(ArgSpec(args=['input', 'filter_size', 'stride', 'padding', 'input_image_size', 'out_stride', 'name'], varargs=None, keywords=None, defaults=(1, 1, 0, None, 1, None)), ('document', '3ce01160ede80b1c26f776f8fef9340f'
))
paddle.fluid.layers.nce
(ArgSpec(args=['input', 'label', 'num_total_classes', 'sample_weight', 'param_attr', 'bias_attr', 'num_neg_samples', 'name', 'sampler', 'custom_dist', 'seed', 'is_sparse'], varargs=None, keywords=None, defaults=(None, None, None, None, None, 'uniform', None, 0, False)), ('document', 'fddad4896dee5193e1cdf70882c2a347'
))
paddle.fluid.layers.sampled_softmax_with_cross_entropy
(ArgSpec(args=['logits', 'label', 'num_samples', 'num_true', 'remove_accidental_hits', 'use_customized_samples', 'customized_samples', 'customized_probabilities', 'seed'], varargs=None, keywords=None, defaults=(1, True, False, None, None, 0)), ('document', '5db30b8a74e8c93687943a3e8d221da0'
))
paddle.fluid.layers.hsigmoid
(ArgSpec(args=['input', 'label', 'num_classes', 'param_attr', 'bias_attr', 'name', 'path_table', 'path_code', 'is_custom', 'is_sparse'], varargs=None, keywords=None, defaults=(None, None, None, None, None, False, False)), ('document', '80641ee6810b1cdc3fd6e14fc89ecc9d'
))
paddle.fluid.layers.beam_search
(ArgSpec(args=['pre_ids', 'pre_scores', 'ids', 'scores', 'beam_size', 'end_id', 'level', 'is_accumulated', 'name', 'return_parent_idx'], varargs=None, keywords=None, defaults=(0, True, None, False)), ('document', 'b350b9a30a18e7efd7e1bb740eef6996'
))
paddle.fluid.layers.row_conv
(ArgSpec(args=['input', 'future_context_size', 'param_attr', 'act'], varargs=None, keywords=None, defaults=(None, None)), ('document', '17485788fffe4e2d36dc58c2ac8d174e'
))
paddle.fluid.layers.multiplex
(ArgSpec(args=['inputs', 'index'], varargs=None, keywords=None, defaults=None), ('document', '013795af319e2e86d3506741941078ee')
)
paddle.fluid.layers.layer_norm
(ArgSpec(args=['input', 'scale', 'shift', 'begin_norm_axis', 'epsilon', 'param_attr', 'bias_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(True, True, 1, 1e-05, None, None, None, None)), ('document', 'de6a906950bae9f3c245cb744d22b94e'
))
paddle.fluid.layers.group_norm
(ArgSpec(args=['input', 'groups', 'epsilon', 'param_attr', 'bias_attr', 'act', 'data_layout', 'name'], varargs=None, keywords=None, defaults=(1e-05, None, None, None, 'NCHW', None)), ('document', '419c3a24a83cc89219a029cf4092788b'
))
paddle.fluid.layers.softmax_with_cross_entropy
(ArgSpec(args=['logits', 'label', 'soft_label', 'ignore_index', 'numeric_stable_mode', 'return_softmax'], varargs=None, keywords=None, defaults=(False, -100, True, False)), ('document', 'bce1b75e3d95b75cacd1099655cbb3c3'
))
paddle.fluid.layers.smooth_l1
(ArgSpec(args=['x', 'y', 'inside_weight', 'outside_weight', 'sigma'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'c6b175d253c55baf4b9c0eca9b1dda88'
))
paddle.fluid.layers.one_hot
(ArgSpec(args=['input', 'depth'], varargs=None, keywords=None, defaults=None), ('document', '6148b6a555cbfb62fdcd030d8982c18c')
)
paddle.fluid.layers.autoincreased_step_counter
(ArgSpec(args=['counter_name', 'begin', 'step'], varargs=None, keywords=None, defaults=(None, 1, 1)), ('document', '3f6c828594720c9b2da89c464be94478'
))
paddle.fluid.layers.reshape
(ArgSpec(args=['x', 'shape', 'actual_shape', 'act', 'inplace', 'name'], varargs=None, keywords=None, defaults=(None, None, False, None)), ('document', '323c019f257e55ddea4a824a362de62f'
))
paddle.fluid.layers.squeeze
(ArgSpec(args=['input', 'axes', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '3229d06517f794e86ca3da14c38b1465'
))
paddle.fluid.layers.unsqueeze
(ArgSpec(args=['input', 'axes', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'bbd62da391b1df984a1909d069a759b2'
))
paddle.fluid.layers.lod_reset
(ArgSpec(args=['x', 'y', 'target_lod'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'f122194c562bd674f6ecdccf33785f99'
))
paddle.fluid.layers.lrn
(ArgSpec(args=['input', 'n', 'k', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(5, 1.0, 0.0001, 0.75, None)), ('document', '0795e9940e42dcd62953514ff7e09f77'
))
paddle.fluid.layers.pad
(ArgSpec(args=['x', 'paddings', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None)), ('document', '2f28153bdd2d5ea6f7bad5867bd03eeb'
))
paddle.fluid.layers.pad_constant_like
(ArgSpec(args=['x', 'y', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None)), ('document', 'd2e1f45fef51b2c214e3f2aa8976c46c'
))
paddle.fluid.layers.label_smooth
(ArgSpec(args=['label', 'prior_dist', 'epsilon', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, 0.1, 'float32', None)), ('document', '70c113658102a11cc5d8e3d45145737a'
))
paddle.fluid.layers.roi_pool
(ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale'], varargs=None, keywords=None, defaults=(1, 1, 1.0)), ('document', 'c317aa595deb31649083c8faa91cdb97'
))
paddle.fluid.layers.roi_align
(ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale', 'sampling_ratio', 'name'], varargs=None, keywords=None, defaults=(1, 1, 1.0, -1, None)), ('document', '12c5bbb8b38c42e623fbc47611d766e1'
))
paddle.fluid.layers.dice_loss
(ArgSpec(args=['input', 'label', 'epsilon'], varargs=None, keywords=None, defaults=(1e-05,)), ('document', '1ba0508d573f65feecf3564dce22aa1d'
))
paddle.fluid.layers.image_resize
(ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'resample', 'actual_shape', 'align_corners', 'align_mode'], varargs=None, keywords=None, defaults=(None, None, None, 'BILINEAR', None, True, 1)), ('document', 'b3ecb819454832885c1f0f3ab9a5b938'
))
paddle.fluid.layers.image_resize_short
(ArgSpec(args=['input', 'out_short_len', 'resample'], varargs=None, keywords=None, defaults=('BILINEAR',)), ('document', '06211aefc50c5a3e940d7204d859cdf7'
))
paddle.fluid.layers.resize_bilinear
(ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners', 'align_mode'], varargs=None, keywords=None, defaults=(None, None, None, None, True, 1)), ('document', 'e4fb4ed511b2293b8f04f7e872afbfd7'
))
paddle.fluid.layers.resize_nearest
(ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners'], varargs=None, keywords=None, defaults=(None, None, None, None, True)), ('document', '735fa9758a6d7ff3b47d7b827f961c1d'
))
paddle.fluid.layers.gather
(ArgSpec(args=['input', 'index'], varargs=None, keywords=None, defaults=None), ('document', '98f1c86716b9b7f4dda83f20e2adeee2')
)
paddle.fluid.layers.scatter
(ArgSpec(args=['input', 'index', 'updates', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '65f8e9d8ddfd0b412f940579c4faa342'
))
paddle.fluid.layers.sequence_scatter
(ArgSpec(args=['input', 'index', 'updates', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '15b522457dfef103f0c20ca9d397678b'
))
paddle.fluid.layers.random_crop
(ArgSpec(args=['x', 'shape', 'seed'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c9ab9e460ef0a1823249935a30e82c66'
))
paddle.fluid.layers.mean_iou
(ArgSpec(args=['input', 'label', 'num_classes'], varargs=None, keywords=None, defaults=None), ('document', '35cbbdfa585d027bb490707c95a176b9')
)
paddle.fluid.layers.relu
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '866ffa1cc93f29e23662b526a7596537'
))
paddle.fluid.layers.selu
(ArgSpec(args=['x', 'scale', 'alpha', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '9044c7fe667b76cb2d9264f2db11f417'
))
paddle.fluid.layers.log
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '98247c59d1c9b40af6730001b2aea73d'
))
paddle.fluid.layers.crop
(ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '883104791204d3127e24234bb630b2e7'
))
paddle.fluid.layers.rank_loss
(ArgSpec(args=['label', 'left', 'right', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c542e39ac6add24a6bef6e79bf5617e2'
))
paddle.fluid.layers.margin_rank_loss
(ArgSpec(args=['label', 'left', 'right', 'margin', 'name'], varargs=None, keywords=None, defaults=(0.1, None)), ('document', '6d19dcc19917080b7ff3e03bde451bc8'
))
paddle.fluid.layers.elu
(ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', '463258ee9f8b60760eb1e26357cc9bfa'
))
paddle.fluid.layers.relu6
(ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(6.0, None)), ('document', '6f367339caf6c7124bc262fe1475df70'
))
paddle.fluid.layers.pow
(ArgSpec(args=['x', 'factor', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', 'a5117c1eb84aca2ac0b0abab337a4799'
))
paddle.fluid.layers.stanh
(ArgSpec(args=['x', 'scale_a', 'scale_b', 'name'], varargs=None, keywords=None, defaults=(0.6666666666666666, 1.7159, None)), ('document', '959936a477efc6c1447a9c8bf8ce94bb'
))
paddle.fluid.layers.hard_sigmoid
(ArgSpec(args=['x', 'slope', 'offset', 'name'], varargs=None, keywords=None, defaults=(0.2, 0.5, None)), ('document', 'c82059b6fea1aa730f9aac911807b756'
))
paddle.fluid.layers.swish
(ArgSpec(args=['x', 'beta', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', 'ef745e55a48763ee7b46b21a81dc7e84'
))
paddle.fluid.layers.prelu
(ArgSpec(args=['x', 'mode', 'param_attr', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'f6acef7ff7d887e49ff499fbb1dad4a9'
))
paddle.fluid.layers.brelu
(ArgSpec(args=['x', 't_min', 't_max', 'name'], varargs=None, keywords=None, defaults=(0.0, 24.0, None)), ('document', '3db337c195e156e6ef2b8b4a57113600'
))
paddle.fluid.layers.leaky_relu
(ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(0.02, None)), ('document', 'f878486c82b576938151daad0de995a0'
))
paddle.fluid.layers.soft_relu
(ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(40.0, None)), ('document', '869adce548c342d6cc1bd88a948d83c9'
))
paddle.fluid.layers.flatten
(ArgSpec(args=['x', 'axis', 'name'], varargs=None, keywords=None, defaults=(1, None)), ('document', 'cb295c13cb957db85cd9609269d7784d'
))
paddle.fluid.layers.sequence_mask
(ArgSpec(args=['x', 'maxlen', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, 'int64', None)), ('document', 'f0dd6eddd3bff015a3c05269d82fcbd8'
))
paddle.fluid.layers.stack
(ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', '367cfbb642839beacb5d117e2d2b4041'
))
paddle.fluid.layers.pad2d
(ArgSpec(args=['input', 'paddings', 'mode', 'pad_value', 'data_format', 'name'], varargs=None, keywords=None, defaults=([0, 0, 0, 0], 'constant', 0.0, 'NCHW', None)), ('document', '7f4d46320cc077ca2e8db600c35f4030'
))
paddle.fluid.layers.unstack
(ArgSpec(args=['x', 'axis', 'num'], varargs=None, keywords=None, defaults=(0, None)), ('document', '98eb9d633116efcfc6f90c114bd44fd6'
))
paddle.fluid.layers.sequence_enumerate
(ArgSpec(args=['input', 'win_size', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0, None)), ('document', 'f6028537085dc296103bbbd85fa7763d'
))
paddle.fluid.layers.expand
(ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '117d3607d1ffa0571835bbaebc7857ff'
))
paddle.fluid.layers.sequence_concat
(ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '3a1d155dd1bf6e72a0a3e3e1519591d1'
))
paddle.fluid.layers.scale
(ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None)), ('document', '30190413b2fa442e7466d6cf2ce5ea07'
))
paddle.fluid.layers.elementwise_add
(ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '6bfbe72cbadc95ac7ab88c05ed5bf9f0'
))
paddle.fluid.layers.elementwise_div
(ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', 'cc6e6cc1cb942a152dde3ef08d5f165c'
))
paddle.fluid.layers.elementwise_sub
(ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', 'a12abdab09c3e57af5a6e1e9f138684a'
))
paddle.fluid.layers.elementwise_mul
(ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '422c77dbfcff355a57b5fdd4ec876daa'
))
paddle.fluid.layers.elementwise_max
(ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', 'f0bb0b2c454541cfafa761021a5cc776'
))
paddle.fluid.layers.elementwise_min
(ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '8a9cdefefbccbf9f6b0991c0946a21e9'
))
paddle.fluid.layers.elementwise_pow
(ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '1aea4e197c552a284f83888a3c67a32e'
))
paddle.fluid.layers.uniform_random_batch_size_like
(ArgSpec(args=['input', 'shape', 'dtype', 'input_dim_idx', 'output_dim_idx', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', 0, 0, -1.0, 1.0, 0)), ('document', '129e0a3257f1d532a948eedf9d5bf671'
))
paddle.fluid.layers.gaussian_random
(ArgSpec(args=['shape', 'mean', 'std', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32')), ('document', '389dafe36e099841b6a7fb18d11f1b4c
'))
paddle.fluid.layers.sampling_id
(ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32')), ('document', '840fdac643d1341c1cae218d4511dbb9
'))
paddle.fluid.layers.gaussian_random_batch_size_like
(ArgSpec(args=['input', 'shape', 'input_dim_idx', 'output_dim_idx', 'mean', 'std', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0, 0, 0.0, 1.0, 0, 'float32')), ('document', '840026b4766613c5705e06563cd103b6
'))
paddle.fluid.layers.sum
(ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', 'a418e3ccb5e2ac21bd60f5cc221d5860')
)
paddle.fluid.layers.slice
(ArgSpec(args=['input', 'axes', 'starts', 'ends'], varargs=None, keywords=None, defaults=None), ('document', '01dbb91e7c74cb11336cd531013de51a')
)
paddle.fluid.layers.shape
(ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', '17db0f814eb7bb5a3fac1ca6e60e16d8')
)
paddle.fluid.layers.logical_and
(ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'cdcf20c494c92060d10feb9374532f42'
))
paddle.fluid.layers.logical_or
(ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '0eae3f726a4afe590757552fa3ced012'
))
paddle.fluid.layers.logical_xor
(ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'b0daaa3fa4a0aa62f9b58c43d959eb25'
))
paddle.fluid.layers.logical_not
(ArgSpec(args=['x', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'cd1c8cf31e040427d4e05711044caeb6'
))
paddle.fluid.layers.clip
(ArgSpec(args=['x', 'min', 'max', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b020b7aab59719be98a4ae229a76deba'
))
paddle.fluid.layers.clip_by_norm
(ArgSpec(args=['x', 'max_norm', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a1ea0bc5a926f427458c4254ca022749'
))
paddle.fluid.layers.mean
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'd638d915195ce86a8d7963b81110d4c8'
))
paddle.fluid.layers.mul
(ArgSpec(args=['x', 'y', 'x_num_col_dims', 'y_num_col_dims', 'name'], varargs=None, keywords=None, defaults=(1, 1, None)), ('document', 'ccd37fa6b53f074adbfb732d738c4c2d'
))
paddle.fluid.layers.sigmoid_cross_entropy_with_logits
(ArgSpec(args=['x', 'label', 'ignore_index', 'name', 'normalize'], varargs=None, keywords=None, defaults=(-100, None, False)), ('document', '180c284317ea45ef89a460d8d79c0b72'
))
paddle.fluid.layers.maxout
(ArgSpec(args=['x', 'groups', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '891870d069a6aea746d34cc53b61690c'
))
paddle.fluid.layers.space_to_depth
(ArgSpec(args=['x', 'blocksize', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '5f207ae10589ebe38a63575ef6ff8e1e'
))
paddle.fluid.layers.affine_grid
(ArgSpec(args=['theta', 'out_shape', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '51def402b8910e163cbace9d0c0526ed'
))
paddle.fluid.layers.sequence_reverse
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '77a6d80aa5551ca70324fc975c44507f'
))
paddle.fluid.layers.affine_channel
(ArgSpec(args=['x', 'scale', 'bias', 'data_layout', 'name'], varargs=None, keywords=None, defaults=(None, None, 'NCHW', None)), ('document', '2f46f1ff39a13ab00857e7b9f44b2fa7'
))
paddle.fluid.layers.similarity_focus
(ArgSpec(args=['input', 'axis', 'indexes', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '70e3b5182a18b40b47ecabd7c8490a35'
))
paddle.fluid.layers.hash
(ArgSpec(args=['input', 'hash_size', 'num_hash', 'name'], varargs=None, keywords=None, defaults=(1, None)), ('document', '9bb77f8dc002dd2ce75d4769eaaf5007'
))
paddle.fluid.layers.grid_sampler
(ArgSpec(args=['x', 'grid', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'd256cba1c41a5ed92ce3f31e24a2ca6d'
))
paddle.fluid.layers.log_loss
(ArgSpec(args=['input', 'label', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(0.0001, None)), ('document', '4b5a2341023afe63157a066c14254f98'
))
paddle.fluid.layers.add_position_encoding
(ArgSpec(args=['input', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '4b9c2e8af5817937d831820874b5aa77'
))
paddle.fluid.layers.bilinear_tensor_product
(ArgSpec(args=['x', 'y', 'size', 'act', 'name', 'param_attr', 'bias_attr'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'aa7540a0fa73ff69a02e11b4091aab75'
))
paddle.fluid.layers.merge_selected_rows
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'dc63315b84f591ac79ecca0c3632027a'
))
paddle.fluid.layers.get_tensor_from_selected_rows
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '7ffc849e71f31dfe29030ff94e662de6'
))
paddle.fluid.layers.lstm
(ArgSpec(args=['input', 'init_h', 'init_c', 'max_len', 'hidden_size', 'num_layers', 'dropout_prob', 'is_bidirec', 'is_test', 'name', 'default_initializer', 'seed'], varargs=None, keywords=None, defaults=(0.0, False, False, None, None, -1)), ('document', 'd5e6c494ac35100e2ed4d4bd9a1ed932'
))
paddle.fluid.layers.shuffle_channel
(ArgSpec(args=['x', 'group', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '2fa6782d43d02ae64482d21235a82949'
))
paddle.fluid.layers.py_func
(ArgSpec(args=['func', 'x', 'out', 'backward_func', 'skip_vars_in_backward_input'], varargs=None, keywords=None, defaults=(None, None)), ('document', '8404e472ac12b4a30a505d3d3a3e5fdb'
))
paddle.fluid.layers.psroi_pool
(ArgSpec(args=['input', 'rois', 'output_channels', 'spatial_scale', 'pooled_height', 'pooled_width', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '1546136806fef5c08f6918544bd9151d'
))
paddle.fluid.layers.teacher_student_sigmoid_loss
(ArgSpec(args=['input', 'label', 'soft_max_up_bound', 'soft_max_lower_bound'], varargs=None, keywords=None, defaults=(15.0, -15.0)), ('document', '2f6ff96864054a31aa4bb659c6722c99'
))
paddle.fluid.layers.huber_loss
(ArgSpec(args=['input', 'label', 'delta'], varargs=None, keywords=None, defaults=None), ('document', '431a4301c35032166ec029f7432c80a7')
)
paddle.fluid.layers.tree_conv
(ArgSpec(args=['nodes_vector', 'edge_set', 'output_size', 'num_filters', 'max_depth', 'act', 'param_attr', 'bias_attr', 'name'], varargs=None, keywords=None, defaults=(1, 2, 'tanh', None, None, None)), ('document', '34ea12ac9f10a65dccbc50100d12e607'
))
paddle.fluid.layers.data
(ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True)), ('document', '33bbd42027d872b3818b3d64ec52e139'
))
paddle.fluid.layers.open_files
(ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None)), ('document', 'b1ae2e1cc0750e58726374061ea90ecc'
))
paddle.fluid.layers.read_file
(ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None), ('document', 'b0a1c2fc51c27a106da28f3308c41f5e')
)
paddle.fluid.layers.shuffle
(ArgSpec(args=['reader', 'buffer_size'], varargs=None, keywords=None, defaults=None), ('document', 'f967a73426db26f970bc70bfb03cffca')
)
paddle.fluid.layers.batch
(ArgSpec(args=['reader', 'batch_size'], varargs=None, keywords=None, defaults=None), ('document', 'f563d376d35e1a4c4db100fd11b381a0')
)
paddle.fluid.layers.double_buffer
(ArgSpec(args=['reader', 'place', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '07e5b796674796eb1ef3fee9c10d24e3'
))
paddle.fluid.layers.random_data_generator
(ArgSpec(args=['low', 'high', 'shapes', 'lod_levels', 'for_parallel'], varargs=None, keywords=None, defaults=(True,)), ('document', '9b7f0f86ec24bbc97643cadcb6499cff'
))
paddle.fluid.layers.py_reader
(ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True)), ('document', '13dabc57863f62ab3141586784ee356b'
))
paddle.fluid.layers.create_py_reader_by_data
(ArgSpec(args=['capacity', 'feed_list', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, True)), ('document', '350f74d93fab9adb2ac4950f1c26416b'
))
paddle.fluid.layers.Preprocessor.__init__
(ArgSpec(args=['self', 'reader', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.layers.Preprocessor.block
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.layers.Preprocessor.inputs
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.layers.Preprocessor.outputs
(ArgSpec(args=['self'], varargs='outs', keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.layers.load
(ArgSpec(args=['out', 'file_path', 'load_as_fp16'], varargs=None, keywords=None, defaults=(None,)), ('document', '9d1a4bc97bbce9fa1d4f7a4200a771ff'
))
paddle.fluid.layers.create_tensor
(ArgSpec(args=['dtype', 'name', 'persistable'], varargs=None, keywords=None, defaults=(None, False)), ('document', 'c0c3d0194f83fff8ea99ce0820657dae'
))
paddle.fluid.layers.create_parameter
(ArgSpec(args=['shape', 'dtype', 'name', 'attr', 'is_bias', 'default_initializer'], varargs=None, keywords=None, defaults=(None, None, False, None)), ('document', 'd62b866c899bc1fedb5385f95b88e1f8'
))
paddle.fluid.layers.create_global_var
(ArgSpec(args=['shape', 'value', 'dtype', 'persistable', 'force_cpu', 'name'], varargs=None, keywords=None, defaults=(False, False, None)), ('document', 'ab914fac893607e29ac6e52bbdbea1a4'
))
paddle.fluid.layers.cast
(ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=None), ('document', '60cb8f843d625abf33f8bf12455b8f99')
)
paddle.fluid.layers.tensor_array_to_tensor
(ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(1, None)), ('document', 'b12717d3d4567e6119589f7f655b0cbb'
))
paddle.fluid.layers.concat
(ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(0, None)), ('document', 'b19b79be4f05e85d1d6cec642c9fb535'
))
paddle.fluid.layers.sums
(ArgSpec(args=['input', 'out'], varargs=None, keywords=None, defaults=(None,)), ('document', '42912092418620b4be07f36af31e7816'
))
paddle.fluid.layers.assign
(ArgSpec(args=['input', 'output'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b690184f3537df5501e4d9d8f31152a5'
))
paddle.fluid.layers.fill_constant_batch_size_like
(ArgSpec(args=['input', 'shape', 'dtype', 'value', 'input_dim_idx', 'output_dim_idx'], varargs=None, keywords=None, defaults=(0, 0)), ('document', 'd4059a2f5763036b07018d76429f9acb'
))
paddle.fluid.layers.fill_constant
(ArgSpec(args=['shape', 'dtype', 'value', 'force_cpu', 'out'], varargs=None, keywords=None, defaults=(False, None)), ('document', '1d8b14729639fa38509c79b9784740fa'
))
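
A short sketch of how the tensor-construction layers listed above (fill_constant, cast, concat, sums) compose; the shapes and constants are arbitrary choices for illustration.

# Sketch only; shapes and constants are arbitrary.
import paddle.fluid as fluid

ones_i64 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=1)
ones_f32 = fluid.layers.cast(ones_i64, dtype='float32')
twos_f32 = fluid.layers.fill_constant(shape=[2, 3], dtype='float32', value=2.0)
stacked = fluid.layers.concat([ones_f32, twos_f32], axis=0)  # shape [4, 3]
total = fluid.layers.sums([ones_f32, twos_f32])              # element-wise sum
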
paddle.fluid.layers.argmin
(ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', '2778a1d34be49263a51211885599ea37'
))
paddle.fluid.layers.argmax
(ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', '04114996cfb98994ba222804a1a6109f'
))
paddle.fluid.layers.argsort
(ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '68ec45c6fb6b93e47de9c9a0945fb98e'
))
paddle.fluid.layers.ones
(ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=None, keywords=None, defaults=(False,)), ('document', 'b402489c62e668df42e7daceb63c142b'
))
paddle.fluid.layers.zeros
(ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=None, keywords=None, defaults=(False,)), ('document', 'c155e2efc56ffa5ed4658cca0272e491'
))
paddle.fluid.layers.reverse
(ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=None), ('document', '8ee7cb6ca639e7460e825f953b65d94d')
)
paddle.fluid.layers.has_inf
(ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', '8f8c0306117ea441f20dcbbdba1f0ecc')
)
paddle.fluid.layers.has_nan
(ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', '2e53e83127dbfd86e7098bdfe9a549e8')
)
paddle.fluid.layers.isfinite
(ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', '0a437011c3906079fd8947ed3e52d292')
)
paddle.fluid.layers.While.__init__
(ArgSpec(args=['self', 'cond', 'is_test', 'name'], varargs=None, keywords=None, defaults=(False, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.layers.While.block
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.layers.Switch.__init__
(ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.layers.Switch.case
(ArgSpec(args=['self', 'condition'], varargs=None, keywords=None, defaults=None), ('document', 'f7c7160014c1b46cfeda9dd5808d1789')
)
paddle.fluid.layers.Switch.default
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '50853ae884df03d9c36703bb46d9ef07')
)
paddle.fluid.layers.increment
(ArgSpec(args=['x', 'value', 'in_place'], varargs=None, keywords=None, defaults=(1.0, True)), ('document', '73bb96ec4783ec1a11e760e8851b0e77'
))
paddle.fluid.layers.array_write
(ArgSpec(args=['x', 'i', 'array'], varargs=None, keywords=None, defaults=(None,)), ('document', '40b6d15f4c86b2b09df340d7778ad713'
))
paddle.fluid.layers.create_array
(ArgSpec(args=['dtype'], varargs=None, keywords=None, defaults=None), ('document', '2d4f20087080ba5105b55205ad5c5b6a')
)
paddle.fluid.layers.less_than
(ArgSpec(args=['x', 'y', 'force_cpu', 'cond'], varargs=None, keywords='ignored', defaults=(None, None)), ('document', '067bbc799c66289ca8b8924c26b6673f'
))
paddle.fluid.layers.equal
(ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '80c29b1dc64718f0116de90d1ac88a77'
))
paddle.fluid.layers.array_read
(ArgSpec(args=['array', 'i'], varargs=None, keywords=None, defaults=None), ('document', '0275133f1dde2aed528b4d3230edf823')
)
paddle.fluid.layers.array_length
(ArgSpec(args=['array'], varargs=None, keywords=None, defaults=None), ('document', 'ffb8b9578ec66db565b223d313aa82a2')
)
paddle.fluid.layers.IfElse.__init__
(ArgSpec(args=['self', 'cond', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.layers.IfElse.false_block
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.layers.IfElse.input
(ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.layers.IfElse.output
(ArgSpec(args=['self'], varargs='outs', keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.layers.IfElse.true_block
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.layers.DynamicRNN.__init__
(ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.layers.DynamicRNN.block
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6d3e0a5d9aa519a9773a36e1620ea9b7')
)
paddle.fluid.layers.DynamicRNN.memory
(ArgSpec(args=['self', 'init', 'shape', 'value', 'need_reorder', 'dtype'], varargs=None, keywords=None, defaults=(None, None, 0.0, False, 'float32')), ('document', 'b9174d4e91505b0c8ecc193eb51e248d'))
paddle.fluid.layers.DynamicRNN.output
(ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None), ('document', 'b439a176a3328de8a75bdc5c08eece4a')
)
paddle.fluid.layers.DynamicRNN.static_input
(ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None), ('document', 'f29ad2478b6b2ad4f413d2936a331ea0')
)
paddle.fluid.layers.DynamicRNN.step_input
(ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None), ('document', '169d694d2224f62b4f3afdc3dbc19e95')
)
paddle.fluid.layers.DynamicRNN.update_memory
(ArgSpec(args=['self', 'ex_mem', 'new_mem'], varargs=None, keywords=None, defaults=None), ('document', '5d83987da13b98363d6a807a52d8024f')
)
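
The DynamicRNN methods above (block, step_input, memory, update_memory, output) are meant to be used inside one `with` block; below is a hedged sketch in which the vocabulary size, embedding width, and hidden size are assumed values.

# Sketch only; VOCAB, the embedding width and the hidden size are assumptions.
import paddle.fluid as fluid

VOCAB = 10000
sentence = fluid.layers.data(name='sentence', shape=[1], dtype='int64', lod_level=1)
emb = fluid.layers.embedding(input=sentence, size=[VOCAB, 32])

drnn = fluid.layers.DynamicRNN()
with drnn.block():
    word = drnn.step_input(emb)                      # one time step of the sequence
    prev = drnn.memory(shape=[32], value=0.0)        # hidden state, zero-initialized
    hidden = fluid.layers.fc(input=[word, prev], size=32, act='tanh')
    drnn.update_memory(prev, hidden)                 # carry the state to the next step
    drnn.output(hidden)

last = fluid.layers.sequence_last_step(drnn())       # drnn() returns the stacked outputs
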
paddle.fluid.layers.StaticRNN.__init__
(ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.layers.StaticRNN.memory
(ArgSpec(args=['self', 'init', 'shape', 'batch_ref', 'init_value', 'init_batch_dim_idx', 'ref_batch_dim_idx'], varargs=None, keywords=None, defaults=(None, None, None, 0.0, 0, 1)), ('document', 'c24e368e23afac1ed91a78a639d7a9c7'
))
paddle.fluid.layers.StaticRNN.output
(ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.layers.StaticRNN.step
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.layers.StaticRNN.step_input
(ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.layers.StaticRNN.step_output
(ArgSpec(args=['self', 'o'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.layers.StaticRNN.update_memory
(ArgSpec(args=['self', 'mem', 'var'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.layers.reorder_lod_tensor_by_rank
(ArgSpec(args=['x', 'rank_table'], varargs=None, keywords=None, defaults=None), ('document', '3545f529ef04e8f6ecb76b47fa3df01a')
)
paddle.fluid.layers.Print
(ArgSpec(args=['input', 'first_n', 'message', 'summarize', 'print_tensor_name', 'print_tensor_type', 'print_tensor_shape', 'print_tensor_lod', 'print_phase'], varargs=None, keywords=None, defaults=(-1, None, -1, True, True, True, True, 'both')), ('document', '5fef91b0e21c93610785f2b1f7161732'))
paddle.fluid.layers.is_empty
(ArgSpec(args=['x', 'cond'], varargs=None, keywords='ignored', defaults=(None,)), ('document', 'bbe578dbb49ad13e15b014e98c22b519'
))
paddle.fluid.layers.sigmoid
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '29a25ba78de79152076cacfc5443137d'
))
paddle.fluid.layers.logsigmoid
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '81ccb7acafd06c7728e11581f5d342e3'
))
paddle.fluid.layers.exp
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e6b3e769413d96aab4176f96db25984b'
))
paddle.fluid.layers.tanh
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e9d586a0b5bd05f67ee78048f9d503b6'
))
paddle.fluid.layers.tanh_shrink
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '1e521554b9fdda9061ec6d306f0709b7'
))
paddle.fluid.layers.softshrink
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '9eef31597bbafa2bd49691e072296e13'
))
paddle.fluid.layers.sqrt
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '072a8541e0f632366bba10f67cb0db27'
))
paddle.fluid.layers.abs
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '64650ac42cf82e9920cb0b172b1d29fd'
))
paddle.fluid.layers.ceil
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c75d67dc5fe28f68e4cfffead4f698ad'
))
paddle.fluid.layers.floor
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '647b16c5da5ef909649ae02abb434973'
))
paddle.fluid.layers.cos
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '485f2686bcc2fe37a4bd893769c8a3e2'
))
paddle.fluid.layers.sin
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '01f1766aa76eff1df30147505b59f7c4'
))
paddle.fluid.layers.round
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b47f5da13913d3e56bdb1e612a73f3f2'
))
paddle.fluid.layers.reciprocal
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'cc6ac2f14f03c52aaa83a59bf83b8d26'
))
paddle.fluid.layers.square
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '48dfb45d773dbc30126c3a7f777de5ee'
))
paddle.fluid.layers.softplus
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '459c5781e9d1dd88283b7c5769d7872a'
))
paddle.fluid.layers.softsign
(ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '80846bcd4bd457207457a6d5411f4148'
))
paddle.fluid.layers.uniform_random
(ArgSpec(args=['shape', 'dtype', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', -1.0, 1.0, 0)), ('document', '308b619af849caa82bbc31e897f5e641'
))
paddle.fluid.layers.hard_shrink
(ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c142f5884f3255e0d6075c286bbd531e'
))
paddle.fluid.layers.cumsum
(ArgSpec(args=['x', 'axis', 'exclusive', 'reverse'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '944d7c03057f5fc88bc78acd4d82f926'
))
paddle.fluid.layers.thresholded_relu
(ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,)), ('document', '90566ea449ea4c681435546e2f70610a'
))
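
The activation layers above all share the (x, name=None) signature; a brief sketch applying a few of them to the same placeholder input.

# Sketch only; the input shape is a placeholder.
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[8], dtype='float32')
y_sig = fluid.layers.sigmoid(x)
y_tanh = fluid.layers.tanh(x)
y_sqrt = fluid.layers.sqrt(fluid.layers.abs(x))       # abs keeps sqrt's input non-negative
y_thre = fluid.layers.thresholded_relu(x, threshold=0.5)
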
paddle.fluid.layers.prior_box
(ArgSpec(args=['input', 'image', 'min_sizes', 'max_sizes', 'aspect_ratios', 'variance', 'flip', 'clip', 'steps', 'offset', 'name', 'min_max_aspect_ratios_order'], varargs=None, keywords=None, defaults=(None, [1.0], [0.1, 0.1, 0.2, 0.2], False, False, [0.0, 0.0], 0.5, None, False)), ('document', '14cac0ee643fa6e026ad82aeeee75bd8'
))
paddle.fluid.layers.density_prior_box
(ArgSpec(args=['input', 'image', 'densities', 'fixed_sizes', 'fixed_ratios', 'variance', 'clip', 'steps', 'offset', 'flatten_to_2d', 'name'], varargs=None, keywords=None, defaults=(None, None, None, [0.1, 0.1, 0.2, 0.2], False, [0.0, 0.0], 0.5, False, None)), ('document', 'a0d762bb08de9ce93bc780aa57cd5cd9'
))
paddle.fluid.layers.multi_box_head
(ArgSpec(args=['inputs', 'image', 'base_size', 'num_classes', 'aspect_ratios', 'min_ratio', 'max_ratio', 'min_sizes', 'max_sizes', 'steps', 'step_w', 'step_h', 'offset', 'variance', 'flip', 'clip', 'kernel_size', 'pad', 'stride', 'name', 'min_max_aspect_ratios_order'], varargs=None, keywords=None, defaults=(None, None, None, None, None, None, None, 0.5, [0.1, 0.1, 0.2, 0.2], True, False, 1, 0, 1, None, False)), ('document', 'a6ab47a2fe681e52fabb7057ddf0efdd'
))
paddle.fluid.layers.bipartite_match
(ArgSpec(args=['dist_matrix', 'match_type', 'dist_threshold', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '3ddb9b966f193900193a95a3df77c3c1'
))
paddle.fluid.layers.target_assign
(ArgSpec(args=['input', 'matched_indices', 'negative_indices', 'mismatch_value', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'c0b334f917828f95056f6ebe10907b1c'
))
paddle.fluid.layers.detection_output
(ArgSpec(args=['loc', 'scores', 'prior_box', 'prior_box_var', 'background_label', 'nms_threshold', 'nms_top_k', 'keep_top_k', 'score_threshold', 'nms_eta'], varargs=None, keywords=None, defaults=(0, 0.3, 400, 200, 0.01, 1.0)), ('document', 'c33093a82a46e3091e789e5572588db1'
))
paddle.fluid.layers.ssd_loss
(ArgSpec(args=['location', 'confidence', 'gt_box', 'gt_label', 'prior_box', 'prior_box_var', 'background_label', 'overlap_threshold', 'neg_pos_ratio', 'neg_overlap', 'loc_loss_weight', 'conf_loss_weight', 'match_type', 'mining_type', 'normalize', 'sample_size'], varargs=None, keywords=None, defaults=(None, 0, 0.5, 3.0, 0.5, 1.0, 1.0, 'per_prediction', 'max_negative', True, None)), ('document', '6d5028fd09d01ab82d296adc0ea95aee'
))
paddle.fluid.layers.detection_map
(ArgSpec(args=['detect_res', 'label', 'class_num', 'background_label', 'overlap_threshold', 'evaluate_difficult', 'has_state', 'input_states', 'out_states', 'ap_version'], varargs=None, keywords=None, defaults=(0, 0.3, True, None, None, None, 'integral')), ('document', '1467d91b50c22cd52103b4aa1ee9d0a1'))
paddle.fluid.layers.rpn_target_assign
(ArgSpec(args=['bbox_pred', 'cls_logits', 'anchor_box', 'anchor_var', 'gt_boxes', 'is_crowd', 'im_info', 'rpn_batch_size_per_im', 'rpn_straddle_thresh', 'rpn_fg_fraction', 'rpn_positive_overlap', 'rpn_negative_overlap', 'use_random'], varargs=None, keywords=None, defaults=(256, 0.0, 0.5, 0.7, 0.3, True)), ('document', '1dddef3eb4b3cbd4df8e03ac480dbf97'
))
paddle.fluid.layers.anchor_generator
(ArgSpec(args=['input', 'anchor_sizes', 'aspect_ratios', 'variance', 'stride', 'offset', 'name'], varargs=None, keywords=None, defaults=(None, None, [0.1, 0.1, 0.2, 0.2], None, 0.5, None)), ('document', '23337cc57bbf5be73884b6bd0f849603'
))
paddle.fluid.layers.roi_perspective_transform
(ArgSpec(args=['input', 'rois', 'transformed_height', 'transformed_width', 'spatial_scale'], varargs=None, keywords=None, defaults=(1.0,)), ('document', '5761f9ed83654314416e24372b33bb84'
))
paddle.fluid.layers.generate_proposal_labels
(ArgSpec(args=['rpn_rois', 'gt_classes', 'is_crowd', 'gt_boxes', 'im_info', 'batch_size_per_im', 'fg_fraction', 'fg_thresh', 'bg_thresh_hi', 'bg_thresh_lo', 'bbox_reg_weights', 'class_nums', 'use_random'], varargs=None, keywords=None, defaults=(256, 0.25, 0.25, 0.5, 0.0, [0.1, 0.1, 0.2, 0.2], None, True)), ('document', '87863717edeb7fe87a1268976cbc015d'
))
paddle.fluid.layers.generate_proposals
(ArgSpec(args=['scores', 'bbox_deltas', 'im_info', 'anchors', 'variances', 'pre_nms_top_n', 'post_nms_top_n', 'nms_thresh', 'min_size', 'eta', 'name'], varargs=None, keywords=None, defaults=(6000, 1000, 0.5, 0.1, 1.0, None)), ('document', '57ab49f3f324f310b7eed322e7c1057a'
))
paddle.fluid.layers.generate_mask_labels
(ArgSpec(args=['im_info', 'gt_classes', 'is_crowd', 'gt_segms', 'rois', 'labels_int32', 'num_classes', 'resolution'], varargs=None, keywords=None, defaults=None), ('document', 'f73706a65468e9ca3e0bee4a31521b0a')
)
paddle.fluid.layers.iou_similarity
(ArgSpec(args=['x', 'y', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '587845f60c5d97ffdf2dfd21da52eca1'
))
paddle.fluid.layers.box_coder
(ArgSpec(args=['prior_box', 'prior_box_var', 'target_box', 'code_type', 'box_normalized', 'name', 'axis'], varargs=None, keywords=None, defaults=('encode_center_size', True, None, 0)), ('document', '032d0f4b7d8f6235ee5d91e473344f0e'
))
paddle.fluid.layers.polygon_box_transform
(ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '0e5ac2507723a0b5adec473f9556799b'
))
paddle.fluid.layers.yolov3_loss
(ArgSpec(args=['x', 'gtbox', 'gtlabel', 'anchors', 'anchor_mask', 'class_num', 'ignore_thresh', 'downsample_ratio', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '991e934c3e09abf0edec7c9c978b4691'
))
paddle.fluid.layers.box_clip
(ArgSpec(args=['input', 'im_info', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '397e9e02b451d99c56e20f268fa03f2e'
))
paddle.fluid.layers.multiclass_nms
(ArgSpec(args=['bboxes', 'scores', 'score_threshold', 'nms_top_k', 'keep_top_k', 'nms_threshold', 'normalized', 'nms_eta', 'background_label', 'name'], varargs=None, keywords=None, defaults=(0.3, True, 1.0, 0, None)), ('document', 'ca7d1107b6c5d2d6d8221039a220fde0'
))
paddle.fluid.layers.accuracy
(ArgSpec(args=['input', 'label', 'k', 'correct', 'total'], varargs=None, keywords=None, defaults=(1, None, None)), ('document', '9808534c12c5e739a10f73ebb0b4eafd'
))
paddle.fluid.layers.auc
(ArgSpec(args=['input', 'label', 'curve', 'num_thresholds', 'topk', 'slide_steps'], varargs=None, keywords=None, defaults=('ROC', 4095, 1, 1)), ('document', 'e0e95334fce92d16c2d9db6e7caffc47'
))
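
accuracy and auc above attach directly to a classifier's probability output; a minimal sketch in which the feature width and the two-class fc network are assumptions.

# Sketch only; the feature width and the fc classifier are assumptions.
import paddle.fluid as fluid

feat = fluid.layers.data(name='feat', shape=[32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
prob = fluid.layers.fc(input=feat, size=2, act='softmax')
acc = fluid.layers.accuracy(input=prob, label=label, k=1)
auc_vars = fluid.layers.auc(input=prob, label=label, curve='ROC')
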
paddle.fluid.layers.exponential_decay
(ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)), ('document', '98a5050bee8522fcea81aa795adaba51'
))
paddle.fluid.layers.natural_exp_decay
(ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)), ('document', '676a7bc2a218691db50bca233903d21e'
))
paddle.fluid.layers.inverse_time_decay
(ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)), ('document', 'd07e767d59c4a5e6c930f3e6756d3f82'
))
paddle.fluid.layers.polynomial_decay
(ArgSpec(args=['learning_rate', 'decay_steps', 'end_learning_rate', 'power', 'cycle'], varargs=None, keywords=None, defaults=(0.0001, 1.0, False)), ('document', '882634f420f626642f0874481263da40'
))
paddle.fluid.layers.piecewise_decay
(ArgSpec(args=['boundaries', 'values'], varargs=None, keywords=None, defaults=None), ('document', 'c717d9d1d78a53c809d01b8bc56f3cae')
)
paddle.fluid.layers.noam_decay
(ArgSpec(args=['d_model', 'warmup_steps'], varargs=None, keywords=None, defaults=None), ('document', 'd9a95746353fd574be36dc28d8726c28')
)
paddle.fluid.layers.append_LARS
(ArgSpec(args=['params_grads', 'learning_rate', 'weight_decay'], varargs=None, keywords=None, defaults=None), ('document', 'd24fa1e7d62ac8a534fc6a86002f84f8')
)
paddle.fluid.layers.cosine_decay
(ArgSpec(args=['learning_rate', 'step_each_epoch', 'epochs'], varargs=None, keywords=None, defaults=None), ('document', '9588c64c26ffaef3c466e404a6af9d9b')
)
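
The decay schedules above return a learning-rate variable that can be handed straight to an optimizer; a minimal sketch with arbitrary hyperparameters.

# Sketch only; the decay hyperparameters are arbitrary.
import paddle.fluid as fluid

lr = fluid.layers.exponential_decay(
    learning_rate=0.1, decay_steps=10000, decay_rate=0.5, staircase=True)
sgd = fluid.optimizer.SGDOptimizer(learning_rate=lr)
# sgd.minimize(avg_loss)  # avg_loss comes from the rest of the network (not shown)
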
paddle.fluid.contrib.InitState.__init__
(ArgSpec(args=['self', 'init', 'shape', 'value', 'init_boot', 'need_reorder', 'dtype'], varargs=None, keywords=None, defaults=(None, None, 0.0, None, False, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.contrib.StateCell.__init__
(ArgSpec(args=['self', 'inputs', 'states', 'out_state', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.contrib.StateCell.compute_state
(ArgSpec(args=['self', 'inputs'], varargs=None, keywords=None, defaults=None), ('document', '92973b3f222081a1d17069c683cf4a99')
)
paddle.fluid.contrib.StateCell.get_input
(ArgSpec(args=['self', 'input_name'], varargs=None, keywords=None, defaults=None), ('document', '6f24a007cfa184e32f01a960703bfd70')
)
paddle.fluid.contrib.StateCell.get_state
(ArgSpec(args=['self', 'state_name'], varargs=None, keywords=None, defaults=None), ('document', '630a4945cfe659ea4f307598fbbce5d2')
)
paddle.fluid.contrib.StateCell.out_state
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '7ad681dff0393ddf13a724194e720f28')
)
paddle.fluid.contrib.StateCell.set_state
(ArgSpec(args=['self', 'state_name', 'state_value'], varargs=None, keywords=None, defaults=None), ('document', 'd4e0e08cd5d9d9a571cbc52d114f5ae9')
)
paddle.fluid.contrib.StateCell.state_updater
(ArgSpec(args=['self', 'updater'], varargs=None, keywords=None, defaults=None), ('document', 'd5afe1b7665d94fb023b15cf913ca510')
)
paddle.fluid.contrib.StateCell.update_states
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'fe0b0f1338723516a35a30247899c81b')
)
paddle.fluid.contrib.TrainingDecoder.__init__
(ArgSpec(args=['self', 'state_cell', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.contrib.TrainingDecoder.block
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '98d88fa1c989748410a12517c6a585bf')
)
paddle.fluid.contrib.TrainingDecoder.output
(ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None), ('document', 'f0a457dee586559036202087ce2eff69')
)
paddle.fluid.contrib.TrainingDecoder.static_input
(ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None), ('document', 'a024c72664fe815068423ba630b7658a')
)
paddle.fluid.contrib.TrainingDecoder.step_input
(ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None), ('document', '4659db7a888a2495e71c1838a0483909')
)
paddle.fluid.contrib.BeamSearchDecoder.__init__
(ArgSpec(args=['self', 'state_cell', 'init_ids', 'init_scores', 'target_dict_dim', 'word_dim', 'input_var_dict', 'topk_size', 'sparse_emb', 'max_len', 'beam_size', 'end_id', 'name'], varargs=None, keywords=None, defaults=({}, 50, True, 100, 1, 1, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.contrib.BeamSearchDecoder.block
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '98d88fa1c989748410a12517c6a585bf')
)
paddle.fluid.contrib.BeamSearchDecoder.decode
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '1e47c60f080c1343ebb6ceaef89656b2')
)
paddle.fluid.contrib.BeamSearchDecoder.early_stop
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '3a84a7454ed6707f79b9e954d92a7575')
)
paddle.fluid.contrib.BeamSearchDecoder.read_array
(ArgSpec(args=['self', 'init', 'is_ids', 'is_scores'], varargs=None, keywords=None, defaults=(False, False)), ('document', 'aa89eb8fd5e4cabaf5cc1bcae14665a4'
))
paddle.fluid.contrib.BeamSearchDecoder.update_array
(ArgSpec(args=['self', 'array', 'value'], varargs=None, keywords=None, defaults=None), ('document', '5754e9b3212b7c09497151516a0de5a7')
)
paddle.fluid.contrib.memory_usage
(ArgSpec(args=['program', 'batch_size'], varargs=None, keywords=None, defaults=None), ('document', '8fcb2f93bb743693baa8d4860a5ccc47')
)
paddle.fluid.contrib.op_freq_statistic
(ArgSpec(args=['program'], varargs=None, keywords=None, defaults=None), ('document', '4d43687113c4bf5b29d15aee2f4e4afa')
)
paddle.fluid.contrib.QuantizeTranspiler.__init__
(ArgSpec(args=['self', 'weight_bits', 'activation_bits', 'activation_quantize_type', 'weight_quantize_type', 'window_size'], varargs=None, keywords=None, defaults=(8, 8, 'abs_max', 'abs_max', 10000)), ('document', '14b39f1fcd5667ff556b1aad94357d1d'
))
paddle.fluid.contrib.QuantizeTranspiler.convert_to_int8
(ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.contrib.QuantizeTranspiler.freeze_program
(ArgSpec(args=['self', 'program', 'place', 'fuse_bn', 'scope'], varargs=None, keywords=None, defaults=(False, None)), ('document', '909675a1ab055c69b436a7893fcae4fd'
))
paddle.fluid.contrib.QuantizeTranspiler.training_transpile
(ArgSpec(args=['self', 'program', 'startup_program'], varargs=None, keywords=None, defaults=(None, None)), ('document', '6dd9909f10b283ba2892a99058a72884'
))
paddle.fluid.contrib.Calibrator.__init__
(ArgSpec(args=['self'], varargs='args', keywords='kwargs', defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.contrib.Calibrator.sample_data
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '3b8c85ca1e2cf753cc8c90a6c6992958')
)
paddle.fluid.contrib.Calibrator.save_int8_model
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.contrib.reader.ctr_reader.ctr_reader
(ArgSpec(args=['feed_dict', 'file_type', 'file_format', 'dense_slot_index', 'sparse_slot_index', 'capacity', 'thread_num', 'batch_size', 'file_list', 'slots', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b2ebf3de2a6ef1af2c3b88d2db7591ab'
))
paddle.fluid.contrib.build_compressor
(ArgSpec(args=['place', 'data_reader', 'data_feeder', 'scope', 'metrics', 'epoch', 'config'], varargs=None, keywords=None, defaults=(None, None, None, None, None, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.contrib.CompressPass.__init__
(ArgSpec(args=['self', 'place', 'data_reader', 'data_feeder', 'scope', 'metrics', 'epoch', 'program_exe'], varargs=None, keywords=None, defaults=(None, None, None, None, None, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.contrib.CompressPass.add_strategy
(ArgSpec(args=['self', 'strategy'], varargs=None, keywords=None, defaults=None), ('document', '3bf6010b6f47d3c86df0ec8957be95e0')
)
paddle.fluid.contrib.CompressPass.apply
(ArgSpec(args=['self', 'graph'], varargs=None, keywords=None, defaults=None), ('document', 'a92bf85d4b59bd4f2ac1706d7c4899a6')
)
paddle.fluid.contrib.ImitationGraph.__init__
(ArgSpec(args=['self', 'program'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.contrib.ImitationGraph.all_parameters
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.contrib.SensitivePruneStrategy.__init__
(ArgSpec(args=['self', 'pruner', 'start_epoch', 'end_epoch', 'delta_rate', 'acc_loss_threshold', 'sensitivities'], varargs=None, keywords=None, defaults=(None, 0, 10, 0.2, 0.2, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.contrib.SensitivePruneStrategy.on_batch_begin
(ArgSpec(args=['self', 'context'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.contrib.SensitivePruneStrategy.on_batch_end
(ArgSpec(args=['self', 'context'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.contrib.SensitivePruneStrategy.on_compress_begin
(ArgSpec(args=['self', 'context'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.contrib.SensitivePruneStrategy.on_compress_end
(ArgSpec(args=['self', 'context'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.contrib.SensitivePruneStrategy.on_epoch_begin
(ArgSpec(args=['self', 'context'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.contrib.SensitivePruneStrategy.on_epoch_end
(ArgSpec(args=['self', 'context'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.contrib.MagnitudePruner.__init__
(ArgSpec(args=['self', 'threshold'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.contrib.MagnitudePruner.prune
(ArgSpec(args=['self', 'param', 'threshold'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.contrib.RatioPruner.__init__
(ArgSpec(args=['self', 'ratios'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e7a81a325b296a9ca502ee5adb4fc85d'
))
paddle.fluid.contrib.RatioPruner.prune
(ArgSpec(args=['self', 'param', 'ratio'], varargs=None, keywords=None, defaults=(None,)), ('document', '358cbf2978c91028fb96a195a9884645'
))
paddle.fluid.contrib.load_persistables_for_increment
(ArgSpec(args=['dirname', 'executor', 'program', 'lookup_table_var', 'lookup_table_var_path'], varargs=None, keywords=None, defaults=None), ('document', '11fbf7e8dd2289805de291b453a33ee7')
)
paddle.fluid.contrib.load_persistables_for_inference
(ArgSpec(args=['dirname', 'executor', 'program', 'lookup_table_var_name'], varargs=None, keywords=None, defaults=None), ('document', '5b5577bb3d24070da819674255d16196')
)
paddle.fluid.contrib.convert_dist_to_sparse_program
(ArgSpec(args=['program'], varargs=None, keywords=None, defaults=None), ('document', '4efbd93876832d4d35497cdbc7a1e6d8')
)
paddle.fluid.contrib.HDFSClient.__init__
(ArgSpec(args=['self', 'hadoop_home', 'configs'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.contrib.HDFSClient.delete
(ArgSpec(args=['self', 'hdfs_path'], varargs=None, keywords=None, defaults=None), ('document', 'c3721aa2d4d9ef5a857dd47b2681c03e')
)
paddle.fluid.contrib.HDFSClient.download
(ArgSpec(args=['self', 'hdfs_path', 'local_path', 'overwrite', 'unzip'], varargs=None, keywords=None, defaults=(False, False)), ('document', 'ca55bde92184d3fd0f9f5c963b25e634'
))
paddle.fluid.contrib.HDFSClient.is_dir
(ArgSpec(args=['self', 'hdfs_path'], varargs=None, keywords=None, defaults=(None,)), ('document', '45bde1bae02605a205c8245b58b9156d'
))
paddle.fluid.contrib.HDFSClient.is_exist
(ArgSpec(args=['self', 'hdfs_path'], varargs=None, keywords=None, defaults=(None,)), ('document', 'be9c94bccff7ba0c1d95883ac62b5864'
))
paddle.fluid.contrib.HDFSClient.ls
(ArgSpec(args=['self', 'hdfs_path'], varargs=None, keywords=None, defaults=None), ('document', '808acac504870c7e46594b95674f8a86')
)
paddle.fluid.contrib.HDFSClient.lsr
(ArgSpec(args=['self', 'hdfs_path', 'only_file', 'sort'], varargs=None, keywords=None, defaults=(True, True)), ('document', 'fae835aa3354eb6a0434c0f9ba3c2747'
))
paddle.fluid.contrib.HDFSClient.make_local_dirs
(ArgSpec(args=['local_path'], varargs=None, keywords=None, defaults=None), ('document', 'e76b89c8e7f019b5da576c0026fcf689')
)
paddle.fluid.contrib.HDFSClient.makedirs
(ArgSpec(args=['self', 'hdfs_path'], varargs=None, keywords=None, defaults=None), ('document', '44d9972aae390aedf40aaea731a37e4b')
)
paddle.fluid.contrib.HDFSClient.rename
(ArgSpec(args=['self', 'hdfs_src_path', 'hdfs_dst_path', 'overwrite'], varargs=None, keywords=None, defaults=(False,)), ('document', '0eb133644d9a9f4da45bb39261ff0955'
))
paddle.fluid.contrib.HDFSClient.upload
(ArgSpec(args=['self', 'hdfs_path', 'local_path', 'overwrite', 'retry_times'], varargs=None, keywords=None, defaults=(False, 5)), ('document', '7d053b4bfd6dcfdd2c9dda0e0dbd9665'
))
paddle.fluid.contrib.multi_download
(ArgSpec(args=['client', 'hdfs_path', 'local_path', 'trainer_id', 'trainers', 'multi_processes'], varargs=None, keywords=None, defaults=(5,)), ('document', '100927be598ed8f9eaa1f3ef1b23568a'
))
paddle.fluid.contrib.multi_upload
(ArgSpec(args=['client', 'hdfs_path', 'local_path', 'multi_processes', 'overwrite', 'sync'], varargs=None, keywords=None, defaults=(5, False, True)), ('document', '183f34c83d30dbe16e09e8716c41958a'
))
paddle.fluid.transpiler.DistributeTranspiler.__init__
(ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.transpiler.DistributeTranspiler.get_pserver_program
(ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None), ('document', '292ab72977afbe58e6a3bde175452680')
)
paddle.fluid.transpiler.DistributeTranspiler.get_pserver_programs
(ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None), ('document', '78f4949aedf317666a89ca74b3748ba8')
)
paddle.fluid.transpiler.DistributeTranspiler.get_startup_program
(ArgSpec(args=['self', 'endpoint', 'pserver_program', 'startup_program'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'd796fc0c8d51503b556fcf6dc15c4f0c'
))
paddle.fluid.transpiler.DistributeTranspiler.get_trainer_program
(ArgSpec(args=['self', 'wait_port'], varargs=None, keywords=None, defaults=(True,)), ('document', '736330e31a7a54abccc0c7fd9119d9ff'
))
paddle.fluid.transpiler.DistributeTranspiler.transpile
(ArgSpec(args=['self', 'trainer_id', 'program', 'pservers', 'trainers', 'sync_mode', 'startup_program', 'current_endpoint'], varargs=None, keywords=None, defaults=(None, '127.0.0.1:6174', 1, True, None, '127.0.0.1:6174')), ('document', '06ce55338dfe96311ad1078235ab3bf4'))
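
The transpile / get_trainer_program / get_pserver_program calls above follow a fixed order; a hedged sketch in which the tiny regression network, the endpoints, and trainer_id are placeholder assumptions.

# Sketch only; the network, endpoints and trainer_id are placeholders.
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[4], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
loss = fluid.layers.mean(fluid.layers.square_error_cost(
    input=fluid.layers.fc(input=x, size=1), label=y))
fluid.optimizer.SGDOptimizer(learning_rate=0.01).minimize(loss)

config = fluid.transpiler.DistributeTranspilerConfig()
t = fluid.transpiler.DistributeTranspiler(config=config)
t.transpile(trainer_id=0,
            pservers='192.168.0.1:6174,192.168.0.2:6174',
            trainers=2)
trainer_prog = t.get_trainer_program()
pserver_prog = t.get_pserver_program('192.168.0.1:6174')
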
paddle.fluid.transpiler.memory_optimize
(ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level', 'skip_grads'], varargs=None, keywords=None, defaults=(None, False, 0, False)), ('document', 'eda17d0f1639bc6ca215cecf87f588a4'
))
paddle.fluid.transpiler.release_memory
(ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,)), ('document', 'ac4114d3df16264f1946deb3a8434a6f'
))
paddle.fluid.transpiler.HashName.__init__
(ArgSpec(args=['self', 'pserver_endpoints'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.transpiler.HashName.dispatch
(ArgSpec(args=['self', 'varlist'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.transpiler.HashName.reset
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.transpiler.RoundRobin.__init__
(ArgSpec(args=['self', 'pserver_endpoints'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.transpiler.RoundRobin.dispatch
(ArgSpec(args=['self', 'varlist'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.transpiler.RoundRobin.reset
(ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')
)
paddle.fluid.transpiler.DistributeTranspilerConfig.__init__
paddle.fluid.nets.simple_img_conv_pool
ArgSpec(args=['input', 'num_filters', 'filter_size', 'pool_size', 'pool_stride', 'pool_padding', 'pool_type', 'global_pooling', 'conv_stride', 'conv_padding', 'conv_dilation', 'conv_groups', 'param_attr', 'bias_attr', 'act', 'use_cudnn'], varargs=None, keywords=None, defaults=(0, 'max', False, 1, 0, 1, 1, None, None, None, True
))
paddle.fluid.nets.sequence_conv_pool
ArgSpec(args=['input', 'num_filters', 'filter_size', 'param_attr', 'act', 'pool_type', 'bias_attr'], varargs=None, keywords=None, defaults=(None, 'sigmoid', 'max', None
))
paddle.fluid.nets.glu
ArgSpec(args=['input', 'dim'], varargs=None, keywords=None, defaults=(-1,
))
paddle.fluid.nets.scaled_dot_product_attention
ArgSpec(args=['queries', 'keys', 'values', 'num_heads', 'dropout_rate'], varargs=None, keywords=None, defaults=(1, 0.0
))
paddle.fluid.nets.img_conv_group
ArgSpec(args=['input', 'conv_num_filter', 'pool_size', 'conv_padding', 'conv_filter_size', 'conv_act', 'param_attr', 'conv_with_batchnorm', 'conv_batchnorm_drop_rate', 'pool_stride', 'pool_type', 'use_cudnn'], varargs=None, keywords=None, defaults=(1, 3, None, None, False, 0.0, 1, 'max', True
))
paddle.fluid.optimizer.SGDOptimizer.__init__
ArgSpec(args=['self', 'learning_rate', 'regularization', 'name'], varargs=None, keywords=None, defaults=(None, None
))
paddle.fluid.optimizer.SGDOptimizer.apply_gradients
ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.optimizer.SGDOptimizer.backward
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None
))
paddle.fluid.optimizer.SGDOptimizer.minimize
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None
))
paddle.fluid.optimizer.MomentumOptimizer.__init__
ArgSpec(args=['self', 'learning_rate', 'momentum', 'use_nesterov', 'regularization', 'name'], varargs=None, keywords=None, defaults=(False, None, None
))
paddle.fluid.optimizer.MomentumOptimizer.apply_gradients
ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.optimizer.MomentumOptimizer.backward
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None
))
paddle.fluid.optimizer.MomentumOptimizer.minimize
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None
))
paddle.fluid.optimizer.AdagradOptimizer.__init__
ArgSpec(args=['self', 'learning_rate', 'epsilon', 'regularization', 'name', 'initial_accumulator_value'], varargs=None, keywords=None, defaults=(1e-06, None, None, 0.0
))
paddle.fluid.optimizer.AdagradOptimizer.apply_gradients
ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.optimizer.AdagradOptimizer.backward
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None
))
paddle.fluid.optimizer.AdagradOptimizer.minimize
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None
))
paddle.fluid.optimizer.AdamOptimizer.__init__
ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon', 'regularization', 'name', 'lazy_mode'], varargs=None, keywords=None, defaults=(0.001, 0.9, 0.999, 1e-08, None, None, False
))
paddle.fluid.optimizer.AdamOptimizer.apply_gradients
ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.optimizer.AdamOptimizer.backward
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None
))
paddle.fluid.optimizer.AdamOptimizer.minimize
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None
))
paddle.fluid.optimizer.AdamaxOptimizer.__init__
ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.001, 0.9, 0.999, 1e-08, None, None
))
paddle.fluid.optimizer.AdamaxOptimizer.apply_gradients
ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.optimizer.AdamaxOptimizer.backward
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None
))
paddle.fluid.optimizer.AdamaxOptimizer.minimize
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None
))
paddle.fluid.optimizer.DecayedAdagradOptimizer.__init__
ArgSpec(args=['self', 'learning_rate', 'decay', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.95, 1e-06, None, None
))
paddle.fluid.optimizer.DecayedAdagradOptimizer.apply_gradients
ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.optimizer.DecayedAdagradOptimizer.backward
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None
))
paddle.fluid.optimizer.DecayedAdagradOptimizer.minimize
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None
))
paddle.fluid.optimizer.FtrlOptimizer.__init__
ArgSpec(args=['self', 'learning_rate', 'l1', 'l2', 'lr_power', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.0, 0.0, -0.5, None, None
))
paddle.fluid.optimizer.FtrlOptimizer.apply_gradients
ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.optimizer.FtrlOptimizer.backward
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None
))
paddle.fluid.optimizer.FtrlOptimizer.minimize
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None
))
paddle.fluid.optimizer.RMSPropOptimizer.__init__
ArgSpec(args=['self', 'learning_rate', 'rho', 'epsilon', 'momentum', 'centered', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.95, 1e-06, 0.0, False, None, None
))
paddle.fluid.optimizer.RMSPropOptimizer.apply_gradients
ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.optimizer.RMSPropOptimizer.backward
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None
))
paddle.fluid.optimizer.RMSPropOptimizer.minimize
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None
))
paddle.fluid.optimizer.AdadeltaOptimizer.__init__
ArgSpec(args=['self', 'learning_rate', 'epsilon', 'rho', 'regularization', 'name'], varargs=None, keywords=None, defaults=(1e-06, 0.95, None, None
))
paddle.fluid.optimizer.AdadeltaOptimizer.apply_gradients
ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.optimizer.AdadeltaOptimizer.backward
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None
))
paddle.fluid.optimizer.AdadeltaOptimizer.minimize
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None
))
paddle.fluid.optimizer.ModelAverage.__init__
ArgSpec(args=['self', 'average_window_rate', 'min_average_window', 'max_average_window', 'regularization', 'name'], varargs=None, keywords=None, defaults=(10000, 10000, None, None
))
paddle.fluid.optimizer.ModelAverage.apply
ArgSpec(args=['self', 'executor', 'need_restore'], varargs=None, keywords=None, defaults=(True,
))
paddle.fluid.optimizer.ModelAverage.apply_gradients
ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.optimizer.ModelAverage.backward
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None
))
paddle.fluid.optimizer.ModelAverage.minimize
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None
))
paddle.fluid.optimizer.ModelAverage.restore
ArgSpec(args=['self', 'executor'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.optimizer.LarsMomentumOptimizer.__init__
ArgSpec(args=['self', 'learning_rate', 'momentum', 'lars_coeff', 'lars_weight_decay', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.001, 0.0005, None, None
))
paddle.fluid.optimizer.LarsMomentumOptimizer.apply_gradients
ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None
)
paddle.fluid.optimizer.LarsMomentumOptimizer.backward
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None
))
paddle.fluid.optimizer.LarsMomentumOptimizer.minimize
ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None
))
paddle.fluid.backward.append_backward
ArgSpec(args=['loss', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None
))
paddle.fluid.regularizer.L1DecayRegularizer.__init__
ArgSpec(args=['self', 'regularization_coeff'], varargs=None, keywords=None, defaults=(0.0,
))
paddle.fluid.regularizer.L2DecayRegularizer.__init__
ArgSpec(args=['self', 'regularization_coeff'], varargs=None, keywords=None, defaults=(0.0,
))
paddle.fluid.nets.simple_img_conv_pool
(ArgSpec(args=['input', 'num_filters', 'filter_size', 'pool_size', 'pool_stride', 'pool_padding', 'pool_type', 'global_pooling', 'conv_stride', 'conv_padding', 'conv_dilation', 'conv_groups', 'param_attr', 'bias_attr', 'act', 'use_cudnn'], varargs=None, keywords=None, defaults=(0, 'max', False, 1, 0, 1, 1, None, None, None, True)), ('document', 'e0f67f35abf27f666f81003113b90244'
))
paddle.fluid.nets.sequence_conv_pool
(ArgSpec(args=['input', 'num_filters', 'filter_size', 'param_attr', 'act', 'pool_type', 'bias_attr'], varargs=None, keywords=None, defaults=(None, 'sigmoid', 'max', None)), ('document', '48c434dd7bb827f69d90e5135d77470f'
))
paddle.fluid.nets.glu
(ArgSpec(args=['input', 'dim'], varargs=None, keywords=None, defaults=(-1,)), ('document', '08c1c57e1db6b20bf87b264cb7cf3ca8'
))
paddle.fluid.nets.scaled_dot_product_attention
(ArgSpec(args=['queries', 'keys', 'values', 'num_heads', 'dropout_rate'], varargs=None, keywords=None, defaults=(1, 0.0)), ('document', '921714c9bfb351b41403418265393203'
))
paddle.fluid.nets.img_conv_group
(ArgSpec(args=['input', 'conv_num_filter', 'pool_size', 'conv_padding', 'conv_filter_size', 'conv_act', 'param_attr', 'conv_with_batchnorm', 'conv_batchnorm_drop_rate', 'pool_stride', 'pool_type', 'use_cudnn'], varargs=None, keywords=None, defaults=(1, 3, None, None, False, 0.0, 1, 'max', True)), ('document', '3802be78fbfb206dae64a2d9f8480970'
))
paddle.fluid.optimizer.SGDOptimizer.__init__
(ArgSpec(args=['self', 'learning_rate', 'regularization', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.optimizer.SGDOptimizer.apply_gradients
(ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871')
)
paddle.fluid.optimizer.SGDOptimizer.backward
(ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'
))
paddle.fluid.optimizer.SGDOptimizer.minimize
(ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'
))
paddle.fluid.optimizer.MomentumOptimizer.__init__
(ArgSpec(args=['self', 'learning_rate', 'momentum', 'use_nesterov', 'regularization', 'name'], varargs=None, keywords=None, defaults=(False, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.optimizer.MomentumOptimizer.apply_gradients
(ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871')
)
paddle.fluid.optimizer.MomentumOptimizer.backward
(ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'
))
paddle.fluid.optimizer.MomentumOptimizer.minimize
(ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'
))
paddle.fluid.optimizer.AdagradOptimizer.__init__
(ArgSpec(args=['self', 'learning_rate', 'epsilon', 'regularization', 'name', 'initial_accumulator_value'], varargs=None, keywords=None, defaults=(1e-06, None, None, 0.0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.optimizer.AdagradOptimizer.apply_gradients
(ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871')
)
paddle.fluid.optimizer.AdagradOptimizer.backward
(ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'
))
paddle.fluid.optimizer.AdagradOptimizer.minimize
(ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'
))
paddle.fluid.optimizer.AdamOptimizer.__init__
(ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon', 'regularization', 'name', 'lazy_mode'], varargs=None, keywords=None, defaults=(0.001, 0.9, 0.999, 1e-08, None, None, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.optimizer.AdamOptimizer.apply_gradients
(ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871')
)
paddle.fluid.optimizer.AdamOptimizer.backward
(ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'
))
paddle.fluid.optimizer.AdamOptimizer.minimize
(ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'
))
paddle.fluid.optimizer.AdamaxOptimizer.__init__
(ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.001, 0.9, 0.999, 1e-08, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.optimizer.AdamaxOptimizer.apply_gradients
(ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871')
)
paddle.fluid.optimizer.AdamaxOptimizer.backward
(ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'
))
paddle.fluid.optimizer.AdamaxOptimizer.minimize
(ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'
))
paddle.fluid.optimizer.DecayedAdagradOptimizer.__init__
(ArgSpec(args=['self', 'learning_rate', 'decay', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.95, 1e-06, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'
))
paddle.fluid.optimizer.DecayedAdagradOptimizer.apply_gradients
(ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871')
)
paddle.fluid.optimizer.DecayedAdagradOptimizer.backward
(ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'
))
paddle.fluid.optimizer.DecayedAdagradOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'))
paddle.fluid.optimizer.FtrlOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'l1', 'l2', 'lr_power', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.0, 0.0, -0.5, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.optimizer.FtrlOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
paddle.fluid.optimizer.FtrlOptimizer.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'))
paddle.fluid.optimizer.FtrlOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'))
paddle.fluid.optimizer.RMSPropOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'rho', 'epsilon', 'momentum', 'centered', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.95, 1e-06, 0.0, False, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.optimizer.RMSPropOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
paddle.fluid.optimizer.RMSPropOptimizer.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'))
paddle.fluid.optimizer.RMSPropOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'))
paddle.fluid.optimizer.AdadeltaOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'epsilon', 'rho', 'regularization', 'name'], varargs=None, keywords=None, defaults=(1e-06, 0.95, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.optimizer.AdadeltaOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
paddle.fluid.optimizer.AdadeltaOptimizer.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'))
paddle.fluid.optimizer.AdadeltaOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'))
paddle.fluid.optimizer.ModelAverage.__init__ (ArgSpec(args=['self', 'average_window_rate', 'min_average_window', 'max_average_window', 'regularization', 'name'], varargs=None, keywords=None, defaults=(10000, 10000, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.optimizer.ModelAverage.apply (ArgSpec(args=['self', 'executor', 'need_restore'], varargs=None, keywords=None, defaults=(True,)), ('document', '46234a5470590feb336346f70a3db715'))
paddle.fluid.optimizer.ModelAverage.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
paddle.fluid.optimizer.ModelAverage.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'))
paddle.fluid.optimizer.ModelAverage.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'))
paddle.fluid.optimizer.ModelAverage.restore (ArgSpec(args=['self', 'executor'], varargs=None, keywords=None, defaults=None), ('document', '18db9c70be9c4dd466f9844457b21bfe'))
paddle.fluid.optimizer.LarsMomentumOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'momentum', 'lars_coeff', 'lars_weight_decay', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.001, 0.0005, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.optimizer.LarsMomentumOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
paddle.fluid.optimizer.LarsMomentumOptimizer.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'))
paddle.fluid.optimizer.LarsMomentumOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'))
paddle.fluid.backward.append_backward (ArgSpec(args=['loss', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '1a79bd7d10ae54ca763ec81bca36ba24'))
paddle.fluid.regularizer.L1DecayRegularizer.__init__ (ArgSpec(args=['self', 'regularization_coeff'], varargs=None, keywords=None, defaults=(0.0,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.regularizer.L2DecayRegularizer.__init__ (ArgSpec(args=['self', 'regularization_coeff'], varargs=None, keywords=None, defaults=(0.0,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.LoDTensor.__init__ 1. __init__(self: paddle.fluid.core.LoDTensor, arg0: List[List[int]]) -> None 2. __init__(self: paddle.fluid.core.LoDTensor) -> None
paddle.fluid.LoDTensor.has_valid_recursive_sequence_lengths has_valid_recursive_sequence_lengths(self: paddle.fluid.core.LoDTensor) -> bool
paddle.fluid.LoDTensor.lod lod(self: paddle.fluid.core.LoDTensor) -> List[List[int]]
...
...
@@ -483,38 +483,38 @@ paddle.fluid.LoDTensorArray.append append(self: paddle.fluid.core.LoDTensorArray
paddle.fluid.CPUPlace.__init__ __init__(self: paddle.fluid.core.CPUPlace) -> None
paddle.fluid.CUDAPlace.__init__ __init__(self: paddle.fluid.core.CUDAPlace, arg0: int) -> None
paddle.fluid.CUDAPinnedPlace.__init__ __init__(self: paddle.fluid.core.CUDAPinnedPlace) -> None
paddle.fluid.ParamAttr.__init__ ArgSpec(args=['self', 'name', 'initializer', 'learning_rate', 'regularizer', 'trainable', 'gradient_clip', 'do_model_average'], varargs=None, keywords=None, defaults=(None, None, 1.0, None, True, None, False))
paddle.fluid.WeightNormParamAttr.__init__ ArgSpec(args=['self', 'dim', 'name', 'initializer', 'learning_rate', 'regularizer', 'trainable', 'gradient_clip', 'do_model_average'], varargs=None, keywords=None, defaults=(None, None, None, 1.0, None, True, None, False))
paddle.fluid.DataFeeder.__init__ ArgSpec(args=['self', 'feed_list', 'place', 'program'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.DataFeeder.decorate_reader ArgSpec(args=['self', 'reader', 'multi_devices', 'num_places', 'drop_last'], varargs=None, keywords=None, defaults=(None, True))
paddle.fluid.DataFeeder.feed ArgSpec(args=['self', 'iterable'], varargs=None, keywords=None, defaults=None)
paddle.fluid.DataFeeder.feed_parallel ArgSpec(args=['self', 'iterable', 'num_places'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.clip.ErrorClipByValue.__init__ ArgSpec(args=['self', 'max', 'min'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.clip.GradientClipByValue.__init__ ArgSpec(args=['self', 'max', 'min'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.clip.GradientClipByNorm.__init__ ArgSpec(args=['self', 'clip_norm'], varargs=None, keywords=None, defaults=None)
paddle.fluid.clip.GradientClipByGlobalNorm.__init__ ArgSpec(args=['self', 'clip_norm', 'group_name'], varargs=None, keywords=None, defaults=('default_group',))
paddle.fluid.profiler.cuda_profiler ArgSpec(args=['output_file', 'output_mode', 'config'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.profiler.reset_profiler ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
paddle.fluid.profiler.profiler ArgSpec(args=['state', 'sorted_key', 'profile_path'], varargs=None, keywords=None, defaults=(None, '/tmp/profile'))
paddle.fluid.profiler.start_profiler ArgSpec(args=['state'], varargs=None, keywords=None, defaults=None)
paddle.fluid.profiler.stop_profiler ArgSpec(args=['sorted_key', 'profile_path'], varargs=None, keywords=None, defaults=(None, '/tmp/profile'))
paddle.fluid.unique_name.generate ArgSpec(args=['key'], varargs=None, keywords=None, defaults=None)
paddle.fluid.unique_name.switch ArgSpec(args=['new_generator'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.unique_name.guard ArgSpec(args=['new_generator'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.recordio_writer.convert_reader_to_recordio_file ArgSpec(args=['filename', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None))
paddle.fluid.recordio_writer.convert_reader_to_recordio_files ArgSpec(args=['filename', 'batch_per_file', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None))
paddle.fluid.ParamAttr.__init__ (ArgSpec(args=['self', 'name', 'initializer', 'learning_rate', 'regularizer', 'trainable', 'gradient_clip', 'do_model_average'], varargs=None, keywords=None, defaults=(None, None, 1.0, None, True, None, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.WeightNormParamAttr.__init__ (ArgSpec(args=['self', 'dim', 'name', 'initializer', 'learning_rate', 'regularizer', 'trainable', 'gradient_clip', 'do_model_average'], varargs=None, keywords=None, defaults=(None, None, None, 1.0, None, True, None, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.DataFeeder.__init__ (ArgSpec(args=['self', 'feed_list', 'place', 'program'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.DataFeeder.decorate_reader (ArgSpec(args=['self', 'reader', 'multi_devices', 'num_places', 'drop_last'], varargs=None, keywords=None, defaults=(None, True)), ('document', '0eed2f198dc73c08a41b61edbc755753'))
paddle.fluid.DataFeeder.feed (ArgSpec(args=['self', 'iterable'], varargs=None, keywords=None, defaults=None), ('document', '459e316301279dfd82001b46f0b8ffca'))
paddle.fluid.DataFeeder.feed_parallel (ArgSpec(args=['self', 'iterable', 'num_places'], varargs=None, keywords=None, defaults=(None,)), ('document', '543863d1f9d4853758adb613b8659e85'))
paddle.fluid.clip.ErrorClipByValue.__init__ (ArgSpec(args=['self', 'max', 'min'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.clip.GradientClipByValue.__init__ (ArgSpec(args=['self', 'max', 'min'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.clip.GradientClipByNorm.__init__ (ArgSpec(args=['self', 'clip_norm'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.clip.GradientClipByGlobalNorm.__init__ (ArgSpec(args=['self', 'clip_norm', 'group_name'], varargs=None, keywords=None, defaults=('default_group',)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.profiler.cuda_profiler (ArgSpec(args=['output_file', 'output_mode', 'config'], varargs=None, keywords=None, defaults=(None, None)), ('document', '2e2fb1cfc469a67f19fb578a2ed6be79'))
paddle.fluid.profiler.reset_profiler (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '397ce757fabbe5c622e0c3458c41fcd0'))
paddle.fluid.profiler.profiler (ArgSpec(args=['state', 'sorted_key', 'profile_path'], varargs=None, keywords=None, defaults=(None, '/tmp/profile')), ('document', 'bd3a07eeb68e384f4d2d416cb2e28d86'))
paddle.fluid.profiler.start_profiler (ArgSpec(args=['state'], varargs=None, keywords=None, defaults=None), ('document', '88da8fb6dbebaee2f7520188a09574f9'))
paddle.fluid.profiler.stop_profiler (ArgSpec(args=['sorted_key', 'profile_path'], varargs=None, keywords=None, defaults=(None, '/tmp/profile')), ('document', 'a7500e39dd033f1e64f562e909333a8a'))
paddle.fluid.unique_name.generate (ArgSpec(args=['key'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.unique_name.switch (ArgSpec(args=['new_generator'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.unique_name.guard (ArgSpec(args=['new_generator'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.recordio_writer.convert_reader_to_recordio_file (ArgSpec(args=['filename', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None)), ('document', '65c7523e86f0c50bb729b01667f36310'))
paddle.fluid.recordio_writer.convert_reader_to_recordio_files (ArgSpec(args=['filename', 'batch_per_file', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None)), ('document', 'bc643f0f5f1b9db57ff0d8a57d379bd7'))
paddle.fluid.Scope Scope() -> paddle.fluid.core._Scope
paddle.reader.map_readers ArgSpec(args=['func'], varargs='readers', keywords=None, defaults=None)
paddle.reader.buffered ArgSpec(args=['reader', 'size'], varargs=None, keywords=None, defaults=None)
paddle.reader.compose ArgSpec(args=[], varargs='readers', keywords='kwargs', defaults=None)
paddle.reader.chain ArgSpec(args=[], varargs='readers', keywords=None, defaults=None)
paddle.reader.shuffle ArgSpec(args=['reader', 'buf_size'], varargs=None, keywords=None, defaults=None)
paddle.reader.firstn ArgSpec(args=['reader', 'n'], varargs=None, keywords=None, defaults=None)
paddle.reader.xmap_readers ArgSpec(args=['mapper', 'reader', 'process_num', 'buffer_size', 'order'], varargs=None, keywords=None, defaults=(False,))
paddle.reader.PipeReader.__init__ ArgSpec(args=['self', 'command', 'bufsize', 'file_type'], varargs=None, keywords=None, defaults=(8192, 'plain'))
paddle.reader.PipeReader.get_line ArgSpec(args=['self', 'cut_lines', 'line_break'], varargs=None, keywords=None, defaults=(True, '\n'))
paddle.reader.multiprocess_reader ArgSpec(args=['readers', 'use_pipe', 'queue_size'], varargs=None, keywords=None, defaults=(True, 1000))
paddle.reader.Fake.__init__ ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.reader.creator.np_array ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None)
paddle.reader.creator.text_file ArgSpec(args=['path'], varargs=None, keywords=None, defaults=None)
paddle.reader.creator.recordio ArgSpec(args=['paths', 'buf_size'], varargs=None, keywords=None, defaults=(100,))
paddle.reader.map_readers (ArgSpec(args=['func'], varargs='readers', keywords=None, defaults=None), ('document', '77cbadb09df588e21e5cc0819b69c87d'))
paddle.reader.buffered (ArgSpec(args=['reader', 'size'], varargs=None, keywords=None, defaults=None), ('document', '0d6186f109feceb99f60ec50a0a624cb'))
paddle.reader.compose (ArgSpec(args=[], varargs='readers', keywords='kwargs', defaults=None), ('document', '884291104e1c3f37f33aae44b7deeb0d'))
paddle.reader.chain (ArgSpec(args=[], varargs='readers', keywords=None, defaults=None), ('document', 'd22c34e379a53901ae67a6bca7f4def4'))
paddle.reader.shuffle (ArgSpec(args=['reader', 'buf_size'], varargs=None, keywords=None, defaults=None), ('document', 'e42ea6fee23ce26b23cb142cd1d6522d'))
paddle.reader.firstn (ArgSpec(args=['reader', 'n'], varargs=None, keywords=None, defaults=None), ('document', 'c5bb8f7dd4f917f1569a368aab5b8aad'))
paddle.reader.xmap_readers (ArgSpec(args=['mapper', 'reader', 'process_num', 'buffer_size', 'order'], varargs=None, keywords=None, defaults=(False,)), ('document', '283bc0b8a0e26ae186b8b9bee4aec560'))
paddle.reader.PipeReader.__init__ (ArgSpec(args=['self', 'command', 'bufsize', 'file_type'], varargs=None, keywords=None, defaults=(8192, 'plain')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.reader.PipeReader.get_line (ArgSpec(args=['self', 'cut_lines', 'line_break'], varargs=None, keywords=None, defaults=(True, '\n')), ('document', '5f80a7ed70052f01665e4c74acccfa69'))
paddle.reader.multiprocess_reader (ArgSpec(args=['readers', 'use_pipe', 'queue_size'], varargs=None, keywords=None, defaults=(True, 1000)), ('document', '7d8b3a96e592107c893d5d51ce968ba0'))
paddle.reader.Fake.__init__ (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.reader.creator.np_array (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', '28d457fbc9a71efa4ac91a3be179cada'))
paddle.reader.creator.text_file (ArgSpec(args=['path'], varargs=None, keywords=None, defaults=None), ('document', '44fe286ab6175a5464d3a961a68c266a'))
paddle.reader.creator.recordio (ArgSpec(args=['paths', 'buf_size'], varargs=None, keywords=None, defaults=(100,)), ('document', '11b3704ea42cfd537953387a7e58dae8'))
paddle/fluid/framework/CMakeLists.txt
...
...
@@ -38,10 +38,10 @@ if(WITH_GPU)
    nv_library(tensor SRCS tensor.cc .tensor_util.cu DEPS place memory data_type device_context)
    add_dependencies(tensor tensor_util)
  else()
-    nv_library(tensor SRCS tensor.cc tensor_util.cu DEPS place memory data_type device_context)
+    nv_library(tensor SRCS tensor.cc tensor_util.cu DEPS place memory data_type device_context profiler)
  endif(WIN32)
else()
-  cc_library(tensor SRCS tensor.cc tensor_util.cc DEPS place memory data_type device_context)
+  cc_library(tensor SRCS tensor.cc tensor_util.cc DEPS place memory data_type device_context profiler)
endif()
cc_test(tensor_test SRCS tensor_test.cc DEPS tensor)
...
...
paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc
...
...
@@ -12,7 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h"
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "paddle/fluid/framework/details/fetch_op_handle.h"
#include "paddle/fluid/framework/details/multi_devices_helper.h"
...
...
@@ -55,7 +57,7 @@ FeedFetchList FastThreadedSSAGraphExecutor::Run(
  std::vector<FetchOpHandle *> fetch_ops;

  for (auto &fetch_var_name : fetch_tensors) {
-    for (auto &var_map : graph_->Get<details::GraphVars>("vars")) {
+    for (auto &var_map : graph_->Get<details::GraphVars>(details::kGraphVars)) {
      auto it = var_map.find(fetch_var_name);
      if (it != var_map.end()) {
        fetched_vars[fetch_var_name].push_back(*it->second.rbegin());
...
...
paddle/fluid/framework/tensor_util.cc
...
...
@@ -14,8 +14,11 @@
#include "paddle/fluid/framework/tensor_util.h"
#include <algorithm>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/platform/profiler.h"
namespace paddle {
namespace framework {
...
...
@@ -135,16 +138,19 @@ void TensorCopySync(const Tensor& src, const platform::Place& dst_place,
#ifdef PADDLE_WITH_CUDA
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
+    platform::RecordEvent record_event("TensorCopy:GPU->CPU");
    auto src_gpu_place = boost::get<platform::CUDAPlace>(src_place);
    auto dst_cpu_place = boost::get<platform::CPUPlace>(dst_place);
    memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr);
  } else if (platform::is_cpu_place(src_place) &&
             platform::is_gpu_place(dst_place)) {
+    platform::RecordEvent record_event("TensorCopy:CPU->GPU");
    auto src_cpu_place = boost::get<platform::CPUPlace>(src_place);
    auto dst_gpu_place = boost::get<platform::CUDAPlace>(dst_place);
    memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, nullptr);
  } else if (platform::is_gpu_place(src_place) &&
             platform::is_gpu_place(dst_place)) {
+    platform::RecordEvent record_event("TensorCopy:GPU->GPU");
    if (src_ptr == dst_ptr && platform::is_same_place(src_place, dst_place)) {
      VLOG(3) << "Skip copy the same data from " << src_place << " to "
              << dst_place;
...
...
@@ -155,6 +161,7 @@ void TensorCopySync(const Tensor& src, const platform::Place& dst_place,
    memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr);
  } else if (platform::is_cuda_pinned_place(src_place) &&
             platform::is_gpu_place(dst_place)) {
+    platform::RecordEvent record_event("TensorCopy:CUDAPinned->GPU");
    auto src_pinned_place = boost::get<platform::CUDAPinnedPlace>(src_place);
    auto dst_gpu_place = boost::get<platform::CUDAPlace>(dst_place);
    memory::Copy(dst_gpu_place, dst_ptr, src_pinned_place, src_ptr, size,
...
...
paddle/fluid/memory/CMakeLists.txt
add_subdirectory(detail)
add_subdirectory(allocation)
-cc_library(malloc SRCS malloc.cc DEPS place enforce allocator_facade)
+cc_library(malloc SRCS malloc.cc DEPS place enforce allocator_facade profiler)
cc_library(memcpy SRCS memcpy.cc DEPS place)

cc_library(memory
...
...
paddle/fluid/memory/memcpy.cc
...
...
@@ -15,6 +15,7 @@ limitations under the License. */
#include "paddle/fluid/memory/memcpy.h"
#include <cstring> // for memcpy
#include "paddle/fluid/platform/profiler.h"
namespace paddle {
namespace memory {
...
...
@@ -29,14 +30,23 @@ void Copy<platform::CPUPlace, platform::CPUPlace>(platform::CPUPlace, void* dst,
#ifdef PADDLE_WITH_CUDA
static constexpr size_t kMaxGpuAsyncCopyBytes = 64 * 1024;  // 64K

+// NOTE(zcd): Do not use GpuMemcpySync as much as possible.
+// because GpuMemcpySync issues the copying command to the default stream,
+// which will make two commands from different streams cannot run concurrently.
+// Reference:
+// https://devblogs.nvidia.com/gpu-pro-tip-cuda-7-streams-simplify-concurrency/
+
template <>
void Copy<platform::CPUPlace, platform::CUDAPlace>(
    platform::CPUPlace dst_place, void* dst, platform::CUDAPlace src_place,
    const void* src, size_t num, cudaStream_t stream) {
  platform::SetDeviceId(src_place.device);
  if (stream) {
+    platform::RecordEvent record_event("GpuMemcpyAsync:GPU->CPU");
    platform::GpuMemcpyAsync(dst, src, num, cudaMemcpyDeviceToHost, stream);
  } else {
+    platform::RecordEvent record_event("GpuMemcpySync:GPU->CPU");
    platform::GpuMemcpySync(dst, src, num, cudaMemcpyDeviceToHost);
    // FIXME(zjl): do we really need it?
    if (num <= kMaxGpuAsyncCopyBytes) {
...
...
@@ -51,8 +61,10 @@ void Copy<platform::CUDAPlace, platform::CPUPlace>(
    const void* src, size_t num, cudaStream_t stream) {
  platform::SetDeviceId(dst_place.device);
  if (stream) {
+    platform::RecordEvent record_event("GpuMemcpyAsync:CPU->GPU");
    platform::GpuMemcpyAsync(dst, src, num, cudaMemcpyHostToDevice, stream);
  } else {
+    platform::RecordEvent record_event("GpuMemcpySync:CPU->GPU");
    platform::GpuMemcpySync(dst, src, num, cudaMemcpyHostToDevice);
    // FIXME(zjl): do we really need it?
    if (num <= kMaxGpuAsyncCopyBytes) {
...
...
@@ -68,15 +80,19 @@ void Copy<platform::CUDAPlace, platform::CUDAPlace>(
  if (dst_place == src_place) {
    platform::SetDeviceId(src_place.device);
    if (stream) {
+      platform::RecordEvent record_event("GpuMemcpyAsync(same_gpu):GPU->GPU");
      platform::GpuMemcpyAsync(dst, src, num, cudaMemcpyDeviceToDevice, stream);
    } else {
+      platform::RecordEvent record_event("GpuMemcpySync(same_gpu):GPU->GPU");
      platform::GpuMemcpySync(dst, src, num, cudaMemcpyDeviceToDevice);
    }
  } else {
    if (stream) {
+      platform::RecordEvent record_event("GpuMemcpyPeerAsync:GPU->GPU");
      platform::GpuMemcpyPeerAsync(dst, dst_place.device, src, src_place.device,
                                   num, stream);
    } else {
+      platform::RecordEvent record_event("GpuMemcpyPeerSync:GPU->GPU");
      platform::GpuMemcpyPeerSync(dst, dst_place.device, src, src_place.device,
                                  num);
    }
...
...
@@ -111,8 +127,10 @@ void Copy<platform::CUDAPinnedPlace, platform::CUDAPlace>(
    cudaStream_t stream) {
  platform::SetDeviceId(src_place.device);
  if (stream) {
+    platform::RecordEvent record_event("GpuMemcpyAsync:GPU->CUDAPinned");
    platform::GpuMemcpyAsync(dst, src, num, cudaMemcpyDeviceToHost, stream);
  } else {
+    platform::RecordEvent record_event("GpuMemcpySync:GPU->CUDAPinned");
    platform::GpuMemcpySync(dst, src, num, cudaMemcpyDeviceToHost);
  }
}
...
...
@@ -124,8 +142,10 @@ void Copy<platform::CUDAPlace, platform::CUDAPinnedPlace>(
    cudaStream_t stream) {
  platform::SetDeviceId(dst_place.device);
  if (stream) {
+    platform::RecordEvent record_event("GpuMemcpyAsync:CUDAPinned->GPU");
    platform::GpuMemcpyAsync(dst, src, num, cudaMemcpyHostToDevice, stream);
  } else {
+    platform::RecordEvent record_event("GpuMemcpySync:CUDAPinned->GPU");
    platform::GpuMemcpySync(dst, src, num, cudaMemcpyHostToDevice);
  }
}
...
...
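The RecordEvent annotations added to memcpy.cc above (e.g. "GpuMemcpyAsync:CPU->GPU") surface in the regular profiler report. Below is a minimal sketch of how such a report can be collected from Python, assuming a CUDA build and using the paddle.fluid.profiler API listed in API.spec; the toy network and variable names are illustrative only and are not part of this commit.

import numpy
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler

# Toy network; 'data' and the fc layer are placeholders for a real model.
data = fluid.layers.data(name='data', shape=[32], dtype='float32')
fc = fluid.layers.fc(input=data, size=64)

place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

# state='All' profiles CPU and GPU; sorted_key='total' sorts the report by
# accumulated time, which makes the new memcpy annotations easy to spot.
with profiler.profiler('All', 'total', '/tmp/profile'):
    for _ in range(10):
        x = numpy.random.random((8, 32)).astype('float32')
        exe.run(fluid.default_main_program(),
                feed={'data': x},
                fetch_list=[fc])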
paddle/fluid/operators/ngraph/ngraph_bridge.cc
...
...
@@ -14,6 +14,7 @@ limitations under the License. */
#include <algorithm>
#include <functional>
#include <memory>
#include <vector>
#include "ngraph/ngraph.hpp"
...
...
paddle/fluid/operators/ngraph/ngraph_bridge.h
...
...
@@ -16,6 +16,7 @@ limitations under the License. */
#include <algorithm>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
...
...
paddle/fluid/operators/ngraph/ops/accuracy_op.h
...
...
@@ -14,7 +14,9 @@ limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
...
...
paddle/fluid/operators/ngraph/ops/activation_op.h
...
...
@@ -14,7 +14,9 @@ limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
...
...
paddle/fluid/operators/ngraph/ops/batch_norm_op.h
...
...
@@ -14,7 +14,9 @@ limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
...
...
paddle/fluid/operators/ngraph/ops/binary_unary_op.h
...
...
@@ -14,7 +14,9 @@ limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
...
...
paddle/fluid/operators/ngraph/ops/conv2d_op.h
...
...
@@ -14,7 +14,9 @@ limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
...
...
paddle/fluid/operators/ngraph/ops/cross_entropy_op.h
...
...
@@ -15,7 +15,9 @@ limitations under the License. */
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
...
...
paddle/fluid/operators/ngraph/ops/elementwise_add_op.h
...
...
@@ -14,7 +14,9 @@ limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
...
...
paddle/fluid/operators/ngraph/ops/fill_constant_op.h
...
...
@@ -14,7 +14,9 @@ limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
...
...
paddle/fluid/operators/ngraph/ops/mean_op.h
...
...
@@ -15,7 +15,9 @@ limitations under the License. */
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h"
...
...
paddle/fluid/operators/ngraph/ops/momentum_op.h
...
...
@@ -14,7 +14,9 @@ limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
...
...
paddle/fluid/operators/ngraph/ops/mul_op.h
...
...
@@ -14,7 +14,9 @@ limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
...
...
paddle/fluid/operators/ngraph/ops/pool2d_op.h
...
...
@@ -14,7 +14,9 @@ limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
...
...
paddle/fluid/operators/ngraph/ops/scale_op.h
...
...
@@ -14,7 +14,9 @@ limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
...
...
paddle/fluid/operators/ngraph/ops/softmax_op.h
...
...
@@ -14,7 +14,9 @@ limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h"
...
...
paddle/fluid/operators/ngraph/ops/top_k_op.h
...
...
@@ -14,7 +14,9 @@ limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include "ngraph/ngraph.hpp"
#include "paddle/fluid/operators/ngraph/ops/op_bridge.h"
#include "paddle/fluid/platform/ngraph_helper.h"
...
...
paddle/fluid/operators/reader/buffered_reader.cc
...
...
@@ -13,9 +13,11 @@
// limitations under the License.
#include "paddle/fluid/operators/reader/buffered_reader.h"
#include <memory>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/platform/profiler.h"
namespace paddle {
namespace operators {
namespace reader {
...
...
@@ -49,9 +51,10 @@ BufferedReader::BufferedReader(
                .Get(place_)))
            ->stream();
    events.resize(buffer_size);
-    for (auto &event : events)
-      PADDLE_ENFORCE(cudaEventCreateWithFlags(&event, cudaEventDisableTiming));
-    PADDLE_ENFORCE(cudaStreamCreate(&stream));
+    for (auto &event : events) {
+      PADDLE_ENFORCE(cudaEventCreateWithFlags(&event, cudaEventDisableTiming));
+    }
+    PADDLE_ENFORCE(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
  }
#endif
  cpu_buffer_.resize(buffer_size);
...
...
@@ -83,12 +86,15 @@ void BufferedReader::ReadAsync(size_t i) {
#ifdef PADDLE_WITH_CUDA
  // NOTE(liangdun): using async copy instead of TensorCopySync
-  // TensorCopySync would block other stream
+  // TensorCopySync would block other stream, because TensorCopySync
+  // issues the copying command to the default stream, it will make two
+  // commands from different streams cannot run concurrently.
  if (platform::is_gpu_place(place_)) {
    platform::SetDeviceId(boost::get<platform::CUDAPlace>(place_).device);
    PADDLE_ENFORCE(cudaStreamWaitEvent(stream, events[i], 0));
    TensorVec &gpu = gpu_buffer_[i];
    gpu.resize(cpu.size());
+    platform::RecordEvent record_event("BufferedReader:MemoryCopy");
    for (size_t i = 0; i < cpu.size(); ++i) {
      gpu[i].Resize(cpu[i].dims());
      gpu[i].set_layout(cpu[i].layout());
...
...
@@ -97,20 +103,19 @@ void BufferedReader::ReadAsync(size_t i) {
      auto gpu_ptr = gpu[i].mutable_data(place_, cpu[i].type());
      auto size =
          cpu[i].numel() * paddle::framework::SizeOfType(cpu[i].type());
-      if (platform::is_cuda_pinned_place(cpu_place))
+      if (platform::is_cuda_pinned_place(cpu_place)) {
        memory::Copy(boost::get<platform::CUDAPlace>(place_), gpu_ptr,
                     boost::get<platform::CUDAPinnedPlace>(cpu_place),
                     cpu_ptr, size, stream);
-      else if ((platform::is_gpu_place(cpu_place)))
+      } else if ((platform::is_gpu_place(cpu_place))) {
        memory::Copy(boost::get<platform::CUDAPlace>(place_), gpu_ptr,
                     boost::get<platform::CUDAPlace>(cpu_place), cpu_ptr,
                     size, stream);
-      else
-        // if cpu place is not pinned, async copy is slower than sync copy,
-        // so we use sync copy instead.
+      } else {
        memory::Copy(boost::get<platform::CUDAPlace>(place_), gpu_ptr,
-                     boost::get<platform::CPUPlace>(cpu_place), cpu_ptr, size,
-                     0);
+                     boost::get<platform::CPUPlace>(cpu_place), cpu_ptr, size,
+                     stream);
+      }
      gpu[i].set_lod(cpu[i].lod());
    }
    PADDLE_ENFORCE(cudaStreamSynchronize(stream));
...
...
paddle/fluid/platform/device_tracer.cc
...
...
@@ -30,7 +30,6 @@ limitations under the License. */
#include "glog/logging.h"
#include "google/protobuf/text_format.h"
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/fluid/string/printf.h"
namespace paddle {
...
...
@@ -222,19 +221,24 @@ void CUPTIAPI bufferCompleted(CUcontext ctx, uint32_t streamId, uint8_t *buffer,
    }
    case CUPTI_ACTIVITY_KIND_DRIVER: {
      auto *api = reinterpret_cast<const CUpti_ActivityAPI *>(record);
-      if (api->start != 0 && api->end != 0)
-        // -1 device id represents CUDA api call
-        tracer->AddCPURecords(
-            DriverKind(api->cbid), api->start, api->end, -1,
-            GetThreadIdFromSystemThreadId(api->threadId));
+      if (api->start != 0 && api->end != 0) {
+        // -1 device id represents ActiveKind api call
+        tracer->AddActiveKindRecords(
+            DriverKind(api->cbid), api->start, api->end, -1,
+            GetThreadIdFromSystemThreadId(api->threadId), api->correlationId);
+      }
      break;
    }
    case CUPTI_ACTIVITY_KIND_RUNTIME: {
      auto *api = reinterpret_cast<const CUpti_ActivityAPI *>(record);
-      if (api->start != 0 && api->end != 0)
-        tracer->AddCPURecords(
-            RuntimeKind(api->cbid), api->start, api->end, -1,
-            GetThreadIdFromSystemThreadId(api->threadId));
+      if (api->start != 0 && api->end != 0) {
+        // -1 device id represents ActiveKind api call
+        tracer->AddActiveKindRecords(
+            RuntimeKind(api->cbid), api->start, api->end, -1,
+            GetThreadIdFromSystemThreadId(api->threadId), api->correlationId);
+      }
      break;
    }
    default: { break; }
...
...
@@ -313,6 +317,25 @@ class DeviceTracerImpl : public DeviceTracer {
                                   stream_id, correlation_id, bytes});
  }

+  void AddActiveKindRecords(const std::string &anno, uint64_t start_ns,
+                            uint64_t end_ns, int64_t device_id,
+                            int64_t thread_id, uint32_t correlation_id) {
+    if (anno.empty()) {
+      VLOG(1) << "Empty timeline annotation.";
+      return;
+    }
+    thread_local std::forward_list<ActiveKindRecord>
+        *local_active_kind_records = nullptr;
+    if (local_active_kind_records == nullptr) {
+      std::lock_guard<std::mutex> l(trace_mu_);
+      active_kind_records_.emplace_front();
+      local_active_kind_records = &active_kind_records_.front();
+    }
+    //  lock is not needed, only one thread call this function.
+    local_active_kind_records->push_front(ActiveKindRecord{
+        anno, start_ns, end_ns, device_id, thread_id, correlation_id});
+  }
+
  void AddKernelRecords(std::string name, uint64_t start, uint64_t end,
                        int64_t device_id, int64_t stream_id,
                        uint32_t correlation_id) {
...
...
@@ -355,6 +378,7 @@ class DeviceTracerImpl : public DeviceTracer {
    }
    const std::vector<int> cbids {
      CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_v3020,
          CUPTI_RUNTIME_TRACE_CBID_cudaSetupArgument_v3020,
          CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyAsync_v3020,
          CUPTI_RUNTIME_TRACE_CBID_cudaMemset_v3020,
          CUPTI_RUNTIME_TRACE_CBID_cudaMemsetAsync_v3020,
...
...
@@ -385,6 +409,7 @@ class DeviceTracerImpl : public DeviceTracer {
    correlations_.clear();
    for (auto &tmp : correlations_pairs) tmp.clear();
    for (auto &tmp : cpu_records_) tmp.clear();
+    for (auto &tmp : active_kind_records_) tmp.clear();
  }

  void GenEventKernelCudaElapsedTime() {
...
...
@@ -437,7 +462,7 @@ class DeviceTracerImpl : public DeviceTracer {
      event->set_device_id(r.device_id);
    }
    VLOG(1) << "KernelRecord event miss: " << miss << " find: " << find;
-    for (auto &tmp : cpu_records_)
+    for (auto &tmp : cpu_records_) {
      for (const CPURecord &r : tmp) {
        auto *event = profile_pb.add_events();
        event->set_type(proto::Event::CPU);
...
...
@@ -447,6 +472,24 @@ class DeviceTracerImpl : public DeviceTracer {
        event->set_sub_device_id(r.thread_id);
        event->set_device_id(r.device_id);
      }
+    }
+    for (auto &tmp : active_kind_records_) {
+      for (const ActiveKindRecord &r : tmp) {
+        auto *event = profile_pb.add_events();
+        event->set_type(proto::Event::CPU);
+        auto c = correlations_.find(r.correlation_id);
+        if (c != correlations_.end() && c->second != nullptr) {
+          event->set_name(c->second->name());
+          event->set_detail_info(r.name);
+        } else {
+          event->set_name(r.name);
+        }
+        event->set_start_ns(r.start_ns);
+        event->set_end_ns(r.end_ns);
+        event->set_sub_device_id(r.thread_id);
+        event->set_device_id(r.device_id);
+      }
+    }
    miss = find = 0;
    for (const MemRecord &r : mem_records_) {
      auto *event = profile_pb.add_events();
...
...
@@ -510,6 +553,7 @@ class DeviceTracerImpl : public DeviceTracer {
  std::forward_list<KernelRecord> kernel_records_;
  std::forward_list<MemRecord> mem_records_;
  std::forward_list<std::forward_list<CPURecord>> cpu_records_;
+  std::forward_list<std::forward_list<ActiveKindRecord>> active_kind_records_;
  std::forward_list<std::forward_list<std::pair<uint32_t, Event *>>>
      correlations_pairs;
  std::unordered_map<uint32_t, Event *> correlations_;
...
...
@@ -613,6 +657,7 @@ void initCuptiCbidStr() {
  REGISTER_RUNTIME_CBID_STR(cudaUnbindTexture_v3020);
  REGISTER_RUNTIME_CBID_STR(cudaSetupArgument_v3020);
  REGISTER_RUNTIME_CBID_STR(cudaLaunch_v3020);
+  REGISTER_RUNTIME_CBID_STR(cudaDeviceGetPCIBusId_v4010);
#if CUDA_VERSION >= 9000
  REGISTER_RUNTIME_CBID_STR(cudaLaunchCooperativeKernel_v9000);
  REGISTER_RUNTIME_CBID_STR(cudaLaunchCooperativeKernelMultiDevice_v9000);
...
...
paddle/fluid/platform/device_tracer.h
...
...
@@ -63,7 +63,14 @@ class DeviceTracer {
    uint32_t correlation_id;
    uint64_t bytes;
  };
+  struct ActiveKindRecord {
+    std::string name;
+    uint64_t start_ns;
+    uint64_t end_ns;
+    int64_t device_id;
+    int64_t thread_id;
+    uint32_t correlation_id;
+  };

  virtual ~DeviceTracer() {}
  // Needs to be called once before use.
  virtual void Enable() = 0;
...
...
@@ -85,6 +92,10 @@ class DeviceTracer {
  virtual void AddCPURecords(const std::string& anno, uint64_t start_ns,
                             uint64_t end_ns, int64_t device_id,
                             int64_t thread_id) = 0;
+  virtual void AddActiveKindRecords(const std::string& anno, uint64_t start_ns,
+                                    uint64_t end_ns, int64_t device_id,
+                                    int64_t thread_id,
+                                    uint32_t correlation_id) = 0;

  // Add a cuda kernel stats. `correlation_id` will be mapped to annotation
  // added before for human readability.
...
...
paddle/scripts/paddle_build.sh
...
...
@@ -415,10 +415,11 @@ function assert_api_not_changed() {
    source .env/bin/activate
    pip install ${PADDLE_ROOT}/build/python/dist/*whl
    python ${PADDLE_ROOT}/tools/print_signatures.py paddle.fluid,paddle.reader > new.spec
+
    if [ "$1" == "cp35-cp35m" ] || [ "$1" == "cp36-cp36m" ] || [ "$1" == "cp37-cp37m" ]; then
        # Use sed to make python2 and python3 sepc keeps the same
        sed -i 's/arg0: str/arg0: unicode/g' new.spec
-        sed -i "s/\(.*Transpiler.*\).__init__ ArgSpec(args=\['self'].*/\1.__init__ /g" new.spec
+        sed -i "s/\(.*Transpiler.*\).__init__ (ArgSpec(args=\['self'].*/\1.__init__ /g" new.spec
    fi
    # ComposeNotAligned has significant difference between py2 and py3
    sed -i '/.*ComposeNotAligned.*/d' new.spec
...
...
@@ -452,11 +453,20 @@ function assert_api_spec_approvals() {
      echo "checking ${API_FILE} change, PR: ${GIT_PR_ID}, changes: ${API_CHANGE}"
      if [ ${API_CHANGE} ] && [ "${GIT_PR_ID}" != "" ]; then
          # NOTE: per_page=10000 should be ok for all cases, a PR review > 10000 is not human readable.
          if [ "$API_FILE" == "paddle/fluid/API.spec" ];then
              APPROVALS=`curl -H "Authorization: token ${GITHUB_API_TOKEN}" https://api.github.com/repos/PaddlePaddle/Paddle/pulls/${GIT_PR_ID}/reviews?per_page=10000 | \
              python ${PADDLE_ROOT}/tools/check_pr_approval.py 2 2887803 35982308`
          else
              APPROVALS=`curl -H "Authorization: token ${GITHUB_API_TOKEN}" https://api.github.com/repos/PaddlePaddle/Paddle/pulls/${GIT_PR_ID}/reviews?per_page=10000 | \
              python ${PADDLE_ROOT}/tools/check_pr_approval.py 1 2887803`
          fi
          echo "current pr ${GIT_PR_ID} got approvals: ${APPROVALS}"
          if [ "${APPROVALS}" == "FALSE" ]; then
              if [ "$API_FILE" == "paddle/fluid/API.spec" ];then
                  echo "You must have panyx0718 and shanyi15 approval for the api change! ${API_FILE}"
              else
                  echo "You must have panyx0718 approval for the api change! ${API_FILE}"
              fi
              exit 1
          fi
      fi
...
...
@@ -472,19 +482,6 @@ function assert_api_spec_approvals() {
          exit 1
      fi
  fi

-  pip install ${PADDLE_ROOT}/build/opt/paddle/share/wheels/*.whl
-  CHECK_DOCK_MD5=`python ${PADDLE_ROOT}/tools/check_doc_approval.py`
-  if [ "True" != ${CHECK_DOCK_MD5} ]; then
-    APPROVALS=`curl -H "Authorization: token ${GITHUB_API_TOKEN}" https://api.github.com/repos/PaddlePaddle/Paddle/pulls/${GIT_PR_ID}/reviews?per_page=10000 | \
-    python ${PADDLE_ROOT}/tools/check_pr_approval.py 1 35982308`
-    echo "current pr ${GIT_PR_ID} got approvals: ${APPROVALS}"
-    if [ "${APPROVALS}" == "FALSE" ]; then
-      echo "You must have shanyi15 approval for the api doc change! "
-      exit 1
-    fi
-    echo ${CHECK_DOCK_MD5} >/root/.cache/doc_md5.txt
-  fi
}
...
...
python/paddle/fluid/compiler.py
...
...
@@ -17,7 +17,6 @@ import os
import six
import sys
from .. import compat as cpt
-from . import framework
from . import core
from . import framework
...
...
@@ -36,6 +35,30 @@ def _place_obj(place):
    return p


+def _is_pserver_mode(main_program):
+    main = main_program if main_program \
+        else default_main_program()
+    for op in main.global_block().ops:
+        if op.type in ["send", "recv"]:
+            return True
+    return False
+
+
+def get_available_places(use_cuda):
+    if use_cuda:
+        gpus_env = os.getenv("FLAGS_selected_gpus")
+        if gpus_env:
+            gpus = [int(s) for s in gpus_env.split(",")]
+        else:
+            gpus = [i for i in six.moves.range(core.get_cuda_device_count())]
+        places = [core.CUDAPlace(i) for i in gpus]
+    else:
+        cpu_num = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
+        places = [core.CPUPlace() for _ in six.moves.range(cpu_num)]
+    assert places, "no place for execution"
+    return places
+
+
class CompiledProgram(object):
    """
    Compiles to Graph for execution.
...
...
@@ -127,8 +150,7 @@ class CompiledProgram(object):
            self._exec_strategy = ExecutionStrategy()
        if self._build_strategy is None:
            self._build_strategy = BuildStrategy()
-        self._build_strategy.is_distribution = framework.is_pserver_mode(self._program)
+        self._build_strategy.is_distribution = _is_pserver_mode(self._program)
        return self

    def with_inference_optimize(self, config):
...
...
@@ -153,9 +175,9 @@ class CompiledProgram(object):
    def _with_distributed(self):
        raise NotImplementedError()

-    def _compile_data_parallel(self):
+    def _compile_data_parallel(self, use_cuda=False, scope=None):
        if self._share_vars_from:
-            if self._scope:
+            if scope:
                sys.stderr.write("share_vars_from is set, scope is ignored.\n")
            if not self._share_vars_from._is_data_parallel:
                raise ValueError("share_vars_from is not data parallel. Cannot "
...
...
@@ -166,23 +188,11 @@ class CompiledProgram(object):
"var to share."
)
self
.
_local_scopes
=
self
.
_share_vars_from
.
_executor
.
local_scopes
()
else
:
assert
scope
is
not
None
,
""
self
.
_local_scopes
=
[]
self
.
_exec_strategy
.
use_cuda
=
isinstance
(
self
.
_place
,
core
.
CUDAPlace
)
if
self
.
_exec_strategy
.
use_cuda
:
gpus_env
=
os
.
getenv
(
"FLAGS_selected_gpus"
)
if
gpus_env
:
gpus
=
[
int
(
s
)
for
s
in
gpus_env
.
split
(
","
)]
else
:
gpus
=
[
i
for
i
in
six
.
moves
.
range
(
core
.
get_cuda_device_count
())
]
self
.
_places
=
[
core
.
CUDAPlace
(
i
)
for
i
in
gpus
]
else
:
cpu_num
=
int
(
os
.
environ
.
get
(
'CPU_NUM'
,
multiprocessing
.
cpu_count
()))
self
.
_places
=
[
core
.
CPUPlace
()
for
_
in
six
.
moves
.
range
(
cpu_num
)]
assert
self
.
_places
,
"no place for execution"
self
.
_exec_strategy
.
use_cuda
=
use_cuda
self
.
_places
=
get_available_places
(
self
.
_exec_strategy
.
use_cuda
)
if
self
.
_exec_strategy
.
num_threads
==
0
:
if
self
.
_exec_strategy
.
use_cuda
:
...
...
@@ -197,9 +207,11 @@ class CompiledProgram(object):
        # FIXME(dzhwinter): enable_inplace should be after memory_optimize
        # if turn on python memory optimize, turn off the inplace_pass.
        if self._build_strategy.memory_optimize is None:
-            self._build_strategy.memory_optimize = False if self._program and self._program._is_mem_optimized else True
+            self._build_strategy.memory_optimize = False \
+                if self._program and self._program._is_mem_optimized else True
        if self._build_strategy.enable_inplace is None:
-            self._build_strategy.enable_inplace = False if self._program and self._program._is_mem_optimized else True
+            self._build_strategy.enable_inplace = False \
+                if self._program and self._program._is_mem_optimized else True

        # TODO(wuyi): trainer endpoings should be passed in through
        # build_strategy, not program.xxx.
...
...
@@ -221,12 +233,12 @@ class CompiledProgram(object):
        places = list(map(_place_obj, self._places))

        return core.ParallelExecutor(
            places,
            set(self._persistable_vars),
            cpt.to_text(self._loss_name) if self._loss_name else six.u(''),
-            self._scope, self._local_scopes,
+            scope, self._local_scopes,
            self._exec_strategy, self._build_strategy, self._graph)

    def _compile_inference(self):
        return core.create_paddle_predictor(self._infer_config)
...
@@ -253,7 +265,9 @@ class CompiledProgram(object):
self
.
_scope
=
scope
self
.
_place
=
place
if
self
.
_is_data_parallel
:
self
.
_executor
=
self
.
_compile_data_parallel
()
self
.
_executor
=
self
.
_compile_data_parallel
(
use_cuda
=
isinstance
(
self
.
_place
,
core
.
CUDAPlace
),
scope
=
self
.
_scope
)
elif
self
.
_is_inference
:
self
.
_executor
=
self
.
_compile_inference
()
else
:
...
...
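The deprecation notice in parallel_executor.py (further below) directs users to CompiledProgram plus Executor, which is exactly what the refactored helpers above serve. A hedged sketch of that workflow, assuming the public fluid layer API of this release; the tiny network, the CPU_NUM setting and the batch sizes are illustrative assumptions, not taken from this commit.

import os
import numpy
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler

os.environ['CPU_NUM'] = '1'  # get_available_places(False) reads CPU_NUM

# Toy network; every name here is a placeholder for a real model.
img = fluid.layers.data(name='img', shape=[16], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
prediction = fluid.layers.fc(input=img, size=2, act='softmax')
loss = fluid.layers.mean(
    fluid.layers.cross_entropy(input=prediction, label=label))

place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

# Compile once; with_data_parallel() replicates the graph over the places
# returned by get_available_places(), and _compile() runs lazily on the
# first exe.run() call.
compiled_prog = compiler.CompiledProgram(
    fluid.default_main_program()).with_data_parallel(loss_name=loss.name)

for _ in range(5):
    x = numpy.random.random((4, 16)).astype('float32')
    y = numpy.random.randint(0, 2, (4, 1)).astype('int64')
    loss_val, = exe.run(compiled_prog,
                        feed={'img': x, 'label': y},
                        fetch_list=[loss.name])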
python/paddle/fluid/executor.py
...
...
@@ -261,20 +261,20 @@ def _as_lodtensor(data, place):
class Executor(object):
    """
-    An Executor in Python, only support the single-GPU running. For multi-cards, please refer to
-    ParallelExecutor.
-    Python executor takes a program, add feed operators and fetch operators to this program according
+    An Executor in Python, supports single/multiple-GPU running, and single/multiple-CPU running.
+    Python executor takes a program, adds feed operators and fetch operators to this program according
    to feed map and fetch_list. Feed map provides input data for the program. fetch_list provides
-    the variables(or names) that user want to get after program run. Note: the executor will run all
+    the variables(or names) that user wants to get after program runs. Note: the executor will run all
    operators in the program but not only the operators dependent by the fetch_list.
-    It store the global variables into the global scope, and create a local scope for the temporary
-    variables. The local scope contents will be discarded after every minibatch forward/backward finished.
-    But the global scope variables will be persistent through different runs.
-    All of ops in program will be running in sequence.
+    It stores the global variables into the global scope, and creates a local scope for the temporary
+    variables. The contents in local scope may be discarded after every minibatch forward/backward
+    finished. But the global scope variables will be persistent through different runs.

    Example:
        .. code-block:: python

            # First create the Executor.
            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
            exe = fluid.Executor(place)
...
...
@@ -297,9 +297,6 @@ class Executor(object):
    Args:
        place(core.CPUPlace|core.CUDAPlace(n)): indicate the executor run on which device

-    Note: For debugging complicated network in parallel-GPUs, you can test it on the executor.
-    They has the exactly same arguments, and expected the same results.
    """

    def __init__(self, place):
...
...
@@ -382,6 +379,12 @@ class Executor(object):
        ]
        return outs

+    '''
+    TODO(typhoonzero): Define "no longer use" meaning? Can user create
+    a new Executor for the same program and run?
+    TODO(panyx0718): Why ParallelExecutor doesn't have close?
+    '''
+
    def close(self):
        """
        Close this executor.
...
...
@@ -389,9 +392,6 @@ class Executor(object):
You can no longer use this executor after calling this method.
For the distributed training, this method would free the resource on PServers related to
the current Trainer.
TODO(typhoonzero): Define "no longer use" meaning? Can user create
a new Executor for the same program and run?
TODO(panyx0718): Why ParallelExecutor doesn't have close?
Example:
>>> cpu = core.CPUPlace()
...
...
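As a quick illustration of the scope behaviour described in the revised docstring (parameters persist in the global scope across runs, while per-minibatch temporaries live in a discardable local scope), here is a hedged sketch; the layer is arbitrary and the parameter name is whatever the program generates, nothing below comes from this commit.

import numpy
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[4], dtype='float32')
y = fluid.layers.fc(input=x, size=3)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

out, = exe.run(fluid.default_main_program(),
               feed={'x': numpy.ones((2, 4), dtype='float32')},
               fetch_list=[y])

# The fc weight is a persistable variable, so it can still be read from the
# global scope after the run; the minibatch temporaries are gone with the
# local scope.
param = fluid.default_main_program().global_block().all_parameters()[0]
w = numpy.array(fluid.global_scope().find_var(param.name).get_tensor())
print(param.name, w.shape)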
python/paddle/fluid/framework.py
...
...
@@ -87,15 +87,6 @@ def _current_expected_place():
    return _imperative_current_expected_place_


-def is_pserver_mode(main_program):
-    main = main_program if main_program \
-        else default_main_program()
-    for op in main.global_block().ops:
-        if op.type in ["send", "recv"]:
-            return True
-    return False
-
-
class NameScope(object):
    def __init__(self, name="", parent=None):
        self._children = dict()
...
...
python/paddle/fluid/io.py
...
...
@@ -468,9 +468,10 @@ def save_persistables(executor, dirname, main_program=None, filename=None):
            exe = fluid.Executor(fluid.CPUPlace())
            param_path = "./my_paddle_model"
+            # `prog` can be a program defined by the user
            prog = fluid.default_main_program()
            fluid.io.save_persistables(executor=exe, dirname=param_path,
-                                       main_program=None)
+                                       main_program=prog)
    """

    if main_program and main_program._is_distributed:
...
...
python/paddle/fluid/parallel_executor.py
...
...
@@ -13,15 +13,11 @@
# limitations under the License.

from __future__ import print_function

-import multiprocessing
from . import core
from . import framework
from . import executor
-from .. import compat as cpt
-import warnings
+from . import compiler
import sys
-import six
-import os

__all__ = ['ParallelExecutor']
...
...
@@ -97,99 +93,27 @@ class ParallelExecutor(object):
            'Please use CompiledProgram and Executor. CompiledProgram '
            'is a central place for optimization and Executor is the '
            'unified executor. Example can be found in compiler.py.\n')

-        # step1: get places, the places are used in run too.
-        self._places = []
-        if use_cuda:
-            gpus_env = os.getenv("FLAGS_selected_gpus")
-            if gpus_env:
-                gpus = [int(s) for s in gpus_env.split(",")]
-            else:
-                gpus = [
-                    i for i in six.moves.range(core.get_cuda_device_count())
-                ]
-            self._places = [core.CUDAPlace(i) for i in gpus]
-        else:
-            cpu_num = int(
-                os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
-            self._places = [core.CPUPlace() for _ in six.moves.range(cpu_num)]
-        assert self._places, "no place for execution"
-
-        # step2: init exec_strategy
-        if exec_strategy is None:
-            exec_strategy = ExecutionStrategy()
-        exec_strategy.use_cuda = use_cuda
-        if exec_strategy.num_threads == 0:
-            if use_cuda:
-                # Experiments on se-resnext shows that too many threads hurt
-                # performance. Worth tunning for other models in the future.
-                exec_strategy.num_threads = len(self._places) * 4
-            else:
-                cpu_num = int(
-                    os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
-                exec_strategy.num_threads = cpu_num * 2
-
-        # step3: init build_strategy
-        if build_strategy is None:
-            build_strategy = BuildStrategy()
-        build_strategy.num_trainers = num_trainers
-        build_strategy.trainer_id = trainer_id
-        # FIXME(zcd): is_distribution_ is a temporary field, because in pserver mode,
-        # num_trainers is 1, so the current fields of build_strategy doesn't tell if
-        # it's distributed model.
-        build_strategy.is_distribution = framework.is_pserver_mode(
-            main_program) or num_trainers > 1
-
-        # step4: get main_program, scope, local_scopes
-        main = main_program if main_program \
-            else framework.default_main_program()
-        # FIXME(dzhwinter): enable_inplace should be after memory_optimize
-        # if turn on python memory optimize, turn off the inplace_pass.
-        if build_strategy.memory_optimize is None:
-            build_strategy.memory_optimize = False if main._is_mem_optimized else True
-        if build_strategy.enable_inplace is None:
-            build_strategy.enable_inplace = False if main._is_mem_optimized else True
-        scope = scope if scope is not None else executor.global_scope()
-
-        if share_vars_from and not isinstance(share_vars_from,
-                                              ParallelExecutor):
-            raise TypeError("share_vars_from must be ParallelExecutor.")
-
-        local_scopes = share_vars_from.executor.local_scopes()\
-            if share_vars_from else []
-
-        # step5: check trainers_endpoints, it is used for distribution.
-        trainers_endpoints = main._trainers_endpoints
-        if num_trainers > 1 and trainers_endpoints:
-            assert num_trainers == len(
-                trainers_endpoints), "num_trainers == len(endpoints)"
-            build_strategy.trainers_endpoints = trainers_endpoints
-
-        # step6: get persistable_vars, places. persistable_vars
-        # need be broadcast to other local_scope.
-        persistable_vars = set([
-            cpt.to_text(v.name) for v in [
-                var for var in main.list_vars()
-                if var.persistable and var.type != core.VarDesc.VarType.RAW
-            ]
-        ])
-
-        def place_obj(place):
-            p = core.Place()
-            p.set_place(place)
-            return p
-
-        places = list(map(place_obj, self._places))
-
-        # step7: init ParallelExecutor
-        # ParallelExecutor API will be deprecated, don't support parallel graph.
-        self._graph = core.Graph(main.desc)
-
-        self.executor = core.ParallelExecutor(
-            places, persistable_vars,
-            cpt.to_text(loss_name) if loss_name else six.u(''), scope,
-            local_scopes, exec_strategy, build_strategy, self._graph)
-
-        self.scope = scope
+        self._places = compiler.get_available_places(use_cuda)
+        self._scope = scope if scope is not None else executor.global_scope()
+
+        main_program = main_program if main_program is not None \
+            else framework.default_main_program()
+
+        self._compiled_program = compiler.CompiledProgram(main_program)
+        self._compiled_program.with_data_parallel(
+            loss_name=loss_name,
+            build_strategy=build_strategy,
+            exec_strategy=exec_strategy,
+            share_vars_from=share_vars_from)
+        self._place = core.CUDAPlace(0) if use_cuda else core.CPUPlace()
+        self._executor = executor.Executor(self._place)
+        self._compiled_program._compile(place=self._place, scope=self._scope)

    def run(self, fetch_list, feed=None, feed_dict=None, return_numpy=True):
        """
...
...
@@ -256,56 +180,11 @@ class ParallelExecutor(object):
                loss = pe.run(feed=feeder.feed(cur_batch),
                              fetch_list=[avg_cost.name]))
        """
-        if feed is None and feed_dict is not None:
-            feed = feed_dict
-            print(
-                "`feed_dict` is deprecated. Please use `feed=`",
-                file=sys.stderr)
-
-        if isinstance(feed, dict):
-            feed_tensor_dict = dict()
-            for feed_name in feed:
-                feed_tensor = feed[feed_name]
-                if not isinstance(feed_tensor, core.LoDTensor):
-                    feed_tensor = core.LoDTensor()
-                    # always set to CPU place, since the tensor need to be splitted
-                    # it is fast in CPU
-                    feed_tensor.set(feed[feed_name], core.CPUPlace())
-                feed_tensor_dict[feed_name] = feed_tensor
-
-            self.executor.feed_and_split_tensor_into_local_scopes(
-                feed_tensor_dict)
-        elif isinstance(feed, list) or isinstance(feed, tuple):
-            if len(feed) != len(self._places):
-                raise ValueError(
-                    "Feed a list of tensor, the list should be the same size as places")
-
-            res = list()
-            for i, each in enumerate(feed):
-                if not isinstance(each, dict):
-                    raise TypeError(
-                        "Each element of feed list should be a dict")
-                res_dict = dict()
-                for feed_name in each:
-                    tensor = each[feed_name]
-                    if not isinstance(tensor, core.LoDTensor):
-                        tmp = core.LoDTensor()
-                        tmp.set(tensor, self._places[i])
-                        tensor = tmp
-                    res_dict[feed_name] = tensor
-                res.append(res_dict)
-            self.executor.feed_tensors_into_local_scopes(res)
-
-        fetch_var_name = 'fetch'
-        self.executor.run(fetch_list, fetch_var_name)
-        arr = self.scope.find_var(fetch_var_name).get_lod_tensor_array()
-
-        if return_numpy:
-            return executor.as_numpy(arr)
-
-        return [arr[i] for i in range(len(arr))]
+        return self._executor.run(program=self._compiled_program,
+                                  scope=self._scope,
+                                  feed=feed,
+                                  fetch_list=fetch_list,
+                                  return_numpy=return_numpy)

    @property
    def device_count(self):
...
...
python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py
...
...
@@ -15,44 +15,139 @@
from __future__ import print_function

import unittest
+import numpy as np

-from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2dOp, TestWithPad, TestWithStride, TestWithGroup, TestWith1x1, TestWithInput1x1Filter1x1
+import paddle.fluid.core as core
+from paddle.fluid.tests.unittests.op_test import OpTest
+from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2dOp


-class TestMKLDNN(TestConv2dOp):
-    def init_kernel_type(self):
-        self.use_mkldnn = True
-        self.data_format = "NCHW"
+def conv2d_bias_naive(out, bias):
+    _, out_c, _, _ = out.shape
+
+    for l in range(out_c):
+        out[:, l, :, :] = out[:, l, :, :] + bias[l]
+    return out


-class TestMKLDNNWithPad(TestWithPad):
-    def init_kernel_type(self):
-        self.use_mkldnn = True
-        self.data_format = "NCHW"
+def conv2d_residual_naive(out, residual):
+    assert out.shape == residual.shape
+    out = np.add(out, residual)
+    return out


-class TestMKLDNNWithStride(TestWithStride):
-    def init_kernel_type(self):
-        self.use_mkldnn = True
-        self.data_format = "NCHW"
-
-
-class TestMKLDNNWithGroup(TestWithGroup):
-    def init_kernel_type(self):
-        self.use_mkldnn = True
-        self.data_format = "NCHW"
-
-
-class TestMKLDNNWith1x1(TestWith1x1):
-    def init_kernel_type(self):
-        self.use_mkldnn = True
-        self.data_format = "NCHW"
-
-
-class TestMKLDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1):
-    def init_kernel_type(self):
-        self.use_mkldnn = True
-        self.data_format = "NCHW"
+class TestConv2dMKLDNNOp(TestConv2dOp):
+    def init_group(self):
+        self.groups = 1
+
+    def init_kernel_type(self):
+        self.data_format = "NCHW"
+        self.use_mkldnn = True
+        self._cpu_only = True
+
+    def init_test_case(self):
+        self.pad = [0, 0]
+        self.stride = [1, 1]
+        self.input_size = [2, 3, 5, 5]  # NCHW
+        assert np.mod(self.input_size[1], self.groups) == 0
+        f_c = self.input_size[1] // self.groups
+        self.filter_size = [6, f_c, 3, 3]
+
+    def setUp(self):
+        self.fuse_bias = False
+        self.bias_size = None
+        self.fuse_relu = False
+        self.fuse_residual_connection = False
+        self.input_residual_size = None
+        TestConv2dOp.setUp(self)
+
+        output = self.outputs['Output']
+
+        #mkldnn only support either conv-sum-relu, or conv-relu.
+        if self.fuse_bias and self.bias_size is not None:
+            bias = np.random.random(self.bias_size).astype(self.dtype)
+            output = conv2d_bias_naive(output, bias)
+            output = output.astype(self.dtype)
+            self.attrs['fuse_bias'] = self.fuse_bias
+            self.inputs['Bias'] = OpTest.np_dtype_to_fluid_dtype(bias)
+
+        if self.fuse_residual_connection and self.input_residual_size is not None:
+            input_residual = np.random.random(self.input_residual_size).astype(
+                self.dtype)
+            output = conv2d_residual_naive(output, input_residual)
+
+            self.attrs['fuse_residual_connection'] = self.fuse_residual_connection
+            self.inputs['ResidualData'] = OpTest.np_dtype_to_fluid_dtype(
+                input_residual)
+
+        if self.fuse_relu:
+            output = np.maximum(output, 0).astype(self.dsttype)
+
+        output = output.astype(self.dtype)
+
+        self.attrs['fuse_bias'] = self.fuse_bias
+        self.attrs['fuse_relu'] = self.fuse_relu
+        self.attrs['fuse_residual_connection'] = self.fuse_residual_connection
+
+        self.outputs['Output'] = output
+
+
+class TestWithFuse(TestConv2dMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dMKLDNNOp.init_test_case(self)
+        self.pad = [1, 1]
+        self.fuse_bias = True
+        self.bias_size = [6]
+        self.fuse_residual_connection = True
+        self.input_residual_size = [2, 6, 5, 5]
+
+    def test_check_grad(self):
+        pass
+
+    def test_check_grad_no_filter(self):
+        pass
+
+    def test_check_grad_no_input(self):
+        pass
+
+
+class TestWithPadWithBias(TestConv2dMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dMKLDNNOp.init_test_case(self)
+        self.pad = [1, 1]
+        self.input_size = [2, 3, 6, 6]
+
+
+class TestWithStride(TestConv2dMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dMKLDNNOp.init_test_case(self)
+        self.pad = [1, 1]
+        self.stride = [2, 2]
+        self.input_size = [2, 3, 6, 6]
+
+
+class TestWithGroup(TestConv2dMKLDNNOp):
+    def init_group(self):
+        self.groups = 3
+
+
+class TestWith1x1(TestConv2dMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dMKLDNNOp.init_test_case(self)
+        self.filter_size = [6, 3, 1, 1]
+
+
+class TestWithInput1x1Filter1x1(TestConv2dMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dMKLDNNOp.init_test_case(self)
+        self.input_size = [2, 3, 1, 1]  # NCHW
+        assert np.mod(self.input_size[1], self.groups) == 0
+        f_c = self.input_size[1] // self.groups
+        self.filter_size = [6, f_c, 1, 1]
+
+    def init_group(self):
+        self.groups = 3


if __name__ == '__main__':
...
...
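The setUp above builds the reference output by composing the plain conv result with the optional fused stages: per-channel bias add, then residual add, then ReLU. A standalone sketch of that composition order, with hypothetical shapes (not taken from the test):

import numpy as np

def fused_reference(conv_out, bias=None, residual=None, relu=False):
    out = conv_out.copy()
    if bias is not None:
        # one bias value per output channel, broadcast over N, H, W (NCHW)
        out = out + bias.reshape(1, -1, 1, 1)
    if residual is not None:
        assert out.shape == residual.shape
        out = np.add(out, residual)
    if relu:
        out = np.maximum(out, 0)
    return out

conv_out = np.random.random((2, 6, 5, 5)).astype('float32')
ref = fused_reference(conv_out,
                      bias=np.random.random(6).astype('float32'),
                      residual=np.random.random((2, 6, 5, 5)).astype('float32'),
                      relu=True)
print(ref.shape)  # (2, 6, 5, 5)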
python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_mkldnn_op.py
View file @ d8a939d8
...
...
@@ -18,6 +18,24 @@ import unittest
from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, TestCase1, TestCase2, TestCase3, TestCase4, TestCase5


def create_test_mkldnn_use_ceil_class(parent):
    class TestMKLDNNPool2DUseCeilCase(parent):
        def init_kernel_type(self):
            self.use_mkldnn = True

        def init_ceil_mode(self):
            self.ceil_mode = True

    cls_name = "{0}_{1}".format(parent.__name__, "MKLDNNCeilModeCast")
    TestMKLDNNPool2DUseCeilCase.__name__ = cls_name
    globals()[cls_name] = TestMKLDNNPool2DUseCeilCase


create_test_mkldnn_use_ceil_class(TestPool2D_Op)
create_test_mkldnn_use_ceil_class(TestCase1)
create_test_mkldnn_use_ceil_class(TestCase2)


def create_test_mkldnn_class(parent):
    class TestMKLDNNCase(parent):
        def init_kernel_type(self):
...
...
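create_test_mkldnn_use_ceil_class and create_test_mkldnn_class rely on the same class-factory trick: subclass the parent test case, rename the subclass, and register it in globals() so the unittest loader discovers it as a separate test class. A self-contained sketch of the pattern with hypothetical class and attribute names:

import unittest

class BaseCase(unittest.TestCase):
    use_mkldnn = False

    def test_flag(self):
        self.assertIn(self.use_mkldnn, (True, False))

def create_mkldnn_variant(parent):
    # Subclass the parent, flip the flag, and publish the subclass under a
    # new name so it runs as an additional test class.
    class MKLDNNVariant(parent):
        use_mkldnn = True

    cls_name = "{0}_{1}".format(parent.__name__, "MKLDNN")
    MKLDNNVariant.__name__ = cls_name
    globals()[cls_name] = MKLDNNVariant

create_mkldnn_variant(BaseCase)

if __name__ == '__main__':
    unittest.main()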
tools/check_doc_approval.py
Deleted  100644 → 0
View file @ d4674dab
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import ast
import hashlib
import importlib
import paddle.fluid

files = [
    "paddle.fluid", "paddle.fluid.average", "paddle.fluid.backward",
    "paddle.fluid.clip", "paddle.fluid.data_feeder", "paddle.fluid.executor",
    "paddle.fluid.initializer", "paddle.fluid.io", "paddle.fluid.layers",
    "paddle.fluid.metrics", "paddle.fluid.nets", "paddle.fluid.optimizer",
    "paddle.fluid.profiler", "paddle.fluid.recordio_writer",
    "paddle.fluid.regularizer", "paddle.fluid.transpiler"
]


def md5(doc):
    hash = hashlib.md5()
    hash.update(str(doc))
    return hash.hexdigest()


def get_module():
    for fi in files:
        fi_lib = importlib.import_module(fi)
        doc_function = getattr(fi_lib, "__all__")
        for api in doc_function:
            api_name = fi + "." + api
            try:
                doc_module = getattr(eval(api_name), "__doc__")
            except:
                pass
            doc_md5_code = md5(doc_module)
            doc_dict[api_name] = doc_md5_code


def doc_md5_dict(doc_md5_path):
    with open(doc_md5_path, "rb") as f:
        doc_md5 = f.read()
        doc_md5_dict = ast.literal_eval(doc_md5)
    return doc_md5_dict


def check_doc_md5():
    for k, v in doc_dict.items():
        try:
            if doc_ci_dict[k] != v:
                return doc_dict
        except:
            return doc_dict
    return True


if __name__ == "__main__":
    doc_dict = {}
    doc_ci_dict = {}
    doc_md5_file = "/root/.cache/doc_md5.txt"
    if not os.path.exists(doc_md5_file):
        os.mknod(doc_md5_file)
    else:
        doc_ci_dict = doc_md5_dict(doc_md5_file)
    get_module()
    if not os.path.getsize(doc_md5_file):
        with open(doc_md5_file, 'w') as f:
            f.write(str(doc_dict))
        check_dic = True
        print(check_dic)
    else:
        check_dic = check_doc_md5()
        print(check_dic)
tools/diff_api.py
View file @ d8a939d8
...
...
@@ -26,4 +26,10 @@ for each_diff in result:
    print(each_diff)

if error:
    print(
        '''If you modify/add/delete the API files, including code and comment, please follow these steps in order to pass the CI:
        1. cd ${paddle_path}, compile paddle;
        2. pip install build/python/dist/(build whl package);
        3. run "python tools/print_signatures.py paddle.fluid, paddle.reader > paddle/fluid/API.spec"'''
    )
    sys.exit(1)
tools/print_signatures.py
View file @ d8a939d8
...
...
@@ -24,12 +24,19 @@ import inspect
import collections
import sys
import pydoc
import hashlib

member_dict = collections.OrderedDict()

experimental_namespace = {"paddle.fluid.imperative"}


def md5(doc):
    hash = hashlib.md5()
    hash.update(str(doc).encode('utf-8'))
    return hash.hexdigest()


def visit_member(parent_name, member):
    cur_name = ".".join([parent_name, member.__name__])
    if inspect.isclass(member):
...
...
@@ -39,7 +46,10 @@ def visit_member(parent_name, member):
            visit_member(cur_name, value)
    elif callable(member):
        try:
            member_dict[cur_name] = inspect.getargspec(member)
            doc = ('document', md5(member.__doc__))
            args = inspect.getargspec(member)
            all = (args, doc)
            member_dict[cur_name] = all
        except TypeError:  # special for PyBind method
            member_dict[cur_name] = " ".join([
                line.strip() for line in pydoc.render_doc(member).split('\n')
...
...
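print_signatures.py now records, for every public callable, its argspec together with an md5 of its docstring, so that either a signature change or a doc change shows up as a diff against API.spec. A rough sketch of that fingerprinting idea, using inspect.getfullargspec in place of the older inspect.getargspec the script calls (the exact API.spec line format is not reproduced here):

import hashlib
import inspect

def md5(doc):
    hash = hashlib.md5()
    hash.update(str(doc).encode('utf-8'))
    return hash.hexdigest()

def fingerprint(func):
    # (argspec, ('document', md5-of-docstring)), mirroring the member_dict entry
    args = inspect.getfullargspec(func)
    return (args, ('document', md5(func.__doc__)))

def scale(x, factor=1.0):
    """Multiply x by factor."""
    return x * factor

print(fingerprint(scale))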
tools/timeline.py
View file @ d8a939d8
...
...
@@ -131,7 +131,7 @@ class Timeline(object):
                if (k, event.device_id, "CPU") not in self._devices:
                    pid = self._allocate_pid()
                    self._devices[(k, event.device_id, "CPU")] = pid
                    # -1 device id represents CUDA api call
                    # -1 device id represents CUDA API(RunTime) call.(e.g. cudaLaunch, cudaMemcpy)
                    if event.device_id == -1:
                        self._chrome_trace.emit_pid("%s:cuda_api" % k, pid)
                    else:
...
...