PaddlePaddle / PaddleDetection

Commit 5306713b
Authored Mar 22, 2019 by phlrain

merge api spec; test=develop

Parent: e9bf8bca

Showing 1 changed file with 65 additions and 65 deletions:

paddle/fluid/API.spec (+65 −65)
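
paddle/fluid/API.spec pins Paddle's public Python API: each line records a callable's ArgSpec and, in the newer entry format, an md5 digest of its docstring as a ('document', <hash>) tuple, so CI can flag unreviewed signature or documentation changes. Judging by the signatures (AdagradOptimizer.__init__ gains the then-new initial_accumulator_value argument), this merge brings the 65 entries below onto the hashed format; a sketch of how such entries can be generated follows the diff.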
@@ -414,71 +414,71 @@ paddle.fluid.transpiler.RoundRobin.__init__ ArgSpec(args=['self', 'pserver_endpo
 paddle.fluid.transpiler.RoundRobin.dispatch ArgSpec(args=['self', 'varlist'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.transpiler.RoundRobin.reset ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.transpiler.DistributeTranspilerConfig.__init__
-paddle.fluid.nets.simple_img_conv_pool ArgSpec(args=['input', 'num_filters', 'filter_size', 'pool_size', 'pool_stride', 'pool_padding', 'pool_type', 'global_pooling', 'conv_stride', 'conv_padding', 'conv_dilation', 'conv_groups', 'param_attr', 'bias_attr', 'act', 'use_cudnn'], varargs=None, keywords=None, defaults=(0, 'max', False, 1, 0, 1, 1, None, None, None, True))
-paddle.fluid.nets.sequence_conv_pool ArgSpec(args=['input', 'num_filters', 'filter_size', 'param_attr', 'act', 'pool_type', 'bias_attr'], varargs=None, keywords=None, defaults=(None, 'sigmoid', 'max', None))
-paddle.fluid.nets.glu ArgSpec(args=['input', 'dim'], varargs=None, keywords=None, defaults=(-1,))
-paddle.fluid.nets.scaled_dot_product_attention ArgSpec(args=['queries', 'keys', 'values', 'num_heads', 'dropout_rate'], varargs=None, keywords=None, defaults=(1, 0.0))
-paddle.fluid.nets.img_conv_group ArgSpec(args=['input', 'conv_num_filter', 'pool_size', 'conv_padding', 'conv_filter_size', 'conv_act', 'param_attr', 'conv_with_batchnorm', 'conv_batchnorm_drop_rate', 'pool_stride', 'pool_type', 'use_cudnn'], varargs=None, keywords=None, defaults=(1, 3, None, None, False, 0.0, 1, 'max', True))
-paddle.fluid.optimizer.SGDOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'regularization', 'name'], varargs=None, keywords=None, defaults=(None, None))
-paddle.fluid.optimizer.SGDOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.SGDOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
-paddle.fluid.optimizer.SGDOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.SGDOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.MomentumOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'momentum', 'use_nesterov', 'regularization', 'name'], varargs=None, keywords=None, defaults=(False, None, None))
-paddle.fluid.optimizer.MomentumOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.MomentumOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
-paddle.fluid.optimizer.MomentumOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.MomentumOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.AdagradOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(1e-06, None, None))
-paddle.fluid.optimizer.AdagradOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.AdagradOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
-paddle.fluid.optimizer.AdagradOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.AdagradOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.AdamOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon', 'regularization', 'name', 'lazy_mode'], varargs=None, keywords=None, defaults=(0.001, 0.9, 0.999, 1e-08, None, None, False))
-paddle.fluid.optimizer.AdamOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.AdamOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
-paddle.fluid.optimizer.AdamOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.AdamOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.AdamaxOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.001, 0.9, 0.999, 1e-08, None, None))
-paddle.fluid.optimizer.AdamaxOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.AdamaxOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
-paddle.fluid.optimizer.AdamaxOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.AdamaxOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.DecayedAdagradOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'decay', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.95, 1e-06, None, None))
-paddle.fluid.optimizer.DecayedAdagradOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.DecayedAdagradOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
-paddle.fluid.optimizer.DecayedAdagradOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.DecayedAdagradOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.FtrlOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'l1', 'l2', 'lr_power', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.0, 0.0, -0.5, None, None))
-paddle.fluid.optimizer.FtrlOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.FtrlOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
-paddle.fluid.optimizer.FtrlOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.FtrlOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.RMSPropOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'rho', 'epsilon', 'momentum', 'centered', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.95, 1e-06, 0.0, False, None, None))
-paddle.fluid.optimizer.RMSPropOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.RMSPropOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
-paddle.fluid.optimizer.RMSPropOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.RMSPropOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.AdadeltaOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'epsilon', 'rho', 'regularization', 'name'], varargs=None, keywords=None, defaults=(1e-06, 0.95, None, None))
-paddle.fluid.optimizer.AdadeltaOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.AdadeltaOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
-paddle.fluid.optimizer.AdadeltaOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.AdadeltaOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.ModelAverage.__init__ ArgSpec(args=['self', 'average_window_rate', 'min_average_window', 'max_average_window', 'regularization', 'name'], varargs=None, keywords=None, defaults=(10000, 10000, None, None))
-paddle.fluid.optimizer.ModelAverage.apply ArgSpec(args=['self', 'executor', 'need_restore'], varargs=None, keywords=None, defaults=(True,))
-paddle.fluid.optimizer.ModelAverage.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.ModelAverage.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
-paddle.fluid.optimizer.ModelAverage.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.ModelAverage.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.ModelAverage.restore ArgSpec(args=['self', 'executor'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.LarsMomentumOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'momentum', 'lars_coeff', 'lars_weight_decay', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.001, 0.0005, None, None))
-paddle.fluid.optimizer.LarsMomentumOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.LarsMomentumOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
-paddle.fluid.optimizer.LarsMomentumOptimizer.get_opti_var_name_list ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.optimizer.LarsMomentumOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.backward.append_backward ArgSpec(args=['loss', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.regularizer.L1DecayRegularizer.__init__ ArgSpec(args=['self', 'regularization_coeff'], varargs=None, keywords=None, defaults=(0.0,))
-paddle.fluid.regularizer.L2DecayRegularizer.__init__ ArgSpec(args=['self', 'regularization_coeff'], varargs=None, keywords=None, defaults=(0.0,))
+paddle.fluid.nets.simple_img_conv_pool (ArgSpec(args=['input', 'num_filters', 'filter_size', 'pool_size', 'pool_stride', 'pool_padding', 'pool_type', 'global_pooling', 'conv_stride', 'conv_padding', 'conv_dilation', 'conv_groups', 'param_attr', 'bias_attr', 'act', 'use_cudnn'], varargs=None, keywords=None, defaults=(0, 'max', False, 1, 0, 1, 1, None, None, None, True)), ('document', 'e0f67f35abf27f666f81003113b90244'))
+paddle.fluid.nets.sequence_conv_pool (ArgSpec(args=['input', 'num_filters', 'filter_size', 'param_attr', 'act', 'pool_type', 'bias_attr'], varargs=None, keywords=None, defaults=(None, 'sigmoid', 'max', None)), ('document', '48c434dd7bb827f69d90e5135d77470f'))
+paddle.fluid.nets.glu (ArgSpec(args=['input', 'dim'], varargs=None, keywords=None, defaults=(-1,)), ('document', '08c1c57e1db6b20bf87b264cb7cf3ca8'))
+paddle.fluid.nets.scaled_dot_product_attention (ArgSpec(args=['queries', 'keys', 'values', 'num_heads', 'dropout_rate'], varargs=None, keywords=None, defaults=(1, 0.0)), ('document', '921714c9bfb351b41403418265393203'))
+paddle.fluid.nets.img_conv_group (ArgSpec(args=['input', 'conv_num_filter', 'pool_size', 'conv_padding', 'conv_filter_size', 'conv_act', 'param_attr', 'conv_with_batchnorm', 'conv_batchnorm_drop_rate', 'pool_stride', 'pool_type', 'use_cudnn'], varargs=None, keywords=None, defaults=(1, 3, None, None, False, 0.0, 1, 'max', True)), ('document', '3802be78fbfb206dae64a2d9f8480970'))
+paddle.fluid.optimizer.SGDOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'regularization', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.SGDOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
+paddle.fluid.optimizer.SGDOptimizer.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'))
+paddle.fluid.optimizer.SGDOptimizer.get_opti_var_name_list (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.SGDOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'))
+paddle.fluid.optimizer.MomentumOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'momentum', 'use_nesterov', 'regularization', 'name'], varargs=None, keywords=None, defaults=(False, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.MomentumOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
+paddle.fluid.optimizer.MomentumOptimizer.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'))
+paddle.fluid.optimizer.MomentumOptimizer.get_opti_var_name_list (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.MomentumOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'))
+paddle.fluid.optimizer.AdagradOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'epsilon', 'regularization', 'name', 'initial_accumulator_value'], varargs=None, keywords=None, defaults=(1e-06, None, None, 0.0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.AdagradOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
+paddle.fluid.optimizer.AdagradOptimizer.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'))
+paddle.fluid.optimizer.AdagradOptimizer.get_opti_var_name_list (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.AdagradOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'))
+paddle.fluid.optimizer.AdamOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon', 'regularization', 'name', 'lazy_mode'], varargs=None, keywords=None, defaults=(0.001, 0.9, 0.999, 1e-08, None, None, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.AdamOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
+paddle.fluid.optimizer.AdamOptimizer.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'))
+paddle.fluid.optimizer.AdamOptimizer.get_opti_var_name_list (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.AdamOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'))
+paddle.fluid.optimizer.AdamaxOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.001, 0.9, 0.999, 1e-08, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.AdamaxOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
+paddle.fluid.optimizer.AdamaxOptimizer.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'))
+paddle.fluid.optimizer.AdamaxOptimizer.get_opti_var_name_list (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.AdamaxOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'))
+paddle.fluid.optimizer.DecayedAdagradOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'decay', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.95, 1e-06, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.DecayedAdagradOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
+paddle.fluid.optimizer.DecayedAdagradOptimizer.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'))
+paddle.fluid.optimizer.DecayedAdagradOptimizer.get_opti_var_name_list (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.DecayedAdagradOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'))
+paddle.fluid.optimizer.FtrlOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'l1', 'l2', 'lr_power', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.0, 0.0, -0.5, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.FtrlOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
+paddle.fluid.optimizer.FtrlOptimizer.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'))
+paddle.fluid.optimizer.FtrlOptimizer.get_opti_var_name_list (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.FtrlOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'))
+paddle.fluid.optimizer.RMSPropOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'rho', 'epsilon', 'momentum', 'centered', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.95, 1e-06, 0.0, False, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.RMSPropOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
+paddle.fluid.optimizer.RMSPropOptimizer.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'))
+paddle.fluid.optimizer.RMSPropOptimizer.get_opti_var_name_list (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.RMSPropOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'))
+paddle.fluid.optimizer.AdadeltaOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'epsilon', 'rho', 'regularization', 'name'], varargs=None, keywords=None, defaults=(1e-06, 0.95, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.AdadeltaOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
+paddle.fluid.optimizer.AdadeltaOptimizer.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'))
+paddle.fluid.optimizer.AdadeltaOptimizer.get_opti_var_name_list (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.AdadeltaOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'))
+paddle.fluid.optimizer.ModelAverage.__init__ (ArgSpec(args=['self', 'average_window_rate', 'min_average_window', 'max_average_window', 'regularization', 'name'], varargs=None, keywords=None, defaults=(10000, 10000, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.ModelAverage.apply (ArgSpec(args=['self', 'executor', 'need_restore'], varargs=None, keywords=None, defaults=(True,)), ('document', '46234a5470590feb336346f70a3db715'))
+paddle.fluid.optimizer.ModelAverage.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
+paddle.fluid.optimizer.ModelAverage.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'))
+paddle.fluid.optimizer.ModelAverage.get_opti_var_name_list (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.ModelAverage.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'))
+paddle.fluid.optimizer.ModelAverage.restore (ArgSpec(args=['self', 'executor'], varargs=None, keywords=None, defaults=None), ('document', '18db9c70be9c4dd466f9844457b21bfe'))
+paddle.fluid.optimizer.LarsMomentumOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'momentum', 'lars_coeff', 'lars_weight_decay', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.001, 0.0005, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.LarsMomentumOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
+paddle.fluid.optimizer.LarsMomentumOptimizer.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'))
+paddle.fluid.optimizer.LarsMomentumOptimizer.get_opti_var_name_list (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.optimizer.LarsMomentumOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea'))
+paddle.fluid.backward.append_backward (ArgSpec(args=['loss', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '1a79bd7d10ae54ca763ec81bca36ba24'))
+paddle.fluid.regularizer.L1DecayRegularizer.__init__ (ArgSpec(args=['self', 'regularization_coeff'], varargs=None, keywords=None, defaults=(0.0,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.regularizer.L2DecayRegularizer.__init__ (ArgSpec(args=['self', 'regularization_coeff'], varargs=None, keywords=None, defaults=(0.0,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.LoDTensor.__init__ 1. __init__(self: paddle.fluid.core.LoDTensor, arg0: List[List[int]]) -> None 2. __init__(self: paddle.fluid.core.LoDTensor) -> None
 paddle.fluid.LoDTensor.has_valid_recursive_sequence_lengths has_valid_recursive_sequence_lengths(self: paddle.fluid.core.LoDTensor) -> bool
 paddle.fluid.LoDTensor.lod lod(self: paddle.fluid.core.LoDTensor) -> List[List[int]]
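
At the time, entries like these were produced by introspecting the live API (in the Paddle repo the generator lived in tools/print_signatures.py). Below is a minimal, self-contained sketch of the idea only; spec_entry and the placeholder glu are illustrative assumptions, not Paddle's actual code.

import hashlib
import inspect


def spec_entry(name, fn):
    # Record the same fields API.spec stores: args, varargs, keywords
    # (varkw in Python 3) and defaults, plus an md5 of the docstring so
    # that a silent documentation edit changes the committed hash.
    spec = inspect.getfullargspec(fn)
    doc_md5 = hashlib.md5((fn.__doc__ or "").encode("utf-8")).hexdigest()
    return "%s (ArgSpec(args=%r, varargs=%r, keywords=%r, defaults=%r), ('document', '%s'))" % (
        name, spec.args, spec.varargs, spec.varkw, spec.defaults, doc_md5)


def glu(input, dim=-1):
    """Placeholder standing in for paddle.fluid.nets.glu."""


print(spec_entry("paddle.fluid.nets.glu", glu))
# -> paddle.fluid.nets.glu (ArgSpec(args=['input', 'dim'], varargs=None,
#    keywords=None, defaults=(-1,)), ('document', '<md5 of the docstring>'))

Identical hashes on different entries (for example, 6adf97f83acf6453d4a6a4b1070f3754 on every optimizer __init__ above) simply mean those callables carried identical docstrings when the spec was generated.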