Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
机器未来
Paddle
提交
9848f8f3
P
Paddle
项目概览
机器未来
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
9848f8f3
编写于
3月 10, 2020
作者:
Z
zhangchunle
提交者:
GitHub
3月 10, 2020
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
add a white list of api (#22897)
上级
8d47162e
变更
2
隐藏空白更改
内联
并排
Showing
2 changed files
with
335 additions
and
191 deletions
+335
-191
tools/sampcd_processor.py
tools/sampcd_processor.py
+20
-191
tools/wlist.json
tools/wlist.json
+315
-0
未找到文件。
tools/sampcd_processor.py
浏览文件 @
9848f8f3
...
...
@@ -21,6 +21,7 @@ import platform
import
inspect
import
paddle
import
paddle.fluid
import
json
"""
please make sure to run in the tools path
usage: python sample_test.py {arg1}
...
...
@@ -529,203 +530,31 @@ def get_incrementapi():
f
.
write
(
'
\n
'
)
'''
Important constant lists:
wlist : a list of API that should not trigger the example check .
It is composed of wlist_temp + wlist_inneed + wlist_ignore.
srcfile: the source .py code file
'''
wlist_inneed
=
[
"append_LARS"
,
"BuildStrategy.debug_graphviz_path"
,
"BuildStrategy.enable_sequential_execution"
,
"BuildStrategy.fuse_elewise_add_act_ops"
,
"BuildStrategy.fuse_relu_depthwise_conv"
,
"BuildStrategy.gradient_scale_strategy"
,
"BuildStrategy.reduce_strategy"
,
"BuildStrategy.remove_unnecessary_lock"
,
"BuildStrategy.sync_batch_norm"
,
"DynamicRNN.step_input"
,
"DynamicRNN.static_input"
,
"DynamicRNN.block"
,
"DynamicRNN.update_memory"
,
"DynamicRNN.output"
,
"transpiler.DistributeTranspilerConfig"
,
"transpiler.DistributeTranspilerConfig.slice_var_up"
,
"transpiler.DistributeTranspilerConfig.split_method"
,
"transpiler.DistributeTranspilerConfig.min_block_size"
,
"DistributeTranspilerConfig.slice_var_up"
,
"DistributeTranspilerConfig.split_method"
,
"ModelAverage.apply"
,
"ModelAverage.restore"
,
"DistributeTranspilerConfig"
,
"DistributeTranspilerConfig.min_block_size"
,
"ExecutionStrategy.allow_op_delay"
,
"load"
,
"Accuracy.update"
,
"ChunkEvaluator.update"
,
"ExecutionStrategy.num_iteration_per_drop_scope"
,
"ExecutionStrategy.num_threads"
,
"CompiledProgram._with_inference_optimize"
,
"CompositeMetric.add_metric"
,
"CompositeMetric.update"
,
"CompositeMetric.eval"
,
"DetectionMAP.get_map_var"
,
"MetricBase"
,
"MetricBase.reset"
,
"MetricBase.get_config"
,
"MetricBase.update"
,
"MetricBase.eval"
,
"Accuracy.eval"
,
"Auc.update"
,
"Auc.eval"
,
"EditDistance.update"
,
"EditDistance.eval"
,
"ExponentialMovingAverage.apply"
,
"ExponentialMovingAverage.restore"
,
"ExponentialMovingAverage.update"
,
"StaticRNN.step"
,
"StaticRNN.step_input"
,
"StaticRNN.step_output"
,
"StaticRNN.update_memory"
,
"DetectionMAP.reset"
,
'StaticRNN.output'
,
"cuda_places"
,
"CUDAPinnedPlace"
,
"CUDAPlace"
,
"Program.parse_from_string"
]
wlist_nosample
=
[
'Compressor'
,
'Compressor.config'
,
'Compressor.run'
,
'run_check'
,
'HDFSClient.upload'
,
'HDFSClient.download'
,
'HDFSClient.is_exist'
,
'HDFSClient.is_dir'
,
'HDFSClient.delete'
,
'HDFSClient.rename'
,
'HDFSClient.makedirs'
,
'HDFSClient.ls'
,
'HDFSClient.lsr'
,
'multi_download'
,
'multi_upload'
,
'TrainingDecoder.block'
,
'QuantizeTranspiler.training_transpile'
,
'QuantizeTranspiler.freeze_program'
,
'AutoMixedPrecisionLists'
,
'Uniform.sample'
,
'Uniform.log_prob'
,
'Uniform.entropy'
,
'Categorical.kl_divergence'
,
'Categorical.entropy'
,
'MultivariateNormalDiag.entropy'
,
'MultivariateNormalDiag.kl_divergence'
,
'RNNCell'
,
'RNNCell.call'
,
'RNNCell.get_initial_states'
,
'GRUCell.call'
,
'LSTMCell.call'
,
'Decoder'
,
'Decoder.initialize'
,
'Decoder.step'
,
'Decoder.finalize'
,
'fused_elemwise_activation'
,
'search_pyramid_hash'
,
'convert_dist_to_sparse_program'
,
'load_persistables_for_increment'
,
'load_persistables_for_inference'
,
'cache'
,
'buffered'
,
'xmap_readers'
]
wlist_no_op_pass
=
[
'gelu'
,
'erf'
]
wlist_ci_nopass
=
[
'DecodeHelper'
,
'DecodeHelper.initialize'
,
'DecodeHelper.sample'
,
'DecodeHelper.next_inputs'
,
'TrainingHelper.initialize'
,
'TrainingHelper.sample'
,
'TrainingHelper.next_inputs'
,
'GreedyEmbeddingHelper.initialize'
,
'GreedyEmbeddingHelper.sample'
,
'GreedyEmbeddingHelper.next_inputs'
,
'LayerList.append'
,
'HDFSClient'
,
'InitState'
,
'TracedLayer'
,
'SampleEmbeddingHelper.sample'
,
'BasicDecoder.initialize'
,
'BasicDecoder.step'
,
'ParameterList.append'
,
'GreedyEmbeddingHelper'
,
'SampleEmbeddingHelper'
,
'BasicDecoder'
,
'lstm'
,
'partial_sum'
]
wlist_nopass
=
[
'StateCell'
,
'StateCell.compute_state'
,
'TrainingDecoder'
,
'TrainingDecoder.step_input'
,
'TrainingDecoder.static_input'
,
'TrainingDecoder.output'
,
'BeamSearchDecoder'
,
'GradClipByValue'
,
'GradClipByNorm'
,
'Variable.detach'
,
'Variable.numpy'
,
'Variable.set_value'
,
'Variable.gradient'
,
'BeamSearchDecoder.decode'
,
'BeamSearchDecoder.read_array'
,
'CompiledProgram'
,
'CompiledProgram.with_data_parallel'
,
'append_backward'
,
'guard'
,
'to_variable'
,
'op_freq_statistic'
,
'save_dygraph'
,
'load_dygraph'
,
'ParallelExecutor'
,
'ParallelExecutor.run'
,
'ParallelExecutor.drop_local_exe_scopes'
,
'GradClipByGlobalNorm'
,
'extend_with_decoupled_weight_decay'
,
'switch'
,
'Normal'
,
'memory_usage'
,
'decorate'
,
'PiecewiseDecay'
,
'InverseTimeDecay'
,
'PolynomialDecay'
,
'NoamDecay'
,
'start_profiler'
,
'profiler'
,
'tree_conv'
,
'multiclass_nms2'
,
'DataFeedDesc'
,
'Conv2D'
,
'Conv3D'
,
'Conv3DTranspose'
,
'Embedding'
,
'NCE'
,
'PRelu'
,
'BilinearTensorProduct'
,
'GroupNorm'
,
'SpectralNorm'
,
'TreeConv'
,
'prroi_pool'
]
wlist_temp
=
[
'ChunkEvaluator'
,
'EditDistance'
,
'ErrorClipByValue'
,
'Program.clone'
,
'cuda_pinned_places'
,
'DataFeeder'
,
'elementwise_floordiv'
,
'Layer'
,
'Layer.create_parameter'
,
'Layer.create_variable'
,
'Layer.sublayers'
,
'Layer.add_parameter'
,
'Layer.add_sublayer'
,
'Layer.parameters'
,
'Tracer'
,
'Layer.full_name'
,
'InMemoryDataset'
,
'layer_norm'
,
'bipartite_match'
,
'double_buffer'
,
'cumsum'
,
'thresholded_relu'
,
'group_norm'
,
'random_crop'
,
'py_func'
,
'row_conv'
,
'hard_shrink'
,
'ssd_loss'
,
'retinanet_target_assign'
,
'InMemoryDataset.global_shuffle'
,
'InMemoryDataset.get_memory_data_size'
,
'DetectionMAP'
,
'hash'
,
'InMemoryDataset.set_queue_num'
,
'LayerNorm'
,
'Preprocessor'
,
'chunk_eval'
,
'GRUUnit'
,
'ExponentialMovingAverage'
,
'QueueDataset.global_shuffle'
,
'NumpyArrayInitializer'
,
'create_py_reader_by_data'
,
'InMemoryDataset.local_shuffle'
,
'InMemoryDataset.get_shuffle_data_size'
,
'size'
,
'edit_distance'
,
'nce'
,
'BilinearInitializer'
,
'NaturalExpDecay'
,
'noam_decay'
,
'retinanet_detection_output'
,
'Pool2D'
,
'PipelineOptimizer'
,
'generate_mask_labels'
,
'isfinite'
,
'InMemoryDataset.set_fleet_send_batch_size'
,
'cuda_profiler'
,
'unfold'
,
'Executor'
,
'InMemoryDataset.load_into_memory'
,
'ExponentialDecay'
,
'BatchNorm'
,
'deformable_conv'
,
'InMemoryDataset.preload_into_memory'
,
'py_reader'
,
'linear_lr_warmup'
,
'InMemoryDataset.wait_preload_done'
,
'CosineDecay'
,
'roi_perspective_transform'
,
'unique'
,
'ones_like'
,
'LambOptimizer'
,
'InMemoryDataset.release_memory'
,
'Conv2DTranspose'
,
'QueueDataset.local_shuffle'
,
# wrong in dygraph/checkpoint.py ok in io.py [duplicated name]
'save_persistables@dygraph/checkpoint.py'
,
'load_persistables@dygraph/checkpoint.py'
]
'''
white list of private API/ redundant API
'''
wlist_ignore
=
[
'elementwise_pow'
,
'WeightedAverage.reset'
,
'ChunkEvaluator.eval'
,
'NCE.forward'
,
'elementwise_div'
,
'BilinearTensorProduct.forward'
,
'NoamDecay.step'
,
'elementwise_min'
,
'PiecewiseDecay.step'
,
'Conv3DTranspose.forward'
,
'elementwise_add'
,
'IfElse.output'
,
'IfElse.true_block'
,
'InverseTimeDecay.step'
,
'PolynomialDecay.step'
,
'Precision.eval'
,
'enabled'
,
'elementwise_max'
,
'stop_gperf_profiler'
,
'IfElse.false_block'
,
'WeightedAverage.add'
,
'Auc.trapezoid_area'
,
'elementwise_mul'
,
'GroupNorm.forward'
,
'SpectralNorm.forward'
,
'elementwise_sub'
,
'Switch.case'
,
'IfElse.input'
,
'prepare_context'
,
'PRelu.forward'
,
'Recall.update'
,
'start_gperf_profiler'
,
'TreeConv.forward'
,
'Conv2D.forward'
,
'Switch.default'
,
'elementwise_mod'
,
'Precision.update'
,
'WeightedAverage.eval'
,
'Conv3D.forward'
,
'Embedding.forward'
,
'Recall.eval'
,
'FC.forward'
,
'While.block'
,
'DGCMomentumOptimizer'
]
# only white on CPU
gpu_not_white
=
[
"deformable_conv"
,
"cuda_places"
,
"CUDAPinnedPlace"
,
"CUDAPlace"
,
"cuda_profiler"
,
'DGCMomentumOptimizer'
]
wlist
=
wlist_temp
+
wlist_inneed
+
wlist_ignore
+
wlist_nosample
+
wlist_nopass
+
wlist_no_op_pass
+
wlist_ci_nopass
def
get_wlist
():
'''
this function will get the white list of API.
Returns:
wlist: a list of API that should not trigger the example check .
'''
wlist
=
[]
with
open
(
"wlist.json"
,
'r'
)
as
load_f
:
load_dict
=
json
.
load
(
load_f
)
for
key
in
load_dict
:
wlist
=
wlist
+
load_dict
[
key
]
return
wlist
wlist
=
get_wlist
()
if
len
(
sys
.
argv
)
<
2
:
print
(
"Error: inadequate number of arguments"
)
...
...
tools/wlist.json
0 → 100644
浏览文件 @
9848f8f3
{
"wlist_inneed"
:[
"append_LARS"
,
"BuildStrategy.debug_graphviz_path"
,
"BuildStrategy.enable_sequential_execution"
,
"BuildStrategy.fuse_elewise_add_act_ops"
,
"BuildStrategy.fuse_relu_depthwise_conv"
,
"BuildStrategy.gradient_scale_strategy"
,
"BuildStrategy.reduce_strategy"
,
"BuildStrategy.remove_unnecessary_lock"
,
"BuildStrategy.sync_batch_norm"
,
"DynamicRNN.step_input"
,
"DynamicRNN.static_input"
,
"DynamicRNN.block"
,
"DynamicRNN.update_memory"
,
"DynamicRNN.output"
,
"transpiler.DistributeTranspilerConfig"
,
"transpiler.DistributeTranspilerConfig.slice_var_up"
,
"transpiler.DistributeTranspilerConfig.split_method"
,
"transpiler.DistributeTranspilerConfig.min_block_size"
,
"DistributeTranspilerConfig.slice_var_up"
,
"DistributeTranspilerConfig.split_method"
,
"ModelAverage.apply"
,
"ModelAverage.restore"
,
"DistributeTranspilerConfig"
,
"DistributeTranspilerConfig.min_block_size"
,
"ExecutionStrategy.allow_op_delay"
,
"load"
,
"Accuracy.update"
,
"ChunkEvaluator.update"
,
"ExecutionStrategy.num_iteration_per_drop_scope"
,
"ExecutionStrategy.num_threads"
,
"CompiledProgram._with_inference_optimize"
,
"CompositeMetric.add_metric"
,
"CompositeMetric.update"
,
"CompositeMetric.eval"
,
"DetectionMAP.get_map_var"
,
"MetricBase"
,
"MetricBase.reset"
,
"MetricBase.get_config"
,
"MetricBase.update"
,
"MetricBase.eval"
,
"Accuracy.eval"
,
"Auc.update"
,
"Auc.eval"
,
"EditDistance.update"
,
"EditDistance.eval"
,
"ExponentialMovingAverage.apply"
,
"ExponentialMovingAverage.restore"
,
"ExponentialMovingAverage.update"
,
"StaticRNN.step"
,
"StaticRNN.step_input"
,
"StaticRNN.step_output"
,
"StaticRNN.update_memory"
,
"DetectionMAP.reset"
,
"StaticRNN.output"
,
"cuda_places"
,
"CUDAPinnedPlace"
,
"CUDAPlace"
,
"Program.parse_from_string"
],
"wlist_nosample"
:[
"Compressor"
,
"Compressor.config"
,
"Compressor.run"
,
"run_check"
,
"HDFSClient.upload"
,
"HDFSClient.download"
,
"HDFSClient.is_exist"
,
"HDFSClient.is_dir"
,
"HDFSClient.delete"
,
"HDFSClient.rename"
,
"HDFSClient.makedirs"
,
"HDFSClient.ls"
,
"HDFSClient.lsr"
,
"multi_download"
,
"multi_upload"
,
"TrainingDecoder.block"
,
"QuantizeTranspiler.training_transpile"
,
"QuantizeTranspiler.freeze_program"
,
"AutoMixedPrecisionLists"
,
"Uniform.sample"
,
"Uniform.log_prob"
,
"Uniform.entropy"
,
"Categorical.kl_divergence"
,
"Categorical.entropy"
,
"MultivariateNormalDiag.entropy"
,
"MultivariateNormalDiag.kl_divergence"
,
"RNNCell"
,
"RNNCell.call"
,
"RNNCell.get_initial_states"
,
"GRUCell.call"
,
"LSTMCell.call"
,
"Decoder"
,
"Decoder.initialize"
,
"Decoder.step"
,
"Decoder.finalize"
,
"fused_elemwise_activation"
,
"search_pyramid_hash"
,
"convert_dist_to_sparse_program"
,
"load_persistables_for_increment"
,
"load_persistables_for_inference"
,
"cache"
,
"buffered"
,
"xmap_readers"
],
"wlist_no_op_pass"
:[
"gelu"
,
"erf"
],
"wlist_ci_nopass"
:[
"DecodeHelper"
,
"DecodeHelper.initialize"
,
"DecodeHelper.sample"
,
"DecodeHelper.next_inputs"
,
"TrainingHelper.initialize"
,
"TrainingHelper.sample"
,
"TrainingHelper.next_inputs"
,
"GreedyEmbeddingHelper.initialize"
,
"GreedyEmbeddingHelper.sample"
,
"GreedyEmbeddingHelper.next_inputs"
,
"LayerList.append"
,
"HDFSClient"
,
"InitState"
,
"TracedLayer"
,
"SampleEmbeddingHelper.sample"
,
"BasicDecoder.initialize"
,
"BasicDecoder.step"
,
"ParameterList.append"
,
"GreedyEmbeddingHelper"
,
"SampleEmbeddingHelper"
,
"BasicDecoder"
,
"lstm"
,
"partial_sum"
],
"wlist_nopass"
:[
"StateCell"
,
"StateCell.compute_state"
,
"TrainingDecoder"
,
"TrainingDecoder.step_input"
,
"TrainingDecoder.static_input"
,
"TrainingDecoder.output"
,
"BeamSearchDecoder"
,
"GradClipByValue"
,
"GradClipByNorm"
,
"Variable.detach"
,
"Variable.numpy"
,
"Variable.set_value"
,
"Variable.gradient"
,
"BeamSearchDecoder.decode"
,
"BeamSearchDecoder.read_array"
,
"CompiledProgram"
,
"CompiledProgram.with_data_parallel"
,
"append_backward"
,
"guard"
,
"to_variable"
,
"op_freq_statistic"
,
"save_dygraph"
,
"load_dygraph"
,
"ParallelExecutor"
,
"ParallelExecutor.run"
,
"ParallelExecutor.drop_local_exe_scopes"
,
"GradClipByGlobalNorm"
,
"extend_with_decoupled_weight_decay"
,
"switch"
,
"Normal"
,
"memory_usage"
,
"decorate"
,
"PiecewiseDecay"
,
"InverseTimeDecay"
,
"PolynomialDecay"
,
"NoamDecay"
,
"start_profiler"
,
"profiler"
,
"tree_conv"
,
"multiclass_nms2"
,
"DataFeedDesc"
,
"Conv2D"
,
"Conv3D"
,
"Conv3DTranspose"
,
"Embedding"
,
"NCE"
,
"PRelu"
,
"BilinearTensorProduct"
,
"GroupNorm"
,
"SpectralNorm"
,
"TreeConv"
,
"prroi_pool"
],
"wlist_temp"
:[
"ChunkEvaluator"
,
"EditDistance"
,
"ErrorClipByValue"
,
"Program.clone"
,
"cuda_pinned_places"
,
"DataFeeder"
,
"elementwise_floordiv"
,
"Layer"
,
"Layer.create_parameter"
,
"Layer.create_variable"
,
"Layer.sublayers"
,
"Layer.add_parameter"
,
"Layer.add_sublayer"
,
"Layer.parameters"
,
"Tracer"
,
"Layer.full_name"
,
"InMemoryDataset"
,
"layer_norm"
,
"bipartite_match"
,
"double_buffer"
,
"cumsum"
,
"thresholded_relu"
,
"group_norm"
,
"random_crop"
,
"py_func"
,
"row_conv"
,
"hard_shrink"
,
"ssd_loss"
,
"retinanet_target_assign"
,
"InMemoryDataset.global_shuffle"
,
"InMemoryDataset.get_memory_data_size"
,
"DetectionMAP"
,
"hash"
,
"InMemoryDataset.set_queue_num"
,
"LayerNorm"
,
"Preprocessor"
,
"chunk_eval"
,
"GRUUnit"
,
"ExponentialMovingAverage"
,
"QueueDataset.global_shuffle"
,
"NumpyArrayInitializer"
,
"create_py_reader_by_data"
,
"InMemoryDataset.local_shuffle"
,
"InMemoryDataset.get_shuffle_data_size"
,
"size"
,
"edit_distance"
,
"nce"
,
"BilinearInitializer"
,
"NaturalExpDecay"
,
"noam_decay"
,
"retinanet_detection_output"
,
"Pool2D"
,
"PipelineOptimizer"
,
"generate_mask_labels"
,
"isfinite"
,
"InMemoryDataset.set_fleet_send_batch_size"
,
"cuda_profiler"
,
"unfold"
,
"Executor"
,
"InMemoryDataset.load_into_memory"
,
"ExponentialDecay"
,
"BatchNorm"
,
"deformable_conv"
,
"InMemoryDataset.preload_into_memory"
,
"py_reader"
,
"linear_lr_warmup"
,
"InMemoryDataset.wait_preload_done"
,
"CosineDecay"
,
"roi_perspective_transform"
,
"unique"
,
"ones_like"
,
"LambOptimizer"
,
"InMemoryDataset.release_memory"
,
"Conv2DTranspose"
,
"QueueDataset.local_shuffle"
,
"save_persistables@dygraph/checkpoint.py"
,
"load_persistables@dygraph/checkpoint.py"
],
"wlist_ignore"
:[
"elementwise_pow"
,
"WeightedAverage.reset"
,
"ChunkEvaluator.eval"
,
"NCE.forward"
,
"elementwise_div"
,
"BilinearTensorProduct.forward"
,
"NoamDecay.step"
,
"elementwise_min"
,
"PiecewiseDecay.step"
,
"Conv3DTranspose.forward"
,
"elementwise_add"
,
"IfElse.output"
,
"IfElse.true_block"
,
"InverseTimeDecay.step"
,
"PolynomialDecay.step"
,
"Precision.eval"
,
"enabled"
,
"elementwise_max"
,
"stop_gperf_profiler"
,
"IfElse.false_block"
,
"WeightedAverage.add"
,
"Auc.trapezoid_area"
,
"elementwise_mul"
,
"GroupNorm.forward"
,
"SpectralNorm.forward"
,
"elementwise_sub"
,
"Switch.case"
,
"IfElse.input"
,
"prepare_context"
,
"PRelu.forward"
,
"Recall.update"
,
"start_gperf_profiler"
,
"TreeConv.forward"
,
"Conv2D.forward"
,
"Switch.default"
,
"elementwise_mod"
,
"Precision.update"
,
"WeightedAverage.eval"
,
"Conv3D.forward"
,
"Embedding.forward"
,
"Recall.eval"
,
"FC.forward"
,
"While.block"
,
"DGCMomentumOptimizer"
]
}
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录