Unverified commit 85489d39, authored by zyfncg, committed by GitHub

Rename name of op and op_args in yaml to align python api (#46343)

* rename op in yaml

* fix test_layout_autotune

* fix layout autotune of transpose
Parent: 0ad7f537
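The point of the renames is that the yaml operator signatures now spell names the way the public Python API does. A minimal sketch of the alignment (plain paddle usage, not part of this diff; assumes any recent paddle build):

import paddle

x = paddle.rand([2, 3])
# yaml args are now axis/keepdim, matching paddle.sum(x, axis=..., keepdim=...)
out = paddle.sum(x, axis=[1], keepdim=True)
# the op itself is renamed top_k -> topk, matching paddle.topk
values, indices = paddle.topk(x, k=2, axis=-1)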
@@ -1023,7 +1023,7 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
             forward_outputs_position_map.keys()) - len(intermediate_outputs)
         # for layout autotune attr
         lightly_sensitive_attr = [
-            'axis', 'axes', 'dim', 'dims', 'start', 'end', 'stop'
+            'axis', 'axes', 'dim', 'dims', 'start', 'end', 'stop', 'perm'
         ]
         heavily_sensitive_attr = ['data_format', 'data_layout']
         layout_autotune_attr = []
......
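The hunk above adds 'perm' (transpose's renamed attribute) to the generator's list of lightly layout-sensitive attributes. A rough Python sketch of how such lists can bucket an attribute; the helper name and the substring check are illustrative, not the generator's exact logic:

lightly_sensitive_attr = [
    'axis', 'axes', 'dim', 'dims', 'start', 'end', 'stop', 'perm'
]
heavily_sensitive_attr = ['data_format', 'data_layout']

def classify_layout_attr(attr_name: str) -> str:
    """Hypothetical helper: bucket one op attribute for layout autotune."""
    if any(key in attr_name for key in heavily_sensitive_attr):
        return 'heavily_sensitive'  # e.g. conv2d's data_format
    if any(key in attr_name for key in lightly_sensitive_attr):
        return 'lightly_sensitive'  # e.g. transpose's perm after this PR
    return 'insensitive'

assert classify_layout_attr('perm') == 'lightly_sensitive'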
@@ -217,9 +217,9 @@
   no_need_buffer : x

 - backward_op : trunc_grad
-  forward : trunc (Tensor x) -> Tensor(out)
+  forward : trunc (Tensor input) -> Tensor(out)
   args : (Tensor out_grad)
-  output : Tensor(x_grad)
+  output : Tensor(input_grad)
   infer_meta :
     func : UnchangedInferMeta
     param : [out_grad]
......
@@ -105,8 +105,8 @@
     use_gpudnn: use_cudnn

 - backward_op : amax_grad
-  forward: amax (Tensor x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(out)
-  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
+  forward: amax (Tensor x, int64_t[] axis={}, bool keepdim=false) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] axis={}, bool keepdim=false, bool reduce_all=false)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
@@ -115,8 +115,8 @@
     func : amax_grad

 - backward_op : amin_grad
-  forward: amin (Tensor x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(out)
-  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
+  forward: amin (Tensor x, int64_t[] axis={}, bool keepdim=false) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] axis={}, bool keepdim=false, bool reduce_all=false)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
@@ -311,19 +311,19 @@
   inplace : (out_grad -> x_grad)

 - backward_op : broadcast_tensors_grad
-  forward : broadcast_tensors (Tensor[] x) -> Tensor[](out)
-  args : (Tensor[] x, Tensor[] out_grad)
-  output : Tensor[](x_grad)
+  forward : broadcast_tensors (Tensor[] input) -> Tensor[](out)
+  args : (Tensor[] input, Tensor[] out_grad)
+  output : Tensor[](input_grad)
   infer_meta :
     func : UnchangedMultiInferMeta
-    param : [x]
+    param : [input]
   kernel :
     func : broadcast_tensors_grad
     param : [out_grad]
-  no_need_buffer : x
+  no_need_buffer : input

 - backward_op : cast_grad
-  forward : cast (Tensor x, DataType out_dtype) -> Tensor(out)
+  forward : cast (Tensor x, DataType dtype) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   invoke : cast (out_grad, x.dtype())
@@ -386,14 +386,14 @@
   inplace : (out_grad -> x_grad)

 - backward_op : complex_grad
-  forward : complex (Tensor x, Tensor y) -> Tensor(out)
-  args : (Tensor x, Tensor y, Tensor out_grad)
-  output : Tensor(x_grad), Tensor(y_grad)
+  forward : complex (Tensor real, Tensor imag) -> Tensor(out)
+  args : (Tensor real, Tensor imag, Tensor out_grad)
+  output : Tensor(real_grad), Tensor(imag_grad)
   infer_meta :
     func : ComplexGradInferMeta
   kernel :
     func : complex_grad
-    data_type : x
+    data_type : real

 - backward_op : concat_double_grad
   forward : concat_grad (Tensor[] x, Tensor grad_out, Scalar axis) -> Tensor[](grad_x)
@@ -663,7 +663,7 @@
     skip_transform : out_w, out_w_grad

 - backward_op : eigh_grad
-  forward : eigh (Tensor x, str uplo) -> Tensor(out_w), Tensor(out_v)
+  forward : eigh (Tensor x, str UPLO) -> Tensor(out_w), Tensor(out_v)
   args : (Tensor out_w, Tensor out_v, Tensor out_w_grad, Tensor out_v_grad)
   output : Tensor(x_grad)
   infer_meta :
@@ -788,7 +788,7 @@
   inplace : (out_grad -> x_grad)

 - backward_op : exponential__grad
-  forward : exponential_ (Tensor x, float lambda) -> Tensor(out)
+  forward : exponential_ (Tensor x, float lam) -> Tensor(out)
   args : (Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
@@ -981,8 +981,8 @@
   kernel :
     func : gumbel_softmax_grad

-- backward_op : hard_shrink_grad
-  forward : hard_shrink (Tensor x, float threshold) -> Tensor(out)
+- backward_op : hardshrink_grad
+  forward : hardshrink (Tensor x, float threshold) -> Tensor(out)
   args : (Tensor x, Tensor out_grad, float threshold)
   output : Tensor(x_grad)
   infer_meta :
@@ -992,8 +992,8 @@
     func : hard_shrink_grad
   inplace : (out_grad -> x_grad)

-- backward_op : hard_sigmoid_grad
-  forward : hard_sigmoid (Tensor x, float slope, float offset) -> Tensor(out)
+- backward_op : hardsigmoid_grad
+  forward : hardsigmoid (Tensor x, float slope, float offset) -> Tensor(out)
   args : (Tensor out, Tensor out_grad, float slope, float offset)
   output : Tensor(x_grad)
   infer_meta :
@@ -1003,8 +1003,8 @@
     func : hard_sigmoid_grad
   inplace : (out_grad -> x_grad)

-- backward_op : hard_swish_grad
-  forward : hard_swish (Tensor x, float threshold = 6.0, float scale = 6.0, float offset = 3.0) -> Tensor(out)
+- backward_op : hardswish_grad
+  forward : hardswish (Tensor x, float threshold = 6.0, float scale = 6.0, float offset = 3.0) -> Tensor(out)
   args : (Tensor x, Tensor out_grad, float threshold, float scale, float offset)
   output : Tensor(x_grad)
   infer_meta :
@@ -1065,8 +1065,8 @@
   no_need_buffer : x

 - backward_op : index_select_grad
-  forward : index_select(Tensor x, Tensor index, int dim) -> Tensor(out)
-  args : (Tensor x, Tensor index, Tensor out_grad, int dim)
+  forward : index_select(Tensor x, Tensor index, int axis) -> Tensor(out)
+  args : (Tensor x, Tensor index, Tensor out_grad, int axis)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
@@ -1164,8 +1164,8 @@
   optional : scale, bias

 - backward_op : leaky_relu_double_grad
-  forward : leaky_relu_grad (Tensor x, Tensor grad_out, float alpha) -> Tensor(grad_x)
-  args : (Tensor x, Tensor grad_x_grad, float alpha)
+  forward : leaky_relu_grad (Tensor x, Tensor grad_out, float negative_slope) -> Tensor(grad_x)
+  args : (Tensor x, Tensor grad_x_grad, float negative_slope)
   output : Tensor(grad_out_grad)
   infer_meta :
     func : UnchangedInferMeta
@@ -1175,8 +1175,8 @@
   inplace : (grad_x_grad -> grad_out_grad)

 - backward_op : leaky_relu_grad
-  forward : leaky_relu (Tensor x, float alpha) -> Tensor(out)
-  args : (Tensor x, Tensor out_grad, float alpha)
+  forward : leaky_relu (Tensor x, float negative_slope) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, float negative_slope)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
@@ -1335,8 +1335,8 @@
     func : lu_grad

 - backward_op : lu_unpack_grad
-  forward : lu_unpack (Tensor x, Tensor pivots, bool unpack_ludata, bool unpack_pivots) -> Tensor(pmat), Tensor(l), Tensor(u)
-  args : (Tensor x, Tensor pivots, Tensor l, Tensor u, Tensor pmat, Tensor l_grad, Tensor u_grad, bool unpack_ludata, bool unpack_pivots)
+  forward : lu_unpack (Tensor x, Tensor y, bool unpack_ludata, bool unpack_pivots) -> Tensor(pmat), Tensor(l), Tensor(u)
+  args : (Tensor x, Tensor y, Tensor l, Tensor u, Tensor pmat, Tensor l_grad, Tensor u_grad, bool unpack_ludata, bool unpack_pivots)
   output : Tensor(x_grad)
   infer_meta :
     func : LUUnpackGradInferMeta
@@ -1411,8 +1411,8 @@
     func : matrix_power_grad

 - backward_op : max_grad
-  forward: max (Tensor x, IntArray dims={}, bool keep_dim=false) -> Tensor(out)
-  args : (Tensor x, Tensor out, Tensor out_grad, IntArray dims={}, bool keep_dim=false, bool reduce_all=false)
+  forward: max (Tensor x, IntArray axis={}, bool keepdim=false) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, IntArray axis={}, bool keepdim=false, bool reduce_all=false)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
@@ -1469,14 +1469,14 @@
     func : mean_all_grad

 - backward_op : mean_double_grad
-  forward: mean_grad (Tensor x, Tensor grad_out, IntArray dims={}, bool keep_dim=false, bool reduce_all = false) -> Tensor(grad_x)
-  args : (Tensor grad_x_grad, IntArray dims={}, bool keep_dim=false)
+  forward: mean_grad (Tensor x, Tensor grad_out, IntArray axis={}, bool keepdim=false, bool reduce_all = false) -> Tensor(grad_x)
+  args : (Tensor grad_x_grad, IntArray axis={}, bool keepdim=false)
   output : Tensor(grad_out_grad)
-  invoke : mean(grad_x_grad, dims, keep_dim)
+  invoke : mean(grad_x_grad, axis, keepdim)

 - backward_op : mean_grad
-  forward: mean (Tensor x, IntArray dims={}, bool keep_dim=false) -> Tensor(out)
-  args : (Tensor x, Tensor out_grad, IntArray dims={}, bool keep_dim=false, bool reduce_all=false)
+  forward: mean (Tensor x, IntArray axis={}, bool keepdim=false) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, IntArray axis={}, bool keepdim=false, bool reduce_all=false)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
@@ -1496,8 +1496,8 @@
     func : meshgrid_grad

 - backward_op : min_grad
-  forward: min (Tensor x, IntArray dims={}, bool keep_dim=false) -> Tensor(out)
-  args : (Tensor x, Tensor out, Tensor out_grad, IntArray dims={}, bool keep_dim=false, bool reduce_all=false)
+  forward: min (Tensor x, IntArray axis={}, bool keepdim=false) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, IntArray axis={}, bool keepdim=false, bool reduce_all=false)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
@@ -1546,15 +1546,15 @@
     func : multi_dot_grad

 - backward_op : multiplex_grad
-  forward : multiplex (Tensor[] ins, Tensor ids) -> Tensor(out)
-  args : (Tensor[] ins, Tensor ids, Tensor out_grad)
-  output : Tensor[](ins_grad){ins.size()}
+  forward : multiplex (Tensor[] inputs, Tensor index) -> Tensor(out)
+  args : (Tensor[] inputs, Tensor index, Tensor out_grad)
+  output : Tensor[](inputs_grad){inputs.size()}
   infer_meta :
     func : MultiplexGradInferMeta
-    param : [ids, out_grad]
+    param : [index, out_grad]
   kernel :
     func : multiplex_grad
-    param : [ids, out_grad]
+    param : [index, out_grad]

 - backward_op : multiply_double_grad
   forward : multiply_grad (Tensor x, Tensor y, Tensor grad_out, int axis = -1) -> Tensor(grad_x), Tensor(grad_y)
@@ -1734,8 +1734,8 @@
     use_gpudnn : use_gpudnn

 - backward_op : pow_grad
-  forward : pow(Tensor x, Scalar s) -> Tensor(out)
-  args : (Tensor x, Tensor out_grad, Scalar s=-1)
+  forward : pow(Tensor x, Scalar y) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, Scalar y=-1)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
@@ -1768,12 +1768,12 @@
   # output is optional

 - backward_op : put_along_axis_grad
-  forward : put_along_axis (Tensor x, Tensor index, Tensor value, int axis, str reduce) -> Tensor(out)
-  args : (Tensor x, Tensor index, Tensor out_grad, int axis, str reduce)
-  output : Tensor(x_grad), Tensor(value_grad)
+  forward : put_along_axis (Tensor arr, Tensor index, Tensor value, int axis, str reduce) -> Tensor(out)
+  args : (Tensor arr, Tensor index, Tensor out_grad, int axis, str reduce)
+  output : Tensor(arr_grad), Tensor(value_grad)
   infer_meta :
     func : GeneralBinaryGradInferMeta
-    param : [x, index]
+    param : [arr, index]
   kernel :
     func : put_along_axis_grad
@@ -1859,8 +1859,8 @@
     func : renorm_grad

 - backward_op : repeat_interleave_grad
-  forward : repeat_interleave(Tensor x, int repeats, int dim) -> Tensor(out)
-  args : (Tensor x, Tensor out_grad, int repeats, int dim)
+  forward : repeat_interleave(Tensor x, int repeats, int axis) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, int repeats, int axis)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
@@ -1869,8 +1869,8 @@
     func : repeat_interleave_grad

 - backward_op : repeat_interleave_with_tensor_index_grad
-  forward : repeat_interleave_with_tensor_index(Tensor x, Tensor repeats, int dim) -> Tensor(out)
-  args : (Tensor x, Tensor repeats, Tensor out_grad, int dim)
+  forward : repeat_interleave_with_tensor_index(Tensor x, Tensor repeats, int axis) -> Tensor(out)
+  args : (Tensor x, Tensor repeats, Tensor out_grad, int axis)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
@@ -2169,17 +2169,6 @@
   kernel :
     func : slogdeterminant_grad

-- backward_op : soft_shrink_grad
-  forward : soft_shrink (Tensor x, float lambda) -> Tensor(out)
-  args : (Tensor x, Tensor out_grad, float lambda)
-  output : Tensor(x_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param : [x]
-  kernel :
-    func : soft_shrink_grad
-  inplace : (out_grad -> x_grad)
-
 - backward_op : softmax_grad
   forward : softmax (Tensor x, int axis) -> Tensor(out)
   args : (Tensor out, Tensor out_grad, int axis)
@@ -2202,6 +2191,17 @@
     func : softplus_grad
   inplace : (out_grad -> x_grad)

+- backward_op : softshrink_grad
+  forward : softshrink (Tensor x, float threshold) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, float threshold)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : soft_shrink_grad
+  inplace : (out_grad -> x_grad)
+
 - backward_op : softsign_grad
   forward : softsign (Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
@@ -2293,14 +2293,14 @@
     func : squared_l2_norm_grad

 - backward_op : squeeze_double_grad
-  forward : squeeze_grad(Tensor xshape, Tensor grad_out, IntArray axes) -> Tensor(grad_x)
-  args : (Tensor grad_x_grad, IntArray axes)
+  forward : squeeze_grad(Tensor xshape, Tensor grad_out, IntArray axis) -> Tensor(grad_x)
+  args : (Tensor grad_x_grad, IntArray axis)
   output : Tensor(grad_out_grad)
-  invoke: squeeze(grad_x_grad, axes)
+  invoke: squeeze(grad_x_grad, axis)

 - backward_op : squeeze_grad
-  forward : squeeze(Tensor x, IntArray axes) -> Tensor(out), Tensor(xshape)
-  args : (Tensor xshape, Tensor out_grad, IntArray axes)
+  forward : squeeze(Tensor x, IntArray axis) -> Tensor(out), Tensor(xshape)
+  args : (Tensor xshape, Tensor out_grad, IntArray axis)
   output : Tensor(x_grad)
   infer_meta :
     func : KernelWithXShapeInferMeta
@@ -2360,14 +2360,14 @@
   inplace : (out_grad -> x_grad)

 - backward_op : sum_double_grad
-  forward : sum_grad (Tensor x, Tensor grad_out, IntArray dims, bool keep_dim, bool reduce_all=false) -> Tensor(grad_x)
-  args : (Tensor grad_x_grad, IntArray dims={}, bool keep_dim=false)
+  forward : sum_grad (Tensor x, Tensor grad_out, IntArray axis, bool keepdim, bool reduce_all=false) -> Tensor(grad_x)
+  args : (Tensor grad_x_grad, IntArray axis={}, bool keepdim=false)
   output : Tensor(grad_out_grad)
-  invoke : sum(grad_x_grad, dims, grad_x_grad.dtype(), keep_dim)
+  invoke : sum(grad_x_grad, axis, grad_x_grad.dtype(), keepdim)

 - backward_op : sum_grad
-  forward : sum (Tensor x, IntArray dims={}, DataType out_dtype=DataType::UNDEFINED, bool keep_dim=false) -> Tensor(out)
-  args : (Tensor x, Tensor out_grad, IntArray dims, bool keep_dim, bool reduce_all=false)
+  forward : sum (Tensor x, IntArray axis={}, DataType dtype=DataType::UNDEFINED, bool keepdim=false) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, IntArray axis, bool keepdim, bool reduce_all=false)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
@@ -2378,8 +2378,8 @@
   backward : sum_double_grad

 - backward_op : svd_grad
-  forward : svd (Tensor x, bool full) -> Tensor(u), Tensor(s), Tensor(vh)
-  args : (Tensor x, Tensor u, Tensor vh, Tensor s, Tensor u_grad, Tensor vh_grad, Tensor s_grad, bool full)
+  forward : svd (Tensor x, bool full_matrices) -> Tensor(u), Tensor(s), Tensor(vh)
+  args : (Tensor x, Tensor u, Tensor vh, Tensor s, Tensor u_grad, Tensor vh_grad, Tensor s_grad, bool full_matrices)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
@@ -2412,12 +2412,12 @@
   optional : reserve_space

 - backward_op : take_along_axis_grad
-  forward : take_along_axis (Tensor x, Tensor index, int axis) -> Tensor(out)
-  args : (Tensor x, Tensor index, Tensor out_grad, int axis)
-  output : Tensor(x_grad)
+  forward : take_along_axis (Tensor arr, Tensor indices, int axis) -> Tensor(out)
+  args : (Tensor arr, Tensor indices, Tensor out_grad, int axis)
+  output : Tensor(arr_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [x]
+    param : [arr]
   kernel :
     func : take_along_axis_grad
@@ -2517,8 +2517,8 @@
   no_need_buffer : x
   backward : tile_double_grad

-- backward_op : top_k_grad
-  forward : top_k (Tensor x, Scalar k, int axis = -1, bool largest = true, bool sorted = true) -> Tensor(out), Tensor(indices)
+- backward_op : topk_grad
+  forward : topk (Tensor x, Scalar k, int axis = -1, bool largest = true, bool sorted = true) -> Tensor(out), Tensor(indices)
   args : (Tensor x, Tensor indices, Tensor out_grad, Scalar k = -1, int axis = -1, bool largest = true, bool sorted = true)
   output : Tensor(x_grad)
   infer_meta :
@@ -2528,18 +2528,18 @@
     func : top_k_grad

 - backward_op : transpose_double_grad
-  forward : transpose_grad (Tensor grad_out, int[] axis) -> Tensor(grad_x)
-  args : (Tensor grad_x_grad, int[] axis)
+  forward : transpose_grad (Tensor grad_out, int[] perm) -> Tensor(grad_x)
+  args : (Tensor grad_x_grad, int[] perm)
   output : Tensor(grad_out_grad)
-  invoke : transpose(grad_x_grad, axis)
+  invoke : transpose(grad_x_grad, perm)

 - backward_op : transpose_grad
-  forward : transpose (Tensor x, int[] axis) -> Tensor(out)
-  args : (Tensor out_grad, int[] axis)
+  forward : transpose (Tensor x, int[] perm) -> Tensor(out)
+  args : (Tensor out_grad, int[] perm)
   output : Tensor(x_grad)
   infer_meta :
     func : TransposeGradInferMeta
-    param : [out_grad, axis]
+    param : [out_grad, perm]
   kernel :
     func : transpose_grad
   backward : transpose_double_grad
......
@@ -100,9 +100,9 @@
   backward : add_grad

 - op : add_n
-  args : (Tensor[] x)
+  args : (Tensor[] inputs)
   output : Tensor
-  invoke : add_n_impl(x)
+  invoke : add_n_impl(inputs)
   backward : add_n_grad

 - op : addmm
@@ -128,7 +128,7 @@
   backward : affine_grid_grad

 - op : all
-  args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
+  args : (Tensor x, int64_t[] axis={}, bool keepdim=false)
   output : Tensor(out)
   infer_meta :
     func : ReduceInferMeta
@@ -145,7 +145,7 @@
     func : allclose

 - op : amax
-  args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
+  args : (Tensor x, int64_t[] axis={}, bool keepdim=false)
   output : Tensor(out)
   infer_meta :
     func : ReduceInferMeta
@@ -154,7 +154,7 @@
   backward : amax_grad

 - op : amin
-  args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
+  args : (Tensor x, int64_t[] axis={}, bool keepdim=false)
   output : Tensor(out)
   infer_meta :
     func : ReduceInferMeta
@@ -172,7 +172,7 @@
   backward : angle_grad

 - op : any
-  args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
+  args : (Tensor x, int64_t[] axis={}, bool keepdim=false)
   output : Tensor(out)
   infer_meta :
     func : ReduceInferMeta
@@ -438,13 +438,13 @@
   backward : brelu_grad

 - op : cast
-  args : (Tensor x, DataType out_dtype)
+  args : (Tensor x, DataType dtype)
   output : Tensor
   infer_meta :
     func : CastInferMeta
   kernel :
     func : cast
-    param : [x, out_dtype]
+    param : [x, dtype]
     data_type : x
   backward : cast_grad
@@ -517,7 +517,7 @@
     data_type : dtype

 - op : complex
-  args : (Tensor x, Tensor y)
+  args : (Tensor real, Tensor imag)
   output : Tensor
   infer_meta :
     func : ComplexInferMeta
@@ -700,7 +700,7 @@
   backward : det_grad

 - op : diag_embed
-  args : (Tensor x, int offset, int dim1, int dim2)
+  args : (Tensor input, int offset, int dim1, int dim2)
   output : Tensor(out)
   infer_meta :
     func : DiagEmbedInferMeta
@@ -748,7 +748,7 @@
   optional : hypslength, refslength

 - op : eigh
-  args : (Tensor x, str uplo)
+  args : (Tensor x, str UPLO)
   output : Tensor(out_w), Tensor(out_v)
   infer_meta :
     func : EighInferMeta
@@ -896,7 +896,7 @@
   backward : expm1_grad

 - op : exponential_
-  args : (Tensor x, float lambda)
+  args : (Tensor x, float lam)
   output : Tensor(out)
   infer_meta :
     func : UnchangedInferMeta
@@ -1119,7 +1119,7 @@
     func : gelu
   backward : gelu_grad

-- op : generate_proposals_v2
+- op : generate_proposals
   args : (Tensor scores, Tensor bbox_deltas, Tensor im_shape, Tensor anchors, Tensor variances, int pre_nms_top_n, int post_nms_top_n, float nms_thresh, float min_size, float eta, bool pixel_offset=true)
   output : Tensor(rpn_rois), Tensor(rpn_roi_probs), Tensor(rpn_rois_num)
   infer_meta :
@@ -1196,7 +1196,7 @@
     func : gumbel_softmax
   backward : gumbel_softmax_grad

-- op : hard_shrink
+- op : hardshrink
   args : (Tensor x, float threshold)
   output : Tensor
   infer_meta :
@@ -1204,9 +1204,9 @@
     param : [x]
   kernel :
     func : hard_shrink
-  backward : hard_shrink_grad
+  backward : hardshrink_grad

-- op : hard_sigmoid
+- op : hardsigmoid
   args : (Tensor x, float slope, float offset)
   output : Tensor
   infer_meta :
@@ -1214,9 +1214,9 @@
     param : [x]
   kernel :
     func : hard_sigmoid
-  backward : hard_sigmoid_grad
+  backward : hardsigmoid_grad

-- op : hard_swish
+- op : hardswish
   args : (Tensor x, float threshold = 6.0, float scale = 6.0, float offset = 3.0)
   output : Tensor
   infer_meta :
@@ -1224,7 +1224,7 @@
     param : [x]
   kernel :
     func : hard_swish
-  backward : hard_swish_grad
+  backward : hardswish_grad

 - op : hierarchical_sigmoid
   args : (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, int num_classes, bool remote_prefetch, int trainer_id, int64_t[] height_sections, str[] epmap, str[] table_names, bool is_sparse)
@@ -1238,7 +1238,7 @@
   backward : hierarchical_sigmoid_grad

 - op : histogram
-  args : (Tensor x, int64_t bins, int min, int max)
+  args : (Tensor input, int64_t bins, int min, int max)
   output : Tensor(out)
   infer_meta :
     func : HistogramInferMeta
@@ -1294,7 +1294,7 @@
   backward : index_sample_grad

 - op : index_select
-  args : (Tensor x, Tensor index, int dim)
+  args : (Tensor x, Tensor index, int axis)
   output : Tensor(out)
   infer_meta :
     func : IndexSelectInferMeta
@@ -1432,7 +1432,7 @@
   optional : scale, bias

 - op : leaky_relu
-  args : (Tensor x, float alpha)
+  args : (Tensor x, float negative_slope)
   output : Tensor
   infer_meta :
     func : UnchangedInferMeta
@@ -1632,7 +1632,7 @@
   backward : lu_grad

 - op : lu_unpack
-  args : (Tensor x, Tensor pivots, bool unpack_ludata, bool unpack_pivots)
+  args : (Tensor x, Tensor y, bool unpack_ludata, bool unpack_pivots)
   output : Tensor(pmat), Tensor(l), Tensor(u)
   infer_meta :
     func : LUUnpackInferMeta
@@ -1706,7 +1706,7 @@
     func : matrix_rank_tol

 - op : max
-  args : (Tensor x, IntArray dims={}, bool keep_dim=false)
+  args : (Tensor x, IntArray axis={}, bool keepdim=false)
   output : Tensor(out)
   infer_meta :
     func : ReduceIntArrayAxisInferMeta
@@ -1751,7 +1751,7 @@
   backward : maxout_grad

 - op : mean
-  args : (Tensor x, IntArray dims={}, bool keep_dim=false)
+  args : (Tensor x, IntArray axis={}, bool keepdim=false)
   output : Tensor(out)
   infer_meta :
     func : ReduceIntArrayAxisInferMeta
@@ -1808,7 +1808,7 @@
   backward : meshgrid_grad

 - op : min
-  args : (Tensor x, IntArray dims={}, bool keep_dim=false)
+  args : (Tensor x, IntArray axis={}, bool keepdim=false)
   output : Tensor(out)
   infer_meta :
     func : ReduceIntArrayAxisInferMeta
@@ -1882,13 +1882,13 @@
     func : multinomial

 - op : multiplex
-  args : (Tensor[] ins, Tensor ids)
+  args : (Tensor[] inputs, Tensor index)
   output : Tensor
   infer_meta :
     func : MultiplexInferMeta
   kernel :
     func : multiplex
-    data_type : ins
+    data_type : inputs
   backward : multiplex_grad

 - op : multiply
@@ -2028,7 +2028,7 @@
   backward : pool3d_grad

 - op : pow
-  args : (Tensor x, Scalar s)
+  args : (Tensor x, Scalar y)
   output : Tensor(out)
   infer_meta :
     func : UnchangedInferMeta
@@ -2066,15 +2066,15 @@
   backward : psroi_pool_grad

 - op : put_along_axis
-  args : (Tensor x, Tensor index, Tensor value, int axis, str reduce)
+  args : (Tensor arr, Tensor index, Tensor value, int axis, str reduce)
   output : Tensor(out)
   infer_meta :
     func : UnchangedInferMeta
-    param : [x]
+    param : [arr]
   kernel :
     func : put_along_axis
-    data_type : x
-  inplace : (x -> out)
+    data_type : arr
+  inplace : (arr -> out)
   backward : put_along_axis_grad

 - op : qr
@@ -2178,21 +2178,19 @@
   backward : renorm_grad

 - op : repeat_interleave
-  args : (Tensor x, int repeats, int dim)
+  args : (Tensor x, int repeats, int axis)
   output : Tensor(out)
   infer_meta :
     func : RepeatInterleaveInferMeta
-    param : [x,repeats, dim]
   kernel :
     func : repeat_interleave
   backward: repeat_interleave_grad

 - op : repeat_interleave_with_tensor_index
-  args : (Tensor x, Tensor repeats, int dim)
+  args : (Tensor x, Tensor repeats, int axis)
   output : Tensor(out)
   infer_meta :
     func : RepeatInterleaveWithTensorIndexInferMeta
-    param : [x,repeats, dim]
   kernel :
     func : repeat_interleave_with_tensor_index
     data_type : x
@@ -2316,7 +2314,7 @@
   backward : scatter_nd_add_grad

 - op : searchsorted
-  args : (Tensor sorted_sequence, Tensor value, bool out_int32, bool right)
+  args : (Tensor sorted_sequence, Tensor values, bool out_int32, bool right)
   output : Tensor(out)
   infer_meta :
     func : SearchsortedInferMeta
@@ -2371,7 +2369,7 @@
     skip_transform : input

 - op : shard_index
-  args : (Tensor in, int index_num, int nshards, int shard_id, int ignore_value)
+  args : (Tensor input, int index_num, int nshards, int shard_id, int ignore_value)
   output : Tensor(out)
   infer_meta :
     func : ShardIndexInferMeta
@@ -2432,7 +2430,7 @@
     func : sinh
   backward : sinh_grad

-- op : size
+- op : numel
   args : (Tensor x)
   output : Tensor(size)
   infer_meta :
@@ -2460,15 +2458,15 @@
     func : slogdeterminant
   backward : slogdet_grad

-- op : soft_shrink
-  args : (Tensor x, float lambda)
+- op : softshrink
+  args : (Tensor x, float threshold)
   output : Tensor
   infer_meta :
     func : UnchangedInferMeta
     param : [x]
   kernel :
     func : soft_shrink
-  backward : soft_shrink_grad
+  backward : softshrink_grad

 - op : softmax
   args : (Tensor x, int axis)
@@ -2558,7 +2556,7 @@
   backward : squared_l2_norm_grad

 - op : squeeze
-  args : (Tensor x, IntArray axes)
+  args : (Tensor x, IntArray axis)
   output : Tensor(out), Tensor(xshape)
   infer_meta :
     func : SqueezeWithXShapeInferMeta
@@ -2598,7 +2596,7 @@
   backward : subtract_grad

 - op : sum
-  args : (Tensor x, IntArray dims={}, DataType out_dtype=DataType::UNDEFINED, bool keep_dim=false)
+  args : (Tensor x, IntArray axis={}, DataType dtype=DataType::UNDEFINED, bool keepdim=false)
   output : Tensor(out)
   infer_meta :
     func : SumInferMeta
@@ -2608,7 +2606,7 @@
   backward : sum_grad

 - op : svd
-  args : (Tensor x, bool full_metrices)
+  args : (Tensor x, bool full_matrices)
   output : Tensor(u), Tensor(s), Tensor(vh)
   infer_meta :
     func : SvdInferMeta
@@ -2639,14 +2637,14 @@
   inplace : (mean -> mean_out), (variance -> variance_out)

 - op : take_along_axis
-  args : (Tensor x, Tensor index, int axis)
+  args : (Tensor arr, Tensor indices, int axis)
   output : Tensor
   infer_meta :
     func : UnchangedInferMeta
-    param : [index]
+    param : [indices]
   kernel :
     func : take_along_axis
-    data_type : x
+    data_type : arr
   backward : take_along_axis_grad

 - op : tan
@@ -2705,17 +2703,17 @@
     func : tile
   backward : tile_grad

-- op : top_k
+- op : topk
   args : (Tensor x, Scalar k, int axis = -1, bool largest = true, bool sorted = true)
   output : Tensor(out), Tensor(indices)
   infer_meta :
     func : TopKInferMeta
   kernel :
     func : top_k
-  backward : top_k_grad
+  backward : topk_grad

 - op : transpose
-  args : (Tensor x, int[] axis)
+  args : (Tensor x, int[] perm)
   output : Tensor
   infer_meta :
     func : TransposeInferMeta
@@ -2871,13 +2869,13 @@
   backward : unstack_grad

 - op : viterbi_decode
-  args : (Tensor input, Tensor transition, Tensor length, bool include_bos_eos_tag)
+  args : (Tensor potentials, Tensor transition_params, Tensor lengths, bool include_bos_eos_tag)
   output : Tensor(scores), Tensor(path)
   infer_meta :
     func : ViterbiDecodeInferMeta
   kernel :
     func : viterbi_decode
-    data_type : input
+    data_type : potentials

 - op : warpctc
   args : (Tensor logits, Tensor label, Tensor logits_length, Tensor labels_length, int blank, bool norm_by_times)
@@ -2939,8 +2937,8 @@
   invoke : full_like(x, 0, dtype, place)

 - op: broadcast_tensors
-  args: (Tensor[] x)
-  output: Tensor[]{x.size()}
+  args: (Tensor[] input)
+  output: Tensor[]{input.size()}
   infer_meta:
     func: BroadcastTensorsInferMeta
   kernel:
......
@@ -774,7 +774,7 @@
 - op : trunc
   inputs :
-    x : X
+    input : X
   outputs :
     out : Out
......
@@ -192,7 +192,7 @@
   backward : trace_grad

 - op : trunc
-  args : (Tensor x)
+  args : (Tensor input)
   output : Tensor
   infer_meta :
     func : UnchangedInferMeta
......
@@ -12009,7 +12009,7 @@ def size(input):
     """
     if in_dygraph_mode():
-        return _C_ops.size(input)
+        return _C_ops.numel(input)
     if _in_legacy_dygraph():
         return _legacy_C_ops.size(input)
......
@@ -51,6 +51,7 @@ class LayoutAutoTune(unittest.TestCase):
         self.assertEqual(paddle.fluid.core.use_layout_autotune(), True)
         paddle.fluid.core.disable_layout_autotune()
         self.assertEqual(paddle.fluid.core.use_layout_autotune(), False)
+        self.use_autoune()

     def setUp(self):
         self.use_autoune()
......
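The test fix above re-invokes self.use_autoune() after the first case toggles the switch off. For reference, a minimal toggle of the underlying switch, using the core functions the test itself exercises (enable_layout_autotune is assumed to be the matching counterpart exposed by the build):

import paddle

paddle.fluid.core.enable_layout_autotune()  # assumed counterpart of disable_layout_autotune
assert paddle.fluid.core.use_layout_autotune()
paddle.fluid.core.disable_layout_autotune()
assert not paddle.fluid.core.use_layout_autotune()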
@@ -228,7 +228,7 @@ def hardshrink(x, threshold=0.5, name=None):
     """
     if in_dygraph_mode():
-        return _C_ops.hard_shrink(x, threshold)
+        return _C_ops.hardshrink(x, threshold)
     if _in_legacy_dygraph():
         return _legacy_C_ops.hard_shrink(x, 'threshold', threshold)
@@ -336,7 +336,7 @@ def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):
     """
     if in_dygraph_mode():
-        return _C_ops.hard_sigmoid(x, slope, offset)
+        return _C_ops.hardsigmoid(x, slope, offset)
     if _in_legacy_dygraph():
         return _legacy_C_ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)
@@ -393,7 +393,7 @@ def hardswish(x, name=None):
     if _in_legacy_dygraph():
         return _legacy_C_ops.hard_swish(x)
     if in_dygraph_mode():
-        return _C_ops.hard_swish(x, 6, 6, 3)
+        return _C_ops.hardswish(x, 6, 6, 3)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'hardswish')
@@ -1248,7 +1248,7 @@ def softshrink(x, threshold=0.5, name=None):
             threshold))
     if in_dygraph_mode():
-        return _C_ops.soft_shrink(x, threshold)
+        return _C_ops.softshrink(x, threshold)
     if _in_legacy_dygraph():
         return _legacy_C_ops.softshrink(x, 'lambda', threshold)
......
@@ -865,7 +865,7 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None):
     if in_dygraph_mode():
         if axis == None:
             axis = -1
-        out, indices = _C_ops.top_k(x, k, axis, largest, sorted)
+        out, indices = _C_ops.topk(x, k, axis, largest, sorted)
         return out, indices
     if _non_static_mode():
......
@@ -244,7 +244,7 @@ def numel(x, name=None):
     """
     if in_dygraph_mode():
-        return _C_ops.size(x)
+        return _C_ops.numel(x)
     elif _in_legacy_dygraph():
         return _legacy_C_ops.size(x)
......
@@ -1736,7 +1736,7 @@ def generate_proposals(scores,
     assert return_rois_num, "return_rois_num should be True in dygraph mode."
     attrs = (pre_nms_top_n, post_nms_top_n, nms_thresh, min_size, eta,
              pixel_offset)
-    rpn_rois, rpn_roi_probs, rpn_rois_num = _C_ops.generate_proposals_v2(
+    rpn_rois, rpn_roi_probs, rpn_rois_num = _C_ops.generate_proposals(
         scores, bbox_deltas, img_size, anchors, variances, *attrs)
     return rpn_rois, rpn_roi_probs, rpn_rois_num
......
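User-facing behavior is unchanged by these renames; only the final-state _C_ops entry points move to the Python API spelling. A quick sanity check one could run (assumes a paddle build that includes this change):

import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([-1.0, 0.3, 2.5])
F.hardshrink(x, threshold=0.5)  # now dispatches to _C_ops.hardshrink
F.hardsigmoid(x)                # _C_ops.hardsigmoid
F.hardswish(x)                  # _C_ops.hardswish
F.softshrink(x, threshold=0.5)  # _C_ops.softshrink
paddle.numel(x)                 # _C_ops.numel (was _C_ops.size)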