Commit 27d40f10 authored by phlrain

add

Parent 12fd2fd9
@@ -467,28 +467,150 @@
# argsort; kernel why input?
- api : argsort
args : (Tensor x, int axis, bool descending)
output : Tensor(out), Tensor(indices)
infer_meta :
func : XXXXInferMeta
kernel :
func : argsort
# auc; kernel why input?
- api : auc
args : (Tensor x, Tensor label, Tensor stat_pos, Tensor stat_neg, string curve, int num_thresholds, int slide_steps)
output : Tensor(auc), Tensor(stat_pos_out), Tensor(stat_neg_out)
infer_meta :
func : AucInferMeta
kernel :
func : auc
# batch_norm
- api : batch_norm
args : (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, string data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu)
output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
infer_meta :
func : XXXXInferMeta
kernel :
func : batch_norm
backward : batch_norm_grad
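For orientation, a minimal Python sketch of the user-level call a batch_norm spec like this is expected to back, assuming the usual `paddle.nn.functional.batch_norm` API; the shapes are made up and are not part of this YAML:

import paddle
import paddle.nn.functional as F

x = paddle.rand([8, 16, 32, 32])      # NCHW input
running_mean = paddle.zeros([16])     # corresponds to mean / mean_out
running_var = paddle.ones([16])       # corresponds to variance / variance_out
scale = paddle.ones([16])
bias = paddle.zeros([16])

out = F.batch_norm(x, running_mean, running_var, scale, bias,
                   training=True, momentum=0.9, epsilon=1e-5)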
# bernoulli
# bilinear_tensor_product
# bincount
- api : bernoulli
args : (Tensor x)
output : Tensor
infer_meta :
func : BernoulliInferMeta
kernel :
func : bernoulli
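A usage sketch for the bernoulli entry, assuming the standard `paddle.bernoulli` Python API (illustrative values only):

import paddle

p = paddle.to_tensor([[0.1, 0.5, 0.9]])   # per-element probabilities
samples = paddle.bernoulli(p)             # 0/1 samples with P(sample == 1) = p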
# bilinear_tensor_product ?? optional
- api : bilinear_tensor_product
args : (Tensor x, Tensor y, Tensor weight, Tensor bias)
output : Tensor
infer_meta :
func : BilinearTensorProductInferMeta
kernel :
func : bilinear_tensor_product
backward : bilinear_tensor_product_grad
# bincount ?? optional
- api : bincount
args : (Tensor x, Tensor weight, int minlength)
output : Tensor
infer_meta :
func : BincountInferMeta
kernel :
func : bincount
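A usage sketch for the bincount entry, assuming the standard `paddle.bincount` API; the optional `weight` input in the spec is taken to map to the `weights` keyword here (assumed):

import paddle

x = paddle.to_tensor([0, 1, 1, 3])
w = paddle.to_tensor([0.5, 0.5, 1.0, 2.0])

paddle.bincount(x)                 # [1, 2, 0, 1]
paddle.bincount(x, weights=w)      # [0.5, 1.5, 0.0, 2.0]
paddle.bincount(x, minlength=6)    # [1, 2, 0, 1, 0, 0]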
# bitwise_and
- api : bitwise_and
args : (Tensor x, Tensor y)
output : Tensor
infer_meta :
func : BitwiseInferMeta
kernel :
func : bitwise_and
# bitwise_or
- api : bitwise_or
args : (Tensor x, Tensor y)
output : Tensor
infer_meta :
func : BitwiseInferMeta
kernel :
func : bitwise_or
# bitwise_xor
- api : bitwise_xor
args : (Tensor x, Tensor y)
output : Tensor
infer_meta :
func : BitwiseInferMeta
kernel :
func : bitwise_xor
# bitwise_not
- api : bitwise_not
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : bitwise_not
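The four bitwise entries above are element-wise ops on integer/bool tensors; a quick sketch assuming the standard `paddle.bitwise_*` APIs:

import paddle

x = paddle.to_tensor([0b1100, 0b1010])   # [12, 10]
y = paddle.to_tensor([0b1010, 0b0110])   # [10, 6]

paddle.bitwise_and(x, y)   # [8, 2]
paddle.bitwise_or(x, y)    # [14, 14]
paddle.bitwise_xor(x, y)   # [6, 12]
paddle.bitwise_not(x)      # [-13, -11]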
# broadcast_tensors
- api : broadcast_tensors
args : (Tensor[] x)
output : Tensor[]
infer_meta :
func : BroadcastTensorsInferMeta
kernel :
func : broadcast_tensors
backward : broadcast_tensors_grad
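A sketch of the list-in / list-out behaviour declared above (Tensor[] x -> Tensor[]), assuming the standard `paddle.broadcast_tensors` API:

import paddle

a = paddle.rand([1, 3])
b = paddle.rand([2, 1])
outs = paddle.broadcast_tensors([a, b])   # both results have shape [2, 3]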
# cholesky
- api : cholesky
args : (Tensor x, bool upper)
output : Tensor
infer_meta :
func : CholeskyInferMeta
kernel :
func : cholesky
backward : cholesky_grad
# cholesky_solve
- api : cholesky_solve
args : (Tensor x, Tensor y, bool upper)
output : Tensor
infer_meta :
func : CholeskySolveInferMeta
kernel :
func : cholesky_solve
backward : cholesky_solve_grad
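The cholesky and cholesky_solve entries form the usual factor-then-solve pair; a sketch assuming the standard `paddle.linalg` APIs, with an example matrix chosen to be symmetric positive definite:

import paddle

a = paddle.to_tensor([[4.0, 2.0], [2.0, 3.0]])
l = paddle.linalg.cholesky(a, upper=False)            # a = l @ l.T

b = paddle.to_tensor([[1.0], [2.0]])
x = paddle.linalg.cholesky_solve(b, l, upper=False)   # solves a @ x = b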
# conv2d
# copy
# cumsum
- api : cumsum
args : (Tensor x, int axis, bool flatten, bool exclusive, bool reverse)
output : Tensor
infer_meta :
func : CumsumInferMeta
kernel :
func : cumsum
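A sketch for the cumsum entry, assuming the standard `paddle.cumsum` API; the `flatten` attribute above is taken to correspond to calling it without an axis (assumed mapping):

import paddle

x = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32')

paddle.cumsum(x)           # flattened: [1, 3, 6, 10, 15, 21]
paddle.cumsum(x, axis=0)   # [[1, 2, 3], [5, 7, 9]]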
# depthwise_conv2d
# dropout ?? optional, intermediate
- api : dropout
args : (Tensor x, Tensor seed_tensor, float p, bool is_test, string mode, int seed, bool fix_seed)
output : Tensor(out), Tensor(mask)
infer_meta :
func : DropoutInferMeta
kernel :
func : dropout
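A sketch for the dropout entry, assuming the standard `paddle.nn.functional.dropout` API; `is_test` in the spec roughly corresponds to `training=False` here (assumed), and the mask output is an intermediate tensor not returned to the user:

import paddle
import paddle.nn.functional as F

x = paddle.rand([2, 3])
y_train = F.dropout(x, p=0.5, training=True, mode='upscale_in_train')
y_eval = F.dropout(x, p=0.5, training=False)   # identity in eval mode with upscale_in_train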
# embedding
# erf
# erfinv
@@ -291,3 +291,64 @@
param : [x]
kernel :
func : argsort_grad
- backward_api : batch_norm_grad
forward : batch_norm (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, string data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
args : (Tensor x, Tensor scale, Tensor bias, Tensor saved_mean, Tensor saved_variance, Tensor out_grad, float momentum, float epsilon, string data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu)
output : Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad)
infer_meta :
func : GeneralTernaryGradInferMeta
param : [x, scale, bias]
kernel :
func : batch_norm_grad
- backward_api : bilinear_tensor_product_grad
forward : bilinear_tensor_product (Tensor x, Tensor y, Tensor weight, Tensor bias) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor weight, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad), Tensor(weight_grad), Tensor(bias_grad)
infer_meta :
func : FourXXXXInferMeta
param : [x, y, weight, bias]
kernel :
func : bilinear_tensor_product_grad
- backward_api : broadcast_tensors_grad
forward : broadcast_tensors (Tensor[] x) -> Tensor[](out)
args : (Tensor[] out_grad)
output : Tensor[](x_grad)
infer_meta :
func : XXXXInferMeta
param : [out_grad]
kernel :
func : broadcast_tensors_grad
- backward_api : cholesky_grad
forward : cholesky (Tensor x, bool upper) -> Tensor(out)
args : (Tensor out, Tensor out_grad, bool upper)
output : Tensor(x_grad)
infer_meta :
func : XXXXGradInferMeta
param : [out]
kernel :
func : cholesky_grad
- backward_api : cholesky_solve_grad
forward : cholesky_solve (Tensor x, Tensor y, bool upper) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out, Tensor out_grad, bool upper)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [x, y]
kernel :
func : cholesky_solve_grad
- backward_api : dropout_grad
forward : dropout (Tensor x, Tensor seed_tensor, float p, bool is_test, string mode, int seed, bool fix_seed) -> Tensor(out), Tensor(mask)
args : (Tensor mask, Tensor out_grad, float p, bool is_test, string mode)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [out_grad]
kernel :
func : dropout_grad
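To see one of these backward entries exercised end to end, a dygraph sketch assuming the usual `paddle.grad` API: differentiating through cholesky should dispatch to the cholesky_grad kernel declared above.

import paddle

x = paddle.to_tensor([[4.0, 2.0], [2.0, 3.0]], stop_gradient=False)
out = paddle.linalg.cholesky(x)
x_grad, = paddle.grad(out.sum(), [x])   # gradient w.r.t. x via cholesky_grad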