# This file is to support those static ops that differ from their dynamic counterparts.
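#
# Each entry follows the op-definition schema shared by Paddle's ops yaml files;
# roughly (see the op yaml documentation for the authoritative meaning of each key):
#   backward_op    : name of the backward op being defined
#   forward        : signature of the matching forward op
#   args / output  : inputs and outputs of the backward op
#   infer_meta     : shape/dtype inference function; `param` limits which args it receives
#   kernel         : kernel function(s); `param` and `data_type` control the forwarded
#                    args and the dtype used to select the kernel
#   composite      : composite (primitive) expansion of the backward op
#   invoke         : implement the backward by calling an existing op
#   no_need_buffer : inputs whose data is not needed for the backward (only their meta is kept)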

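# assign copies its input unchanged, so its gradient is the identity: x_grad is just
# out_grad. The backward therefore re-invokes the forward assign op on out_grad, and
# `composite` additionally declares the expansion used when composite (prim) mode is enabled.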
- backward_op : assign_grad
  forward : assign (Tensor x) -> Tensor(out)
  args : (Tensor out_grad)
  output : Tensor(x_grad)
  composite : assign_grad(out_grad, x_grad)
  invoke : assign(out_grad)

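# deformable_conv_grad takes the forward inputs plus out_grad and returns gradients for
# x, offset, filter and mask. Attribute defaults are written only on the forward
# signature; the backward is expected to receive the attribute values used in the
# forward call. `data_type : x` selects the kernel's data type from input x.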
- backward_op : deformable_conv_grad
  forward : deformable_conv (Tensor x, Tensor offset, Tensor filter, Tensor mask, int[] strides={1, 1}, int[] paddings={0, 0}, int[] dilations={1, 1}, int deformable_groups=1, int groups=1, int im2col_step=64) -> Tensor(out)
  args : (Tensor x, Tensor offset, Tensor filter, Tensor mask, Tensor out_grad, int[] strides, int[] paddings, int[] dilations, int deformable_groups, int groups, int im2col_step)
  output : Tensor(x_grad), Tensor(offset_grad), Tensor(filter_grad), Tensor(mask_grad)
  infer_meta :
    func : DeformableConvGradInferMeta
  kernel :
    func : deformable_conv_grad
    data_type : x

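# embedding_grad registers several kernels; the {...} annotations appear to list the
# storage kinds of the tensor args (x, weight, out_grad -> weight_grad), and the kernel
# matching the actual tensor kinds (dense tensor vs. selected rows) is chosen at run
# time. `param : [x, weight]` passes only x and weight to EmbeddingGradInferMeta, and
# `no_need_buffer : weight` marks that weight's data (as opposed to its meta, e.g.
# shape/dtype) is not required to compute the gradient, so its buffer may be released.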
- backward_op : embedding_grad
  forward : embedding (Tensor x, Tensor weight, int64_t padding_idx=-1) -> Tensor(out)
  args : (Tensor x, Tensor weight, Tensor out_grad, int64_t padding_idx=-1)
  output : Tensor(weight_grad)
  infer_meta :
    func : EmbeddingGradInferMeta
    param : [x, weight]
  kernel :
    func : embedding_grad {dense, dense, dense -> dense}
           embedding_sparse_grad {dense, dense, dense -> selected_rows}
           sparse_weight_embedding_grad {selected_rows, dense, dense -> dense}
           sparse_weight_embedding_sparse_grad {selected_rows, dense, dense -> selected_rows}
    data_type : out_grad
  no_need_buffer : weight

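# Frobenius norm: out = sqrt(sum(x^2) over axis), so d(out)/d(x) = x / out and the
# backward reduces to x_grad = out_grad * x / out (broadcast back over the reduced
# dims when keepdim is false), which is why both x and out are kept as backward inputs.
# The `param` lists trim the args actually forwarded to UnchangedInferMeta and the
# kernel; in_dtype and out_dtype are not passed on.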
- backward_op : frobenius_norm_grad
  forward : frobenius_norm (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : frobenius_norm_grad
    param : [x, out, out_grad, axis, keepdim, reduce_all]