# This file supports those static ops that differ from their dynamic counterparts.

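# Entry schema (a sketch inferred from the definitions below, not an exhaustive
# reference): `- backward_op` names the backward op; `forward` records the
# forward signature it pairs with; `args` / `output` give the backward
# signature; `infer_meta` and `kernel` select the shape-inference and kernel
# functions, where an optional `param` list restricts which args are forwarded
# and `data_type` picks the tensor whose dtype the kernel runs in; `composite`
# names a decomposition into primitive ops; `invoke` reuses an existing op; and
# `no_need_buffer` lists inputs whose data buffer is not needed at backward time.
#
# A minimal, hypothetical template (commented out so the parser ignores it):
#
# - backward_op : example_grad
#   forward : example (Tensor x) -> Tensor(out)
#   args : (Tensor x, Tensor out_grad)
#   output : Tensor(x_grad)
#   infer_meta :
#     func : UnchangedInferMeta
#     param : [x]
#   kernel :
#     func : example_grad
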
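# assign copies its input unchanged, so its gradient is the identity map:
# `invoke : assign(out_grad)` reuses the forward kernel on the incoming
# gradient, while `composite` registers the same rule for the primitive
# decomposition path.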
- backward_op : assign_grad
  forward : assign (Tensor x) -> Tensor(out)
  args : (Tensor out_grad)
  output : Tensor(x_grad)
  composite : assign_grad(out_grad, x_grad)
  invoke : assign(out_grad)

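# deformable_conv_grad produces gradients for all four tensor inputs; the
# backward args repeat the forward attributes (strides, paddings, ...) so the
# grad kernel can reproduce the forward sampling, and `data_type : x` makes
# the kernel run in x's dtype.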
- backward_op : deformable_conv_grad
  forward : deformable_conv (Tensor x, Tensor offset, Tensor filter, Tensor mask, int[] strides={1, 1}, int[] paddings={0, 0}, int[] dilations={1, 1}, int deformable_groups=1, int groups=1, int im2col_step=64) -> Tensor(out)
  args : (Tensor x, Tensor offset, Tensor filter, Tensor mask, Tensor out_grad, int[] strides, int[] paddings, int[] dilations, int deformable_groups, int groups, int im2col_step)
  output : Tensor(x_grad), Tensor(offset_grad), Tensor(filter_grad), Tensor(mask_grad)
  infer_meta :
    func : DeformableConvGradInferMeta
  kernel :
    func : deformable_conv_grad
    data_type : x

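# embedding_grad is one logical op backed by four kernels: the {...}-annotated
# signatures dispatch on whether the lookup table (weight) is dense or
# selected_rows, and whether the produced weight gradient is dense or sparse
# (selected_rows). `no_need_buffer : weight` records that backward needs only
# weight's metadata (shape/dtype), so its data buffer can be freed early.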
- backward_op : embedding_grad
  forward : embedding (Tensor x, Tensor weight, int64_t padding_idx=-1) -> Tensor(out)
  args : (Tensor x, Tensor weight, Tensor out_grad, int64_t padding_idx=-1)
  output : Tensor(weight_grad)
  infer_meta :
    func : EmbeddingGradInferMeta
    param : [x, weight]
  kernel :
    func : embedding_grad {dense, dense, dense -> dense}
           embedding_sparse_grad {dense, dense, dense -> selected_rows}
           sparse_weight_embedding_grad {selected_rows, dense, dense -> dense}
           sparse_weight_embedding_sparse_grad {selected_rows, dense, dense -> selected_rows}
    data_type : out_grad
  no_need_buffer : weight

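# frobenius_norm_grad: x_grad shares x's meta, hence UnchangedInferMeta over
# [x]; the kernel `param` list drops in_dtype / out_dtype, which the grad
# kernel does not take.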
- backward_op : frobenius_norm_grad
  forward : frobenius_norm (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : frobenius_norm_grad
    param : [x, out, out_grad, axis, keepdim, reduce_all]

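# rnn_grad returns tensor lists whose lengths are not fixed in the schema;
# the {pre_state.size()} / {weight_list.size()} suffixes tell codegen to size
# each output vector from the matching forward input list.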
- backward_op : rnn_grad
  forward : rnn (Tensor x, Tensor[] pre_state, Tensor[] weight_list, Tensor sequence_length, float dropout_prob=0.0, bool is_bidirec=false, int input_size=10, int hidden_size=100, int num_layers=1, str mode="RNN_TANH", int seed=0, bool is_test=false) -> Tensor(out), Tensor(dropout_state_out), Tensor[](state), Tensor(reserve)
  args : (Tensor x, Tensor[] pre_state, Tensor[] weight_list, Tensor sequence_length, Tensor out, Tensor dropout_state_out, Tensor reserve, Tensor out_grad, Tensor[] state_grad, float dropout_prob, bool is_bidirec, int input_size, int hidden_size, int num_layers, str mode, int seed, bool is_test)
  output : Tensor(x_grad), Tensor[](pre_state_grad){pre_state.size()}, Tensor[](weight_list_grad){weight_list.size()}
  infer_meta :
    func : RnnGradInferMeta
    param : [x, pre_state, weight_list]
  kernel :
    func : rnn_grad
    data_type : out_grad

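# softmax_grad needs only the forward output: for y = softmax(x),
# dx = (dy - sum(dy * y, axis)) * y. The `composite` entry registers this
# same formula as a primitive-op decomposition.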
- backward_op : softmax_grad
  forward : softmax (Tensor x, int axis=-1) -> Tensor(out)
  args : (Tensor out, Tensor out_grad, int axis)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : softmax_grad
  composite : softmax_grad(out, out_grad, axis, x_grad)