# This file is to support those static ops that are different from the dynamic ones.

- backward_op : assign_grad
  forward : assign (Tensor x) -> Tensor(out)
  args : (Tensor out_grad)
  output : Tensor(x_grad)
  composite: assign_grad(out_grad, x_grad)
  invoke : assign(out_grad)

- backward_op : deformable_conv_grad
  forward : deformable_conv (Tensor x, Tensor offset, Tensor filter, Tensor mask, int[] strides={1, 1}, int[] paddings={0, 0}, int[] dilations={1, 1}, int deformable_groups=1, int groups=1, int im2col_step=64) -> Tensor(out)
  args : (Tensor x, Tensor offset, Tensor filter, Tensor mask, Tensor out_grad, int[] strides, int[] paddings, int[] dilations, int deformable_groups, int groups, int im2col_step)
  output : Tensor(x_grad), Tensor(offset_grad), Tensor(filter_grad), Tensor(mask_grad)
  infer_meta :
    func : DeformableConvGradInferMeta
  kernel :
    func : deformable_conv_grad
    data_type : x

- backward_op : embedding_grad
  forward : embedding (Tensor x, Tensor weight, int64_t padding_idx=-1) -> Tensor(out)
  args : (Tensor x, Tensor weight, Tensor out_grad, int64_t padding_idx=-1)
  output : Tensor(weight_grad)
  infer_meta :
    func : EmbeddingGradInferMeta
    param : [x, weight]
  kernel :
    func : embedding_grad {dense, dense, dense -> dense}
           embedding_sparse_grad {dense, dense, dense -> selected_rows}
           sparse_weight_embedding_grad {selected_rows, dense, dense -> dense}
           sparse_weight_embedding_sparse_grad {selected_rows, dense, dense -> selected_rows}
    data_type : out_grad
  no_need_buffer : weight

- backward_op : frobenius_norm_grad
  forward : frobenius_norm (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : frobenius_norm_grad
    param : [x, out, out_grad, axis, keepdim, reduce_all]

- backward_op : rnn_grad
  forward : rnn (Tensor x, Tensor[] pre_state, Tensor[] weight_list, Tensor sequence_length, float dropout_prob=0.0, bool is_bidirec=false, int input_size=10, int hidden_size=100, int num_layers=1, str mode="RNN_TANH", int seed=0, bool is_test=false) -> Tensor(out), Tensor(dropout_state_out), Tensor[](state), Tensor(reserve)
  args : (Tensor x, Tensor[] pre_state, Tensor[] weight_list, Tensor sequence_length, Tensor out, Tensor dropout_state_out, Tensor reserve, Tensor out_grad, Tensor[] state_grad, float dropout_prob, bool is_bidirec, int input_size, int hidden_size, int num_layers, str mode, int seed, bool is_test)
  output : Tensor(x_grad), Tensor[](pre_state_grad){pre_state.size()}, Tensor[](weight_list_grad){weight_list.size()}
  infer_meta :
    func : RnnGradInferMeta
    param : [x, pre_state, weight_list]
  kernel :
    func : rnn_grad
    data_type: out_grad

- backward_op : softmax_grad
  forward : softmax (Tensor x, int axis=-1) -> Tensor(out)
  args : (Tensor out, Tensor out_grad, int axis)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : softmax_grad
  composite : softmax_grad(out, out_grad, axis, x_grad)
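
# Note (illustrative comment, not part of the op schema): the composite rule
# above corresponds to the standard softmax vector-Jacobian product, which
# computes roughly
#   x_grad = out * (out_grad - sum(out * out_grad, along axis, keeping dims))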

- backward_op : tril_triu_grad
  forward : tril_triu (Tensor x, int diagonal = 0, bool lower = false) -> Tensor(out)
  args : (Tensor out_grad, int diagonal, bool lower)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out_grad]
  kernel :
    func : tril_triu_grad

- backward_op : unpool_grad
  forward : unpool (Tensor x, Tensor indices, int[] ksize, str unpooling_type, int[] strides = {1,1}, int[] paddings = {0,0}, IntArray output_size = {0,0}, str data_format="NCHW") -> Tensor(out)
  args : (Tensor x, Tensor indices, Tensor out, Tensor out_grad, int[] ksize, int[] strides, int[] paddings, IntArray output_size, str data_format)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : unpool_grad
    data_type : x
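
# Note (illustrative comment, not part of the op schema): the forward unpool
# scatters x into out at the max-pooling positions given by `indices`, so its
# gradient gathers from out_grad at those same positions, i.e. roughly
#   x_grad[i] = out_grad[indices[i]]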