# This file describes the fused C++ forward operators and manages the
# generated code for dynamic (dygraph) mode and static mode.
# Each operator in this file supports an extra configuration item "support_dygraph_mode".
# If an operator sets "support_dygraph_mode : true", it supports dygraph mode.

- op : embedding_with_eltwise_add_xpu
  args : (Tensor[] ids, Tensor[] tables, int64_t padding_idx)
  output : Tensor
  infer_meta :
    func : EmbeddingWithEltwiseAddXPUInferMeta
  kernel :
    func : embedding_with_eltwise_add_xpu
    data_type : tables

- op : fc_xpu
  args : (Tensor x, Tensor x_max, Tensor w, Tensor w_max, Tensor bias, int in_num_col_dims, bool transpose_x, float alpha, float beta, int act_type, float act_alpha)
  output : Tensor(out), Tensor(out_max)
  infer_meta :
    func : FcXPUInferMeta
  kernel :
    func : fc_xpu
    data_type : x
  optional : bias, x_max

- op : fused_dropout_add
  args : (Tensor x, Tensor y, Scalar p, bool is_test, str mode, int seed, bool fix_seed)
  output : Tensor(out), Tensor(seed_offset)
  infer_meta :
    func : FusedDropoutAddInferMeta
  kernel :
    func : fused_dropout_add
    data_type : x
  backward : fused_dropout_add_grad
  support_dygraph_mode : true

- op : fused_linear_param_grad_add
  args : (Tensor x, Tensor dout, Tensor dweight, Tensor dbias, bool multi_precision = true)
  output : Tensor(dweight_out), Tensor(dbias_out)
  infer_meta :
    func : FusedLinearParamGradAddInferMeta
  optional : dweight, dbias
  kernel :
    func : fused_linear_param_grad_add
    data_type : dout
  support_dygraph_mode : true

- op : fused_multi_transformer_xpu
  args : (Tensor x, Tensor[] ln_scale, Tensor[] ln_bias, Tensor[] qkvw, Tensor[] qkvw_max, Tensor[] qkv_bias, Tensor[] out_linear_w, Tensor[] out_linear_wmax, Tensor[] out_linear_bias, Tensor[] ffn_ln_scale, Tensor[] ffn_ln_bias, Tensor[] ffn1_weight, Tensor[] ffn1_weight_max, Tensor[] ffn1_bias, Tensor[] ffn2_weight, Tensor[] ffn2_weight_max, Tensor[] ffn2_bias, Tensor[] cache_kv, Tensor[] pre_caches, Tensor rotary_pos_emb, Tensor time_step, Tensor seq_lengths, Tensor src_mask, bool pre_layer_norm, int rotary_emb_dims, float epsilon, float dropout_rate, bool is_test, str dropout_implementation, str act_method, bool trans_qkvw, int ring_id)
  output : Tensor(out), Tensor[](cache_kv_out){out_linear_w.size()}
  infer_meta :
    func : FusedMultiTransformerXpuInferMeta
  kernel :
    func : fused_multi_transformer_xpu
    data_type : x
  optional : cache_kv, pre_caches, rotary_pos_emb, time_step, seq_lengths, src_mask

- op : generate_sequence_xpu
  args : (Tensor x, DataType dtype)
  output : Tensor
  infer_meta :
    func : GenerateSequenceXPUInferMeta
  kernel :
    func : generate_sequence_xpu
    data_type : dtype

- op : multi_encoder_xpu
  args : (Tensor x, Tensor[] fc_weight, Tensor[] fc_weight_max, Tensor[] fc_bias, Tensor[] ln_scale, Tensor[] ln_bias, Tensor mask, int layer_num, bool norm_before, int hidden_dim, int head_num, int size_per_head, int ffn_hidden_dim_scale, int act_type, int relative_type, int slice_idx)
  output : Tensor(out), Tensor(x_fp16), Tensor(out_fp16)
  infer_meta :
    func : MultiEncoderXPUInferMeta
  kernel :
    func : multi_encoder_xpu
    data_type : x
  optional : mask, x_fp16, out_fp16
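
# Illustrative sketch (a comment only, not part of the operator schema):
# because fused_dropout_add above sets "support_dygraph_mode : true", the code
# generator is expected to also emit an eager (dygraph) binding for it.
# Assuming that binding is exposed under paddle._C_ops, the usual home of ops
# generated from these YAML files, a dygraph call would look roughly like the
# following; the positional arguments mirror the "args" entry
# (x, y, p, is_test, mode, seed, fix_seed):
#
#   import paddle
#
#   x = paddle.randn([4, 1024])
#   y = paddle.randn([4, 1024])
#   # p=0.5, is_test=False, mode="upscale_in_train", seed=0, fix_seed=False
#   out, seed_offset = paddle._C_ops.fused_dropout_add(
#       x, y, 0.5, False, "upscale_in_train", 0, False)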