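# Each entry below declares an operator in this file's YAML op-definition schema:
# `args` lists inputs and attributes, `output` the results, `infer_meta` / `kernel`
# the shape-inference and kernel functions, and `optional` the inputs/outputs that
# may be absent.

# fc_xpu: presumably a fused fully-connected op for the XPU backend. `w_max` appears
# to carry the weight's quantization max value, `act_type` / `act_alpha` a fused
# activation, and `bias` is optional. The kernel data type follows input `x`.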
- op : fc_xpu
  args : (Tensor x, Tensor w, Tensor w_max, Tensor bias, int in_num_col_dims, bool transpose_x, float alpha, float beta, int act_type, float act_alpha)
  output : Tensor
  infer_meta :
    func : FcXPUInferMeta
  kernel :
    func : fc_xpu
    data_type : x
  optional : bias

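# generate_sequence_xpu: XPU helper op. Unlike the other entries, the kernel's data
# type is taken from the `dtype` attribute (`data_type : dtype`) rather than from an
# input tensor.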
- op : generate_sequence_xpu
  args : (Tensor x, DataType dtype)
  output : Tensor
  infer_meta :
    func : GenerateSequenceXPUInferMeta
  kernel :
    func : generate_sequence_xpu
    data_type : dtype

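# multi_encoder_xpu: presumably a fused multi-layer transformer encoder for XPU. The
# Tensor[] inputs hold per-layer FC and LayerNorm parameters; `mask` (the attention
# mask) and the fp16 side outputs `x_fp16` / `out_fp16` are optional.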
- op : multi_encoder_xpu
  args : (Tensor x, Tensor[] fc_weight, Tensor[] fc_weight_max, Tensor[] fc_bias, Tensor[] ln_scale, Tensor[] ln_bias, Tensor mask, int layer_num, bool norm_before, int hidden_dim, int head_num, int size_per_head, int ffn_hidden_dim_scale, int act_type, int relative_type, int slice_idx)
  output : Tensor(out), Tensor(x_fp16), Tensor(out_fp16)
  infer_meta :
    func : MultiEncoderXPUInferMeta
  kernel :
    func : multi_encoder_xpu
    data_type : x
  optional : mask, x_fp16, out_fp16

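# share_buffer: appears to share each input tensor's underlying buffer with the
# matching output; both output lists are sized `x.size()`, and `share_dims_and_dtype`
# presumably controls whether dims/dtype are shared as well.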
- op : share_buffer
  args : (Tensor[] x, bool[] share_dims_and_dtype={})
  output : Tensor[](out){x.size()}, Tensor[](xout){x.size()}
  infer_meta :
    func : ShareBufferInferMeta
  kernel :
    func : share_buffer