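# api_compat.yaml
#
# Compatibility mapping between the new API definitions and the legacy
# operator definitions. Judging from the entries below, each item may declare:
#   - backward : the name of the corresponding backward (grad) op
#   - inputs / attrs / outputs : how API argument names map to the legacy
#     op's input/attribute/output names (e.g. x -> X, out -> Out)
#   - extra : attrs : extra attributes carried by the legacy op definition
#     (kernel-selection and fusion hints) that are not part of the API itself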
- api : abs
  backward : abs_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false]

- api : addmm
  backward : addmm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : affine_grid
  backward : affine_grid_grad
  extra :
    attrs : [bool use_cudnn = true]

- api : angle
  backward : angle_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false]

- api : atan2
  inputs :
    {x : X1, y : X2}
  outputs :
    out : Out

- api : batch_norm
  backward : batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- api : bernoulli
  inputs :
    x : X
  outputs :
    out : Out

- api : cholesky
  inputs :
    x : X
  outputs :
    out : Out

- api : cholesky_solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- api : clip
  backward : clip_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- api : concat
  backward : concat_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]

- api : conv2d
  backward : conv2d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- api : conv2d_fusion
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- api : conv2d_transpose
  backward : conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- api : conv3d
  backward : conv3d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- api : conv3d_transpose
  backward : conv3d_transpose_grad
  extra :
    attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

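# Note: an `attrs` block (as in the cross entry below) renames an API
# attribute to its legacy op counterpart, e.g. `axis` maps to `dim`.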
- api : cross
  inputs :
    {x : X, y : Y}
  attrs :
    axis : dim
  outputs :
    out : Out

- api : depthwise_conv2d
  backward : depthwise_conv2d_grad
  extra :
    attrs : [bool is_test = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- api : depthwise_conv2d_transpose
  backward : depthwise_conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- api : diag (diag_v2)
  backward : diag_grad (diag_v2_grad)
  inputs :
    x : X
  outputs :
    out : Out

- api : diagonal
  inputs :
    x : Input
  outputs :
    out : Out

- api : digamma
  inputs :
    x : X
  outputs :
    out : Out

- api : dist
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- api : dot
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- api : erf
  inputs :
    x : X
  outputs :
    out : Out

- api : erfinv
  inputs :
    x : X
  outputs :
    out : Out

- api : inplace_abn
  backward : inplace_abn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- api : lgamma
  inputs :
    x : X
  outputs :
    out : Out

- api : mv
  inputs :
    {x : X, vec : Vec}
  outputs :
    out : Out

- api : poisson
  inputs :
    x : X
  outputs :
    out : Out

- api : solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- api : sync_batch_norm
  backward : sync_batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- api : trace
  inputs :
    x : Input
  outputs :
    out : Out

- api : trunc
  inputs :
    x : X
  outputs :
    out : Out

- api : fft_c2c
  inputs :
    x : X
  outputs :
    out : Out

- api : fft_c2r
  inputs :
    x : X
  outputs :
    out : Out

- api : fft_r2c
  inputs :
    x : X
  outputs :
    out : Out