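# Op compatibility map. Each entry describes one operator:
#   op       : op name (legacy op name in parentheses where the two differ)
#   backward : the corresponding backward (grad) op, if any
#   inputs / outputs / attrs : mappings from API argument names to legacy op argument names
#   extra    : extra attributes carried for backend-specific options (e.g. use_mkldnn, use_cudnn)
# (Descriptive header inferred from the entries below.)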
- op : abs
  backward : abs_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false]

- op : acosh
  backward : acosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : addmm
  backward : addmm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : affine_grid
  backward : affine_grid_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : angle
  backward : angle_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false]

- op : asinh
  backward : asinh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : atan2
  inputs :
    {x : X1, y : X2}
  outputs :
    out : Out

- op : atanh
  backward : atanh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : batch_norm
  backward : batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : bernoulli
  inputs :
    x : X
  outputs :
    out : Out

- op : bicubic_interp (bicubic_interp_v2)
  backward : bicubic_interp_grad (bicubic_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : bilinear_interp (bilinear_interp_v2)
  backward : bilinear_interp_grad (bilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : ceil
  backward : ceil_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cholesky
  inputs :
    x : X
  outputs :
    out : Out

- op : cholesky_solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : clip
  backward : clip_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : concat
  backward : concat_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]

- op : conv2d
  backward : conv2d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_fusion
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_transpose
  backward : conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : conv3d
  backward : conv3d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv3d_transpose
  backward : conv3d_transpose_grad
  extra :
    attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : cos
  backward : cos_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cosh
  backward : cosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cross
  inputs :
    {x : X, y : Y}
  attrs :
    axis : dim
  outputs :
    out : Out

- op : data_norm
  backward : data_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : depthwise_conv2d
  backward : depthwise_conv2d_grad
  extra :
    attrs : [bool is_test = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : depthwise_conv2d_transpose
  backward : depthwise_conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : diag (diag_v2)
  backward : diag_grad (diag_v2_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : diagonal
  inputs :
    x : Input
  outputs :
    out : Out

- op : digamma
  inputs :
    x : X
  outputs :
    out : Out

- op : dist
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : dot
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : dropout
  backward : dropout_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : dropout_nd
  backward : dropout_nd_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : elu
  backward : elu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : erf
  inputs :
    x : X
  outputs :
    out : Out

- op : erfinv
  inputs :
    x : X
  outputs :
    out : Out

- op : exp
  backward : exp_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : expm1
  backward : expm1_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : fft_c2c
  inputs :
    x : X
  outputs :
    out : Out

- op : fft_c2r
  inputs :
    x : X
  outputs :
    out : Out

- op : fft_r2c
  inputs :
    x : X
  outputs :
    out : Out

- op : floor
  backward : floor_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : frobenius_norm
  backward : frobenius_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : gelu
  backward : gelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool use_cudnn = false]

- op : grid_sampler
  backward : grid_sampler_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : gru
  backward : gru_grad
  extra :
    attrs : [bool is_test = false]

- op : hard_swish
  backward : hard_swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : inplace_abn
  backward : inplace_abn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : layer_norm
  backward : layer_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : leaky_relu
  backward : leaky_relu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : lgamma
  inputs :
    x : X
  outputs :
    out : Out

- op : linear_interp (linear_interp_v2)
  backward : linear_interp_grad (linear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : log
  backward : log_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log10
  backward : log10_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log1p
  backward : log1p_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log2
  backward : log2_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log_softmax
  backward : log_softmax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : logsigmoid
  backward : logsigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : lrn
  backward : lrn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool is_test = false]

- op : matmul (matmul_v2)
  backward : matmul_grad (matmul_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}', 'int[] fused_transpose_Out = {}',
             str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
             'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']

- op : mish
  backward : mish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : mv
  inputs :
    {x : X, vec : Vec}
  outputs :
    out : Out

- op : nearest_interp (nearest_interp_v2)
  backward : nearest_interp_grad (nearest_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad2d
  backward : pad2d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad3d
  backward : pad3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : partial_sum
  backward : partial_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : poisson
  inputs :
    x : X
  outputs :
    out : Out

- op : prelu
  backward : prelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : reciprocal
  backward : reciprocal_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : reduce_all
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amax
  backward : reduce_amax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amin
  backward : reduce_amin_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_any
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_max
  backward : reduce_max_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_mean
  backward : reduce_mean_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_min
  backward : reduce_min_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_prod
  backward : reduce_prod_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_sum
  backward : reduce_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : relu
  backward : relu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : relu6
  backward : relu6_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : renorm
  backward : renorm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : rnn
  backward : rnn_grad
  extra :
    attrs : [bool is_test = false]

- op : round
  backward : round_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : rsqrt
  backward : rsqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : seed
  extra :
    attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]

- op : shape
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : shuffle_channel
  backward : shuffle_channel_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sigmoid
  backward : sigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : silu
  backward : silu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sin
  backward : sin_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sinh
  backward : sinh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : slice
  backward : slice_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : softmax
  backward : softmax_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : softplus
  backward : softplus_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false, str fuse_activation_type = "", float fuse_activation_alpha = 0.0f,
             float fuse_activation_beta = 0.0f, float fuse_activation_scale = 1.0f]

- op : softsign
  backward : softsign_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : sqrt
  backward : sqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : square
  backward : square_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : squeeze (squeeze2)
  backward : squeeze_grad (squeeze2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : stack
  backward : stack_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : swish
  backward : swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sync_batch_norm
  backward : sync_batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : tan
  backward : tan_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh
  backward : tanh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh_shrink
  backward : tanh_shrink_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : trace
  inputs :
    x : Input
  outputs :
    out : Out

- op : trilinear_interp (trilinear_interp_v2)
  backward : trilinear_interp_grad (trilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : trunc
  inputs :
    x : X
  outputs :
    out : Out