# Op compatibility mappings: each entry names an op (with its legacy name in
# parentheses where they differ), its backward op, legacy input/output/attr
# name mappings, and "extra" framework-internal attributes.

- op : abs
  backward : abs_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false]

- op : acosh
  backward : acosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : addmm
  backward : addmm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : affine_grid
  backward : affine_grid_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : angle
  backward : angle_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false]

- op : asinh
  backward : asinh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : atan2
  inputs :
    {x : X1, y : X2}
  outputs :
    out : Out

- op : atanh
  backward : atanh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : batch_norm
  backward : batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : bernoulli
  inputs :
    x : X
  outputs :
    out : Out

- op : bicubic_interp (bicubic_interp_v2)
  backward : bicubic_interp_grad (bicubic_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : bilinear_interp (bilinear_interp_v2)
  backward : bilinear_interp_grad (bilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : ceil
  backward : ceil_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : cholesky
  inputs :
    x : X
  outputs :
    out : Out

- op : cholesky_solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : clip
  backward : clip_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : concat
  backward : concat_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]

- op : conv2d
  backward : conv2d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

# Same extra attrs as conv2d, but with use_cudnn = false and no backward op.
- op : conv2d_fusion
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
- op : conv2d_transpose
  backward : conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : conv3d
  backward : conv3d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv3d_transpose
  backward : conv3d_transpose_grad
  extra :
    attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : cos
  backward : cos_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cosh
  backward : cosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cross
  inputs :
    {x : X, y : Y}
  attrs :
    axis : dim
  outputs :
    out : Out

- op : data_norm
  backward : data_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : depthwise_conv2d
  backward : depthwise_conv2d_grad
  extra :
    attrs : [bool is_test = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
- op : depthwise_conv2d_transpose
  backward : depthwise_conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : diag (diag_v2)
  backward : diag_grad (diag_v2_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : diagonal
  inputs :
    x : Input
  outputs :
    out : Out

- op : digamma
  inputs :
    x : X
  outputs :
    out : Out

- op : dist
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : dot
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : dropout
  backward : dropout_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : dropout_nd
  backward : dropout_nd_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]
- op : elu
  backward : elu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : erf
  inputs :
    x : X
  outputs :
    out : Out

- op : erfinv
  inputs :
    x : X
  outputs :
    out : Out

- op : exp
  backward : exp_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : expm1
  backward : expm1_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : fft_c2c
  inputs: {x: X}
  outputs: {out: Out}

- op : fft_c2r
  inputs: {x: X}
  outputs: {out: Out}

- op : fft_r2c
  inputs: {x: X}
  outputs: {out: Out}

- op : floor
  backward : floor_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : frobenius_norm
  backward : frobenius_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : gelu
  backward : gelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool use_cudnn = false]
- op : grid_sampler
  backward : grid_sampler_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : gru
  backward : gru_grad
  extra :
    attrs : [bool is_test = false]

- op : hard_swish
  backward : hard_swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : inplace_abn
  backward : inplace_abn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : layer_norm
  backward : layer_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : leaky_relu
  backward : leaky_relu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : lgamma
  inputs :
    x : X
  outputs :
    out : Out

- op : linear_interp (linear_interp_v2)
  backward : linear_interp_grad (linear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : log
  backward : log_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : log10
  backward : log10_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log1p
  backward : log1p_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log2
  backward : log2_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log_softmax
  backward : log_softmax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : logsigmoid
  backward : logsigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : lrn
  backward : lrn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool is_test = false]

- op : matmul (matmul_v2)
  backward : matmul_grad (matmul_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}', 'int[] fused_transpose_Out = {}',
             str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
             'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']

- op : mish
  backward : mish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : mv
  inputs :
    {x : X, vec : Vec}
  outputs :
    out : Out
- op : nearest_interp (nearest_interp_v2)
  backward : nearest_interp_grad (nearest_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad2d
  backward : pad2d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad3d
  backward : pad3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : partial_sum
  backward : partial_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : poisson
  inputs :
    x : X
  outputs :
    out : Out

- op : prelu
  backward : prelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : reciprocal
  backward : reciprocal_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : reduce_all
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amax
  backward : reduce_amax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amin
  backward : reduce_amin_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_any
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_max
  backward : reduce_max_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_mean
  backward : reduce_mean_grad
  extra :
    attrs : [bool use_mkldnn = false]
- op : reduce_min
  backward : reduce_min_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_prod
  backward : reduce_prod_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_sum
  backward : reduce_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : relu
  backward : relu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : relu6
  backward : relu6_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : renorm
  backward : renorm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : rnn
  backward : rnn_grad
  extra :
    attrs : [bool is_test = false]

- op : round
  backward : round_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : rsqrt
  backward : rsqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : seed
  extra :
    attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]
- op : shape
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : shuffle_channel
  backward : shuffle_channel_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sigmoid
  backward : sigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : silu
  backward : silu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sin
  backward : sin_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sinh
  backward : sinh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : slice
  backward : slice_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : softmax
  backward : softmax_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : softplus
  backward : softplus_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false, str fuse_activation_type = "", float fuse_activation_alpha = 0.0f,
             float fuse_activation_beta = 0.0f, float fuse_activation_scale = 1.0f]

- op : softsign
  backward : softsign_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : sqrt
  backward : sqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : square
  backward : square_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : squeeze (squeeze2)
  backward : squeeze_grad (squeeze2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : stack
  backward : stack_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : swish
  backward : swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sync_batch_norm
  backward : sync_batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : tan
  backward : tan_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh
  backward : tanh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh_shrink
  backward : tanh_shrink_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : trace
  inputs :
    x : Input
  outputs :
    out : Out

- op : trilinear_interp (trilinear_interp_v2)
  backward : trilinear_interp_grad (trilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : trunc
  inputs :
    x : X
  outputs :
    out : Out