# op_compat.yaml
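# NOTE (editorial sketch, not part of the original file): each entry below
# appears to map a Paddle 2.x op name to its legacy operator name (shown in
# parentheses), rename its inputs/outputs/attrs, and list "extra" attributes
# that exist only on the legacy op definition. A hypothetical entry
# "foo (foo_legacy)" illustrating the full schema used in this file:
#
# - op : foo (foo_legacy)                  # new name (legacy name)
#   backward : foo_grad (foo_legacy_grad)  # grad-op mapping, if any
#   inputs :
#     {x : X, y : Y}                       # new input name : legacy input name
#   outputs :
#     out : Out                            # new output name : legacy output name
#   attrs :
#     axis : dim                           # new attr name : legacy attr name
#   extra :
#     attrs : [bool use_mkldnn = false]    # legacy-only attrs with typed defaults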
- op : abs
  backward : abs_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false]

- op : add (elementwise_add)
  backward : add_grad (elementwise_add_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : acosh
  backward : acosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : addmm
  backward : addmm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : affine_grid
  backward : affine_grid_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : angle
  backward : angle_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false]

- op : asinh
  backward : asinh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : atan2
  inputs :
    {x : X1, y : X2}
  outputs :
    out : Out

- op : atanh
  backward : atanh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : batch_norm
  backward : batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : bernoulli
  inputs :
    x : X
  outputs :
    out : Out

- op : bicubic_interp (bicubic_interp_v2)
  backward : bicubic_interp_grad (bicubic_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : bilinear_interp (bilinear_interp_v2)
  backward : bilinear_interp_grad (bilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : ceil
  backward : ceil_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cholesky
  inputs :
    x : X
  outputs :
    out : Out

- op : cholesky_solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : clip
  backward : clip_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : concat
  backward : concat_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]

- op : conv2d
  backward : conv2d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_fusion
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_transpose
  backward : conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : conv3d
  backward : conv3d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv3d_transpose
  backward : conv3d_transpose_grad
  extra :
    attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : cos
  backward : cos_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cosh
  backward : cosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cross
  inputs :
    {x : X, y : Y}
  attrs :
    axis : dim
  outputs :
    out : Out

- op : data_norm
  backward : data_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : depthwise_conv2d
  backward : depthwise_conv2d_grad
  extra :
    attrs : [bool is_test = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : depthwise_conv2d_transpose
  backward : depthwise_conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : diag (diag_v2)
  backward : diag_grad (diag_v2_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : diagonal
  inputs :
    x : Input
  outputs :
    out : Out

- op : digamma
  inputs :
    x : X
  outputs :
    out : Out

- op : dist
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : divide (elementwise_div)
  backward : divide_grad (elementwise_div_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : dot
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : dropout
  backward : dropout_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : dropout_nd
  backward : dropout_nd_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : elementwise_pow
  backward : elementwise_pow_grad
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : elu
  backward : elu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : erf
  inputs :
    x : X
  outputs :
    out : Out

- op : erfinv
  inputs :
    x : X
  outputs :
    out : Out

- op : exp
  backward : exp_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : expm1
  backward : expm1_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : fft_c2c
  inputs : {x : X}
  outputs : {out : Out}

- op : fft_c2r
  inputs : {x : X}
  outputs : {out : Out}

- op : fft_r2c
  inputs : {x : X}
  outputs : {out : Out}

- op : floor_divide (elementwise_floordiv)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmax (elementwise_fmax)
  backward : fmax_grad (elementwise_fmax_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmin (elementwise_fmin)
  backward : fmin_grad (elementwise_fmin_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : floor
  backward : floor_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : frobenius_norm
  backward : frobenius_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : gelu
  backward : gelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool use_cudnn = false]

- op : grad_add
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : grid_sampler
  backward : grid_sampler_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : gru
  backward : gru_grad
  extra :
    attrs : [bool is_test = false]

- op : heaviside (elementwise_heaviside)
  backward : heaviside_grad (elementwise_heaviside_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : hard_swish
  backward : hard_swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : inplace_abn
  backward : inplace_abn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : layer_norm
  backward : layer_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : leaky_relu
  backward : leaky_relu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : lgamma
  inputs :
    x : X
  outputs :
    out : Out

- op : linear_interp (linear_interp_v2)
  backward : linear_interp_grad (linear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : log
  backward : log_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log10
  backward : log10_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log1p
  backward : log1p_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log2
  backward : log2_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log_softmax
  backward : log_softmax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : logsigmoid
  backward : logsigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : lrn
  backward : lrn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool is_test = false]

- op : matmul (matmul_v2)
  backward : matmul_grad (matmul_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}', 'int[] fused_transpose_Out = {}',
             str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
             'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']

- op : maximum (elementwise_max)
  backward : maximum_grad (elementwise_max_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : minimum (elementwise_min)
  backward : minimum_grad (elementwise_min_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : multiply (elementwise_mul)
  backward : multiply_grad (elementwise_mul_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : mish
  backward : mish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : mv
  inputs :
    {x : X, vec : Vec}
  outputs :
    out : Out

- op : nearest_interp (nearest_interp_v2)
  backward : nearest_interp_grad (nearest_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad2d
  backward : pad2d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad3d
  backward : pad3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : partial_sum
  backward : partial_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : poisson
  inputs :
    x : X
  outputs :
    out : Out

- op : prelu
  backward : prelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : reciprocal
  backward : reciprocal_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : reduce_all
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amax
  backward : reduce_amax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amin
  backward : reduce_amin_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_any
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_max
  backward : reduce_max_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_mean
  backward : reduce_mean_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_min
  backward : reduce_min_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_prod
  backward : reduce_prod_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_sum
  backward : reduce_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : remainder (elementwise_mod)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : relu
  backward : relu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : relu6
  backward : relu6_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : renorm
  backward : renorm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : rnn
  backward : rnn_grad
  extra :
    attrs : [bool is_test = false]

- op : round
  backward : round_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : rsqrt
  backward : rsqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : seed
  extra :
    attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]

- op : shape
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : shuffle_channel
  backward : shuffle_channel_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sigmoid
  backward : sigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : silu
  backward : silu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sin
  backward : sin_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sinh
  backward : sinh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : slice
  backward : slice_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : softmax
  backward : softmax_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : softplus
  backward : softplus_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false, str fuse_activation_type = "", float fuse_activation_alpha = 0.0f,
             float fuse_activation_beta = 0.0f, float fuse_activation_scale = 1.0f]

- op : softsign
  backward : softsign_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : sqrt
  backward : sqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : square
  backward : square_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : squeeze (squeeze2)
  backward : squeeze_grad (squeeze2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : stack
  backward : stack_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : subtract (elementwise_sub)
  backward : subtract_grad (elementwise_sub_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : swish
  backward : swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sync_batch_norm
  backward : sync_batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : tan
  backward : tan_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh
  backward : tanh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh_shrink
  backward : tanh_shrink_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : trace
  inputs :
    x : Input
  outputs :
    out : Out

- op : trilinear_interp (trilinear_interp_v2)
  backward : trilinear_interp_grad (trilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : trunc
  inputs :
    x : X
  outputs :
    out : Out