# All the configurations in this file apply only to existing operators,
# which in principle cannot be modified. There is no need to configure
# this file for new operators.
#
# This file is used for two purposes:
# 1. Configure the mapping of parameter names between the operators
#    defined in ops.yaml and the legacy operators defined in fluid.
# 2. Temporarily store the extra parameters from the OpMaker of each
#    operator; these will be removed in the future.

# - op : rnn
#   backward : rnn_grad
#   extra :
#     attrs : [bool is_test = false]
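
# An illustrative sketch of the name-mapping format described in
# purpose 1 above; "my_op" and the names below are hypothetical,
# not a real operator:
# - op : my_op (my_op_legacy)
#   backward : my_op_grad (my_op_legacy_grad)
#   inputs :
#     x : X
#   outputs :
#     out : Out
#   attrs :
#     axis : dim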

- op : abs
  backward : abs_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false]

- op : acos
  inputs :
    x : X
  outputs :
    out : Out

- op : acosh
  inputs :
    x : X
  outputs :
    out : Out
  backward : acosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : add (elementwise_add)
  backward : add_grad (elementwise_add_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : addmm
  backward : addmm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : affine_grid
  backward : affine_grid_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : angle
  backward : angle_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false]

- op : argsort
  inputs :
    x : X
  outputs :
    out : Out
    indices : Indices

- op : as_complex
  inputs :
    x : X
  outputs :
    out : Out

- op : as_real
  inputs :
    x : X
  outputs :
    out : Out

- op : asin
  inputs :
    x : X
  outputs :
    out : Out

- op : asinh
  backward : asinh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : atan
  inputs :
    x : X
  outputs :
    out : Out

- op : atan2
  inputs :
    {x : X1, y : X2}
  outputs :
    out : Out

- op : atanh
  backward : atanh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : batch_norm
  backward : batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : bernoulli
  inputs :
    x : X
  outputs :
    out : Out

- op : bicubic_interp (bicubic_interp_v2)
  backward : bicubic_interp_grad (bicubic_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : bilinear_interp (bilinear_interp_v2)
  backward : bilinear_interp_grad (bilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : bmm
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : ceil
  backward : ceil_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : celu
  backward : celu_grad, celu_double_grad(celu_grad_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : cholesky
  inputs :
    x : X
  outputs :
    out : Out

- op : cholesky_solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : clip
  backward : clip_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : concat
  backward : concat_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]

- op : conditional_block
  backward : conditional_block_grad
  extra :
    attrs : ['str[] skip_eager_deletion_vars = {}']

- op : conv2d
  backward : conv2d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_fusion
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_transpose
  backward : conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : conv3d
  backward : conv3d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv3d_transpose
  backward : conv3d_transpose_grad
  extra :
    attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : cos
  backward : cos_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cosh
  backward : cosh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cross
  inputs :
    {x : X, y : Y}
  attrs :
    axis : dim
  outputs :
    out : Out

- op : data_norm
  backward : data_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : depthwise_conv2d
  backward : depthwise_conv2d_grad
  extra :
    attrs : [bool is_test = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : depthwise_conv2d_transpose
  backward : depthwise_conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : dequantize_linear
  extra :
    attrs : [float moving_rate = 0.9]

- op : det (determinant)
  backward : det_grad (determinant_grad)
  inputs :
    x : Input
  outputs :
    out : Out

- op : diag (diag_v2)
  backward : diag_grad (diag_v2_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : diag_embed
  inputs :
    input : Input
  outputs :
    out : Out

- op : diagonal
  inputs :
    x : Input
  outputs :
    out : Out

- op : digamma
  inputs :
    x : X
  outputs :
    out : Out

- op : dist
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : distributed_push_sparse
  extra :
    attrs : ['int[] slots = {}']

- op : divide (elementwise_div)
  backward : divide_grad (elementwise_div_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : dot
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : dropout
  backward : dropout_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : dropout_nd
  backward : dropout_nd_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : eig
  inputs :
    x : X
  outputs :
    out_w : Eigenvalues
    out_v : Eigenvectors

- op : eigh
  inputs :
    x : X
  outputs :
    out_w : Eigenvalues
    out_v : Eigenvectors

- op : eigvals
  inputs :
    x : X
  outputs :
    out : Out

- op : elementwise_pow
  backward : elementwise_pow_grad
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : elu
  backward : elu_grad, elu_double_grad (elu_grad_grad)
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false]

- op : embedding (lookup_table_v2)
  backward : embedding_grad (lookup_table_v2_grad)
  extra :
    attrs : [bool is_sparse = false, bool is_distributed = false, bool remote_prefetch = false,
             int trainer_id = 0, int slot = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}',
             'str[] table_names = {}']

- op : equal_all
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : erf
  inputs :
    x : X
  outputs :
    out : Out

- op : erfinv
  inputs :
    x : X
  outputs :
    out : Out

- op : exp
  backward : exp_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : expand (expand_v2)
  backward : expand_grad (expand_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : expm1
  backward : expm1_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : fake_channel_wise_quantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_channel_wise_quantize_dequantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_dequantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_dequantize_moving_average_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_moving_average_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_range_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fft_c2c
  inputs: {x: X}
  outputs: {out: Out}

- op : fft_c2r
  inputs: {x: X}
  outputs: {out: Out}

- op : fft_r2c
  inputs: {x: X}
  outputs: {out: Out}

- op : flip
  inputs :
    x : X
  outputs :
    out : Out

- op : floor
  backward : floor_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : floor_divide (elementwise_floordiv)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmax (elementwise_fmax)
  backward : fmax_grad (elementwise_fmax_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmin (elementwise_fmin)
  backward : fmin_grad (elementwise_fmin_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : frobenius_norm
  backward : frobenius_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : full (fill_constant)
  extra :
    attrs : [bool use_mkldnn = false]

- op : gather
  backward : gather_grad
  extra :
    attrs : [bool overwrite = true]

- op : gelu
  backward : gelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : grad_add
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : grid_sampler
  backward : grid_sampler_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : gru
  backward : gru_grad
  extra :
    attrs : [bool is_test = false]

- op : hard_swish
  backward : hard_swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : hardshrink (hard_shrink)
  backward : hardshrink_grad (hard_shrink_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : hardsigmoid (hard_sigmoid)
  backward : hardsigmoid_grad (hard_sigmoid_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : heaviside (elementwise_heaviside)
  backward : heaviside_grad (elementwise_heaviside_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : inplace_abn
  backward : inplace_abn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : layer_norm
  backward : layer_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : leaky_relu
  backward : leaky_relu_grad, leaky_relu_double_grad (leaky_relu_grad_grad)
  inputs :
    x : X
  outputs :
    out : Out
  attrs :
    negative_slope : alpha
  extra :
    attrs : [bool use_mkldnn = false]

- op : lgamma
  inputs :
    x : X
  outputs :
    out : Out

- op : linear_interp (linear_interp_v2)
  backward : linear_interp_grad (linear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : log
  backward : log_grad, log_double_grad (log_grad_grad)
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log10
  backward : log10_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log1p
  backward : log1p_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log2
  backward : log2_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log_softmax
  backward : log_softmax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : logit
  inputs :
    x : X
  outputs :
    out : Out

- op : logsigmoid
  backward : logsigmoid_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : lrn
  backward : lrn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool is_test = false]

- op : matmul (matmul_v2)
  backward : matmul_grad (matmul_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}', 'int[] fused_transpose_Out = {}',
             str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
             'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']

- op : matmul_with_flatten (mul)
  backward : matmul_with_flatten_grad (mul_grad)
  extra :
    attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}',
             float scale_out = 1.0f, bool force_fp32_output = false]

- op : maximum (elementwise_max)
  backward : maximum_grad (elementwise_max_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : minimum (elementwise_min)
  backward : minimum_grad (elementwise_min_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : mish
  backward : mish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : multiply (elementwise_mul)
  backward : multiply_grad (elementwise_mul_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : mv
  inputs :
    {x : X, vec : Vec}
  outputs :
    out : Out

- op : nce
  backward : nce_grad
  extra :
    attrs : [int trainer_id = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}',
             'str[] table_names = {}', 'int[] custom_neg_classes = {}']

- op : nearest_interp (nearest_interp_v2)
  backward : nearest_interp_grad (nearest_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad2d
  backward : pad2d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad3d
  backward : pad3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : partial_sum
  backward : partial_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : poisson
  inputs :
    x : X
  outputs :
    out : Out

- op : pool2d
  backward : pool2d_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false,
              str mkldnn_data_type = "float32", bool is_test = false]

- op : pool3d
  backward : pool3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : prelu
  backward : prelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : quantize_linear
  extra :
    attrs : [float moving_rate = 0.9]

- op : reciprocal
  backward : reciprocal_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : reduce_all
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amax
  backward : reduce_amax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amin
  backward : reduce_amin_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_any
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_max
  backward : reduce_max_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_mean
  backward : reduce_mean_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_min
  backward : reduce_min_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_prod
  backward : reduce_prod_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_sum
  backward : reduce_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : relu
  backward : relu_grad, relu_double_grad (relu_grad_grad)
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : relu6
  backward : relu6_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : remainder (elementwise_mod)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : renorm
  backward : renorm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : round
  backward : round_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : rsqrt
  backward : rsqrt_grad, rsqrt_double_grad (rsqrt_grad_grad)
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : scale
  extra :
    attrs : [bool use_mkldnn = false]

- op : seed
  extra :
    attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]

- op : send_uv (graph_send_uv)
  backward : send_uv_grad (graph_send_uv_grad)

- op : sequence_softmax
  backward : sequence_softmax_grad
  extra :
    attrs : [str data_format = "AnyLayout"]

- op : shape
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : shuffle_channel
  backward : shuffle_channel_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sigmoid
  backward : sigmoid_grad, sigmoid_double_grad (sigmoid_grad_grad), sigmoid_triple_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : silu
  backward : silu_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sin
  backward : sin_grad, sin_double_grad, sin_triple_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sinh
  backward : sinh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : slice
  backward : slice_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : softmax
  backward : softmax_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : softplus
  backward : softplus_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false, str fuse_activation_type = "", float fuse_activation_alpha = 0.0f,
             float fuse_activation_beta = 0.0f, float fuse_activation_scale = 1.0f]

- op : softshrink
  backward : softshrink_grad
  inputs :
    x : X
  outputs :
    out : Out
  attrs :
    threshold : lambda

- op : softsign
  backward : softsign_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : sqrt
  backward : sqrt_grad, sqrt_double_grad (sqrt_grad_grad)
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : square
  backward : square_grad, square_double_grad (square_grad_grad)
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : squeeze (squeeze2)
  backward : squeeze_grad (squeeze2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : stack
  backward : stack_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : subtract (elementwise_sub)
  backward : subtract_grad (elementwise_sub_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : swish
  backward : swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sync_batch_norm
  backward : sync_batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : tan
  backward : tan_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh
  backward : tanh_grad, tanh_double_grad (tanh_grad_grad), tanh_triple_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh_shrink
  backward : tanh_shrink_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : thresholded_relu
  inputs :
    x : X
  outputs :
    out : Out

- op : trace
  inputs :
    x : Input
  outputs :
    out : Out

- op : transpose (transpose2)
  backward : transpose_grad (transpose2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false,
              str mkldnn_data_type = "float32"]

- op : trilinear_interp (trilinear_interp_v2)
  backward : trilinear_interp_grad (trilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : trunc
  inputs :
    input : X
  outputs :
    out : Out

- op : while
  backward : while_grad
  extra :
    attrs : ['str[] skip_eager_deletion_vars = {}']