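# This file maps each op name to its legacy (fluid) op name and declares
# extra, compatibility-only attributes. Below is a sketch of the entry
# schema, inferred from the entries in this file; the field semantics are an
# assumption, since the consuming code generator is not shown here:
#
# - op : name (legacy_name)             # legacy alias given only when names differ
#   backward : name_grad (legacy_grad)  # grad-op mapping, if the op has one
#   inputs :
#     x : X                             # new argument name : legacy argument name
#   outputs :
#     out : Out
#   attrs :
#     axis : dim                        # attribute renames
#   extra :
#     attrs : [bool use_mkldnn = false] # typed defaults, C++-style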
# - op : rnn
#   backward : rnn_grad
#   extra :
#     attrs : [bool is_test = false]

- op : abs
  backward : abs_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : acos
  inputs :
    x : X
  outputs :
    out : Out

- op : acosh
  inputs :
    x : X
  outputs :
    out : Out
  backward : acosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : add (elementwise_add)
  backward : add_grad (elementwise_add_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : addmm
  backward : addmm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : affine_grid
  backward : affine_grid_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : angle
  backward : angle_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false]

- op : argsort
  inputs :
    x : X
  outputs :
    out : Out
    indices : Indices

- op : asin
  inputs :
    x : X
  outputs :
    out : Out

- op : asinh
  backward : asinh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : atan
  inputs :
    x : X
  outputs :
    out : Out

- op : atan2
  inputs :
    {x : X1, y : X2}
  outputs :
    out : Out

- op : atanh
  backward : atanh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : batch_norm
  backward : batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : bernoulli
  inputs :
    x : X
  outputs :
    out : Out

- op : bicubic_interp (bicubic_interp_v2)
  backward : bicubic_interp_grad (bicubic_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : bilinear_interp (bilinear_interp_v2)
  backward : bilinear_interp_grad (bilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : bmm
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : ceil
  backward : ceil_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cholesky
  inputs :
    x : X
  outputs :
    out : Out

- op : cholesky_solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : clip
  backward : clip_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : concat
  backward : concat_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]

- op : conditional_block
  backward : conditional_block_grad
  extra :
    attrs : ['str[] skip_eager_deletion_vars = {}']
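    # Note: array-typed attr declarations such as the one above are
    # single-quoted so YAML does not parse the {} default as a flow mapping.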

- op : conv2d
  backward : conv2d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_fusion
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_transpose
  backward : conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : conv3d
  backward : conv3d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv3d_transpose
  backward : conv3d_transpose_grad
  extra :
    attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : cos
  backward : cos_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cosh
  backward : cosh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cross
  inputs :
    {x : X, y : Y}
  attrs :
    axis : dim
  outputs :
    out : Out

- op : data_norm
  backward : data_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : depthwise_conv2d
  backward : depthwise_conv2d_grad
  extra :
    attrs : [bool is_test = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : depthwise_conv2d_transpose
  backward : depthwise_conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : dequantize_linear
  extra :
    attrs : [float moving_rate = 0.9]

- op : det (determinant)
  backward : det_grad (determinant_grad)
  inputs :
    x : Input
  outputs :
    out : Out

- op : diag (diag_v2)
  backward : diag_grad (diag_v2_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : diagonal
265 266 267 268 269
  inputs :
    x : Input
  outputs :
    out : Out

- op : digamma
271 272 273 274 275
  inputs :
    x : X
  outputs :
    out : Out

- op : dist
277 278 279 280 281
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : distributed_push_sparse
  extra :
    attrs : ['int[] slots = {}']

- op : divide (elementwise_div)
  backward : divide_grad (elementwise_div_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : dot
293 294 295 296 297
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : dropout
  backward : dropout_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : dropout_nd
  backward : dropout_nd_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : elementwise_pow
  backward : elementwise_pow_grad
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : elu
  backward : elu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : embedding (lookup_table_v2)
  backward : embedding_grad (lookup_table_v2_grad)
  extra :
    attrs : [bool is_sparse = false, bool is_distributed = false, bool remote_prefetch = false,
             int trainer_id = 0, int slot = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}',
             'str[] table_names = {}']

- op : erf
327 328 329 330 331
  inputs :
    x : X
  outputs :
    out : Out

- op : erfinv
333 334 335 336 337
  inputs :
    x : X
  outputs :
    out : Out

- op : exp
  backward : exp_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : expand (expand_v2)
  backward : expand_grad (expand_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : expm1
  backward : expm1_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : fake_channel_wise_quantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_channel_wise_quantize_dequantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_dequantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_dequantize_moving_average_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_moving_average_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_range_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fft_c2c
  inputs :
    x : X
  outputs :
    out : Out

- op : fft_c2r
  inputs :
    x : X
  outputs :
    out : Out

- op : fft_r2c
  inputs :
    x : X
  outputs :
    out : Out

- op : flip
  inputs :
    x : X
  outputs :
    out : Out

- op : floor
  backward : floor_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : floor_divide (elementwise_floordiv)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmax (elementwise_fmax)
  backward : fmax_grad (elementwise_fmax_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmin (elementwise_fmin)
  backward : fmin_grad (elementwise_fmin_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : frobenius_norm
  backward : frobenius_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : full (fill_constant)
  extra :
    attrs : [bool use_mkldnn = false]

- op : gather
  backward : gather_grad
  extra :
    attrs : [bool overwrite = true]

- op : gelu
  backward : gelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : grad_add
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : grid_sampler
  backward : grid_sampler_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : gru
  backward : gru_grad
  extra :
    attrs : [bool is_test = false]

- op : hard_swish
  backward : hard_swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : hardshrink (hard_shrink)
  backward : hardshrink_grad (hard_shrink_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : hardsigmoid (hard_sigmoid)
  backward : hardsigmoid_grad (hard_sigmoid_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : heaviside (elementwise_heaviside)
  backward : heaviside_grad (elementwise_heaviside_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : inplace_abn
  backward : inplace_abn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : layer_norm
  backward : layer_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : leaky_relu
  backward : leaky_relu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : lgamma
  inputs :
    x : X
  outputs :
    out : Out

- op : linear_interp (linear_interp_v2)
  backward : linear_interp_grad (linear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : log
  backward : log_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log10
  backward : log10_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log1p
  backward : log1p_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log2
  backward : log2_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log_softmax
  backward : log_softmax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : logit
  inputs :
    x : X
  outputs :
    out : Out

- op : logsigmoid
  backward : logsigmoid_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : lrn
  backward : lrn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool is_test = false]

- op : matmul (matmul_v2)
  backward : matmul_grad (matmul_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}', 'int[] fused_transpose_Out = {}',
             str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
             'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']

- op : matmul_with_flatten (mul)
  backward : matmul_with_flatten_grad (mul_grad)
  extra :
    attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}',
             float scale_out = 1.0f, bool force_fp32_output = false]

- op : maximum (elementwise_max)
  backward : maximum_grad (elementwise_max_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : minimum (elementwise_min)
  backward : minimum_grad (elementwise_min_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : mish
  backward : mish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : multiply (elementwise_mul)
  backward : multiply_grad (elementwise_mul_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : mv
  inputs :
    {x : X, vec : Vec}
  outputs :
    out : Out

- op : nce
  backward : nce_grad
  extra :
    attrs : [int trainer_id = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}',
             'str[] table_names = {}', 'int[] custom_neg_classes = {}']

- op : nearest_interp (nearest_interp_v2)
  backward : nearest_interp_grad (nearest_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad2d
  backward : pad2d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad3d
  backward : pad3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : partial_sum
  backward : partial_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : poisson
  inputs :
    x : X
  outputs :
    out : Out

- op : pool2d
  backward : pool2d_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false,
              str mkldnn_data_type = "float32", bool is_test = false]

- op : pool3d
  backward : pool3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : prelu
  backward : prelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : quantize_linear
  extra :
    attrs : [float moving_rate = 0.9]

- op : reciprocal
  backward : reciprocal_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : reduce_all
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amax
  backward : reduce_amax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amin
  backward : reduce_amin_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_any
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_max
  backward : reduce_max_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_mean
  backward : reduce_mean_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_min
  backward : reduce_min_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_prod
  backward : reduce_prod_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_sum
  backward : reduce_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : relu
  backward : relu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : relu6
  backward : relu6_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : remainder (elementwise_mod)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : renorm
  backward : renorm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : round
  backward : round_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : rsqrt
  backward : rsqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : scale
  extra :
    attrs : [bool use_mkldnn = false]

- op : seed
  extra :
    attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]

- op : send_uv (graph_send_uv)
  backward : send_uv_grad (graph_send_uv_grad)

- op : sequence_softmax
  backward : sequence_softmax_grad
  extra :
    attrs : [str data_format = "AnyLayout"]

- op : shape
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : shuffle_channel
  backward : shuffle_channel_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sigmoid
  backward : sigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : silu
  backward : silu_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sin
  backward : sin_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sinh
  backward : sinh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : slice
  backward : slice_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : softmax
  backward : softmax_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : softplus
  backward : softplus_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false, str fuse_activation_type = "", float fuse_activation_alpha = 0.0f,
             float fuse_activation_beta = 0.0f, float fuse_activation_scale = 1.0f]

- op : softsign
  backward : softsign_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : sqrt
  backward : sqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : square
  backward : square_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : squeeze (squeeze2)
  backward : squeeze_grad (squeeze2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : stack
  backward : stack_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : subtract (elementwise_sub)
  backward : subtract_grad (elementwise_sub_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : swish
  backward : swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sync_batch_norm
  backward : sync_batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : tan
  backward : tan_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh
  backward : tanh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh_shrink
  backward : tanh_shrink_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : trace
  inputs :
    x : Input
  outputs :
    out : Out

- op : transpose (transpose2)
  backward : transpose_grad (transpose2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false,
              str mkldnn_data_type = "float32"]

- op : trilinear_interp (trilinear_interp_v2)
  backward : trilinear_interp_grad (trilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : trunc
  inputs :
    input : X
  outputs :
    out : Out

- op : while
  backward : while_grad
  extra :
    attrs : ['str[] skip_eager_deletion_vars = {}']