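# op_compat.yaml
#
# Compatibility mapping between ops and their legacy (fluid) definitions.
# The schema, as read from the entries themselves:
#   * `op : new_name (legacy_name)`: the legacy op name appears in
#     parentheses when it differs from the current one, e.g.
#     `add (elementwise_add)`; `backward` maps the backward op the same way.
#   * `inputs` / `outputs` / `attrs`: rename current argument names to their
#     legacy counterparts, e.g. `x : X` or `axis : dim`.
#   * `extra`: implementation-only attributes (oneDNN/cuDNN switches,
#     quantization scales, workspace limits), declared with C++ types and
#     default values.
#
# Entry format, illustrated by the commented example below: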
# - op : rnn
#   backward : rnn_grad
#   extra :
#     attrs : [bool is_test = false]

- op : abs
  backward : abs_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : acos
  inputs :
    x : X
  outputs :
    out : Out

- op : acosh
  inputs :
    x : X
  outputs :
    out : Out
  backward : acosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

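# The elementwise binary ops in this file (add, divide, elementwise_pow,
# floor_divide, fmax, fmin, grad_add, heaviside, maximum, minimum, multiply,
# remainder, subtract) all share the oneDNN/quantization extra attrs shown
# in this entry.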
- op : add (elementwise_add)
  backward : add_grad (elementwise_add_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : addmm
  backward : addmm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : affine_grid
  backward : affine_grid_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : angle
  backward : angle_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false]

- op : argsort
  inputs :
    x : X
  outputs :
    out : Out
    indices : Indices

- op : asin
  inputs :
    x : X
  outputs :
    out : Out

- op : asinh
  backward : asinh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : atan
  inputs :
    x : X
  outputs :
    out : Out

- op : atan2
  inputs :
    {x : X1, y : X2}
  outputs :
    out : Out

- op : atanh
  backward : atanh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : batch_norm
  backward : batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : bernoulli
  inputs :
    x : X
  outputs :
    out : Out

- op : bicubic_interp (bicubic_interp_v2)
  backward : bicubic_interp_grad (bicubic_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : bilinear_interp (bilinear_interp_v2)
  backward : bilinear_interp_grad (bilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : bmm
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : ceil
  backward : ceil_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cholesky
  inputs :
    x : X
  outputs :
    out : Out

- op : cholesky_solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : clip
  backward : clip_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : concat
  backward : concat_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]

- op : conditional_block
  backward : conditional_block_grad
  extra :
    attrs : ['str[] skip_eager_deletion_vars = {}']

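# Default values in `extra` attrs are not limited to literals; a C++ call
# such as platform::GetDefaultConvWorkspaceSizeLimitMB() below is presumably
# pasted verbatim into the generated registration code.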
- op : conv2d
  backward : conv2d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_fusion
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_transpose
  backward : conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : conv3d
  backward : conv3d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv3d_transpose
  backward : conv3d_transpose_grad
  extra :
    attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : cos
  backward : cos_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cosh
  backward : cosh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

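# `attrs` entries rename attributes just as `inputs`/`outputs` rename
# arguments; in `cross` below, the current `axis` maps to the legacy `dim`.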
- op : cross
  inputs :
    {x : X, y : Y}
  attrs :
    axis : dim
  outputs :
    out : Out

- op : data_norm
  backward : data_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : depthwise_conv2d
  backward : depthwise_conv2d_grad
  extra :
    attrs : [bool is_test = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : depthwise_conv2d_transpose
  backward : depthwise_conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : dequantize_linear
  extra :
    attrs : [float moving_rate = 0.9]

- op : det (determinant)
  backward : det_grad (determinant_grad)
  inputs :
    x : Input
  outputs :
    out : Out

- op : diag (diag_v2)
  backward : diag_grad (diag_v2_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : diagonal
  inputs :
    x : Input
  outputs :
    out : Out

- op : digamma
  inputs :
    x : X
  outputs :
    out : Out

- op : dist
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : distributed_push_sparse
  extra :
    attrs : ['int[] slots = {}']

- op : divide (elementwise_div)
  backward : divide_grad (elementwise_div_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : dot
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : dropout
  backward : dropout_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : dropout_nd
  backward : dropout_nd_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : elementwise_pow
  backward : elementwise_pow_grad
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : elu
  backward : elu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : embedding (lookup_table_v2)
  backward : embedding_grad (lookup_table_v2_grad)
  extra :
    attrs : [bool is_sparse = false, bool is_distributed = false, bool remote_prefetch = false,
             int trainer_id = 0, int slot = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}',
             'str[] table_names = {}']

- op : erf
  inputs :
    x : X
  outputs :
    out : Out

- op : erfinv
  inputs :
    x : X
  outputs :
    out : Out

- op : exp
  backward : exp_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : expand (expand_v2)
  backward : expand_grad (expand_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : expm1
  backward : expm1_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : fake_channel_wise_quantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_channel_wise_quantize_dequantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_dequantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_dequantize_moving_average_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_moving_average_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_range_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fft_c2c
  inputs :
    x : X
  outputs :
    out : Out

- op : fft_c2r
  inputs :
    x : X
  outputs :
    out : Out

- op : fft_r2c
  inputs :
    x : X
  outputs :
    out : Out

- op : flip
  inputs :
    x : X
  outputs :
    out : Out

- op : floor
  backward : floor_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : floor_divide (elementwise_floordiv)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmax (elementwise_fmax)
  backward : fmax_grad (elementwise_fmax_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmin (elementwise_fmin)
  backward : fmin_grad (elementwise_fmin_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : frobenius_norm
  backward : frobenius_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : full (fill_constant)
  extra :
    attrs : [bool use_mkldnn = false]

- op : gather
  backward : gather_grad
  extra :
    attrs : [bool overwrite = true]

- op : gelu
  backward : gelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : grad_add
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : grid_sampler
  backward : grid_sampler_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : gru
  backward : gru_grad
  extra :
    attrs : [bool is_test = false]

- op : hard_swish
  backward : hard_swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : heaviside (elementwise_heaviside)
  backward : heaviside_grad (elementwise_heaviside_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : inplace_abn
  backward : inplace_abn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : layer_norm
  backward : layer_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : leaky_relu
  backward : leaky_relu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : lgamma
  inputs :
    x : X
  outputs :
    out : Out

- op : linear_interp (linear_interp_v2)
  backward : linear_interp_grad (linear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : log
  backward : log_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log10
  backward : log10_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log1p
  backward : log1p_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log2
  backward : log2_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log_softmax
  backward : log_softmax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : logsigmoid
  backward : logsigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : lrn
  backward : lrn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool is_test = false]

- op : matmul (matmul_v2)
  backward : matmul_grad (matmul_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}', 'int[] fused_transpose_Out = {}',
             str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
             'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']

- op : matmul_with_flatten (mul)
  backward : matmul_with_flatten_grad (mul_grad)
  extra :
    attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}',
             float scale_out = 1.0f, bool force_fp32_output = false]

- op : maximum (elementwise_max)
  backward : maximum_grad (elementwise_max_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : minimum (elementwise_min)
  backward : minimum_grad (elementwise_min_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : mish
  backward : mish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : multiply (elementwise_mul)
  backward : multiply_grad (elementwise_mul_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : mv
  inputs :
    {x : X, vec : Vec}
  outputs :
    out : Out

- op : nce
  backward : nce_grad
  extra :
    attrs : [int trainer_id = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}',
             'str[] table_names = {}', 'int[] custom_neg_classes = {}']

- op : nearest_interp (nearest_interp_v2)
  backward : nearest_interp_grad (nearest_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad2d
  backward : pad2d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad3d
  backward : pad3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : partial_sum
  backward : partial_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : poisson
  inputs :
    x : X
  outputs :
    out : Out

- op : pool2d
  backward : pool2d_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false,
              str mkldnn_data_type = "float32", bool is_test = false]

- op : pool3d
  backward : pool3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : prelu
  backward : prelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : quantize_linear
  extra :
    attrs : [float moving_rate = 0.9]

- op : reciprocal
  backward : reciprocal_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : reduce_all
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amax
  backward : reduce_amax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amin
  backward : reduce_amin_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_any
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_max
  backward : reduce_max_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_mean
  backward : reduce_mean_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_min
  backward : reduce_min_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_prod
  backward : reduce_prod_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_sum
  backward : reduce_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : relu
  backward : relu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : relu6
  backward : relu6_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : remainder (elementwise_mod)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : renorm
  backward : renorm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : round
  backward : round_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : rsqrt
  backward : rsqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : scale
  extra :
    attrs : [bool use_mkldnn = false]

- op : seed
  extra :
    attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]

- op : sequence_softmax
  backward : sequence_softmax_grad
  extra :
    attrs : [str data_format = "AnyLayout"]

- op : shape
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : shuffle_channel
  backward : shuffle_channel_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sigmoid
  backward : sigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : silu
  backward : silu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sin
  backward : sin_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sinh
  backward : sinh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : slice
  backward : slice_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : softmax
  backward : softmax_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : softplus
  backward : softplus_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false, str fuse_activation_type = "", float fuse_activation_alpha = 0.0f,
             float fuse_activation_beta = 0.0f, float fuse_activation_scale = 1.0f]

- op : softsign
  backward : softsign_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : sqrt
  backward : sqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : square
  backward : square_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : squeeze (squeeze2)
  backward : squeeze_grad (squeeze2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : stack
  backward : stack_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : subtract (elementwise_sub)
  backward : subtract_grad (elementwise_sub_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : swish
  backward : swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sync_batch_norm
  backward : sync_batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : tan
  backward : tan_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh
  backward : tanh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh_shrink
  backward : tanh_shrink_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : trace
  inputs :
    x : Input
  outputs :
    out : Out

- op : transpose (transpose2)
  backward : transpose_grad (transpose2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false,
              str mkldnn_data_type = "float32"]

- op : trilinear_interp (trilinear_interp_v2)
  backward : trilinear_interp_grad (trilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : trunc
  inputs :
    input : X
  outputs :
    out : Out

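# Like conditional_block above, the control-flow op `while` carries
# `skip_eager_deletion_vars`, which presumably exempts the listed variables
# from eager garbage collection so the backward pass can still read them.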
- op : while
  backward : while_grad
  extra :
    attrs : ['str[] skip_eager_deletion_vars = {}']