# - op : rnn
#   backward : rnn_grad
#   extra :
#     attrs : [bool is_test = false]
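
# Entry schema (inferred from the entries below): each entry names a PHI op,
# followed by its legacy operator name in parentheses when the two differ,
# e.g. `add (elementwise_add)`. Optional per-entry fields:
#   backward : the paired backward op, again with its legacy name in parentheses
#   inputs / outputs / attrs : mappings from PHI argument names to legacy names
#   extra : compatibility-only attributes (mostly oneDNN/cuDNN and quantization
#           flags), each declared with its type and default value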

- op : abs
  backward : abs_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : acos
  inputs :
    x : X
  outputs :
    out : Out

- op : acosh
  inputs :
    x : X
  outputs :
    out : Out
  backward : acosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : add (elementwise_add)
  backward : add_grad (elementwise_add_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
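# Note: this extra attr set (use_mkldnn, x/y_data_format, mkldnn_data_type,
# use_quantizer, Scale_x/y/out) is repeated verbatim by the other elementwise
# ops below (divide, elementwise_pow, floor_divide, fmax, fmin, grad_add,
# heaviside, maximum, minimum, multiply, remainder, subtract).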

- op : addmm
  backward : addmm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : affine_grid
  backward : affine_grid_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : angle
  backward : angle_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : asin
  inputs :
    x : X
  outputs :
    out : Out

- op : asinh
  backward : asinh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : atan
  inputs :
    x : X
  outputs :
    out : Out

- op : atan2
  inputs :
    {x : X1, y : X2}
  outputs :
    out : Out

- op : atanh
  backward : atanh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : batch_norm
  backward : batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : bernoulli
  inputs :
    x : X
  outputs :
    out : Out

- op : bicubic_interp (bicubic_interp_v2)
  backward : bicubic_interp_grad (bicubic_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : bilinear_interp (bilinear_interp_v2)
  backward : bilinear_interp_grad (bilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : ceil
  backward : ceil_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cholesky
  inputs :
    x : X
  outputs :
    out : Out

- op : cholesky_solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : clip
  backward : clip_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : concat
  backward : concat_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]

- op : conditional_block
  backward : conditional_block_grad
  extra :
    attrs : ['str[] skip_eager_deletion_vars = {}']

- op : conv2d
  backward : conv2d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
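# Quoting note: attr declarations with list types or list-valued defaults are
# single-quoted (e.g. 'float[] Scale_weights = {1.0f}' above) because the
# characters [ ] { } cannot appear in an unquoted scalar inside a YAML flow
# sequence.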

- op : conv2d_fusion
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_transpose
  backward : conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : conv3d
  backward : conv3d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv3d_transpose
  backward : conv3d_transpose_grad
  extra :
    attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : cos
  backward : cos_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cosh
  backward : cosh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cross
  inputs :
    {x : X, y : Y}
  attrs :
    axis : dim
  outputs :
    out : Out
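# Note: an `attrs` block (as in `cross` above) renames attributes, with the
# PHI attribute name (`axis`) on the left and the legacy name (`dim`) on the right.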

- op : data_norm
  backward : data_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : depthwise_conv2d
  backward : depthwise_conv2d_grad
  extra :
    attrs : [bool is_test = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : depthwise_conv2d_transpose
  backward : depthwise_conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : dequantize_linear
  extra :
    attrs : [float moving_rate = 0.9]

- op : diag (diag_v2)
  backward : diag_grad (diag_v2_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : diagonal
  inputs :
    x : Input
  outputs :
    out : Out

- op : digamma
  inputs :
    x : X
  outputs :
    out : Out

- op : dist
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : distributed_push_sparse
  extra :
    attrs : ['int[] slots = {}']

- op : divide (elementwise_div)
  backward : divide_grad (elementwise_div_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : dot
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : dropout
  backward : dropout_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : dropout_nd
  backward : dropout_nd_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : elementwise_pow
  backward : elementwise_pow_grad
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : elu
  backward : elu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : embedding (lookup_table_v2)
  backward : embedding_grad (lookup_table_v2_grad)
  extra :
    attrs : [bool is_sparse = false, bool is_distributed = false, bool remote_prefetch = false,
             int trainer_id = 0, int slot = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}',
             'str[] table_names = {}']

- op : erf
  inputs :
    x : X
  outputs :
    out : Out

- op : erfinv
  inputs :
    x : X
  outputs :
    out : Out

- op : exp
  backward : exp_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : expand (expand_v2)
  backward : expand_grad (expand_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : expm1
  backward : expm1_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : fake_channel_wise_quantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_channel_wise_quantize_dequantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_dequantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_dequantize_moving_average_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_moving_average_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_range_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fft_c2c
  inputs :
    x : X
  outputs :
    out : Out

- op : fft_c2r
  inputs :
    x : X
  outputs :
    out : Out

- op : fft_r2c
  inputs :
    x : X
  outputs :
    out : Out

- op : flip
  inputs :
    x : X
  outputs :
    out : Out

- op : floor
  backward : floor_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : floor_divide (elementwise_floordiv)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmax (elementwise_fmax)
  backward : fmax_grad (elementwise_fmax_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmin (elementwise_fmin)
  backward : fmin_grad (elementwise_fmin_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : frobenius_norm
  backward : frobenius_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : full (fill_constant)
  extra :
    attrs : [bool use_mkldnn = false]

- op : gather
  backward : gather_grad
  extra :
    attrs : [bool overwrite = true]

- op : gelu
  backward : gelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : grad_add
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : grid_sampler
  backward : grid_sampler_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : gru
  backward : gru_grad
  extra :
    attrs : [bool is_test = false]

- op : hard_swish
  backward : hard_swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : heaviside (elementwise_heaviside)
  backward : heaviside_grad (elementwise_heaviside_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : inplace_abn
  backward : inplace_abn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : layer_norm
  backward : layer_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : leaky_relu
  backward : leaky_relu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : lgamma
  inputs :
    x : X
  outputs :
    out : Out

- op : linear_interp (linear_interp_v2)
  backward : linear_interp_grad (linear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : log
  backward : log_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log10
  backward : log10_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log1p
  backward : log1p_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log2
  backward : log2_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log_softmax
  backward : log_softmax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : logsigmoid
  backward : logsigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : lrn
  backward : lrn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool is_test = false]

- op : matmul (matmul_v2)
  backward : matmul_grad (matmul_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}', 'int[] fused_transpose_Out = {}',
             str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
             'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']

- op : matmul_with_flatten (mul)
  backward : matmul_with_flatten_grad (mul_grad)
  extra :
    attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}',
             float scale_out = 1.0f, bool force_fp32_output = false]

- op : maximum (elementwise_max)
  backward : maximum_grad (elementwise_max_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : minimum (elementwise_min)
  backward : minimum_grad (elementwise_min_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : mish
  backward : mish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : multiply (elementwise_mul)
  backward : multiply_grad (elementwise_mul_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : mv
  inputs :
    {x : X, vec : Vec}
  outputs :
    out : Out

- op : nce
  backward : nce_grad
  extra :
    attrs : [int trainer_id = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}',
             'str[] table_names = {}', 'int[] custom_neg_classes = {}']

- op : nearest_interp (nearest_interp_v2)
  backward : nearest_interp_grad (nearest_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad2d
  backward : pad2d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad3d
  backward : pad3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : partial_sum
  backward : partial_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : poisson
  inputs :
    x : X
  outputs :
    out : Out

- op : pool2d
  backward : pool2d_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false,
              str mkldnn_data_type = "float32", bool is_test = false]

- op : pool3d
  backward : pool3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : prelu
  backward : prelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : quantize_linear
  extra :
    attrs : [float moving_rate = 0.9]

- op : reciprocal
  backward : reciprocal_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : reduce_all
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amax
  backward : reduce_amax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amin
  backward : reduce_amin_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_any
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_max
  backward : reduce_max_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_mean
  backward : reduce_mean_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_min
  backward : reduce_min_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_prod
  backward : reduce_prod_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_sum
  backward : reduce_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : relu
  backward : relu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : relu6
  backward : relu6_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : remainder (elementwise_mod)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : renorm
  backward : renorm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : round
  backward : round_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : rsqrt
  backward : rsqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : scale
  extra :
    attrs : [bool use_mkldnn = false]

- op : seed
  extra :
    attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]

- op : sequence_softmax
  backward : sequence_softmax_grad
  extra :
    attrs : [str data_format = "AnyLayout"]

- op : shape
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : shuffle_channel
  backward : shuffle_channel_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sigmoid
  backward : sigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : silu
  backward : silu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sin
  backward : sin_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sinh
  backward : sinh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : slice
  backward : slice_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : softmax
  backward : softmax_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : softplus
  backward : softplus_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false, str fuse_activation_type = "", float fuse_activation_alpha = 0.0f,
             float fuse_activation_beta = 0.0f, float fuse_activation_scale = 1.0f]

- op : softsign
  backward : softsign_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : sqrt
  backward : sqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : square
  backward : square_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : squeeze (squeeze2)
  backward : squeeze_grad (squeeze2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : stack
  backward : stack_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : subtract (elementwise_sub)
  backward : subtract_grad (elementwise_sub_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : swish
  backward : swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sync_batch_norm
  backward : sync_batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : tan
  backward : tan_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh
  backward : tanh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh_shrink
  backward : tanh_shrink_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : trace
  inputs :
    x : Input
  outputs :
    out : Out

- op : transpose (transpose2)
  backward : transpose_grad (transpose2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false,
              str mkldnn_data_type = "float32"]

- op : trilinear_interp (trilinear_interp_v2)
  backward : trilinear_interp_grad (trilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : trunc
  inputs :
    input : X
  outputs :
    out : Out

- op : while
  backward : while_grad
  extra :
    attrs : ['str[] skip_eager_deletion_vars = {}']