# - op : rnn
#   backward : rnn_grad
#   extra :
#     attrs : [bool is_test = false]
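
# Every entry below follows the same schema. As a quick reference, here is a
# minimal sketch of one entry (the "example_op" names are hypothetical and do
# not correspond to a real operator):
#
# - op : example_op (legacy_example_op)   # current op name (legacy program name, if renamed)
#   backward : example_op_grad (legacy_example_op_grad)
#   inputs :
#     {x : X, y : Y}                      # current input name : legacy input name
#   outputs :
#     out : Out                           # current output name : legacy output name
#   attrs :
#     axis : dim                          # current attr name : legacy attr name
#   extra :
#     attrs : [bool use_mkldnn = false]   # extra attrs carried for compatibility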

- op : abs
  backward : abs_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : acosh
  backward : acosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : add (elementwise_add)
  backward : add_grad (elementwise_add_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : addmm
  backward : addmm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : affine_grid
  backward : affine_grid_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : angle
  backward : angle_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : asinh
  backward : asinh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : atan2
  inputs :
    {x : X1, y : X2}
  outputs :
    out : Out

- op : atanh
  backward : atanh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : batch_norm
  backward : batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : bernoulli
  inputs :
    x : X
  outputs :
    out : Out

- op : bicubic_interp (bicubic_interp_v2)
  backward : bicubic_interp_grad (bicubic_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : bilinear_interp (bilinear_interp_v2)
  backward : bilinear_interp_grad (bilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : ceil
  backward : ceil_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cholesky
  inputs :
    x : X
  outputs :
    out : Out

- op : cholesky_solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : clip
  backward : clip_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : concat
  backward : concat_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]

- op : conditional_block
  backward : conditional_block_grad
  extra :
    attrs : ['str[] skip_eager_deletion_vars = {}']

- op : conv2d
  backward : conv2d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_fusion
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_transpose
  backward : conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : conv3d
  backward : conv3d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv3d_transpose
  backward : conv3d_transpose_grad
  extra :
    attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : cos
  backward : cos_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cosh
  backward : cosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cross
  inputs :
    {x : X, y : Y}
  attrs :
    axis : dim
  outputs :
    out : Out

- op : data_norm
  backward : data_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : depthwise_conv2d
  backward : depthwise_conv2d_grad
  extra :
    attrs : [bool is_test = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : depthwise_conv2d_transpose
  backward : depthwise_conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : dequantize_linear
  extra :
    attrs : [float moving_rate = 0.9]

- op : diag (diag_v2)
  backward : diag_grad (diag_v2_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : diagonal
  inputs :
    x : Input
  outputs :
    out : Out

- op : digamma
  inputs :
    x : X
  outputs :
    out : Out

- op : dist
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : distributed_push_sparse
  extra :
    attrs : ['int[] slots = {}']

- op : divide (elementwise_div)
  backward : divide_grad (elementwise_div_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : dot
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : dropout
  backward : dropout_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : dropout_nd
  backward : dropout_nd_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : elementwise_pow
  backward : elementwise_pow_grad
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : elu
  backward : elu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : embedding (lookup_table_v2)
  backward : embedding_grad (lookup_table_v2_grad)
  extra :
    attrs : [bool is_sparse = false, bool is_distributed = false, bool remote_prefetch = false,
             int trainer_id = 0, int slot = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}',
             'str[] table_names = {}']

- op : erf
  inputs :
    x : X
  outputs :
    out : Out

- op : erfinv
  inputs :
    x : X
  outputs :
    out : Out

- op : exp
  backward : exp_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : expand (expand_v2)
  backward : expand_grad (expand_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : expm1
  backward : expm1_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : fake_channel_wise_quantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_channel_wise_quantize_dequantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_dequantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_dequantize_moving_average_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_moving_average_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_range_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fft_c2c
  inputs: {x: X}
  outputs: {out: Out}

- op : fft_c2r
  inputs: {x: X}
  outputs: {out: Out}

- op : fft_r2c
  inputs: {x: X}
  outputs: {out: Out}

- op : flip
  inputs :
    x : X
  outputs :
    out : Out

- op : floor
  backward : floor_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : floor_divide (elementwise_floordiv)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmax (elementwise_fmax)
  backward : fmax_grad (elementwise_fmax_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmin (elementwise_fmin)
  backward : fmin_grad (elementwise_fmin_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : frobenius_norm
  backward : frobenius_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : full (fill_constant)
  extra :
    attrs : [bool use_mkldnn = false]

- op : gather
  backward : gather_grad
  extra :
    attrs : [bool overwrite = true]

- op : gelu
  backward : gelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : grad_add
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : grid_sampler
  backward : grid_sampler_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : gru
  backward : gru_grad
  extra :
    attrs : [bool is_test = false]

- op : hard_swish
  backward : hard_swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : heaviside (elementwise_heaviside)
  backward : heaviside_grad (elementwise_heaviside_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : inplace_abn
  backward : inplace_abn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : layer_norm
  backward : layer_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : leaky_relu
  backward : leaky_relu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : lgamma
  inputs :
    x : X
  outputs :
    out : Out

- op : linear_interp (linear_interp_v2)
  backward : linear_interp_grad (linear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : log
  backward : log_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log10
  backward : log10_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log1p
  backward : log1p_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log2
  backward : log2_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log_softmax
  backward : log_softmax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : logsigmoid
  backward : logsigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : lrn
  backward : lrn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool is_test = false]

- op : matmul (matmul_v2)
  backward : matmul_grad (matmul_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}', 'int[] fused_transpose_Out = {}',
             str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
             'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']

- op : matmul_with_flatten (mul)
  backward : matmul_with_flatten_grad (mul_grad)
  extra :
    attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}',
             float scale_out = 1.0f, bool force_fp32_output = false]

- op : maximum (elementwise_max)
  backward : maximum_grad (elementwise_max_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : minimum (elementwise_min)
  backward : minimum_grad (elementwise_min_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : mish
  backward : mish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : multiply (elementwise_mul)
  backward : multiply_grad (elementwise_mul_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : mv
  inputs :
    {x : X, vec : Vec}
  outputs :
    out : Out

- op : nce
  backward : nce_grad
  extra :
    attrs : [int trainer_id = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}',
             'str[] table_names = {}', 'int[] custom_neg_classes = {}']

- op : nearest_interp (nearest_interp_v2)
  backward : nearest_interp_grad (nearest_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad2d
  backward : pad2d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad3d
  backward : pad3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : partial_sum
  backward : partial_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : poisson
  inputs :
    x : X
  outputs :
    out : Out

- op : pool2d
  backward : pool2d_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false,
              str mkldnn_data_type = "float32", bool is_test = false]

- op : pool3d
  backward : pool3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : prelu
  backward : prelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : quantize_linear
  extra :
    attrs : [float moving_rate = 0.9]

- op : reciprocal
  backward : reciprocal_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : reduce_all
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amax
  backward : reduce_amax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amin
  backward : reduce_amin_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_any
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_max
  backward : reduce_max_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_mean
  backward : reduce_mean_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_min
  backward : reduce_min_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_prod
  backward : reduce_prod_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_sum
  backward : reduce_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : relu
  backward : relu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : relu6
  backward : relu6_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : remainder (elementwise_mod)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : renorm
  backward : renorm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : round
  backward : round_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : rsqrt
  backward : rsqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : scale
  extra :
    attrs : [bool use_mkldnn = false]

- op : seed
  extra :
    attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]

- op : sequence_softmax
  backward : sequence_softmax_grad
  extra :
    attrs : [str data_format = "AnyLayout"]

- op : shape
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : shuffle_channel
  backward : shuffle_channel_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sigmoid
  backward : sigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : silu
  backward : silu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sin
  backward : sin_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sinh
  backward : sinh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : slice
  backward : slice_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : softmax
  backward : softmax_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : softplus
  backward : softplus_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false, str fuse_activation_type = "", float fuse_activation_alpha = 0.0f,
             float fuse_activation_beta = 0.0f, float fuse_activation_scale = 1.0f]

- op : softsign
  backward : softsign_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : sqrt
  backward : sqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : square
  backward : square_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : squeeze (squeeze2)
  backward : squeeze_grad (squeeze2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : stack
  backward : stack_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : subtract (elementwise_sub)
  backward : subtract_grad (elementwise_sub_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : swish
  backward : swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sync_batch_norm
  backward : sync_batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : tan
  backward : tan_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh
  backward : tanh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh_shrink
  backward : tanh_shrink_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : trace
  inputs :
    x : Input
  outputs :
    out : Out

- op : transpose (transpose2)
  backward : transpose_grad (transpose2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false,
              str mkldnn_data_type = "float32"]

- op : trilinear_interp (trilinear_interp_v2)
  backward : trilinear_interp_grad (trilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : trunc
  inputs :
    input : X
  outputs :
    out : Out

- op : while
  backward : while_grad
  extra :
    attrs : ['str[] skip_eager_deletion_vars = {}']