# op_compat.yaml
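#
# Schema notes (inferred from the entries below):
#   - op : new_name (legacy_name)  -- the parenthesized name is the legacy
#     operator; entries without parentheses keep the same name in both systems.
#   - backward : ...               -- the matching grad op, same naming rule.
#   - inputs / outputs / attrs     -- rename legacy arguments to the new API
#     names (e.g. X -> x, Out -> out).
#   - extra : attrs                -- legacy-only attributes (mostly oneDNN /
#     cuDNN toggles) kept for old kernels rather than the new op signature.
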
- op : abs
  backward : abs_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false]

- op : acosh
  backward : acosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : add (elementwise_add)
  backward : add_grad (elementwise_add_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : addmm
  backward : addmm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : affine_grid
  backward : affine_grid_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : angle
  backward : angle_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false]

- op : asinh
  backward : asinh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

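# Mapping-only entries (no "extra" section) just rename legacy arguments; here
# atan2's legacy inputs X1/X2 surface as x and y in the new API.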
- op : atan2
  inputs :
    {x : X1, y : X2}
  outputs :
    out : Out

- op : atanh
  backward : atanh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : batch_norm
  backward : batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : bernoulli
  inputs :
    x : X
  outputs :
    out : Out

- op : bicubic_interp (bicubic_interp_v2)
  backward : bicubic_interp_grad (bicubic_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : bilinear_interp (bilinear_interp_v2)
  backward : bilinear_interp_grad (bilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : ceil
  backward : ceil_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cholesky
  inputs :
    x : X
  outputs :
    out : Out

- op : cholesky_solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : clip
  backward : clip_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : concat
  backward : concat_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]

- op : conditional_block
  backward : conditional_block_grad
  extra :
    attrs : ['str[] skip_eager_deletion_vars = {}']

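# The conv-family entries below carry the heaviest "extra" sections: oneDNN /
# cuDNN fusion and quantization knobs (fuse_activation, Scale_*, workspace
# size, exhaustive search) that only the legacy kernels read.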
- op : conv2d
  backward : conv2d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_fusion
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_transpose
  backward : conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : conv3d
  backward : conv3d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv3d_transpose
  backward : conv3d_transpose_grad
  extra :
    attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : cos
  backward : cos_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cosh
  backward : cosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cross
  inputs :
    {x : X, y : Y}
  attrs :
    axis : dim
  outputs :
    out : Out

- op : data_norm
  backward : data_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : depthwise_conv2d
  backward : depthwise_conv2d_grad
  extra :
    attrs : [bool is_test = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : depthwise_conv2d_transpose
  backward : depthwise_conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : dequantize_linear
  extra :
    attrs : [float moving_rate = 0.9]

- op : diag (diag_v2)
  backward : diag_grad (diag_v2_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : diagonal
  inputs :
    x : Input
  outputs :
    out : Out

- op : digamma
  inputs :
    x : X
  outputs :
    out : Out

- op : dist
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : divide (elementwise_div)
  backward : divide_grad (elementwise_div_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : dot
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : dropout
  backward : dropout_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : dropout_nd
  backward : dropout_nd_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : elementwise_pow
  backward : elementwise_pow_grad
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : elu
  backward : elu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : embedding (lookup_table_v2)
  backward : embedding_grad (lookup_table_v2_grad)
  extra :
    attrs : [bool is_sparse = false, bool is_distributed = false, bool remote_prefetch = false,
             int trainer_id = 0, int slot = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}',
             'str[] table_names = {}']

- op : erf
  inputs :
    x : X
  outputs :
    out : Out

- op : erfinv
  inputs :
    x : X
  outputs :
    out : Out

- op : exp
  backward : exp_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : expand (expand_v2)
  backward : expand_grad (expand_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : expm1
  backward : expm1_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

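# The fake_*quantize* family below shares a single legacy extra attribute,
# round_type (default 1).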
- op : fake_channel_wise_quantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_channel_wise_quantize_dequantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_dequantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_dequantize_moving_average_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_moving_average_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_range_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fft_c2c
  inputs: {x: X}
  outputs: {out: Out}

- op : fft_c2r
  inputs: {x: X}
  outputs: {out: Out}

- op : fft_r2c
  inputs: {x: X}
  outputs: {out: Out}

- op : floor
  backward : floor_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : floor_divide (elementwise_floordiv)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmax (elementwise_fmax)
  backward : fmax_grad (elementwise_fmax_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmin (elementwise_fmin)
  backward : fmin_grad (elementwise_fmin_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : frobenius_norm
  backward : frobenius_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : full (fill_constant)
  extra :
    attrs : [bool use_mkldnn = false]

- op : gather
  backward : gather_grad
  extra :
    attrs : [bool overwrite = true]

- op : gelu
  backward : gelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool use_cudnn = false]

- op : grad_add
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : grid_sampler
  backward : grid_sampler_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : gru
  backward : gru_grad
  extra :
    attrs : [bool is_test = false]

- op : hard_swish
  backward : hard_swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : heaviside (elementwise_heaviside)
  backward : heaviside_grad (elementwise_heaviside_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : inplace_abn
  backward : inplace_abn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : layer_norm
  backward : layer_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : leaky_relu
  backward : leaky_relu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : lgamma
  inputs :
    x : X
  outputs :
    out : Out

- op : linear_interp (linear_interp_v2)
  backward : linear_interp_grad (linear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : log
  backward : log_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log10
  backward : log10_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log1p
  backward : log1p_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log2
  backward : log2_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log_softmax
  backward : log_softmax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : logsigmoid
  backward : logsigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : lrn
  backward : lrn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool is_test = false]

- op : matmul (matmul_v2)
  backward : matmul_grad (matmul_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}', 'int[] fused_transpose_Out = {}',
             str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
             'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']

- op : matmul_with_flatten (mul)
  backward : matmul_with_flatten_grad (mul_grad)
  extra :
    attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}',
             float scale_out = 1.0f, bool force_fp32_output = false]

- op : maximum (elementwise_max)
  backward : maximum_grad (elementwise_max_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : minimum (elementwise_min)
  backward : minimum_grad (elementwise_min_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : mish
  backward : mish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : multiply (elementwise_mul)
  backward : multiply_grad (elementwise_mul_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : mv
  inputs :
    {x : X, vec : Vec}
  outputs :
    out : Out

- op : nearest_interp (nearest_interp_v2)
  backward : nearest_interp_grad (nearest_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad2d
  backward : pad2d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad3d
  backward : pad3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : partial_sum
  backward : partial_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : poisson
  inputs :
    x : X
  outputs :
    out : Out

- op : pool2d
  backward : pool2d_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false,
              str mkldnn_data_type = "float32", bool is_test = false]

- op : pool3d
  backward : pool3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : prelu
  backward : prelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : quantize_linear
  extra :
    attrs : [float moving_rate = 0.9]

- op : reciprocal
  backward : reciprocal_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

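# The reduce_* family keeps only the oneDNN toggle as a legacy extra.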
- op : reduce_all
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amax
  backward : reduce_amax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amin
  backward : reduce_amin_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_any
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_max
  backward : reduce_max_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_mean
  backward : reduce_mean_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_min
  backward : reduce_min_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_prod
  backward : reduce_prod_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_sum
  backward : reduce_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : relu
  backward : relu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : relu6
  backward : relu6_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : remainder (elementwise_mod)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : renorm
  backward : renorm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : rnn
  backward : rnn_grad
  extra :
    attrs : [bool is_test = false]

- op : round
  backward : round_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : rsqrt
  backward : rsqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : scale
  extra :
    attrs : [bool use_mkldnn = false]

- op : seed
  extra :
    attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]

- op : shape
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : shuffle_channel
  backward : shuffle_channel_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sigmoid
  backward : sigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : silu
  backward : silu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sin
  backward : sin_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sinh
  backward : sinh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : slice
  backward : slice_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : softmax
  backward : softmax_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : softplus
  backward : softplus_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false, str fuse_activation_type = "", float fuse_activation_alpha = 0.0f,
             float fuse_activation_beta = 0.0f, float fuse_activation_scale = 1.0f]

- op : softsign
  backward : softsign_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : sqrt
  backward : sqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : square
  backward : square_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : squeeze (squeeze2)
  backward : squeeze_grad (squeeze2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : stack
  backward : stack_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : subtract (elementwise_sub)
  backward : subtract_grad (elementwise_sub_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : swish
  backward : swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sync_batch_norm
  backward : sync_batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : tan
  backward : tan_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh
  backward : tanh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh_shrink
  backward : tanh_shrink_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : trace
  inputs :
    x : Input
  outputs :
    out : Out

- op : transpose (transpose2)
  backward : transpose_grad (transpose2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false,
              str mkldnn_data_type = "float32"]

- op : trilinear_interp (trilinear_interp_v2)
  backward : trilinear_interp_grad (trilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : trunc
  inputs :
    x : X
  outputs :
    out : Out

- op : while
  backward : while_grad
  extra :
    attrs : ['str[] skip_eager_deletion_vars = {}']