# - op : rnn
#   backward : rnn_grad
#   extra :
#     attrs : [bool is_test = false]
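
# NOTE: a descriptive sketch of the entry schema, inferred from the entries
# below (not an authoritative spec). Each `- op :` block maps an op name to
# its legacy name given in parentheses, e.g. `- op : add (elementwise_add)`,
# and may carry:
#   backward : grad-op mapping, same `new_name (legacy_name)` convention
#   inputs / outputs / attrs : argument renames, `new_name : LegacyName`
#   extra : attrs : [...] legacy-only attributes with typed defaults
# A hypothetical entry combining every field:
#
# - op : my_op (my_op_legacy)
#   backward : my_op_grad (my_op_legacy_grad)
#   inputs :
#     x : X
#   outputs :
#     out : Out
#   attrs :
#     axis : dim
#   extra :
#     attrs : [bool use_mkldnn = false]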

- op : abs
  backward : abs_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false]

- op : acos
  inputs :
    x : X
  outputs :
    out : Out

- op : acosh
  backward : acosh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : add (elementwise_add)
  backward : add_grad (elementwise_add_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : addmm
  backward : addmm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : affine_grid
  backward : affine_grid_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : angle
  backward : angle_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false]

- op : argsort
  inputs :
    x : X
  outputs :
    out : Out
    indices : Indices

- op : asin
  inputs :
    x : X
  outputs :
    out : Out

- op : asinh
  backward : asinh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : atan
  inputs :
    x : X
  outputs :
    out : Out

- op : atan2
  inputs :
    {x : X1, y : X2}
  outputs :
    out : Out

- op : atanh
  backward : atanh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : batch_norm
  backward : batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : bernoulli
  inputs :
    x : X
  outputs :
    out : Out

- op : bicubic_interp (bicubic_interp_v2)
  backward : bicubic_interp_grad (bicubic_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : bilinear_interp (bilinear_interp_v2)
  backward : bilinear_interp_grad (bilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : bmm
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : ceil
  backward : ceil_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cholesky
  inputs :
    x : X
  outputs :
    out : Out

- op : cholesky_solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : clip
  backward : clip_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : concat
  backward : concat_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]

- op : conditional_block
  backward : conditional_block_grad
  extra :
    attrs : ['str[] skip_eager_deletion_vars = {}']

- op : conv2d
  backward : conv2d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_fusion
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_transpose
  backward : conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : conv3d
  backward : conv3d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv3d_transpose
  backward : conv3d_transpose_grad
  extra :
    attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : cos
  backward : cos_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cosh
  backward : cosh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cross
  inputs :
    {x : X, y : Y}
  attrs :
    axis : dim
  outputs :
    out : Out

- op : data_norm
  backward : data_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : depthwise_conv2d
  backward : depthwise_conv2d_grad
  extra :
    attrs : [bool is_test = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : depthwise_conv2d_transpose
  backward : depthwise_conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : dequantize_linear
  extra :
    attrs : [float moving_rate = 0.9]

- op : det (determinant)
  backward : det_grad (determinant_grad)
  inputs :
    x : Input
  outputs :
    out : Out

- op : diag (diag_v2)
  backward : diag_grad (diag_v2_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : diagonal
  inputs :
    x : Input
  outputs :
    out : Out

- op : digamma
  inputs :
    x : X
  outputs :
    out : Out

- op : dist
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : distributed_push_sparse
  extra :
    attrs : ['int[] slots = {}']

- op : divide (elementwise_div)
  backward : divide_grad (elementwise_div_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : dot
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : dropout
  backward : dropout_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : dropout_nd
  backward : dropout_nd_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : elementwise_pow
  backward : elementwise_pow_grad
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : elu
  backward : elu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : embedding (lookup_table_v2)
  backward : embedding_grad (lookup_table_v2_grad)
  extra :
    attrs : [bool is_sparse = false, bool is_distributed = false, bool remote_prefetch = false,
             int trainer_id = 0, int slot = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}',
             'str[] table_names = {}']

- op : erf
  inputs :
    x : X
  outputs :
    out : Out

- op : erfinv
  inputs :
    x : X
  outputs :
    out : Out

- op : exp
  backward : exp_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : expand (expand_v2)
  backward : expand_grad (expand_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : expm1
  backward : expm1_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : fake_channel_wise_quantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_channel_wise_quantize_dequantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_dequantize_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_dequantize_moving_average_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_moving_average_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fake_quantize_range_abs_max
  extra :
    attrs : [int round_type = 1]

- op : fft_c2c
  inputs :
    x : X
  outputs :
    out : Out

- op : fft_c2r
  inputs :
    x : X
  outputs :
    out : Out

- op : fft_r2c
  inputs :
    x : X
  outputs :
    out : Out

- op : flip
  inputs :
    x : X
  outputs :
    out : Out

- op : floor
  backward : floor_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : floor_divide (elementwise_floordiv)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmax (elementwise_fmax)
  backward : fmax_grad (elementwise_fmax_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmin (elementwise_fmin)
  backward : fmin_grad (elementwise_fmin_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : frobenius_norm
  backward : frobenius_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : full (fill_constant)
  extra :
    attrs : [bool use_mkldnn = false]

- op : gather
  backward : gather_grad
  extra :
    attrs : [bool overwrite = true]

- op : gelu
  backward : gelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : grad_add
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : grid_sampler
  backward : grid_sampler_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : gru
  backward : gru_grad
  extra :
    attrs : [bool is_test = false]

- op : hard_swish
  backward : hard_swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : hardshrink (hard_shrink)
  backward : hardshrink_grad (hard_shrink_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : hardsigmoid (hard_sigmoid)
  backward : hardsigmoid_grad (hard_sigmoid_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : heaviside (elementwise_heaviside)
  backward : heaviside_grad (elementwise_heaviside_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : inplace_abn
  backward : inplace_abn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : layer_norm
  backward : layer_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : leaky_relu
  backward : leaky_relu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : lgamma
  inputs :
    x : X
  outputs :
    out : Out

- op : linear_interp (linear_interp_v2)
  backward : linear_interp_grad (linear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : log
  backward : log_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log10
  backward : log10_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log1p
  backward : log1p_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log2
  backward : log2_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log_softmax
  backward : log_softmax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : logit
  inputs :
    x : X
  outputs :
    out : Out

- op : logsigmoid
  backward : logsigmoid_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : lrn
  backward : lrn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool is_test = false]

- op : matmul (matmul_v2)
  backward : matmul_grad (matmul_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}', 'int[] fused_transpose_Out = {}',
             str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
             'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']

- op : matmul_with_flatten (mul)
  backward : matmul_with_flatten_grad (mul_grad)
  extra :
    attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}',
             float scale_out = 1.0f, bool force_fp32_output = false]

- op : maximum (elementwise_max)
  backward : maximum_grad (elementwise_max_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : minimum (elementwise_min)
  backward : minimum_grad (elementwise_min_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : mish
  backward : mish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : multiply (elementwise_mul)
  backward : multiply_grad (elementwise_mul_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : mv
  inputs :
    {x : X, vec : Vec}
  outputs :
    out : Out

- op : nce
  backward : nce_grad
  extra :
    attrs : [int trainer_id = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}',
             'str[] table_names = {}', 'int[] custom_neg_classes = {}']

- op : nearest_interp (nearest_interp_v2)
  backward : nearest_interp_grad (nearest_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad2d
  backward : pad2d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad3d
  backward : pad3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : partial_sum
  backward : partial_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : poisson
  inputs :
    x : X
  outputs :
    out : Out

- op : pool2d
  backward : pool2d_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false,
             str mkldnn_data_type = "float32", bool is_test = false]

- op : pool3d
  backward : pool3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : prelu
  backward : prelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : quantize_linear
  extra :
    attrs : [float moving_rate = 0.9]

- op : reciprocal
  backward : reciprocal_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : reduce_all
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amax
  backward : reduce_amax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amin
  backward : reduce_amin_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_any
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_max
  backward : reduce_max_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_mean
  backward : reduce_mean_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_min
  backward : reduce_min_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_prod
  backward : reduce_prod_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_sum
  backward : reduce_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : relu
  backward : relu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : relu6
  backward : relu6_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : remainder (elementwise_mod)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : renorm
  backward : renorm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : round
  backward : round_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : rsqrt
  backward : rsqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : scale
  extra :
    attrs : [bool use_mkldnn = false]

- op : seed
  extra :
    attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]

- op : send_uv (graph_send_uv)
  backward : send_uv_grad (graph_send_uv_grad)

- op : sequence_softmax
  backward : sequence_softmax_grad
  extra :
    attrs : [str data_format = "AnyLayout"]

- op : shape
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : shuffle_channel
  backward : shuffle_channel_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sigmoid
  backward : sigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : silu
  backward : silu_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sin
  backward : sin_grad, sin_double_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sinh
  backward : sinh_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : slice
  backward : slice_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : softmax
  backward : softmax_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : softplus
  backward : softplus_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false, str fuse_activation_type = "", float fuse_activation_alpha = 0.0f,
             float fuse_activation_beta = 0.0f, float fuse_activation_scale = 1.0f]

- op : softsign
  backward : softsign_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : sqrt
  backward : sqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : square
  backward : square_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : squeeze (squeeze2)
  backward : squeeze_grad (squeeze2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : stack
  backward : stack_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : subtract (elementwise_sub)
  backward : subtract_grad (elementwise_sub_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : swish
  backward : swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sync_batch_norm
  backward : sync_batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : tan
  backward : tan_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh
  backward : tanh_grad, tanh_double_grad (tanh_grad_grad), tanh_triple_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh_shrink
  backward : tanh_shrink_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : trace
  inputs :
    x : Input
  outputs :
    out : Out

- op : transpose (transpose2)
  backward : transpose_grad (transpose2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false,
             str mkldnn_data_type = "float32"]

- op : trilinear_interp (trilinear_interp_v2)
  backward : trilinear_interp_grad (trilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : trunc
  inputs :
    input : X
  outputs :
    out : Out

- op : while
  backward : while_grad
  extra :
    attrs : ['str[] skip_eager_deletion_vars = {}']