- op : abs
  backward : abs_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false]

- op : acosh
  backward : acosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : add (elementwise_add)
  backward : add_grad (elementwise_add_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : addmm
  backward : addmm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : affine_grid
  backward : affine_grid_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : angle
  backward : angle_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false]

- op : asinh
  backward : asinh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : atan2
  inputs :
    {x : X1, y : X2}
  outputs :
    out : Out

- op : atanh
  backward : atanh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : batch_norm
  backward : batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : bernoulli
  inputs :
    x : X
  outputs :
    out : Out

- op : bicubic_interp (bicubic_interp_v2)
  backward : bicubic_interp_grad (bicubic_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : bilinear_interp (bilinear_interp_v2)
  backward : bilinear_interp_grad (bilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : ceil
  backward : ceil_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cholesky
  inputs :
    x : X
  outputs :
    out : Out

- op : cholesky_solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : clip
  backward : clip_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : concat
  backward : concat_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]

- op : conv2d
  backward : conv2d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_fusion
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_transpose
  backward : conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : conv3d
  backward : conv3d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv3d_transpose
  backward : conv3d_transpose_grad
  extra :
    attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : cos
  backward : cos_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cosh
  backward : cosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : cross
  inputs :
    {x : X, y : Y}
  attrs :
    axis : dim
  outputs :
    out : Out

- op : data_norm
  backward : data_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : depthwise_conv2d
  backward : depthwise_conv2d_grad
  extra :
    attrs : [bool is_test = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : depthwise_conv2d_transpose
  backward : depthwise_conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- op : diag (diag_v2)
  backward : diag_grad (diag_v2_grad)
  inputs :
    x : X
  outputs :
    out : Out

- op : diagonal
  inputs :
    x : Input
  outputs :
    out : Out

- op : digamma
  inputs :
    x : X
  outputs :
    out : Out

- op : dist
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : divide (elementwise_div)
  backward : divide_grad (elementwise_div_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : dot
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : dropout
  backward : dropout_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : dropout_nd
  backward : dropout_nd_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- op : elementwise_pow
  backward : elementwise_pow_grad
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : elu
  backward : elu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : erf
  inputs :
    x : X
  outputs :
    out : Out

- op : erfinv
  inputs :
    x : X
  outputs :
    out : Out

- op : exp
  backward : exp_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : expm1
  backward : expm1_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : fft_c2c
  inputs: {x: X}
  outputs: {out: Out}

- op : fft_c2r
  inputs: {x: X}
  outputs: {out: Out}

- op : fft_r2c
  inputs: {x: X}
  outputs: {out: Out}

- op : floor
  backward : floor_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : floor_divide (elementwise_floordiv)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmax (elementwise_fmax)
  backward : fmax_grad (elementwise_fmax_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : fmin (elementwise_fmin)
  backward : fmin_grad (elementwise_fmin_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : frobenius_norm
  backward : frobenius_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : gelu
  backward : gelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool use_cudnn = false]

- op : grad_add
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : grid_sampler
  backward : grid_sampler_grad
  extra :
    attrs : [bool use_cudnn = true]

- op : gru
  backward : gru_grad
  extra :
    attrs : [bool is_test = false]

- op : hard_swish
  backward : hard_swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : heaviside (elementwise_heaviside)
  backward : heaviside_grad (elementwise_heaviside_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : inplace_abn
  backward : inplace_abn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : layer_norm
  backward : layer_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : leaky_relu
  backward : leaky_relu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : lgamma
  inputs :
    x : X
  outputs :
    out : Out

- op : linear_interp (linear_interp_v2)
  backward : linear_interp_grad (linear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : log
  backward : log_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log10
  backward : log10_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log1p
  backward : log1p_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log2
  backward : log2_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : log_softmax
  backward : log_softmax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : logsigmoid
  backward : logsigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : lrn
  backward : lrn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool is_test = false]

- op : matmul (matmul_v2)
  backward : matmul_grad (matmul_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}', 'int[] fused_transpose_Out = {}',
             str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
             'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']

- op : maximum (elementwise_max)
  backward : maximum_grad (elementwise_max_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : minimum (elementwise_min)
  backward : minimum_grad (elementwise_min_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : mish
  backward : mish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : multiply (elementwise_mul)
  backward : multiply_grad (elementwise_mul_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : mv
  inputs :
    {x : X, vec : Vec}
  outputs :
    out : Out

- op : nearest_interp (nearest_interp_v2)
  backward : nearest_interp_grad (nearest_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad2d
  backward : pad2d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : pad3d
  backward : pad3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : partial_sum
  backward : partial_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : poisson
  inputs :
    x : X
  outputs :
    out : Out

- op : prelu
  backward : prelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : reciprocal
  backward : reciprocal_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : reduce_all
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amax
  backward : reduce_amax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amin
  backward : reduce_amin_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_any
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_max
  backward : reduce_max_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_mean
  backward : reduce_mean_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_min
  backward : reduce_min_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_prod
  backward : reduce_prod_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_sum
  backward : reduce_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : relu
  backward : relu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : relu6
  backward : relu6_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : remainder (elementwise_mod)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : renorm
  backward : renorm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : rnn
  backward : rnn_grad
  extra :
    attrs : [bool is_test = false]

- op : round
  backward : round_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : rsqrt
  backward : rsqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : scale
  extra :
    attrs : [bool use_mkldnn = false]

- op : seed
  extra :
    attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]

- op : shape
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : shuffle_channel
  backward : shuffle_channel_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sigmoid
  backward : sigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : silu
  backward : silu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sin
  backward : sin_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : sinh
  backward : sinh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : slice
  backward : slice_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : softmax
  backward : softmax_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- op : softplus
  backward : softplus_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false, str fuse_activation_type = "", float fuse_activation_alpha = 0.0f,
             float fuse_activation_beta = 0.0f, float fuse_activation_scale = 1.0f]

- op : softsign
  backward : softsign_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- op : sqrt
  backward : sqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : square
  backward : square_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : squeeze (squeeze2)
  backward : squeeze_grad (squeeze2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : stack
  backward : stack_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : subtract (elementwise_sub)
  backward : subtract_grad (elementwise_sub_grad)
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]

- op : swish
  backward : swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : sync_batch_norm
  backward : sync_batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- op : tan
  backward : tan_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh
  backward : tanh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : tanh_shrink
  backward : tanh_shrink_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : trace
  inputs :
    x : Input
  outputs :
    out : Out

- op : trilinear_interp (trilinear_interp_v2)
  backward : trilinear_interp_grad (trilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- op : trunc
  inputs :
    x : X
  outputs :
    out : Out

- op : expand (expand_v2)
  backward : expand_grad (expand_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : full (fill_constant)
  extra :
    attrs : [bool use_mkldnn = false]

- op : gather
  backward : gather_grad
  extra :
    attrs : [bool overwrite = true]

- op : matmul_with_flatten (mul)
  backward : matmul_with_flatten_grad (mul_grad)
  extra :
    attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}',
             float scale_out = 1.0f, bool force_fp32_output = false]

- op : pool2d
  backward : pool2d_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false,
              str mkldnn_data_type = "float32", bool is_test = false]

- op : pool3d
  backward : pool3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

# NOTE(review): duplicate of the earlier 'stack' entry in this file — confirm and remove one
- op : stack
  backward : stack_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : transpose (transpose2)
  backward : transpose_grad (transpose2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false,
              str mkldnn_data_type = "float32"]