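# op_compat.yaml: compatibility mapping between legacy operator definitions and
# the new api schema. Each entry renames the op's legacy argument/attribute
# names (e.g. X/Out -> x/out) and declares "extra" attributes -- mostly backend
# switches such as use_mkldnn / use_cudnn -- that are kept outside the core
# operator signature.
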
- api : abs
  backward : abs_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false]

- api : acosh
  backward : acosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : addmm
  backward : addmm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : affine_grid
  backward : affine_grid_grad
  extra :
    attrs : [bool use_cudnn = true]

- api : angle
  backward : angle_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false]

- api : asinh
  backward : asinh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : atan2
  inputs :
    {x : X1, y : X2}
  outputs :
    out : Out

- api : atanh
  backward : atanh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : batch_norm
  backward : batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- api : bernoulli
  inputs :
    x : X
  outputs :
    out : Out

- api : bicubic_interp (bicubic_interp_v2)
  backward : bicubic_interp_grad (bicubic_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- api : bilinear_interp (bilinear_interp_v2)
  backward : bilinear_interp_grad (bilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- api : ceil
  backward : ceil_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : cholesky
  inputs :
    x : X
  outputs :
    out : Out

- api : cholesky_solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- api : clip
  backward : clip_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- api : concat
  backward : concat_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]

- api : conv2d
  backward : conv2d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- api : conv2d_fusion
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- api : conv2d_transpose
  backward : conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- api : conv3d
  backward : conv3d_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- api : conv3d_transpose
  backward : conv3d_transpose_grad
  extra :
    attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- api : cos
  backward : cos_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : cosh
  backward : cosh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : cross
  inputs :
    {x : X, y : Y}
  attrs :
    axis : dim
  outputs :
    out : Out

- api : data_norm
  backward : data_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : depthwise_conv2d
  backward : depthwise_conv2d_grad
  extra :
    attrs : [bool is_test = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- api : depthwise_conv2d_transpose
  backward : depthwise_conv2d_transpose_grad
  extra :
    attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false,
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]

- api : diag (diag_v2)
  backward : diag_grad (diag_v2_grad)
  inputs :
    x : X
  outputs :
    out : Out

- api : diagonal
  inputs :
    x : Input
  outputs :
    out : Out

- api : digamma
  inputs :
    x : X
  outputs :
    out : Out

- api : dist
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- api : dot
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- api : dropout
  backward : dropout_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- api : dropout_nd
  backward : dropout_nd_grad
  extra :
    attrs : [bool fix_seed = false, int seed = 0]

- api : elu
  backward : elu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : erf
  inputs :
    x : X
  outputs :
    out : Out

- api : erfinv
  inputs :
    x : X
  outputs :
    out : Out

- api : exp
  backward : exp_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : expm1
  backward : expm1_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : fft_c2c
  inputs :
    x : X
  outputs :
    out : Out

- api : fft_c2r
  inputs :
    x : X
  outputs :
    out : Out

- api : fft_r2c
  inputs :
    x : X
  outputs :
    out : Out

- api : floor
  backward : floor_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : frobenius_norm
  backward : frobenius_norm_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : gelu
  backward : gelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool use_cudnn = false]

- api : grid_sampler
  backward : grid_sampler_grad
  extra :
    attrs : [bool use_cudnn = true]

- api : gru
  backward : gru_grad
  extra :
    attrs : [bool is_test = false]

- api : hard_swish
  backward : hard_swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : inplace_abn
  backward : inplace_abn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- api : layer_norm
  backward : layer_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- api : leaky_relu
  backward : leaky_relu_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : lgamma
  inputs :
    x : X
  outputs :
    out : Out

- api : linear_interp (linear_interp_v2)
  backward : linear_interp_grad (linear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- api : log
  backward : log_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : log10
  backward : log10_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : log1p
  backward : log1p_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : log2
  backward : log2_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : log_softmax
  backward : log_softmax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : logsigmoid
  backward : logsigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : lrn
  backward : lrn_grad
  extra :
    attrs : [bool use_mkldnn = false, bool is_test = false]

- api : matmul (matmul_v2)
  backward : matmul_grad (matmul_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}', 'int[] fused_transpose_Out = {}',
             str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
             'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']

- api : mish
  backward : mish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : mv
  inputs :
    {x : X, vec : Vec}
  outputs :
    out : Out

- api : nearest_interp (nearest_interp_v2)
  backward : nearest_interp_grad (nearest_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- api : pad2d
  backward : pad2d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : pad3d
  backward : pad3d_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : partial_sum
  backward : partial_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : poisson
  inputs :
    x : X
  outputs :
    out : Out

- api : prelu
  backward : prelu_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- api : reciprocal
  backward : reciprocal_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : reduce_all
  extra :
    attrs : [bool use_mkldnn = false]

- api : reduce_amax
  backward : reduce_amax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : reduce_amin
  backward : reduce_amin_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : reduce_any
  extra :
    attrs : [bool use_mkldnn = false]

- api : reduce_max
  backward : reduce_max_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : reduce_mean
  backward : reduce_mean_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : reduce_min
  backward : reduce_min_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : reduce_prod
  backward : reduce_prod_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : reduce_sum
  backward : reduce_sum_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : relu
  backward : relu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : relu6
  backward : relu6_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : renorm
  backward : renorm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : round
  backward : round_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : rsqrt
  backward : rsqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : seed
  extra :
    attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]

- api : shape
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- api : shuffle_channel
  backward : shuffle_channel_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : sigmoid
  backward : sigmoid_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : silu
  backward : silu_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : sin
  backward : sin_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : sinh
  backward : sinh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : slice
  backward : slice_grad
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- api : softmax
  backward : softmax_grad
  extra :
    attrs : [bool use_cudnn = false, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]

- api : softplus
  backward : softplus_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false, str fuse_activation_type = "", float fuse_activation_alpha = 0.0f,
             float fuse_activation_beta = 0.0f, float fuse_activation_scale = 1.0f]

- api : softsign
  backward : softsign_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : rnn
  backward : rnn_grad
  extra :
    attrs : [bool is_test = false]

- api : solve
  inputs :
    {x : X, y : Y}
  outputs :
    out : Out

- api : sqrt
  backward : sqrt_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : square
  backward : square_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : squeeze (squeeze2)
  backward : squeeze_grad (squeeze2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- api : stack
  backward : stack_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : swish
  backward : swish_grad
  extra :
    attrs : [bool use_mkldnn = false]

- api : sync_batch_norm
  backward : sync_batch_norm_grad
  extra :
    attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]

- api : tan
  backward : tan_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : tanh
  backward : tanh_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : tanh_shrink
  backward : tanh_shrink_grad
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- api : trace
  inputs :
    x : Input
  outputs :
    out : Out

- api : trilinear_interp (trilinear_interp_v2)
  backward : trilinear_interp_grad (trilinear_interp_v2_grad)
  extra :
    attrs : [bool use_mkldnn = false]

- api : trunc
  inputs :
    x : X
  outputs :
    out : Out