# op_version.yaml
- op : adam_
  version :
    - checkpoint : Upgrade adam, add 1 attribute [multi_precision].
      action :
        - add_attr : multi_precision
          comment : (bool) Whether to use multi-precision during weight updating.
          default : "false"
    - checkpoint : Upgrade adam, add 1 dispensable input [EpsilonTensor].
      action :
        - add_input : EpsilonTensor
          comment : If provided, Adam will use this as epsilon; it has a higher priority than attr(epsilon). This is mainly for better performance in the NPU kernel.
    - checkpoint : Upgrade adam, add 1 attribute [use_global_beta_pow].
      action :
        - add_attr : use_global_beta_pow
          comment : If true, Adam will use global beta_pow for whole model instead of creating beta_pow for each parameter. In that case, the outputs(Beta1PowOut, Beta2PowOut) will not be used in adam op, and beta_pow will be updated after all adam op in the model.
          default : "false"
    - checkpoint : Upgrade adam, add 1 dispensable input [SkipUpdate].
      action :
        - add_input : SkipUpdate
          comment : If the value is true, Adam will skip the update.
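
# Each checkpoint entry in this file roughly corresponds to a C++ op-version
# registration. The block below is a hedged, illustrative sketch of how the first
# two adam checkpoints could map onto that registration (method names assume
# paddle/fluid/framework/op_version_registry.h; treat it as an assumption, not
# generated code):
#
#   REGISTER_OP_VERSION(adam)
#       .AddCheckpoint(
#           "Upgrade adam, add 1 attribute [multi_precision].",
#           paddle::framework::compatible::OpVersionDesc().NewAttr(
#               "multi_precision",
#               "(bool) Whether to use multi-precision during weight updating.",
#               false))
#       .AddCheckpoint(
#           "Upgrade adam, add 1 dispensable input [EpsilonTensor].",
#           paddle::framework::compatible::OpVersionDesc().NewInput(
#               "EpsilonTensor",
#               "If provided, Adam will use this as epsilon; it has a higher "
#               "priority than attr(epsilon)."));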

- op : affine_grid
  version :
    - checkpoint : Compatible upgrade of affine_grid, add a new attribute [align_corners].
      action :
        - add_attr : align_corners
          comment : Whether to align the corners of input and output.
          default : "true"

- op : allclose
  version :
    - checkpoint : Upgrade allclose, add two new inputs [Rtol] and [Atol].
      action:
        - add_input : Rtol
          comment : The added input 'Rtol' is not dispensable.
        - add_input : Atol
          comment : The added input 'Atol' is not dispensable.
    - checkpoint : Delete two float attributes [rtol] and [atol],
        then add 2 string attributes [atol, rtol]. Don't be surprised.
        This is because float cannot represent high-precision
        floating-point values, and our framework doesn't support
        the use of double attributes. As a result, string instead
        of double is used here to represent high-precision
        floating-point values.
      action :
        - add_attr : rtol
          comment : (string) The relative tolerance. Default: :math:`1e-5` .
          default : std::string("1e-5")
        - delete_attr : rtol
          comment : The attribute 'rtol' is deleted. The reason why it is deleted is that
                    attributes do not support a float64 value and it is changed to a tensor.
        - add_attr : atol
          comment : (string) The absolute tolerance. Default: :math:`1e-8` .
          default : std::string("1e-8")
        - delete_attr : atol
          comment : The attribute 'atol' is deleted. The reason why it is deleted is that
                    attributes do not support a float64 value and it is changed to a tensor.
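
# The string attributes above stand in for double-precision values because the
# attribute system has no float64 type. A hedged sketch of how a kernel might read
# them back (the `ctx.Attr<std::string>(...)` usage and std::stod conversion are
# assumptions for illustration, not copied from the actual allclose kernel):
#
#   double rtol = std::stod(ctx.Attr<std::string>("rtol"));  // e.g. "1e-5" -> 1e-5
#   double atol = std::stod(ctx.Attr<std::string>("atol"));  // e.g. "1e-8" -> 1e-8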

- op : auc
  version :
    - checkpoint :  Upgrade auc, add a new input [InsTagWeight].
      action :
        - add_input : ValueTensor
          comment : In order to support multi-tag task.

- op : clip
  version :
    - checkpoint : Upgrade clip, add two new inputs [Min] and [Max]
      action :
        - add_input : Min
          comment : Pass the min value as input instead of an attribute. Min is dispensable.
        - add_input : Max
          comment : Pass the max value as input instead of an attribute. Max is dispensable.

- op : coalesce_tensor
  version :
    - checkpoint : "Upgrade coalesce_tensor: add a new attribute [use_align]."
      action :
        - add_attr : use_align
          comment : In order to optionally take memory alignment into account when
            coalescing tensors. The default value is true to be compatible
            with before.
          default : "true"
    - checkpoint : "Upgrade coalesce_tensor: add a new attribute [align_size]."
      action :
        - add_attr : align_size
          comment : In order to optionally take memory alignment into account when
            coalescing tensors. The default value is -1 and use the default
            align_size
            of each place to be compatible with before.
          default : -1

- op : conv2d
  version :
    - checkpoint : Upgrade conv2d, add a new attribute [use_addto].
      action :
        - add_attr : use_addto
          comment : In order to support new feature (inplace addto strategy) for
            gradient accumulation.
          default : "false"

- op : conv2d_transpose
  version :
    - checkpoint : Upgrade convtranspose add a new attribute [output_padding].
      action :
        - add_attr : output_padding
          comment : In order to add additional size to one side of each dimension in the output.
          default : "std::vector<int>{}"
    - checkpoint : Upgrade conv2d transpose to add new attributes [force_fp32_output, mkldnn_data_type].
      action :
        - add_attr : force_fp32_output
          comment : Force the BF16 kernel to output FP32; only used in MKL-DNN BF16.
          default : "false"
        - add_attr : mkldnn_data_type
          comment : Data type of mkldnn kernel.
          default : "\"float32\""

- op : conv3d
  version :
    - checkpoint : Upgrade conv3d, add a new attribute [use_addto].
      action :
        - add_attr : use_addto
          comment : In order to support new feature (inplace addto strategy) for
            gradient accumulation.
          default : "false"

- op : conv3d_transpose
  version :
    - checkpoint : Upgrade convtranspose add a new attribute [output_padding].
      action :
        - add_attr : output_padding
          comment : In order to add additional size to one side of each dimension in the output.
          default : "std::vector<int>{}"

- op : conv_transpose
  version :
    - checkpoint : Upgrade convtranspose add a new attribute [output_padding].
      action :
        - add_attr : output_padding
          comment : In order to add additional size to one side of each dimension in the output.
          default : "std::vector<int>{}"

- op : depthwise_conv2d
  version :
    - checkpoint : Upgrade depthwise_conv2d, add a new attribute [use_addto].
      action :
        - add_attr : use_addto
          comment : In order to support new feature (inplace addto strategy) for
            gradient accumulation.
          default : "false"

- op : depthwise_conv2d_transpose
  version :
    - checkpoint : Upgrade convtranspose add a new attribute [output_padding].
      action :
        - add_attr : output_padding
          comment : In order to add additional size to one side of each dimension in the output.
          default : "std::vector<int>{}"

- op : elementwise_max
  version :
    - checkpoint : Register elementwise_max for adding the attribute of Scale_y.
      action :
        - add_attr : Scale_y
          comment : In order to support the function of scaling the input Y when using the operator of elementwise_max.
          default : 1.0

- op : embedding
  version :
    - checkpoint : Upgrade embedding (lookup_table_v2), fix bug to support more input types.
      action :
        - fix_bug : fix_bug
          comment : Previously, lookup_table_v2 supported only input type `int64`; it now supports `int32/int64`.
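
# A fix_bug action carries no attribute/input payload; it records a behavior-changing
# bugfix. A hedged sketch of the corresponding registry call (assuming
# OpVersionDesc::BugfixWithBehaviorChanged takes a single remark string):
#
#   REGISTER_OP_VERSION(lookup_table_v2)
#       .AddCheckpoint(
#           "Upgrade embedding (lookup_table_v2), fix bug to support more input types.",
#           paddle::framework::compatible::OpVersionDesc().BugfixWithBehaviorChanged(
#               "lookup_table_v2 now supports input types `int32/int64` instead of "
#               "`int64` only."));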

- op : equal
  version :
    - checkpoint : Upgrade compare ops, add a new attribute [force_cpu]
      action :
        - modify_attr : force_cpu
          comment : In order to force fill output variable to gpu memory.
          default : "false"

- op : expand_as_v2
  version :
    - checkpoint : fix expand_as_v2 and add new input [Y].
      action :
        - add_input : Y
          comment : Expand X according to the shape of Y.

- op : flip
  version :
    - checkpoint : Upgrade flip, add new attr [axis] and delete attr [dims]
      action :
        - add_attr : axis
          comment : The added attr 'axis' has no default value.
          default : paddle::none
        - delete_attr : dims
          comment : The attr 'dims' is deleted.

- op : gaussian_random
  version :
    - checkpoint : Upgrade gaussian_random add new inputs [ShapeTensor] and [ShapeTensorList]
               and modify the attribute of [shape]
      action :
        - add_input : ShapeTensor
          comment : The output shape supports Tensor type. ShapeTensor is dispensable.
        - add_input : ShapeTensorList
          comment : The output shape supports list filled with Tensor. ShapeTensorList is dispensable.
        - modify_attr : shape
          comment : "The arg 'default_value' of attr 'shape' is changed: from 'None' to '{}'."
          default : std::vector<int64_t>{}

- op : generate_proposals
  version :
    - checkpoint : Register generate_proposals_v2 for adding the attribute of pixel_offset.
      action :
        - add_attr : pixel_offset
          comment : If true, im_shape pixel offset is 1.
          default : "true"

- op : greater_equal
  version :
    - checkpoint : Upgrade compare ops, add a new attribute [force_cpu]
      action :
        - modify_attr : force_cpu
          comment : In order to force fill output variable to gpu memory.
          default : "false"

- op : greater_than
  version :
    - checkpoint : Upgrade compare ops, add a new attribute [force_cpu]
      action :
        - modify_attr : force_cpu
          comment : In order to force fill output variable to gpu memory.
          default : "false"

- op : grid_sample
  version :
    - checkpoint : Upgrade grid_sampler add a new attribute [mode]
      action :
        - add_attr : mode
          comment : In order to specify interpolation mode
          default : std::string("bilinear")

- op : instance_norm
  version :
    - checkpoint : Change dispensable of inputs [Scale, Bias] from False to True in instance_norm.
      action :
        - modify_attr : Bias
          comment : "The arg 'dispensable' of Input 'Bias' is changed: from 'False' to 'True'."
          default : "true"
        - modify_attr : Scale
          comment : "The arg 'dispensable' of Input 'Scale' is changed: from 'False' to 'True'."
          default : "true"

- op : lamb
  version :
    - checkpoint : Upgrade lamb, add two new outputs [Beta1PowOut] and [Beta2PowOut].
      action :
        - add_output : Beta1PowOut
          comment : The Output beta1 power accumulator. 'Beta1PowOut' is dispensable.
        - add_output : Beta2PowOut
          comment : The Output beta2 power accumulator. 'Beta2PowOut' is dispensable.

- op : less_equal
  version :
    - checkpoint : Upgrade compare ops, add a new attribute [force_cpu]
      action :
        - modify_attr : force_cpu
          comment : In order to force fill output variable to gpu memory.
          default : "false"

- op : less_than
  version :
    - checkpoint : Upgrade compare ops, add a new attribute [force_cpu]
      action :
        - modify_attr : force_cpu
          comment : In order to force fill output variable to gpu memory.
          default : "false"

- op : linspace
  version :
    - checkpoint : Upgrade linspace to add a new attribute [dtype]
      action :
        - add_attr : dtype
          comment : In order to change output data type
          default : 5
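          # Note: 5 appears to correspond to FP32 in the framework's VarType enum; treat this mapping as an assumption.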

- op : lstsq
  version :
    - checkpoint : Upgrade lstsq, add 1 output [Residuals].
      action :
        - add_output : Residuals
          comment : Output tensor of lstsq operator, meaning the squared residuals of the calculated solutions.

- op : matrix_nms
  version :
    - checkpoint : Upgrade matrix_nms, add a new output [RoisNum].
      action :
        - add_output : RoisNum
          comment : The number of RoIs in each image.

- op : momentum
  version :
    - checkpoint : Upgrade momentum, add 1 input [MasterParam], 1 output [MasterParamOut] and 4 attributes [regularization_method, regularization_coeff, multi_precision, rescale_grad].
      action :
        - add_input : MasterParam
          comment : FP32 master weight for AMP.
        - add_output : MasterParamOut
          comment : The updated FP32 master weight for AMP. It shares memory with Input(MasterParam).
        - add_attr : regularization_method
          comment : (string) regularization_method, right now only supports l2decay or none
          default : std::string("")
        - add_attr : regularization_coeff
          comment : (float) regularization_coeff
          default : 0.0
        - add_attr : multi_precision
          comment : (bool) Whether to use multi-precision during weight updating.
          default : "false"
        - add_attr : rescale_grad
          comment : (float) Multiply the gradient with `rescale_grad` before updating. Often choose to be `1.0/batch_size`.
          default : 1.0

- op : not_equal
  version :
    - checkpoint : Upgrade compare ops, add a new attribute [force_cpu]
      action :
        - modify_attr : force_cpu
          comment : In order to force fill output variable to gpu memory.
          default : "false"

- op : p_norm
  version :
    - checkpoint : Upgrade p_norm, add 1 attribute [asvector].
      action :
        - add_attr : asvector
          comment : Compute as vector when axis is None and input is matrix.
          default : "false"

- op : pixel_shuffle
  version :
    - checkpoint : Compatible upgrade of pixel_shuffle, add a new attribute [data_format]
      action :
        - add_attr : data_format
          comment : Specify the data format of the input data
          default : "true"

- op : roll
  version :
    - checkpoint : Upgrade roll, add 1 attribute [axis], delete 1 attribute [dims].
      action :
        - add_attr : axis
          comment : Axis along which to roll. It must have the same size as shifts, or size = 0.
          default : std::vector<float>()
        - delete_attr : dims
          comment : Dims along which to roll. It must have the same size as shifts, or size = 0.
    - checkpoint : Upgrade roll add a dispensable input "ShiftsTensor"
      action :
        - add_input : ShiftsTensor
          comment : The number of places by which the elements of the tensor are shifted.

- op : softmax_with_cross_entropy
  version :
    - checkpoint : Add a new attribute [use_softmax]
      action :
        - add_attr : use_softmax
          comment : A flag to indicate whether to do softmax
          default : "true"

- op : trace
  version :
    - checkpoint : Upgrade trace, add new attributes [axis1, axis2] and delete attributes [dim1, dim2]
      action :
        - add_attr : axis1
          comment : The added attribute 'axis1' is not yet registered.
          default : std::vector<float>{0.0f}
        - add_attr : axis2
          comment : The added attribute 'axis2' is not yet registered.
          default : std::vector<float>{1.0f}
        - delete_attr : dim1
          comment : The attribute 'dim1' is not recommended according to specification 2.0.
        - delete_attr : dim2
          comment : The attribute 'dim2' is not recommended according to specification 2.0.

- op : unique_consecutive
  version :
    - checkpoint : Upgrade unique_consecutive, add 2 outputs [Indices, Counts] and 3 attributes [return_inverse, return_counts, axis].
      action :
        - add_output : Counts
          comment : The counts for each unique element.
        - add_attr : return_inverse
          comment : If True, also return the indices for where elements in the original input ended up in the returned unique tensor.
          default : "false"
        - add_attr : return_counts
          comment : If True, also return the counts for each unique element.
          default : "false"
        - add_attr : axis
          comment : The axis to apply unique. If None, the input will be flattened.
          default : std::vector<int>{}

- op : yolo_box
  version :
    - checkpoint : Upgrade yolo box to add new attributes [iou_aware, iou_aware_factor].
      action :
        - add_attr : iou_aware
          comment : Whether to use IoU-aware.
          default : "false"
        - add_attr : iou_aware_factor
          comment : The IoU-aware factor.
          default : 0.5f