Commit 2a6ce84a, authored by A. Unique TensorFlower and committed by TensorFlower Gardener

Update ops-related pbtxt files.

Change: 144331488
Parent dd9684fd
@@ -24830,6 +24830,1318 @@ op {
}
}
}
op {
name: "ResourceApplyAdadelta"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "accum_update"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceApplyAdagrad"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceApplyAdagradDA"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "gradient_accumulator"
type: DT_RESOURCE
}
input_arg {
name: "gradient_squared_accumulator"
type: DT_RESOURCE
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "global_step"
type: DT_INT64
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceApplyAdam"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "m"
type: DT_RESOURCE
}
input_arg {
name: "v"
type: DT_RESOURCE
}
input_arg {
name: "beta1_power"
type_attr: "T"
}
input_arg {
name: "beta2_power"
type_attr: "T"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "beta1"
type_attr: "T"
}
input_arg {
name: "beta2"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceApplyCenteredRMSProp"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "mg"
type: DT_RESOURCE
}
input_arg {
name: "ms"
type: DT_RESOURCE
}
input_arg {
name: "mom"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceApplyFtrl"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "linear"
type: DT_RESOURCE
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "lr_power"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceApplyGradientDescent"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "alpha"
type_attr: "T"
}
input_arg {
name: "delta"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceApplyMomentum"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
attr {
name: "use_nesterov"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceApplyProximalAdagrad"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceApplyProximalGradientDescent"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "alpha"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "delta"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceApplyRMSProp"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "ms"
type: DT_RESOURCE
}
input_arg {
name: "mom"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceSparseApplyAdadelta"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "accum_update"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceSparseApplyAdagrad"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceSparseApplyAdagradDA"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "gradient_accumulator"
type: DT_RESOURCE
}
input_arg {
name: "gradient_squared_accumulator"
type: DT_RESOURCE
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "global_step"
type: DT_INT64
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceSparseApplyCenteredRMSProp"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "mg"
type: DT_RESOURCE
}
input_arg {
name: "ms"
type: DT_RESOURCE
}
input_arg {
name: "mom"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceSparseApplyFtrl"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "linear"
type: DT_RESOURCE
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "lr_power"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceSparseApplyMomentum"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "momentum"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
attr {
name: "use_nesterov"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceSparseApplyProximalAdagrad"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceSparseApplyProximalGradientDescent"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "alpha"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceSparseApplyRMSProp"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "ms"
type: DT_RESOURCE
}
input_arg {
name: "mom"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "Restore"
input_arg {
......
@@ -15933,6 +15933,1513 @@ op {
}
summary: "Computes the gradient of nearest neighbor interpolation."
}
op {
name: "ResourceApplyAdadelta"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "accum"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "accum_update"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "lr"
description: "Scaling factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "rho"
description: "Decay factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "epsilon"
description: "Constant factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If True, updating of the var, accum and update_accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
}
summary: "Update \'*var\' according to the adadelta scheme."
description: "accum = rho() * accum + (1 - rho()) * grad.square();\nupdate = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;\nupdate_accum = rho() * update_accum + (1 - rho()) * update.square();\nvar -= update;"
}
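The Adadelta rule spelled out in the description above reduces to a few array updates. Below is a minimal NumPy sketch, not the TensorFlow kernel: the helper name is made up, the inputs are plain float arrays updated in place, and applying `lr` as the final step-size scale is an assumption beyond the literal pseudocode (which writes `var -= update`).

```python
import numpy as np

def apply_adadelta_sketch(var, accum, accum_update, lr, rho, epsilon, grad):
    # accum = rho * accum + (1 - rho) * grad^2
    accum[:] = rho * accum + (1.0 - rho) * np.square(grad)
    # update = sqrt(accum_update + eps) / sqrt(accum + eps) * grad
    update = np.sqrt(accum_update + epsilon) / np.sqrt(accum + epsilon) * grad
    # accum_update = rho * accum_update + (1 - rho) * update^2
    accum_update[:] = rho * accum_update + (1.0 - rho) * np.square(update)
    # var -= lr * update   (lr as a step-size scale is assumed here)
    var[:] -= lr * update
```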
op {
name: "ResourceApplyAdagrad"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "accum"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "lr"
description: "Scaling factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
}
summary: "Update \'*var\' according to the adagrad scheme."
description: "accum += grad * grad\nvar -= lr * grad * (1 / sqrt(accum))"
}
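The two-line Adagrad update in the description above, as a hedged NumPy sketch (illustrative helper name, arrays updated in place):

```python
import numpy as np

def apply_adagrad_sketch(var, accum, lr, grad):
    # accum += grad * grad
    accum[:] += np.square(grad)
    # var -= lr * grad * (1 / sqrt(accum))
    var[:] -= lr * grad / np.sqrt(accum)

# e.g. var = np.array([1.0, 2.0]); accum = np.full(2, 0.1)
# apply_adagrad_sketch(var, accum, lr=0.1, grad=np.array([0.5, -0.5]))
```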
op {
name: "ResourceApplyAdagradDA"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "gradient_accumulator"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "gradient_squared_accumulator"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
input_arg {
name: "lr"
description: "Scaling factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "l1"
description: "L1 regularization. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "l2"
description: "L2 regularization. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "global_step"
description: "Training step number. Must be a scalar."
type: DT_INT64
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
}
summary: "Update \'*var\' according to the proximal adagrad scheme."
}
op {
name: "ResourceApplyAdam"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "m"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "v"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "beta1_power"
description: "Must be a scalar."
type_attr: "T"
}
input_arg {
name: "beta2_power"
description: "Must be a scalar."
type_attr: "T"
}
input_arg {
name: "lr"
description: "Scaling factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "beta1"
description: "Momentum factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "beta2"
description: "Momentum factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "epsilon"
description: "Ridge term. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
}
summary: "Update \'*var\' according to the Adam algorithm."
description: "lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)\nm_t <- beta1 * m_{t-1} + (1 - beta1) * g_t\nv_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t\nvariable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)"
}
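A NumPy sketch of the Adam step documented above; `beta1_power` and `beta2_power` are the precomputed beta1^t and beta2^t inputs, and the helper name is illustrative only:

```python
import numpy as np

def apply_adam_sketch(var, m, v, beta1_power, beta2_power,
                      lr, beta1, beta2, epsilon, grad):
    # lr_t = lr * sqrt(1 - beta2^t) / (1 - beta1^t)
    lr_t = lr * np.sqrt(1.0 - beta2_power) / (1.0 - beta1_power)
    # m_t = beta1 * m_{t-1} + (1 - beta1) * g_t
    m[:] = beta1 * m + (1.0 - beta1) * grad
    # v_t = beta2 * v_{t-1} + (1 - beta2) * g_t^2
    v[:] = beta2 * v + (1.0 - beta2) * np.square(grad)
    # var -= lr_t * m_t / (sqrt(v_t) + epsilon)
    var[:] -= lr_t * m / (np.sqrt(v) + epsilon)
```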
op {
name: "ResourceApplyCenteredRMSProp"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "mg"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "ms"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "mom"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "lr"
description: "Scaling factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "rho"
description: "Decay rate. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
input_arg {
name: "epsilon"
description: "Ridge term. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
}
summary: "Update \'*var\' according to the centered RMSProp algorithm."
description: "The centered RMSProp algorithm uses an estimate of the centered second moment\n(i.e., the variance) for normalization, as opposed to regular RMSProp, which\nuses the (uncentered) second moment. This often helps with training, but is\nslightly more expensive in terms of computation and memory.\n\nNote that in dense implementation of this algorithm, mg, ms, and mom will\nupdate even if the grad is zero, but in this sparse implementation, mg, ms,\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nmean_grad = decay * mean_grad + (1-decay) * gradient\n\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)\n\nmg <- rho * mg_{t-1} + (1-rho) * grad\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)\nvar <- var - mom"
}
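The centered-RMSProp recurrences at the end of the description above, written as a NumPy sketch (illustrative name, dense case only):

```python
import numpy as np

def apply_centered_rms_prop_sketch(var, mg, ms, mom, lr, rho,
                                   momentum, epsilon, grad):
    # mg <- rho * mg + (1 - rho) * grad          (running mean of gradients)
    mg[:] = rho * mg + (1.0 - rho) * grad
    # ms <- rho * ms + (1 - rho) * grad * grad   (running mean of squares)
    ms[:] = rho * ms + (1.0 - rho) * np.square(grad)
    # mom <- momentum * mom + lr * grad / sqrt(ms - mg^2 + epsilon)
    mom[:] = momentum * mom + lr * grad / np.sqrt(ms - np.square(mg) + epsilon)
    # var <- var - mom
    var[:] -= mom
```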
op {
name: "ResourceApplyFtrl"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "accum"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "linear"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
input_arg {
name: "lr"
description: "Scaling factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "l1"
description: "L1 regulariation. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "l2"
description: "L2 regulariation. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "lr_power"
description: "Scaling factor. Must be a scalar."
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
}
summary: "Update \'*var\' according to the Ftrl-proximal scheme."
description: "accum_new = accum + grad * grad\nlinear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new"
}
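The Ftrl-proximal update in the description above, transcribed into NumPy (illustrative helper name; `accum` is assumed to start strictly positive, as with the usual FTRL accumulator initialization, so the powers below stay well defined):

```python
import numpy as np

def apply_ftrl_sketch(var, accum, linear, grad, lr, l1, l2, lr_power):
    accum_new = accum + np.square(grad)
    # linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
    linear[:] += grad + (accum_new ** (-lr_power) - accum ** (-lr_power)) / lr * var
    # quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
    quadratic = 1.0 / (accum_new ** lr_power * lr) + 2.0 * l2
    # var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
    var[:] = np.where(np.abs(linear) > l1,
                      (np.sign(linear) * l1 - linear) / quadratic,
                      0.0)
    accum[:] = accum_new
```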
op {
name: "ResourceApplyGradientDescent"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "alpha"
description: "Scaling factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "delta"
description: "The change."
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If `True`, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
}
summary: "Update \'*var\' by subtracting \'alpha\' * \'delta\' from it."
}
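The op above is a one-line update; as a NumPy sketch:

```python
import numpy as np

def apply_gradient_descent_sketch(var, alpha, delta):
    # var -= alpha * delta
    var[:] -= alpha * delta
```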
op {
name: "ResourceApplyMomentum"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "accum"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "lr"
description: "Scaling factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
input_arg {
name: "momentum"
description: "Momentum. Must be a scalar."
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
}
attr {
name: "use_nesterov"
type: "bool"
default_value {
b: false
}
description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum."
}
summary: "Update \'*var\' according to the momentum scheme. Set use_nesterov = True if you"
description: "want to use Nesterov momentum.\n\naccum = accum * momentum + grad\nvar -= lr * accum"
}
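A NumPy sketch of the momentum update described above. The `use_nesterov=False` branch follows the pseudocode literally; the Nesterov branch is one reading of the attr description (the applied step uses `grad + momentum * accum`) and should be treated as an assumption of this sketch:

```python
import numpy as np

def apply_momentum_sketch(var, accum, lr, grad, momentum, use_nesterov=False):
    # accum = accum * momentum + grad
    accum[:] = accum * momentum + grad
    if use_nesterov:
        # assumed Nesterov form: step with grad + momentum * accum
        var[:] -= lr * (grad + momentum * accum)
    else:
        # var -= lr * accum
        var[:] -= lr * accum
```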
op {
name: "ResourceApplyProximalAdagrad"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "accum"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "lr"
description: "Scaling factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "l1"
description: "L1 regularization. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "l2"
description: "L2 regularization. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
}
summary: "Update \'*var\' and \'*accum\' according to FOBOS with Adagrad learning rate."
description: "accum += grad * grad\nprox_v = var - lr * grad * (1 / sqrt(accum))\nvar = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}"
}
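The FOBOS-with-Adagrad-learning-rate update above, transcribed literally into NumPy (illustrative name):

```python
import numpy as np

def apply_proximal_adagrad_sketch(var, accum, lr, l1, l2, grad):
    # accum += grad * grad
    accum[:] += np.square(grad)
    # prox_v = var - lr * grad * (1 / sqrt(accum))
    prox_v = var - lr * grad / np.sqrt(accum)
    # var = sign(prox_v) / (1 + lr*l2) * max(|prox_v| - lr*l1, 0)
    var[:] = (np.sign(prox_v) / (1.0 + lr * l2)
              * np.maximum(np.abs(prox_v) - lr * l1, 0.0))
```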
op {
name: "ResourceApplyProximalGradientDescent"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "alpha"
description: "Scaling factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "l1"
description: "L1 regularization. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "l2"
description: "L2 regularization. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "delta"
description: "The change."
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
}
summary: "Update \'*var\' as FOBOS algorithm with fixed learning rate."
description: "prox_v = var - alpha * delta\nvar = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}"
}
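The fixed-learning-rate FOBOS step above in NumPy form (illustrative name):

```python
import numpy as np

def apply_proximal_gradient_descent_sketch(var, alpha, l1, l2, delta):
    # prox_v = var - alpha * delta
    prox_v = var - alpha * delta
    # var = sign(prox_v) / (1 + alpha*l2) * max(|prox_v| - alpha*l1, 0)
    var[:] = (np.sign(prox_v) / (1.0 + alpha * l2)
              * np.maximum(np.abs(prox_v) - alpha * l1, 0.0))
```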
op {
name: "ResourceApplyRMSProp"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "ms"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "mom"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "lr"
description: "Scaling factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "rho"
description: "Decay rate. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
input_arg {
name: "epsilon"
description: "Ridge term. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
}
summary: "Update \'*var\' according to the RMSProp algorithm."
description: "Note that in dense implementation of this algorithm, ms and mom will\nupdate even if the grad is zero, but in this sparse implementation, ms\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon)\n\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)\nvar <- var - mom"
}
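The (uncentered) RMSProp recurrences above as a NumPy sketch:

```python
import numpy as np

def apply_rms_prop_sketch(var, ms, mom, lr, rho, momentum, epsilon, grad):
    # ms <- rho * ms + (1 - rho) * grad * grad
    ms[:] = rho * ms + (1.0 - rho) * np.square(grad)
    # mom <- momentum * mom + lr * grad / sqrt(ms + epsilon)
    mom[:] = momentum * mom + lr * grad / np.sqrt(ms + epsilon)
    # var <- var - mom
    var[:] -= mom
```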
op {
name: "ResourceSparseApplyAdadelta"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "accum_update"
description: ": Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "lr"
description: "Learning rate. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "rho"
description: "Decay factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "epsilon"
description: "Constant factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
input_arg {
name: "indices"
description: "A vector of indices into the first dimension of var and accum."
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
}
summary: "var: Should be from a Variable()."
}
op {
name: "ResourceSparseApplyAdagrad"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "accum"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "lr"
description: "Learning rate. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
input_arg {
name: "indices"
description: "A vector of indices into the first dimension of var and accum."
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
}
summary: "Update relevant entries in \'*var\' and \'*accum\' according to the adagrad scheme."
description: "That is for rows we have grad for, we update var and accum as follows:\naccum += grad * grad\nvar -= lr * grad * (1 / sqrt(accum))"
}
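The ResourceSparseApply* ops reuse the dense rules above but touch only the rows named by `indices`. A hedged NumPy sketch of the sparse Adagrad case shows the pattern: row `k` of `grad` updates row `indices[k]` of `var` and `accum`, while other rows are left alone. Applying duplicate indices sequentially, as the loop below does, is an assumption of this sketch rather than a statement about the kernel.

```python
import numpy as np

def sparse_apply_adagrad_sketch(var, accum, lr, grad, indices):
    # grad has shape [len(indices), ...]; rows of var/accum not listed
    # in indices are left untouched.
    for k, i in enumerate(indices):
        accum[i] += np.square(grad[k])
        var[i] -= lr * grad[k] / np.sqrt(accum[i])
```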
op {
name: "ResourceSparseApplyAdagradDA"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "gradient_accumulator"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "gradient_squared_accumulator"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
input_arg {
name: "indices"
description: "A vector of indices into the first dimension of var and accum."
type_attr: "Tindices"
}
input_arg {
name: "lr"
description: "Learning rate. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "l1"
description: "L1 regularization. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "l2"
description: "L2 regularization. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "global_step"
description: "Training step number. Must be a scalar."
type: DT_INT64
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
}
summary: "Update entries in \'*var\' and \'*accum\' according to the proximal adagrad scheme."
}
op {
name: "ResourceSparseApplyCenteredRMSProp"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "mg"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "ms"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "mom"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "lr"
description: "Scaling factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "rho"
description: "Decay rate. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
input_arg {
name: "epsilon"
description: "Ridge term. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
input_arg {
name: "indices"
description: "A vector of indices into the first dimension of var, ms and mom."
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
}
summary: "Update \'*var\' according to the centered RMSProp algorithm."
description: "The centered RMSProp algorithm uses an estimate of the centered second moment\n(i.e., the variance) for normalization, as opposed to regular RMSProp, which\nuses the (uncentered) second moment. This often helps with training, but is\nslightly more expensive in terms of computation and memory.\n\nNote that in dense implementation of this algorithm, mg, ms, and mom will\nupdate even if the grad is zero, but in this sparse implementation, mg, ms,\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nmean_grad = decay * mean_grad + (1-decay) * gradient\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)\n\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)\nvar <- var - mom"
}
op {
name: "ResourceSparseApplyFtrl"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "accum"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "linear"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
input_arg {
name: "indices"
description: "A vector of indices into the first dimension of var and accum."
type_attr: "Tindices"
}
input_arg {
name: "lr"
description: "Scaling factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "l1"
description: "L1 regularization. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "l2"
description: "L2 regularization. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "lr_power"
description: "Scaling factor. Must be a scalar."
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
}
summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme."
description: "That is for rows we have grad for, we update var, accum and linear as follows:\naccum_new = accum + grad * grad\nlinear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new"
}
op {
name: "ResourceSparseApplyMomentum"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "accum"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "lr"
description: "Learning rate. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
input_arg {
name: "indices"
description: "A vector of indices into the first dimension of var and accum."
type_attr: "Tindices"
}
input_arg {
name: "momentum"
description: "Momentum. Must be a scalar."
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
}
attr {
name: "use_nesterov"
type: "bool"
default_value {
b: false
}
description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum."
}
summary: "Update relevant entries in \'*var\' and \'*accum\' according to the momentum scheme."
description: "Set use_nesterov = True if you want to use Nesterov momentum.\n\nThat is for rows we have grad for, we update var and accum as follows:\n\naccum = accum * momentum + grad\nvar -= lr * accum"
}
op {
name: "ResourceSparseApplyProximalAdagrad"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "accum"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "lr"
description: "Learning rate. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "l1"
description: "L1 regularization. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "l2"
description: "L2 regularization. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
input_arg {
name: "indices"
description: "A vector of indices into the first dimension of var and accum."
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
}
summary: "Sparse update entries in \'*var\' and \'*accum\' according to FOBOS algorithm."
description: "That is for rows we have grad for, we update var and accum as follows:\naccum += grad * grad\nprox_v = var\nprox_v -= lr * grad * (1 / sqrt(accum))\nvar = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}"
}
op {
name: "ResourceSparseApplyProximalGradientDescent"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "alpha"
description: "Scaling factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "l1"
description: "L1 regularization. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "l2"
description: "L2 regularization. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
input_arg {
name: "indices"
description: "A vector of indices into the first dimension of var and accum."
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
}
summary: "Sparse update \'*var\' as FOBOS algorithm with fixed learning rate."
description: "That is for rows we have grad for, we update var as follows:\nprox_v = var - alpha * grad\nvar = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}"
}
op {
name: "ResourceSparseApplyRMSProp"
input_arg {
name: "var"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "ms"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "mom"
description: "Should be from a Variable()."
type: DT_RESOURCE
}
input_arg {
name: "lr"
description: "Scaling factor. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "rho"
description: "Decay rate. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
input_arg {
name: "epsilon"
description: "Ridge term. Must be a scalar."
type_attr: "T"
}
input_arg {
name: "grad"
description: "The gradient."
type_attr: "T"
}
input_arg {
name: "indices"
description: "A vector of indices into the first dimension of var, ms and mom."
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
}
summary: "Update \'*var\' according to the RMSProp algorithm."
description: "Note that in dense implementation of this algorithm, ms and mom will\nupdate even if the grad is zero, but in this sparse implementation, ms\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon)\n\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)\nvar <- var - mom"
}
op {
name: "Restore"
input_arg {
......