Commit ba333e8a authored by emailweixu, committed by GitHub

Merge pull request #2219 from emailweixu/sqrt-reciprocal

Add Sqrt and Reciprocal activation
...
@@ -396,6 +396,44 @@ Error __must_check backward(Argument& act) {
}
END_DEFINE_ACTIVATION(exponential)

/**
 * @brief Reciprocal Activation.
 * \f[
 * f(z) = 1/z
 * \f]
 */
BEGIN_DEFINE_ACTIVATION(reciprocal)
Error __must_check forward(Argument& act) {
act.value->reciprocal2();
return Error();
}
Error __must_check backward(Argument& act) {
act.grad->dotMulSquare(*act.value);
act.grad->neg();
return Error();
}
END_DEFINE_ACTIVATION(reciprocal)
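
The backward pass exploits the fact that the derivative of f(z) = 1/z can be written purely in terms of the stored output: f'(z) = -1/z^2 = -f(z)^2, so the chain rule becomes grad <- -(grad .* value^2), which is exactly the dotMulSquare followed by neg above. A minimal NumPy sketch (illustrative only, not part of this commit) that checks the identity against a finite difference:

import numpy as np

z = np.random.uniform(0.5, 2.0, size=5)  # keep z away from 0
value = 1.0 / z                          # forward output, as stored in act.value

# Backward as implemented: grad <- -(grad .* value^2)
upstream = np.ones_like(z)
grad = -(upstream * value ** 2)

# Finite-difference reference for f'(z) = -1/z^2
eps = 1e-6
fd = ((1.0 / (z + eps)) - (1.0 / (z - eps))) / (2 * eps)
assert np.allclose(grad, fd, atol=1e-4)
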
/**
 * @brief Square Root Activation.
 * \f[
 * f(z) = \sqrt{z}
 * \f]
 */
BEGIN_DEFINE_ACTIVATION(sqrt)
Error __must_check forward(Argument& act) {
act.value->sqrt2();
return Error();
}
Error __must_check backward(Argument& act) {
act.grad->dotDiv(*act.grad, *act.value);
act.grad->mulScalar(0.5);
return Error();
}
END_DEFINE_ACTIVATION(sqrt)
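
Likewise, for f(z) = \sqrt{z} the derivative is f'(z) = 1/(2\sqrt{z}) = 1/(2 f(z)): dotDiv divides the upstream gradient by the stored output and mulScalar(0.5) halves it. The same style of sanity check, again only a sketch:

import numpy as np

z = np.random.uniform(0.5, 2.0, size=5)
value = np.sqrt(z)                       # forward output, as stored in act.value

# Backward as implemented: grad <- 0.5 * grad / value
upstream = np.ones_like(z)
grad = 0.5 * upstream / value

# Finite-difference reference for f'(z) = 1/(2*sqrt(z))
eps = 1e-6
fd = (np.sqrt(z + eps) - np.sqrt(z - eps)) / (2 * eps)
assert np.allclose(grad, fd, atol=1e-4)
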
/**
 * @brief Logarithm Activation.
 * \f[
...
...
@@ -17,7 +17,7 @@ __all__ = [
    "IdentityActivation", "LinearActivation", 'SequenceSoftmaxActivation',
    'ExpActivation', "ReluActivation", "BReluActivation", "SoftReluActivation",
    "STanhActivation", "AbsActivation", "SquareActivation", "BaseActivation",
-    "LogActivation"
+    "LogActivation", "SqrtActivation", "ReciprocalActivation"
]
...
@@ -224,3 +224,27 @@ class LogActivation(BaseActivation):
    def __init__(self):
        BaseActivation.__init__(self, 'log', False)


class SqrtActivation(BaseActivation):
    """
    Square Root Activation.

    .. math::

       f(z) = \sqrt{z}
    """

    def __init__(self):
        BaseActivation.__init__(self, 'sqrt', False)


class ReciprocalActivation(BaseActivation):
    """
    Reciprocal Activation.

    .. math::

       f(z) = 1/z
    """

    def __init__(self):
        BaseActivation.__init__(self, 'reciprocal', False)
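
Either class can then be passed anywhere the config helpers accept an activation object. A hypothetical snippet (assuming the usual paddle.trainer_config_helpers imports; the layer sizes and names here are made up for illustration):

from paddle.trainer_config_helpers import *

x = data_layer(name='data', size=100)
# f(z) = sqrt(z) applied elementwise to the layer output.
h = fc_layer(input=x, size=64, act=SqrtActivation())
# f(z) = 1/z applied on top of that.
y = fc_layer(input=h, size=64, act=ReciprocalActivation())
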
...
@@ -40,6 +40,8 @@ register_unary_math_op('sigmoid', act.SigmoidActivation())
register_unary_math_op('tanh', act.TanhActivation())
register_unary_math_op('square', act.SquareActivation())
register_unary_math_op('relu', act.ReluActivation())
register_unary_math_op('sqrt', act.SqrtActivation())
register_unary_math_op('reciprocal', act.ReciprocalActivation())
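
These registrations expose the new activations as unary operators on layer outputs, i.e. layer_math.sqrt(x) and layer_math.reciprocal(x); the updated test config below chains them between exp and log.
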
def add(layeroutput, other):
...
@@ -4,6 +4,8 @@ settings(batch_size=1000, learning_rate=1e-5)
x = data_layer(name='data', size=100)
x = layer_math.exp(x)
x = layer_math.sqrt(x)
x = layer_math.reciprocal(x)
x = layer_math.log(x)
x = layer_math.abs(x)
x = layer_math.sigmoid(x)
...
@@ -20,13 +20,43 @@ layers {
    }
  }
}
layers {
  name: "__sqrt_0__"
  type: "mixed"
  size: 100
  active_type: "sqrt"
  inputs {
    input_layer_name: "__exp_0__"
    proj_conf {
      type: "identity"
      name: "___sqrt_0__.w0"
      input_size: 100
      output_size: 100
    }
  }
}
layers {
  name: "__reciprocal_0__"
  type: "mixed"
  size: 100
  active_type: "reciprocal"
  inputs {
    input_layer_name: "__sqrt_0__"
    proj_conf {
      type: "identity"
      name: "___reciprocal_0__.w0"
      input_size: 100
      output_size: 100
    }
  }
}
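
As the expected protobuf shows, each unary math op expands into a mixed layer with an identity projection whose active_type carries the activation name; and because sqrt and reciprocal are inserted into the chain, the __log_0__ layer's input below is rewired from __exp_0__ to __reciprocal_0__.
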
layers {
  name: "__log_0__"
  type: "mixed"
  size: 100
  active_type: "log"
  inputs {
-    input_layer_name: "__exp_0__"
+    input_layer_name: "__reciprocal_0__"
    proj_conf {
      type: "identity"
      name: "___log_0__.w0"
...
@@ -351,6 +381,8 @@ sub_models {
  name: "root"
  layer_names: "data"
  layer_names: "__exp_0__"
  layer_names: "__sqrt_0__"
  layer_names: "__reciprocal_0__"
  layer_names: "__log_0__"
  layer_names: "__abs_0__"
  layer_names: "__sigmoid_0__"
...
...
@@ -177,7 +177,7 @@ class SGD(object):
        Testing method. Will test input data.

        :param reader: A reader that reads and yields data items.
        :type reader: collections.Iterable
        :param feeding: A map from each neural network input name to the
                        index of the corresponding array in the items the
                        reader returns.
        :type feeding: dict
...