提交 ebb153b0 编写于 作者: H Haonan 提交者: GitHub

Merge pull request #416 from yu239/hl_activetype

Change the act.name for LinearActivation() to "linear" so that it won't fail in hl_activetype; also fix HasInputsSet() to check the current submodel.
......@@ -378,7 +378,7 @@ hl_activation_mode_t hlActiveType(const std::string& type) {
return HL_ACTIVATION_RELU;
} else if (type == "tanh") {
return HL_ACTIVATION_TANH;
} else if (type == "linear") {
} else if (type == "linear" || type == "") {
return HL_ACTIVATION_LINEAR;
} else {
LOG(FATAL) << "Do not support activation type " << type;
......
......@@ -218,7 +218,7 @@ def Inputs(*args):
@config_func
def HasInputsSet():
    """Return True if the current submodel has any input layers defined.

    :return: whether ``Inputs(...)`` has been called for the submodel
             currently being configured.
    :rtype: bool
    """
    # Check the current submodel rather than the global model config so
    # that nested submodels report their own input state correctly.
    # (The stale check against g_config.model_config.input_layer_names,
    # which shadowed this return and made it unreachable, was removed.)
    return len(g_current_submodel.input_layer_names) != 0
# Define the name of the output layers of the NeuralNetwork.
......@@ -1170,14 +1170,14 @@ def parse_block_expand(block_expand, input_layer_name, block_expand_conf):
block_expand_conf.output_x = 0
else:
block_expand_conf.output_x = cnn_output_size(
block_expand.img_size_x, block_expand.block_x,
block_expand.img_size_x, block_expand.block_x,
block_expand.padding_x, block_expand.stride_x, False)
if block_expand_conf.img_size_y == 0:
block_expand_conf.output_y = 0
else:
block_expand_conf.output_y = cnn_output_size(
block_expand.img_size_y, block_expand.block_y,
block_expand.img_size_y, block_expand.block_y,
block_expand.padding_y, block_expand.stride_y, False)
def parse_maxout(maxout, input_layer_name, maxout_conf):
......@@ -1185,7 +1185,7 @@ def parse_maxout(maxout, input_layer_name, maxout_conf):
maxout_conf.groups = maxout.groups
maxout_conf.img_size_x = maxout.img_size_x
maxout_conf.img_size_y = maxout.img_size_y
# Define an evaluator
@config_func
def Evaluator(
......@@ -1881,7 +1881,7 @@ class MaxOutLayer(LayerBase):
self.config.inputs[0].maxout_conf)
maxout_conf = self.config.inputs[0].maxout_conf
self.set_layer_size(g_layer_map[input_layer.name].size / maxout_conf.groups)
# key: cost type
# value: cost class
g_cost_map = {}
......
......@@ -23,9 +23,9 @@ __all__ = ["TanhActivation", "SigmoidActivation",
class BaseActivation(object):
"""
A mark for activation class.
A mark for activation class.
Each activation inherit BaseActivation, which has two parameters.
:param name: activation name in paddle config.
:type name: basestring
:param support_hppl: True if supported by hppl. HPPL is a library used by paddle
......@@ -194,7 +194,7 @@ class SquareActivation(BaseActivation):
class ExpActivation(BaseActivation):
"""
Exponential Activation.
.. math::
f(z) = e^z.
"""
......
......@@ -1766,7 +1766,7 @@ def img_pool_layer(input, pool_size, name=None,
:type pool_size_y: int|None
:param num_channels: number of input channel.
:type num_channels: int
:param pool_type: pooling type. MaxPooling or AveragePooling. Default is
:param pool_type: pooling type. MaxPooling or AvgPooling. Default is
MaxPooling.
:type pool_type: BasePoolingType
:param stride: stride width of pooling.
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册