# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
'''
The following functions are available in the config file:

Bias: define bias. To be used as value of bias argument in Layer().

Data: define data provider.

Input: define input layer for a layer. To be used as element of inputs argument
       in Layer().

Conv: define a convolution operation for an input of a layer.

Norm: define a normalization operation for an input of a layer.

Pool: define a pooling operation for an input of a layer.

Layer: define a layer.

Parameter: define a parameter.

Import: import another config file. If the imported config file name is
        a relative path, then it will be searched under the directory of the
        current config file.

Inputs(layer_names...):
    Define the name of the input layers of the NeuralNetwork.
    The type of these layers must be "data".
    These layers will be provided with the DataBatch obtained
    from DataProvider. The data streams from DataProvider must
    have the same order.

Outputs(layer_names...):
    Define the name of the output layers of the NeuralNetwork.
    Usually the output is simply the cost layer.
    You can specify other layers as outputs and calculate the
    cost (and its derivative) yourself.


default_initial_std(val)
default_initial_mean(val)
default_momentum(val)
default_decay_rate(val): Set the default value for these parameters


get_config_arg(name, type, default): Get the value for a config parameter.


*** customized extension to config_parser ***
The functionality of config_parser can be extended.
If the config_arg_str for parse_config() contains
extension_module_name=[MODULE_NAME], then config_parser will call
MODULE_NAME.get_config_funcs(g_config).
MODULE_NAME.get_config_funcs() should return a dictionary mapping names to
functions, and those functions will be available in the config file.
See trainer/tests/config_parser_test.py for an example.

To use this from paddle_trainer, paddle_trainer should be called with
--config_args=extension_module_name=[MODULE_NAME]
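An illustrative sketch of a config file using these functions (all layer
names, sizes, and types below are made up for illustration, not taken from
any real model):

    model_type("nn")
    Inputs("input", "label")
    Outputs("cost")
    Layer(name="input", type="data", size=100)
    Layer(name="label", type="data", size=1)
    Layer(name="hidden", type="fc", size=64, active_type="tanh",
          bias=Bias(), inputs=[Input("input")])
    Layer(name="cost", type="square_error", inputs=["hidden", "label"])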

'''
import copy
import logging
import os
import sys
import traceback
import math
import shutil

try:
    from paddle.proto.DataConfig_pb2 import DataConfig
    from paddle.proto.ModelConfig_pb2 import ModelConfig
    from paddle.proto.ModelConfig_pb2 import LayerConfig
    from paddle.proto.ModelConfig_pb2 import LayerInputConfig
    from paddle.proto.ModelConfig_pb2 import ProjectionConfig
    from paddle.proto.ModelConfig_pb2 import OperatorConfig
    from paddle.proto.ModelConfig_pb2 import GeneratorConfig
    from paddle.proto.ModelConfig_pb2 import LinkConfig
    from paddle.proto.ParameterConfig_pb2 import ParameterConfig
    from paddle.proto.ParameterConfig_pb2 import ParameterUpdaterHookConfig
    from paddle.proto.TrainerConfig_pb2 import TrainerConfig

except Exception as e:
    traceback.print_exc()
    raise

logging.basicConfig(
    format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s', )
logger = logging.getLogger('paddle')
logger.setLevel(logging.INFO)
__real_print__ = print
print = logger.info

# from layer type name to layer class
g_layer_type_map = {}

# Initialize global variables. We use this function so that we can
# call parse_config() multiple times
def init_config_environment(
        g_default_momentum=None,
        g_default_decay_rate=None,
        g_default_initial_mean=0.,
        g_default_initial_std=0.01,
        g_default_num_batches_regularization=None,
        g_default_initial_strategy=0,
        g_default_initial_smart=False,
        g_default_gradient_clipping_threshold=None,
        g_default_device=None,
        g_default_update_hooks=None,
        g_default_compact_func=None,
        g_config=TrainerConfig(),
        g_layer_map={},
        g_parameter_map={},
        g_parameter_initializer_map={},
        g_extended_config_funcs={},

        # store command args of paddle_trainer
        g_command_config_args={},

        # Used for PyDataProvider to avoid duplicate module name
        g_py_module_name_list=[],
        g_current_submodel=None,
        g_root_submodel=None,
        g_submodel_map={},
        g_submodel_stack=[],
        g_add_submodel_suffix=False, ):

    for k, v in locals().iteritems():
        globals()[k] = copy.deepcopy(v)


# Because type is widely used as a variable name in this code.
# we need a different function name for the builtin type()
def type_of(x):
    return type(x)


# Check a condition derived from the config file; log a fatal error if it fails
def config_assert(b, msg):
    if not b:
        logger.fatal(msg)

g_config_funcs = {}


# decorator for indicating a function which can be used in config file
def config_func(func):
    g_config_funcs[func.func_name] = func
    return func

# decorator for indicating a class which can be used in config file
def config_class(cls):
    g_config_funcs[cls.__name__] = cls
    return cls

# decorator for indicating a class for a layer type
def config_layer(layer_type):
    def wrap(cls):
        g_config_funcs[cls.__name__] = cls
        g_layer_type_map[layer_type] = cls
        return cls

    return wrap

def gen_parameter_name(layer_name, input_index):
    return '_%s.w%d' % (layer_name, input_index)

def gen_bias_parameter_name(layer_name):
    return '_%s.wbias' % layer_name
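# For example, following the naming convention implemented above:
#   gen_parameter_name("fc1", 0)   -> "_fc1.w0"
#   gen_bias_parameter_name("fc1") -> "_fc1.wbias"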

def default(x, default_value):
    return default_value if x is None else x

class Cfg(object):
    def add_keys(self, locals):
        for k, v in locals.iteritems():
            if not k.startswith('_'):
                self.__setattr__(k, v)

# functions available in config file

# Define the name of the input layers of the NeuralNetwork.
# The type of these layers must be "data".
# These layers will be provided with the DataBatch obtained
# from DataProvider. The data streams from DataProvider must
# have the same order.
@config_func
def Inputs(*args):
    for name in args:
        name = MakeLayerNameInSubmodel(name)
        global g_current_submodel, g_root_submodel
        if g_current_submodel.is_recurrent_layer_group:
            config_assert(False, "Do not set Inputs in recurrent layer group")
        else:
            g_current_submodel.input_layer_names.append(name)

        if g_current_submodel is g_root_submodel:
            g_config.model_config.input_layer_names.append(name)

@config_func
def HasInputsSet():
    return len(g_current_submodel.input_layer_names) != 0


# Define the name of the output layers of the NeuralNetwork.
# Usually the output is simply the cost layer.
# You can specify other layers as outputs and calculate the
# cost (and its derivative) yourself.
@config_func
def Outputs(*args):
    for name in args:
        name = MakeLayerNameInSubmodel(name)
        global g_current_submodel, g_root_submodel
        if g_current_submodel.is_recurrent_layer_group:
            config_assert(False, "Do not set Outputs in recurrent layer group")
        else:
            g_current_submodel.output_layer_names.append(name)

        if g_current_submodel is g_root_submodel:
            g_config.model_config.output_layer_names.append(name)


@config_func
def SubModelBegin(name):
    global g_current_submodel, g_root_submodel, g_submodel_stack
    g_submodel_stack.append(g_current_submodel)

    name = MakeLayerNameInParentSubmodel(name)  #rename in nested submodel

    config_assert(name not in g_submodel_map,
                  'Duplicated submodel name: %s' % name)

    sub_model = g_config.model_config.sub_models.add()
    sub_model.name = name
    g_submodel_map[name] = sub_model
    g_current_submodel = sub_model

@config_func
def SubModelEnd(name=None):
    global g_current_submodel, g_root_submodel, g_submodel_stack
    config_assert(g_current_submodel is not g_root_submodel,
                  "submodel not begin")
    if name is not None:
        config_assert(
            g_current_submodel.name == MakeLayerNameInParentSubmodel(name),
            "submodel name error")

    g_current_submodel = g_submodel_stack.pop()

def MakeLayerNameInParentSubmodel(name):
    suffix = ""
    if len(g_submodel_stack) > 1:
        suffix = "@" + g_submodel_stack[-1].name
    return name + suffix

def GetLayerBaseName(name):
    return name.split('@')[0]


def MakeLayerNameInSubmodel(name, submodel_name=None):
    global g_current_submodel
    global g_add_submodel_suffix
    if (submodel_name is None and not g_add_submodel_suffix and
            not g_current_submodel.is_recurrent_layer_group):
        return name
    if submodel_name is None:
        submodel_name = g_current_submodel.name
    return name + "@" + submodel_name
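# Example of the naming scheme above (a sketch): inside a recurrent layer
# group submodel named "rnn", MakeLayerNameInSubmodel("fc1") yields "fc1@rnn",
# and GetLayerBaseName("fc1@rnn") recovers "fc1".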
# Define a recurrent layer group. It begins with RecurrentLayerGroupBegin
# and ends with RecurrentLayerGroupEnd.
# A recurrent layer group forwards/backwards one frame after the previous
# frame has been forwarded/backwarded through all layers in the layer group.
# in_links are the names of the layers used as input layers of the layer group.
# out_links are the names of the layers in the layer group used as input by
# outside layers.
#
# If generator is set, the layer group needs one or more out_links.
# The first out_link should always be the generated token ids.
# If generator.num_results_per_sample is not set, the output for one sample is
# an ids sequence. Otherwise, if num_results_per_sample is more than one,
# the output for one sample is up to num_results_per_sample generated
# sequences, which are packed into one sequence in the output ids vector. Each
# generated sequence has a generation probability. The probabilities for one
# sample are stored in one row of the output value matrix.
# Packed generated sequence format, for each i:
#   seq_i_length: one integer, the length of the content of seq_i,
#   [seq_i content], length = seq_i_length
#   seq_i_end_mark: one integer, for format checking, always -1
# You can use "seq_text_printer" to print the output of the generator.
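# For example (a sketch): with num_results_per_sample = 2 and generated
# sequences [9, 4, 7] and [9, 5] for one sample, that sample's slice of the
# output ids vector is packed as
#   3, 9, 4, 7, -1, 2, 9, 5, -1
# and its two generation probabilities fill one row of the value matrix.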
@config_func
def RecurrentLayerGroupWithoutOutLinksBegin(name,
                                            in_links,
                                            seq_reversed=False,
                                            target_inlinkname=""):
    global g_current_submodel
    config_assert(g_config.model_config.type == "recurrent_nn",
                  "RecurrentLayerGroup should be used only in recurrent_nn")
    RecurrentLayerGroup(name=name)  # add to father model
    SubModelBegin(name)
    g_current_submodel.is_recurrent_layer_group = True
    g_current_submodel.reversed = seq_reversed
    in_links_count = 0
    for linkid, link in enumerate(in_links):
        if isinstance(link, basestring):
            name = link
        else:
            name = link.link_name

        in_links_count += 1
        layer_name = MakeLayerNameInParentSubmodel(name)
        layer = g_layer_map[layer_name]
        ScatterAgentLayer(name=name, size=layer.size)

        pair = g_current_submodel.in_links.add()
        pair.layer_name = layer_name
        pair.link_name = MakeLayerNameInSubmodel(name)

@config_func
def RecurrentLayerGroupSetOutLink(link):
    if isinstance(link, basestring):
        name = link
    else:
        name = link.link_name
    layer_name = MakeLayerNameInParentSubmodel(name)
    pair = g_current_submodel.out_links.add()
    pair.layer_name = MakeLayerNameInSubmodel(name)
    pair.link_name = layer_name


def RecurrentLayerGroupSetGenerator(generator=None):
    generator.eos_layer_name = MakeLayerNameInSubmodel(generator.eos_layer_name)
    g_current_submodel.generator.CopyFrom(generator)


@config_func
def RecurrentLayerGroupBegin(name,
                             in_links,
                             out_links,
                             generator=None,
                             target_inlinkname="",
                             seq_reversed=False):
    RecurrentLayerGroupWithoutOutLinksBegin(name, in_links, seq_reversed)
    for link in out_links:
        RecurrentLayerGroupSetOutLink(link)

    if generator is not None:
        RecurrentLayerGroupSetGenerator(generator)
        config_assert(
            len(in_links) == 0, "no in_links should be passed to generator")
        config_assert(
            len(out_links) >= 1,
            "one or more than one out_links should be passed to generator")


@config_func
def RecurrentLayerGroupEnd(name):
    global g_current_submodel
    config_assert(g_current_submodel.is_recurrent_layer_group,
                  "RecurrentLayerGroup not begin")
    for pair in g_current_submodel.memories:  # check that memory layers exist
        layer = g_layer_map[pair.layer_name]
        config_assert(layer is not None,
                      "memory declare wrong name:%s" % pair.layer_name)
        memory_link = g_layer_map[pair.link_name]
        config_assert(layer.size == memory_link.size,
                      "memory declare wrong size:%d" % memory_link.size)

    prev_submodel = g_current_submodel
    SubModelEnd(name)

    for pair in prev_submodel.out_links:
        layer = g_layer_map[pair.layer_name]
        # add out agent to father model
        agent_name = GetLayerBaseName(pair.link_name)
        if prev_submodel.HasField("generator"):
            DataLayer(name=agent_name, size=layer.size)
        else:
            GatherAgentLayer(name=agent_name, size=layer.size)

# Define the model type.
# Currently, PaddlePaddle supports "nn", "recurrent_nn", "recursive_nn" and "multi_nn".
@config_func
def model_type(name):
    g_config.model_config.type = name

@config_class
class Bias(Cfg):
    def __init__(self,
                 parameter_name=None,
                 learning_rate=None,
                 momentum=None,
                 decay_rate=None,
                 decay_rate_l1=None,
                 initial_mean=None,
                 initial_std=None,
                 initial_strategy=None,
                 initial_smart=None,
                 num_batches_regularization=None,
                 sparse_remote_update=None,
                 gradient_clipping_threshold=None,
                 is_static=None,
                 is_shared=None,
                 initializer=None):
        self.add_keys(locals())

# Define one input for a layer
@config_class
class Input(Cfg):
    def __init__(
            self,
            input_layer_name,
            parameter_name=None,
            initializer=None,
            learning_rate=None,
            momentum=None,
            decay_rate=None,
            decay_rate_l1=None,
            initial_mean=None,
            initial_std=None,
            initial_strategy=None,
            initial_smart=None,
            num_batches_regularization=None,
            sparse_remote_update=None,
            sparse_update=None,
            gradient_clipping_threshold=None,
            conv=None,
            bilinear_interp=None,
            norm=None,
            pool=None,
            image=None,
            block_expand=None,
            maxout=None,
            spp=None,
            pad=None,
            format=None,
            nnz=None,
            is_static=None,
            is_shared=None,
            update_hooks=None,
            input_layer_argument=None,
            make_layer_name_in_submodel=True, ):
        """
        @param make_layer_name_in_submodel True by default; you might need to
        set it carefully when adding Input in config_parser.py.
        """
        self.add_keys(locals())
        self.input_layer_name = MakeLayerNameInSubmodel(
            input_layer_name
        ) if make_layer_name_in_submodel else input_layer_name


# Define a projection for a mixed layer
@config_class
class Projection(Input):
    type = None  # subclass should set it correctly

    def __init__(
            self,
            input_layer_name,
            size=0,  # projection output size
            parameter_name=None,
            learning_rate=None,
            momentum=None,
            decay_rate=None,
            decay_rate_l1=None,
            initial_mean=None,
            initial_std=None,
            initial_strategy=None,
            initial_smart=None,
            initializer=None,
            num_batches_regularization=None,
            sparse_remote_update=None,
            sparse_update=None,
            gradient_clipping_threshold=None,
            ptype=None,
            format=None,
            nnz=None,
            is_static=None,
            is_shared=None,
            update_hooks=None,
            input_layer_argument=None, ):
        self.add_keys(locals())
        self.input_layer_name = MakeLayerNameInSubmodel(input_layer_name)

        self.proj_conf = ProjectionConfig()
        if ptype is not None:
            self.proj_conf.type = ptype
        else:
            self.proj_conf.type = self.type

    # calculate the output_size given input_size. return 0
    # to indicate using the size from Layer config
    def calc_output_size(self, input_layer_config):
        return self.size

    def calc_parameter_size(self, input_size, output_size):
        raise NotImplementedError()

    def calc_parameter_dims(self, input_size, output_size):
        raise NotImplementedError()


@config_class
class IdentityProjection(Projection):
    type = 'identity'

    def calc_output_size(self, input_layer_config):
        return input_layer_config.size

    def calc_parameter_size(self, input_size, output_size):
        return 0

    def calc_parameter_dims(self, input_size, output_size):
        return []


# Like IdentityProjection, but the layer size may be smaller than the input
# size; the projection selects dimensions [offset, offset+layer_size) from
# the input
@config_class
class IdentityOffsetProjection(Projection):
    type = 'identity_offset'

    def __init__(self, input_layer_name, offset, **xargs):
        super(IdentityOffsetProjection, self).__init__(input_layer_name,
                                                       **xargs)
        self.proj_conf.offset = offset

    def calc_parameter_size(self, input_size, output_size):
        return 0

    def calc_parameter_dims(self, input_size, output_size):
        return []


@config_class
class SliceProjection(Projection):
    type = 'slice'

    def __init__(self, input_layer_name, slices, **xargs):
        super(SliceProjection, self).__init__(input_layer_name, **xargs)
        input = g_layer_map[input_layer_name]
        if input.type in ["exconv", "cudnn_conv"]:
            # the slice operator is for the channel dimension
            assert input.num_filters is not None
            channels = input.num_filters
            image_size = input.size / channels
            assert slices[len(slices) - 1][1] <= channels
            for i in xrange(len(slices)):
                slice = self.proj_conf.slices.add()
                slice.start = slices[i][0] * image_size
                slice.end = slices[i][1] * image_size
                self.size += slice.end - slice.start
        else:
            config_assert(False,
                          'Currently the input should be convolution layer')
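    # Worked example (a sketch): for an input with 4 channels over an 8x8
    # image (input.size = 256, so image_size = 64), slices = [(0, 2), (3, 4)]
    # yields proj slices [0, 128) and [192, 256), giving self.size == 192.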

    def calc_parameter_size(self, input_size, output_size):
        return 0

    def calc_parameter_dims(self, input_size, output_size):
        return []


# DotMulProjection performs element-wise multiplication with weight
@config_class
class DotMulProjection(Projection):
    type = 'dot_mul'

    def calc_output_size(self, input_layer_config):
        return input_layer_config.size

    def calc_parameter_size(self, input_size, output_size):
        return output_size

    def calc_parameter_dims(self, input_size, output_size):
        return [1, output_size]

# ScalingProjection
@config_class
class ScalingProjection(Projection):
    type = 'scaling'

    def calc_output_size(self, input_layer_config):
        return input_layer_config.size

    def calc_parameter_size(self, input_size, output_size):
        return 1

    def calc_parameter_dims(self, input_size, output_size):
        return [1, 1]


@config_class
class TableProjection(Projection):
    type = 'table'

    def calc_parameter_size(self, input_size, output_size):
        return input_size * output_size

    def calc_parameter_dims(self, input_size, output_size):
        return [input_size, output_size]


@config_class
class FullMatrixProjection(Projection):
    type = 'fc'

    def calc_parameter_size(self, input_size, output_size):
        return input_size * output_size

    def calc_parameter_dims(self, input_size, output_size):
        return [input_size, output_size]


@config_class
class TransposedFullMatrixProjection(Projection):
    type = 'trans_fc'

    def calc_parameter_size(self, input_size, output_size):
        return input_size * output_size

    def calc_parameter_dims(self, input_size, output_size):
        return [output_size, input_size]


@config_class
class ContextProjection(Projection):
    type = 'context'

    def __init__(self, input_layer_name, context_start, context_length,
                 trainable_padding, **xargs):
        super(ContextProjection, self).__init__(input_layer_name, **xargs)
        self.proj_conf.context_start = context_start
        self.proj_conf.context_length = context_length
        self.proj_conf.trainable_padding = trainable_padding
        self._total_pad = max(0, -self.proj_conf.context_start) \
                          + max(0, self.proj_conf.context_start \
                                + self.proj_conf.context_length - 1)

    def calc_output_size(self, input_layer_config):
        return input_layer_config.size * self.proj_conf.context_length

    def calc_parameter_size(self, input_size, output_size):
        if self.proj_conf.trainable_padding == False:
            return 0
        else:
            return input_size * self._total_pad

    def calc_parameter_dims(self, input_size, output_size):
        return [self._total_pad, input_size]

    _total_pad = 0
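    # Worked example (a sketch): context_start = -1 and context_length = 3
    # give _total_pad = max(0, 1) + max(0, -1 + 3 - 1) = 2, and an input of
    # size N projects to an output of size 3 * N.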


@config_class
class ConvBaseProjection(Projection):
    def __init__(self,
                 input_layer_name,
                 num_filters=None,
                 conv_conf=None,
                 **xargs):
        super(ConvBaseProjection, self).__init__(input_layer_name, **xargs)

        if num_filters is not None:
            self.proj_conf.num_filters = num_filters

    def calc_output_size(self, input_layer_config):
        return self.proj_conf.output_size

    def calc_parameter_size(self, input_size, output_size):
        co = self.proj_conf.num_filters
        ci = self.proj_conf.conv_conf.channels
        fh = self.proj_conf.conv_conf.filter_size
        fw = self.proj_conf.conv_conf.filter_size_y
        gr = self.proj_conf.conv_conf.groups
        return co * ci * fh * fw / gr

    def calc_bias_size(self):
        return self.proj_conf.num_filters

    def calc_parameter_dims(self, input_size, output_size):
        return None


@config_class
class ConvProjection(ConvBaseProjection):
    type = 'conv'

    def __init__(self,
                 input_layer_name,
                 num_filters=None,
                 conv_conf=None,
                 **xargs):
        super(ConvProjection, self).__init__(input_layer_name, num_filters,
                                             conv_conf, **xargs)

        parse_conv(conv_conf, self.input_layer_name, self.proj_conf.conv_conf,
                   num_filters)
        self.proj_conf.output_size = self.proj_conf.conv_conf.output_x * \
                                     self.proj_conf.conv_conf.output_y * \
                                     num_filters


@config_class
class ConvTransProjection(ConvBaseProjection):
    type = 'convt'

    def __init__(self,
                 input_layer_name,
                 num_filters=None,
                 conv_conf=None,
                 **xargs):
        super(ConvTransProjection, self).__init__(input_layer_name, num_filters,
                                                  conv_conf, **xargs)

        parse_conv(
            conv_conf,
            self.input_layer_name,
            self.proj_conf.conv_conf,
            num_filters,
            trans=True)
        self.proj_conf.output_size = self.proj_conf.conv_conf.img_size_y * \
                                     self.proj_conf.conv_conf.img_size * \
                                     num_filters


# Define an operator for a mixed layer
@config_class
class Operator(Cfg):
    type = None  # subclass should set it correctly

    def __init__(
            self,
            input_layer_names, ):
        self.add_keys(locals())
        self.operator_conf = OperatorConfig()
        self.operator_conf.type = self.type

    def check_dims(self):
        pass

    def calc_output_size(self, input_sizes):
        return 0


@config_class
class DotMulOperator(Operator):
    type = 'dot_mul'

    def __init__(self, input_layer_names, scale=None, **xargs):
        super(DotMulOperator, self).__init__(input_layer_names, **xargs)
        if scale is not None:
            self.operator_conf.dotmul_scale = scale

        config_assert(len(input_layer_names) == 2, "DotMul is binary operator")

    def check_dims(self):
        for i in range(2):
            config_assert(self.operator_conf.input_sizes[i] ==
                          self.operator_conf.output_size,
                          "DotMul input_size != output_size")

    def calc_output_size(self, input_sizes):
        return input_sizes[0]


@config_class
class ConvOperator(Operator):
    type = 'conv'

    def __init__(self,
                 input_layer_names,
                 num_filters=None,
                 conv_conf=None,
                 **xargs):
        super(ConvOperator, self).__init__(input_layer_names, **xargs)
        if num_filters is not None:
            self.operator_conf.num_filters = num_filters

        parse_conv(conv_conf,
                   MakeLayerNameInSubmodel(input_layer_names[0]),
                   self.operator_conf.conv_conf, num_filters)
        self.operator_conf.output_size = self.operator_conf.conv_conf.output_x * \
                                         self.operator_conf.conv_conf.output_y * \
                                         num_filters

        config_assert(len(input_layer_names) == 2, "Conv is binary operator")

    def calc_output_size(self, input_sizes):
        return self.operator_conf.output_size


@config_class
class ConvTransOperator(Operator):
    type = 'convt'

    def __init__(self,
                 input_layer_names,
                 num_filters=None,
                 conv_conf=None,
                 **xargs):
        super(ConvTransOperator, self).__init__(input_layer_names, **xargs)
        if num_filters is not None:
            self.operator_conf.num_filters = num_filters

        parse_conv(
            conv_conf,
            MakeLayerNameInSubmodel(input_layer_names[0]),
            self.operator_conf.conv_conf,
            num_filters,
            trans=True)
        self.operator_conf.output_size = \
            self.operator_conf.conv_conf.img_size * \
            self.operator_conf.conv_conf.img_size_y * \
            num_filters

        config_assert(len(input_layer_names) == 2, "Conv is binary operator")

    def calc_output_size(self, input_sizes):
        return self.operator_conf.output_size


# please refer to the comments in proto/ModelConfig.proto
@config_class
class Conv(Cfg):
    def __init__(self,
                 filter_size,
                 channels,
                 padding=None,
                 stride=None,
                 groups=None,
                 filter_channels=None,
                 output_x=None,
                 img_size=None,
                 caffe_mode=True,
                 filter_size_y=None,
                 padding_y=None,
                 stride_y=None):
        self.add_keys(locals())
        if filter_size_y is None:
            self.filter_size_y = filter_size
        if padding_y is None:
            self.padding_y = padding
        if stride_y is None:
            self.stride_y = stride
        if output_x is not None:
            config_assert(output_x <= 0)

# please refer to the comments in proto/ModelConfig.proto
@config_class
class Conv3D(Cfg):
    def __init__(self,
                 filter_size,
                 channels,
                 padding=None,
                 stride=None,
                 groups=None,
                 filter_channels=None,
                 output_x=None,
                 img_size=None,
                 caffe_mode=True,
                 filter_size_y=None,
                 padding_y=None,
                 stride_y=None,
                 filter_size_z=None,
                 padding_z=None,
                 stride_z=None):
        self.add_keys(locals())
        self.filter_size_y = filter_size_y if filter_size_y else filter_size
        self.filter_size_z = filter_size_z if filter_size_z else filter_size
        self.padding_y = padding_y if padding_y else padding
        self.padding_z = padding_z if padding_z else padding
        self.stride_y = stride_y if stride_y else stride
        self.stride_z = stride_z if stride_z else stride
        if output_x is not None:
            config_assert(output_x <= 0)


@config_class
class BilinearInterp(Cfg):
    def __init__(self, out_size_x=None, out_size_y=None, channels=None):
        self.add_keys(locals())


@config_class
class Pool(Cfg):
    def __init__(
            self,
            pool_type,
            channels,
            size_x,
            size_y=None,
            start=None,
            stride=None,  # 1 by default in protobuf
            stride_y=None,
            padding=None,  # 0 by default in protobuf
            padding_y=None):
        self.add_keys(locals())


@config_class
class SpatialPyramidPool(Cfg):
    def __init__(self, pool_type, pyramid_height, channels):
        self.add_keys(locals())


@config_class
class Pad(Cfg):
    def __init__(self, channels, pad_c, pad_h, pad_w):
        self.add_keys(locals())


@config_class
class Norm(Cfg):
    def __init__(self,
                 norm_type,
                 channels,
                 size,
                 scale,
                 pow,
                 output_x=None,
                 img_size=None,
                 blocked=None):
        self.add_keys(locals())


@config_class
class Image(Cfg):
    def __init__(self, channels, img_size=None):
        self.add_keys(locals())


@config_class
class BlockExpand(Cfg):
    def __init__(self,
                 channels,
                 padding_x=0,
                 padding_y=0,
                 stride_x=0,
                 stride_y=0,
                 block_x=0,
                 block_y=0,
                 img_size_x=0,
                 img_size_y=0,
                 output_x=0,
                 output_y=0):
        self.add_keys(locals())

@config_class
class MaxOut(Cfg):
    def __init__(self, channels, groups, img_size_x=0, img_size_y=0):
        self.add_keys(locals())


def create_data_config_proto(async_load_data=False,
                             constant_slots=None,
                             data_ratio=1,
                             is_main_data=True,
                             usage_ratio=None):
    # default: all sub dataproviders are treated as "main data".
    # see proto/DataConfig.proto for is_main_data
    data_config = DataConfig()

    data_config.async_load_data = async_load_data

    if constant_slots:
        data_config.constant_slots.extend(constant_slots)
    data_config.data_ratio = data_ratio
    data_config.is_main_data = is_main_data

    usage_ratio = default(usage_ratio, settings_deprecated["usage_ratio"])
    config_assert(usage_ratio >= 0 and usage_ratio <= 1,
                  "The range of usage_ratio is [0, 1]")
    data_config.usage_ratio = usage_ratio

    return data_config


@config_func
def SimpleData(files=None,
               feat_dim=None,
               context_len=None,
               buffer_capacity=None,
               **xargs):
    data_config = create_data_config_proto(**xargs)
    data_config.type = 'simple'
    data_config.files = files
    data_config.feat_dim = feat_dim
    if context_len is not None:
        data_config.context_len = context_len
    if buffer_capacity:
        data_config.buffer_capacity = buffer_capacity
    return data_config


@config_func
def PyData(files=None,
           type=None,
           file_group_queue_capacity=None,
           load_data_module=None,
           load_data_object=None,
           load_data_args="",
           load_file_count=None,
           constant_slots=None,
           load_thread_num=None,
           **xargs):
    data_config = create_data_config_proto(**xargs)
    data_config.type = 'py'
    if load_data_module in g_py_module_name_list:

        def get_path(module):
            m = __import__(load_data_module)
            return os.path.split(os.path.realpath(m.__file__))[0]

        # The Python C-API is not thread safe and one module can only be
        # imported once, so here we need to copy the module under a different
        # name if it has to be imported several times.
        module_new_name = "%s_copy_%d" % (load_data_module,
                                          len(g_py_module_name_list))
        g_py_module_name_list.append(module_new_name)
        module_path = "%s/%s.py" % (get_path(load_data_module),
                                    load_data_module)
        new_module_path = "%s/%s.py" % (get_path(load_data_module),
                                        module_new_name)
        if not os.path.isfile(module_path):
            raise Exception("File %s does not exist." % module_path)
        shutil.copy2(module_path, new_module_path)
        load_data_module = module_new_name
    else:
        g_py_module_name_list.append(load_data_module)
    if load_data_module is not None and load_data_object is not None:
        data_config.load_data_module = load_data_module
        data_config.load_data_object = load_data_object
    else:
        raise ValueError('load_data_module, load_data_object is not defined.')
    data_config.load_data_args = load_data_args

    data_config.files = files or ''
    if file_group_queue_capacity is not None:
        data_config.file_group_conf.queue_capacity = file_group_queue_capacity
    if load_file_count is not None:
        data_config.file_group_conf.load_file_count = load_file_count
    if load_thread_num is not None:
        data_config.file_group_conf.load_thread_num = load_thread_num
    if constant_slots:
        data_config.constant_slots.extend(constant_slots)
    return data_config
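# A hypothetical usage of PyData in a config file (the file name, module and
# object below are illustrative only):
#   TrainData(PyData(files="train.list",
#                    load_data_module="my_provider",
#                    load_data_object="process"))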


@config_func
def ProtoData(files=None,
              type=None,
              file_group_queue_capacity=None,
              load_file_count=None,
              constant_slots=None,
              load_thread_num=None,
              **xargs):
    data_config = create_data_config_proto(**xargs)
    if type is None:
        data_config.type = 'proto'
    else:
        data_config.type = type
    data_config.files = files

    # When type="proto_group", one data provider contains at most
    # load_file_count files, and there are at most
    # (queue_capacity + load_thread_num + 1) data providers in memory
    if file_group_queue_capacity is not None:
        data_config.file_group_conf.queue_capacity = file_group_queue_capacity
    if load_file_count is not None:
        data_config.file_group_conf.load_file_count = load_file_count
    if load_thread_num is not None:
        data_config.file_group_conf.load_thread_num = load_thread_num
    if constant_slots:
        data_config.constant_slots.extend(constant_slots)
    return data_config


# Real data for training is actually provided by "sub_data" data providers.
@config_func
def MultiData(sub_data=[]):
    data_config = DataConfig()
    data_config.type = 'multi'
    data_config.sub_data_configs.extend(sub_data)
    return data_config


@config_func
def Data(type,
         files=None,
         feat_dim=None,
         slot_dims=None,
         context_len=None,
         buffer_capacity=None,
         **xargs):

    data_config = create_data_config_proto(**xargs)
    data_config.type = type
    data_config.files = files
    data_config.feat_dim = feat_dim
    data_config.slot_dims.extend(slot_dims)
    if context_len is not None:
        data_config.context_len = context_len
    data_config.buffer_capacity = buffer_capacity
    return data_config


@config_func
def TrainData(data_config, async_load_data=None):
    config_assert(not g_config.HasField('data_config'),
                  'Only one TrainData definition is allowed')
    g_config.data_config.CopyFrom(data_config)
    g_config.data_config.for_test = False
    if async_load_data is not None:
        logger.warning("Deprecated: async_load_data should be used inside"
                       " Data definition")
        g_config.data_config.async_load_data = async_load_data


@config_func
def TestData(data_config, async_load_data=None):
    config_assert(not g_config.HasField('test_data_config'),
                  'Only one TestData definition is allowed')
    g_config.test_data_config.CopyFrom(data_config)
    g_config.test_data_config.for_test = True
    if async_load_data is not None:
        logger.warning("Deprecated: async_load_data should be used inside"
                       " Data definition")
        g_config.test_data_config.async_load_data = async_load_data


# caffe_mode: compute the output size using floor instead of ceil,
#             which is consistent with Caffe's and cuDNN's convention.
def cnn_output_size(img_size, filter_size, padding, stride, caffe_mode):
    output = (2 * padding + img_size - filter_size) / float(stride)
    if caffe_mode:
        return 1 + int(math.floor(output))
    else:
        return 1 + int(math.ceil(output))
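# Worked example (a sketch): img_size=6, filter_size=3, padding=0, stride=2
# gives (2*0 + 6 - 3) / 2.0 = 1.5, so the result is 1 + floor(1.5) = 2 with
# caffe_mode and 1 + ceil(1.5) = 3 without it.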


# Calculate image_size based on output_size for de-convolution (ConvTransLayer).
# It is the inverse function of cnn_output_size.
def cnn_image_size(output_size, filter_size, padding, stride, caffe_mode):
    img_size = (output_size - 1) * stride + filter_size - 2 * padding
    if not caffe_mode:
        img_size = img_size + 1
    return img_size
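# Round-trip check (a sketch): with caffe_mode=True, cnn_image_size(2, 3, 0, 2,
# True) = (2 - 1) * 2 + 3 - 0 = 5, and cnn_output_size(5, 3, 0, 2, True) =
# 1 + floor(2 / 2.0) = 2, so the two functions invert each other.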


def get_img_size(input_layer_name, channels):
    input = g_layer_map[input_layer_name]
    img_pixels = input.size / channels
    img_size = input.width if input.width > 0 else int(img_pixels**0.5)
    img_size_y = input.height if input.height > 0 else int(img_pixels /
                                                           img_size)
    config_assert(
        img_size * img_size_y == img_pixels,
        "Input layer %s: Incorrect input image size %d * %d for input image pixels %d"
        % (input_layer_name, img_size, img_size_y, img_pixels))
    return img_size, img_size_y
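# For example (a sketch): an input layer of size 300 with channels=3 and no
# explicit width/height gives img_pixels = 100, so img_size = img_size_y = 10.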


def get_img3d_size(input_layer_name, channels):
    input = g_layer_map[input_layer_name]
    img_pixels = input.size / channels
    img_size = input.width
    img_size_y = input.height
    img_size_z = input.depth

    config_assert(
        img_size * img_size_y * img_size_z == img_pixels,
        "Input layer %s: Incorrect input image size %d * %d * %d for input image pixels %d"
        % (input_layer_name, img_size, img_size_y, img_size_z, img_pixels))
    return img_size, img_size_y, img_size_z


def parse_bilinear(bilinear, input_layer_name, bilinear_conf):
    parse_image(bilinear, input_layer_name, bilinear_conf.image_conf)
    bilinear_conf.out_size_x = bilinear.out_size_x
    bilinear_conf.out_size_y = bilinear.out_size_y


def parse_pool(pool, input_layer_name, pool_conf, ceil_mode):
    pool_conf.pool_type = pool.pool_type
    config_assert(pool.pool_type in [
        'max-projection', 'avg-projection', 'cudnn-max-pool', 'cudnn-avg-pool'
    ], "pool-type %s is not in "
                  "['max-projection', 'avg-projection', "
                  "'cudnn-max-pool', 'cudnn-avg-pool']" % pool.pool_type)

    pool_conf.channels = pool.channels
    pool_conf.size_x = pool.size_x
    pool_conf.stride = pool.stride

    pool_conf.size_y = default(pool.size_y, pool_conf.size_x)
    pool_conf.stride_y = default(pool.stride_y, pool_conf.stride)

    pool_conf.img_size, pool_conf.img_size_y = \
        get_img_size(input_layer_name, pool.channels)

    config_assert(not pool.start, "start is deprecated in pooling.")

    if pool.padding is not None:
        pool_conf.padding = pool.padding
    pool_conf.padding_y = default(pool.padding_y, pool_conf.padding)
    pool_conf.output_x = cnn_output_size(pool_conf.img_size, pool_conf.size_x,
                                         pool_conf.padding, pool_conf.stride,
                                         not ceil_mode)
    pool_conf.output_y = cnn_output_size(pool_conf.img_size_y, pool_conf.size_y,
                                         pool_conf.padding_y,
                                         pool_conf.stride_y, not ceil_mode)


def parse_spp(spp, input_layer_name, spp_conf):
    parse_image(spp, input_layer_name, spp_conf.image_conf)
    spp_conf.pool_type = spp.pool_type
    config_assert(spp.pool_type in ['max-projection', 'avg-projection'],
                  "pool-type %s is not in "
                  "['max-projection', 'avg-projection']" % spp.pool_type)
    spp_conf.pyramid_height = spp.pyramid_height


def parse_image(image, input_layer_name, image_conf):
    image_conf.channels = image.channels
    image_conf.img_size, image_conf.img_size_y = \
        get_img_size(input_layer_name, image_conf.channels)


def parse_norm(norm, input_layer_name, norm_conf):
    norm_conf.norm_type = norm.norm_type
    config_assert(
        norm.norm_type in
        ['rnorm', 'cmrnorm-projection', 'cross-channel-norm'],
        "norm-type %s is not in [rnorm, cmrnorm-projection, cross-channel-norm]"
        % norm.norm_type)
    norm_conf.channels = norm.channels
    norm_conf.size = norm.size
    norm_conf.scale = norm.scale
    norm_conf.pow = norm.pow
    norm_conf.blocked = norm.blocked

    norm_conf.img_size, norm_conf.img_size_y = \
        get_img_size(input_layer_name, norm.channels)
    norm_conf.output_x = norm_conf.img_size
    norm_conf.output_y = norm_conf.img_size_y
    if norm.norm_type in ['cmrnorm-projection']:
        norm_conf.scale /= norm.size
    else:
        norm_conf.scale /= norm.size**2


# caffe_mode: compute the output size using floor instead of ceil,
#             which is consistent with Caffe's and cuDNN's convention.
def parse_conv(conv, input_layer_name, conv_conf, num_filters, trans=False):
    conv_conf.filter_size = conv.filter_size
    conv_conf.filter_size_y = conv.filter_size_y
    conv_conf.channels = conv.channels
    conv_conf.padding = conv.padding
    conv_conf.padding_y = conv.padding_y
    conv_conf.stride = conv.stride
    conv_conf.stride_y = conv.stride_y
    conv_conf.groups = conv.groups
    conv_conf.caffe_mode = conv.caffe_mode

    if not trans:
        conv_conf.filter_channels = conv.channels / conv.groups
        conv_conf.img_size, conv_conf.img_size_y = \
            get_img_size(input_layer_name, conv.channels)
        conv_conf.output_x = cnn_output_size(
            conv_conf.img_size, conv_conf.filter_size, conv_conf.padding,
            conv_conf.stride, conv_conf.caffe_mode)
        conv_conf.output_y = cnn_output_size(
            conv_conf.img_size_y, conv_conf.filter_size_y, conv_conf.padding_y,
            conv_conf.stride_y, conv_conf.caffe_mode)
    else:
        conv_conf.filter_channels = num_filters / conv.groups
        conv_conf.output_x, conv_conf.output_y = \
            get_img_size(input_layer_name, conv.channels)
        conv_conf.img_size = cnn_image_size(
            conv_conf.output_x, conv_conf.filter_size, conv_conf.padding,
            conv_conf.stride, conv_conf.caffe_mode)
        conv_conf.img_size_y = cnn_image_size(
            conv_conf.output_y, conv_conf.filter_size_y, conv_conf.padding_y,
            conv_conf.stride_y, conv_conf.caffe_mode)
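# Worked example for parse_conv (a sketch): a 3x3 filter with stride 1 and
# padding 1 on a 32x32 input gives, via cnn_output_size with caffe_mode,
# output_x = 1 + floor((2*1 + 32 - 3) / 1.0) = 32, so the spatial size is
# preserved.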


# caffe_mode: compute the output size using floor instead of ceil,
#             which is consistent with Caffe's and cuDNN's convention.
def parse_conv3d(conv, input_layer_name, conv_conf, num_filters, trans=False):
    conv_conf.filter_size = conv.filter_size
    conv_conf.filter_size_y = conv.filter_size_y
    conv_conf.filter_size_z = conv.filter_size_z
    conv_conf.channels = conv.channels
    conv_conf.padding = conv.padding
    conv_conf.padding_y = conv.padding_y
    conv_conf.padding_z = conv.padding_z
    conv_conf.stride = conv.stride
    conv_conf.stride_y = conv.stride_y
    conv_conf.stride_z = conv.stride_z
    conv_conf.groups = conv.groups
    conv_conf.caffe_mode = conv.caffe_mode

    if not trans:
        conv_conf.filter_channels = conv.channels / conv.groups
        conv_conf.img_size, conv_conf.img_size_y, conv_conf.img_size_z = \
            get_img3d_size(input_layer_name, conv.channels)
        conv_conf.output_x = cnn_output_size(
            conv_conf.img_size, conv_conf.filter_size, conv_conf.padding,
            conv_conf.stride, conv_conf.caffe_mode)
        conv_conf.output_y = cnn_output_size(
            conv_conf.img_size_y, conv_conf.filter_size_y, conv_conf.padding_y,
            conv_conf.stride_y, conv_conf.caffe_mode)
        conv_conf.output_z = cnn_output_size(
            conv_conf.img_size_z, conv_conf.filter_size_z, conv_conf.padding_z,
            conv_conf.stride_z, conv_conf.caffe_mode)
    else:
        conv_conf.filter_channels = num_filters / conv.groups
        conv_conf.output_x, conv_conf.output_y, conv_conf.output_z = \
            get_img3d_size(input_layer_name, conv.channels)
        conv_conf.img_size = cnn_image_size(
            conv_conf.output_x, conv_conf.filter_size, conv_conf.padding,
            conv_conf.stride, conv_conf.caffe_mode)
        conv_conf.img_size_y = cnn_image_size(
            conv_conf.output_y, conv_conf.filter_size_y, conv_conf.padding_y,
            conv_conf.stride_y, conv_conf.caffe_mode)
        conv_conf.img_size_z = cnn_image_size(
            conv_conf.output_z, conv_conf.filter_size_z, conv_conf.padding_z,
            conv_conf.stride_z, conv_conf.caffe_mode)


def parse_block_expand(block_expand, input_layer_name, block_expand_conf):
    block_expand_conf.channels = block_expand.channels
    block_expand_conf.stride_x = block_expand.stride_x
    block_expand_conf.stride_y = block_expand.stride_y
    block_expand_conf.padding_x = block_expand.padding_x
    block_expand_conf.padding_y = block_expand.padding_y
    block_expand_conf.block_x = block_expand.block_x
    block_expand_conf.block_y = block_expand.block_y
    block_expand_conf.img_size_x = block_expand.img_size_x
    block_expand_conf.img_size_y = block_expand.img_size_y
    if block_expand_conf.img_size_x == 0:
        block_expand_conf.output_x = 0
    else:
        block_expand_conf.output_x = cnn_output_size(
            block_expand.img_size_x, block_expand.block_x,
            block_expand.padding_x, block_expand.stride_x, False)

    if block_expand_conf.img_size_y == 0:
        block_expand_conf.output_y = 0
    else:
        block_expand_conf.output_y = cnn_output_size(
            block_expand.img_size_y, block_expand.block_y,
            block_expand.padding_y, block_expand.stride_y, False)
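
# A worked instance of the calls above, assuming the cnn_output_size helper
# defined earlier in this file: img_size_y = 10, block_y = 4, padding_y = 0
# and stride_y = 2 give output_y = 1 + ceil((10 - 4) / 2) = 4 (ceil mode,
# since caffe_mode is passed as False).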


def parse_maxout(maxout, input_layer_name, maxout_conf):
    parse_image(maxout, input_layer_name, maxout_conf.image_conf)
    maxout_conf.groups = maxout.groups


# Define an evaluator
@config_func
def Evaluator(name,
              type,
              inputs,
              chunk_scheme=None,
              num_chunk_types=None,
              classification_threshold=None,
              positive_label=None,
              dict_file=None,
              result_file=None,
              num_results=None,
              top_k=None,
              delimited=None,
              excluded_chunk_types=None,
              overlap_threshold=None,
              background_id=None,
              evaluate_difficult=None,
              ap_type=None):
    evaluator = g_config.model_config.evaluators.add()
    evaluator.type = type
    evaluator.name = MakeLayerNameInSubmodel(name)
    if type_of(inputs) == str:
        inputs = [inputs]

    evaluator.input_layers.extend(
        [MakeLayerNameInSubmodel(name) for name in inputs])

    if chunk_scheme is not None:
        evaluator.chunk_scheme = chunk_scheme
        evaluator.num_chunk_types = num_chunk_types
    g_current_submodel.evaluator_names.append(evaluator.name)

    if classification_threshold is not None:
        evaluator.classification_threshold = classification_threshold
    if positive_label is not None:
        evaluator.positive_label = positive_label
    if dict_file is not None:
        evaluator.dict_file = dict_file

    if result_file is not None:
        evaluator.result_file = result_file
    if num_results is not None:
        evaluator.num_results = num_results
    if top_k is not None:
        evaluator.top_k = top_k
    if delimited is not None:
        evaluator.delimited = delimited

    if excluded_chunk_types:
        evaluator.excluded_chunk_types.extend(excluded_chunk_types)

    if overlap_threshold is not None:
        evaluator.overlap_threshold = overlap_threshold

    if background_id is not None:
        evaluator.background_id = background_id

    if evaluate_difficult is not None:
        evaluator.evaluate_difficult = evaluate_difficult

    if ap_type is not None:
        evaluator.ap_type = ap_type
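
# A hedged usage sketch (layer names and evaluator settings are hypothetical):
#   Evaluator(
#       name="chunk_f1",
#       type="chunk",
#       inputs=["output", "label"],
#       chunk_scheme="IOB",
#       num_chunk_types=11)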


class LayerBase(object):
    def __init__(
            self,
            name,
            type,
            size,  # size can be 0. In this case, subclass should set it.
            inputs,
            device=None,
            active_type="",
            drop_rate=0.,
            coeff=None,
            error_clipping_threshold=None):
        config_assert('@' not in name,
                      "layer name: %s contain special character @" % name)
        global g_current_submodel
        name = MakeLayerNameInSubmodel(name)

        config_assert(name not in g_layer_map,
                      'Duplicated layer name: %s' % name)

        self.inputs = copy.deepcopy(inputs)
        self.operators = []

        if self.inputs is None:
            self.inputs = []
        elif type_of(self.inputs) != list:
            self.inputs = [self.inputs]

        self.config = g_config.model_config.layers.add()
        assert isinstance(self.config, LayerConfig)
        self.config.name = name
        self.config.type = type
        self.config.active_type = active_type
        if coeff is not None:
            self.config.coeff = float(coeff)
        if size != 0:
            self.config.size = size
        if drop_rate != 0:
            self.config.drop_rate = drop_rate

        if device is not None:
            self.config.device = device
        elif g_default_device is not None:
            self.config.device = g_default_device

        if error_clipping_threshold is not None:
            self.config.error_clipping_threshold = error_clipping_threshold

        for input_index in xrange(len(self.inputs)):
            input = self.inputs[input_index]
            input_config = None
            input_layer_name = ''
            if type_of(input) == str:
                input_layer_name = input
                input_config = Input(
                    input_layer_name=input,
                    parameter_name=gen_parameter_name(name, input_index))
                input_layer_name = input_config.input_layer_name
            elif isinstance(input, Input):
                input_layer_name = input.input_layer_name
                input_config = input
                if input_config.parameter_name is None:
                    input_config.parameter_name = \
                        gen_parameter_name(name, input_index)
            elif isinstance(input, Operator):
                self.operators.append(input)
                input.operator_conf.input_indices.append(input_index)
                input_config = Input(input.input_layer_names[0])
                input_layer_name = input_config.input_layer_name
            else:
                raise ValueError('Wrong type for inputs: %s' % type_of(input))
            config_assert(input_layer_name in g_layer_map,
                          "Unknown input layer '%s' for layer %s" %
                          (input_layer_name, name))
            self.inputs[input_index] = input_config
            layer_input = self.config.inputs.add()
            layer_input.input_layer_name = input_config.input_layer_name
            if input_config.input_layer_argument is not None:
                layer_input.input_layer_argument = \
                    input_config.input_layer_argument

        g_layer_map[name] = self.config

        g_current_submodel.layer_names.append(self.config.name)

    def get_input_layer(self, input_index):
        return g_layer_map[self.config.inputs[input_index].input_layer_name]

    # returns the name of the bias parameter created when not *for_self*
    def create_bias_parameter(
            self,
            bias,  # True/False or Bias()
            size,
            dims=None,
            for_self=True,  # whether create bias for layer self
    ):

        if size == 0:
            return
        if dims is None:
            dims = [1, size]

        config_assert(
            type_of(bias) == bool or type_of(bias) == Bias,
            'Incorrect type for bias: %s' % type_of(bias))

        if type_of(bias) == bool:
            if bias:
                bias = Bias()

        if type_of(bias) == Bias:
            if bias.parameter_name is None:
                bias.parameter_name = gen_bias_parameter_name(self.config.name)
            if bias.parameter_name not in g_parameter_map:
                assert isinstance(self.config, LayerConfig)

                Parameter(
                    bias.parameter_name,
                    size,
                    self.config.device
                    if self.config.HasField('device') else None,
                    dims,
                    bias.learning_rate,
                    bias.momentum,
                    decay_rate=bias.decay_rate,
                    decay_rate_l1=bias.decay_rate_l1,
                    initial_mean=bias.initial_mean,
                    initial_std=bias.initial_std,
                    initial_strategy=bias.initial_strategy,
                    initial_smart=bias.initial_smart,
                    num_batches_regularization=bias.num_batches_regularization,
                    sparse_remote_update=bias.sparse_remote_update,
                    gradient_clipping_threshold=bias.
                    gradient_clipping_threshold,
                    is_static=bias.is_static,
                    is_shared=bias.is_shared,
                    initializer=bias.initializer)
            if for_self:
                self.config.bias_parameter_name = bias.parameter_name
            else:
                return bias.parameter_name

    def create_input_parameter(self,
                               input_index,
                               size,
                               dims=None,
                               sparse=None,
                               format=None):
        if dims is None:
            # TODO(yuyang18): print warning and callstack here!
            dims = list()

        if size == 0:
            return

        input_config = self.inputs[input_index]

        self.config.inputs[input_index].input_parameter_name = \
            input_config.parameter_name

        if input_config.parameter_name in g_parameter_map:
            para = g_parameter_map[input_config.parameter_name]
            config_assert(size == para.size, (
                'Shared parameter "%s" does not ' + 'have same size: %s vs. %s')
                          % (input_config.parameter_name, para.size, size))

            config_assert(dims == para.dims, (
                'Shared parameter "%s" does not ' + 'have same dims: %s vs. %s')
                          % (input_config.parameter_name, para.dims, dims))
            return

        Parameter(
            input_config.parameter_name,
            size,
            self.config.device if self.config.HasField("device") else None,
            dims,
            input_config.learning_rate,
            input_config.momentum,
            decay_rate=input_config.decay_rate,
            decay_rate_l1=input_config.decay_rate_l1,
            initial_mean=input_config.initial_mean,
            initial_std=input_config.initial_std,
            initial_strategy=input_config.initial_strategy,
            initial_smart=input_config.initial_smart,
            num_batches_regularization=input_config.num_batches_regularization,
            sparse_remote_update=input_config.sparse_remote_update,
            sparse_update=input_config.sparse_update,
            gradient_clipping_threshold=input_config.
            gradient_clipping_threshold,
            sparse=sparse,
            format=format,
            is_static=input_config.is_static,
            is_shared=input_config.is_shared,
            update_hooks=input_config.update_hooks,
            initializer=input_config.initializer)

    def set_layer_size(self, size):
        if self.config.size == 0:
            self.config.size = size
        else:
            config_assert(self.config.size == size,
                          'Different inputs result in ' +
                          'different layer size at layer %s' % self.config.name)

    def set_layer_height_width(self, height, width):
        self.config.height = height
        self.config.width = width

    def set_layer_depth(self, depth):
        self.config.depth = depth

    def set_cnn_layer(self,
                      input_layer_name,
                      height,
                      width,
                      channels,
                      is_print=True):
        size = height * width * channels
        self.set_layer_size(size)
        self.set_layer_height_width(height, width)
        if is_print:
            print("output for %s: c = %d, h = %d, w = %d, size = %d" %
                  (input_layer_name, channels, height, width, size))


@config_layer('multi_class_cross_entropy_with_selfnorm')
class MultiClassCrossEntropySelfNormCostLayer(LayerBase):
    def __init__(self, name, inputs, softmax_selfnorm_alpha=0.1, **xargs):
        super(MultiClassCrossEntropySelfNormCostLayer, self).__init__(
            name, 'multi_class_cross_entropy_with_selfnorm', 0, inputs, **xargs)
        self.config.softmax_selfnorm_alpha = softmax_selfnorm_alpha


@config_layer('fc')
class FCLayer(LayerBase):
    def __init__(self,
                 name,
                 size,
                 inputs,
                 bias=True,
                 error_clipping_threshold=None,
                 **xargs):
        super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            psize = self.config.size * input_layer.size
            dims = [input_layer.size, self.config.size]
            format = self.inputs[input_index].format
            sparse = format == "csr" or format == "csc"

            if sparse:
                psize = self.inputs[input_index].nnz
            else:
                sparse = None

            self.create_input_parameter(input_index, psize, dims, sparse,
                                        format)
        self.create_bias_parameter(bias, self.config.size)
        if error_clipping_threshold is not None:
            self.config.error_clipping_threshold = error_clipping_threshold
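
# A hedged config-file sketch for this layer type (names are hypothetical):
#   Layer(name="fc1", type="fc", size=256, active_type="sigmoid",
#         bias=True, inputs=Input("data", initial_std=0.01))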


@config_layer('selective_fc')
class SelectiveFCLayer(LayerBase):
    def __init__(self,
                 name,
                 size,
                 inputs,
                 bias=True,
                 selective_fc_pass_generation=False,
                 has_selected_colums=True,
                 selective_fc_full_mul_ratio=0.02,
                 selective_fc_parallel_plain_mul_thread_num=None,
                 **xargs):
        super(SelectiveFCLayer, self).__init__(
            name, 'selective_fc', size, inputs=inputs, **xargs)
        # The user MUST know that if selective fc is used in training,
        # the parameter matrices saved by this layer are automatically
        # transposed, BUT the bias is not.

        # If selective_fc is used only in testing mode, and the parameters for
        # this layer were trained by fully connected layers,
        # then TransposedFullMatrixProjection MUST be used in training
        # to avoid a manual transpose in testing.

        self.config.selective_fc_pass_generation = selective_fc_pass_generation
        self.config.has_selected_colums = has_selected_colums
        self.config.selective_fc_full_mul_ratio = selective_fc_full_mul_ratio
        if selective_fc_parallel_plain_mul_thread_num is not None:
            self.config.selective_fc_parallel_plain_mul_thread_num = selective_fc_parallel_plain_mul_thread_num

        input_num = len(self.inputs)
        if has_selected_colums:
            config_assert(input_num >= 2,
                          ("if indices of selected columns are not specified, "
                           "selective_fc Layer has at least two inputs"))
            input_num -= 1

        for input_index in xrange(input_num):
            input_layer = self.get_input_layer(input_index)
            psize = self.config.size * input_layer.size
            dims = [input_layer.size, self.config.size]
            dims = dims[::-1]  # transpose the parameter
            format = self.inputs[input_index].format
            sparse = format == "csr" or format == "csc"
            if sparse:
                psize = self.inputs[input_index].nnz

            self.create_input_parameter(input_index, psize, dims, sparse,
                                        format)
        self.create_bias_parameter(bias, self.config.size)


@config_layer('print')
class PrintLayer(LayerBase):
    def __init__(self, name, inputs, format=None):
        super(PrintLayer, self).__init__(name, 'print', 0, inputs)
        if format is None:
            format = "\n".join([
                "layer=" + input.input_layer_name + " %s"
                for input in self.inputs
            ])
        self.config.user_arg = format


@config_layer('priorbox')
class PriorBoxLayer(LayerBase):
    def __init__(self, name, inputs, size, min_size, max_size, aspect_ratio,
                 variance):
        super(PriorBoxLayer, self).__init__(name, 'priorbox', 0, inputs)
        config_assert(len(inputs) == 2, 'PriorBoxLayer must have 2 inputs')
        input_layer = self.get_input_layer(1)
        config_assert(
            input_layer.type == 'data',
            'Expecting the second input layer of a priorbox layer to be '
            'a data layer')
        config_assert(input_layer.width > 0, 'The data layer must set width')
        config_assert(input_layer.height > 0, 'The data layer must set height')
        config_assert(len(variance) == 4, 'The variance must have 4 values')
        self.config.inputs[0].priorbox_conf.min_size.extend(min_size)
        self.config.inputs[0].priorbox_conf.max_size.extend(max_size)
        self.config.inputs[0].priorbox_conf.aspect_ratio.extend(aspect_ratio)
        self.config.inputs[0].priorbox_conf.variance.extend(variance)
        self.config.size = size


@config_layer('multibox_loss')
class MultiBoxLossLayer(LayerBase):
    def __init__(self, name, inputs, input_num, num_classes, overlap_threshold,
                 neg_pos_ratio, neg_overlap, background_id, **xargs):
        super(MultiBoxLossLayer, self).__init__(name, 'multibox_loss', 0,
                                                inputs)
        config_assert(
            len(inputs) == (input_num * 2 + 2),
            'MultiBoxLossLayer does not have enough inputs')
        config_assert(num_classes > background_id,
                      'Classes number must be greater than background ID')
        self.config.inputs[0].multibox_loss_conf.num_classes = num_classes
        self.config.inputs[
            0].multibox_loss_conf.overlap_threshold = overlap_threshold
        self.config.inputs[0].multibox_loss_conf.neg_pos_ratio = neg_pos_ratio
        self.config.inputs[0].multibox_loss_conf.neg_overlap = neg_overlap
        self.config.inputs[0].multibox_loss_conf.background_id = background_id
        self.config.inputs[0].multibox_loss_conf.input_num = input_num
        self.config.size = 1


@config_layer('detection_output')
class DetectionOutputLayer(LayerBase):
    def __init__(self, name, inputs, size, input_num, num_classes,
                 nms_threshold, nms_top_k, keep_top_k, confidence_threshold,
                 background_id, **xargs):
        super(DetectionOutputLayer, self).__init__(name, 'detection_output', 0,
                                                   inputs)
        config_assert(
            len(inputs) == (input_num * 2 + 1),
            'DetectionOutputLayer does not have enough inputs')
        config_assert(num_classes > background_id,
                      'Classes number must be greater than background ID')
        self.config.inputs[0].detection_output_conf.num_classes = num_classes
        self.config.inputs[
            0].detection_output_conf.nms_threshold = nms_threshold
        self.config.inputs[0].detection_output_conf.nms_top_k = nms_top_k
        self.config.inputs[0].detection_output_conf.keep_top_k = keep_top_k
        self.config.inputs[
            0].detection_output_conf.confidence_threshold = confidence_threshold
        self.config.inputs[
            0].detection_output_conf.background_id = background_id
        self.config.inputs[0].detection_output_conf.input_num = input_num
        self.config.size = size


@config_layer('data')
class DataLayer(LayerBase):
    def __init__(self,
                 name,
                 size,
                 height=None,
                 width=None,
                 depth=None,
                 device=None):
        super(DataLayer, self).__init__(
            name, 'data', size, inputs=[], device=device)
        if height and width:
            self.set_layer_height_width(height, width)
        if depth:
            self.set_layer_depth(depth)
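
# A hedged config-file sketch (name and shape are hypothetical):
#   Layer(name="image", type="data", size=3 * 32 * 32, height=32, width=32)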


'''
DataNormLayer: A layer for data normalization
Input: One and only one input layer is accepted. The input layer must
       be DataLayer with dense data type
Output: The normalization of the input data

Reference:
    LA Shalabi, Z Shaaban, B Kasasbeh. Data mining: A preprocessing engine

Example:
    Layer(
        name = "norm_input_layer",
        type = "data_norm",
        inputs = [Input("input_layer",
                        parameter_name = "_slot0.stats")],
        data_norm_strategy = "z-score",
    )

Note:
  (1) The parameter has been calculated in the preprocessing stage,
      and should be initialized by --init_model_path when training.
  (2) Three data normalization methods are considered
          z-score: y = (x-mean)/std
          min-max: y = (x-min)/(max-min)
          decimal-scaling: y = x/10^j, where j is the smallest integer such that max(|y|)<1
'''


@config_layer('data_norm')
class DataNormLayer(LayerBase):
    def __init__(self, name, inputs, data_norm_strategy="z-score", device=None):
        super(DataNormLayer, self).__init__(
            name, 'data_norm', 0, inputs=inputs, device=device)
        self.config.data_norm_strategy = data_norm_strategy
        config_assert(len(inputs) == 1, 'DataNormLayer must have 1 input')
        input_layer = self.get_input_layer(0)
        self.set_layer_size(input_layer.size)
        para_size = 5 * input_layer.size
        para_dims = [5, input_layer.size]
        self.inputs[0].is_static = True
        self.create_input_parameter(0, para_size, para_dims)


@config_layer('prelu')
class ParameterReluLayer(LayerBase):
    layer_type = 'prelu'

    def __init__(self, name, inputs, partial_sum=1, **args):
        super(ParameterReluLayer, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **args)
        input_layer = self.get_input_layer(0)
        config_assert(len(self.inputs) == 1, "prelu layer has only one input.")
        config_assert(input_layer.size % partial_sum == 0,
                      "a wrong setting for partial_sum")
        self.set_layer_size(input_layer.size)
        self.create_input_parameter(0, input_layer.size / partial_sum)
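
# e.g. an input of size 256 with partial_sum = 8 shares one slope weight per
# group of 8 activations, so 256 / 8 = 32 parameters are created above
# (a worked instance of the sizing in create_input_parameter).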


@config_layer('conv')
class ConvLayerBase(LayerBase):
    layer_type = 'conv'

    def __init__(self,
                 name,
                 inputs=[],
                 bias=True,
                 num_filters=None,
                 shared_biases=False,
                 **xargs):
        super(ConvLayerBase, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)

        if num_filters is not None:
            self.config.num_filters = num_filters

        use_gpu = int(g_command_config_args.get("use_gpu", 0))
        parallel_nn = int(g_command_config_args.get("parallel_nn", 0))

        # Automatically select cudnn_conv for GPU and exconv for CPU
        # when type=conv is set, while still preserving the way for users
        # to specify exconv or cudnn_conv manually.
        if self.layer_type == "cudnn_conv":
            config_assert(use_gpu, "cudnn_conv only supports GPU")

        if (use_gpu == 1 and self.layer_type != "exconv" and
            (parallel_nn == 0 or self.config.device > -1)):
            self.layer_type = "cudnn_conv"
        else:
            self.layer_type = "exconv"
        # need to specify layer in config
        self.config.type = self.layer_type

        if shared_biases is not None:
            self.config.shared_biases = shared_biases

        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            conv_conf = self.config.inputs[input_index].conv_conf
            parse_conv(self.inputs[input_index].conv, input_layer.name,
                       conv_conf, num_filters)
            psize = self.calc_parameter_size(conv_conf)
            self.create_input_parameter(input_index, psize)
            self.set_cnn_layer(name, conv_conf.output_y, conv_conf.output_x,
                               self.config.num_filters)

        psize = self.config.size
        if shared_biases:
            psize = self.config.num_filters
        self.create_bias_parameter(bias, psize, [psize, 1])

    def calc_parameter_size(self, conv_conf):
        return self.config.num_filters * conv_conf.filter_channels \
               * (conv_conf.filter_size * conv_conf.filter_size_y)
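
# e.g. 64 filters over a 3-channel input with groups = 1 and a 3x3 kernel give
# filter_channels = 3 and 64 * 3 * (3 * 3) = 1728 weights via the formula
# above (a worked instance only; bias parameters are created separately).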


@config_layer('exconv')
class ConvLayer(ConvLayerBase):
    layer_type = 'exconv'


@config_layer('cudnn_conv')
class ConvLayer(ConvLayerBase):
    layer_type = 'cudnn_conv'


@config_layer('convt')
class ConvTransLayerBase(LayerBase):
    layer_type = 'convt'

    def __init__(self,
                 name,
                 inputs=[],
                 bias=True,
                 num_filters=None,
                 shared_biases=False,
                 **xargs):
        super(ConvTransLayerBase, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)

        if num_filters is not None:
            self.config.num_filters = num_filters

        use_gpu = int(g_command_config_args.get("use_gpu", 0))
        parallel_nn = int(g_command_config_args.get("parallel_nn", 0))

        # Automatically select cudnn_convt for GPU and exconvt for CPU
        # when type=convt is set, while still preserving the way for users
        # to specify exconvt or cudnn_convt manually.
        if self.layer_type == "cudnn_convt":
            config_assert(use_gpu, "cudnn_convt only supports GPU")

        if (use_gpu == 1 and self.layer_type != "exconvt" and
            (parallel_nn == 0 or self.config.device > -1)):
            self.layer_type = "cudnn_convt"
        else:
            self.layer_type = "exconvt"
        # need to specify layer in config
        self.config.type = self.layer_type

        if shared_biases is not None:
            self.config.shared_biases = shared_biases

        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            parse_conv(
                self.inputs[input_index].conv,
                input_layer.name,
                self.config.inputs[input_index].conv_conf,
                num_filters,
                trans=True)
            conv_conf = self.config.inputs[input_index].conv_conf
            psize = self.calc_parameter_size(conv_conf)
            self.create_input_parameter(input_index, psize)
            self.set_cnn_layer(name, conv_conf.img_size_y, conv_conf.img_size,
                               self.config.num_filters)

        psize = self.config.size
        if shared_biases:
            psize = self.config.num_filters
        self.create_bias_parameter(bias, psize, [psize, 1])

    def calc_parameter_size(self, conv_conf):
        return conv_conf.channels * conv_conf.filter_channels \
                    * (conv_conf.filter_size * conv_conf.filter_size_y)


@config_layer('exconvt')
class ConvTransLayer(ConvTransLayerBase):
    layer_type = 'exconvt'


@config_layer('cudnn_convt')
class ConvTransLayer(ConvTransLayerBase):
    layer_type = 'cudnn_convt'


@config_layer('conv_3d')
class Conv3DLayerBase(LayerBase):
    def __init__(self,
                 name,
                 inputs=[],
                 bias=True,
                 num_filters=None,
                 shared_biases=True,
                 **xargs):
        super(Conv3DLayerBase, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)

        if num_filters is not None:
            self.config.num_filters = num_filters

        # need to specify layer in config
        self.config.type = self.layer_type

        trans = False
        if self.config.type == "deconv3d":
            trans = True

        if shared_biases is not None:
            self.config.shared_biases = shared_biases

        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            conv_conf = self.config.inputs[input_index].conv_conf
            parse_conv3d(
                self.inputs[input_index].conv,
                input_layer.name,
                conv_conf,
                num_filters,
                trans=trans
            )  # for z-axis pad:0, stride:1, filter_size:1, img_size:1
            psize = self.calc_parameter_size(conv_conf)
            self.create_input_parameter(input_index, psize)
            if trans:
                self.set_cnn_layer(name, conv_conf.img_size_z,
                                   conv_conf.img_size_y, conv_conf.img_size,
                                   self.config.num_filters)
            else:
                self.set_cnn_layer(name, conv_conf.output_z, conv_conf.output_y,
                                   conv_conf.output_x, self.config.num_filters)

        psize = self.config.size
        if shared_biases:
            psize = self.config.num_filters
        self.create_bias_parameter(bias, psize, [psize, 1])

    def calc_parameter_size(self, conv_conf):
        return self.config.num_filters * conv_conf.filter_channels \
               * (conv_conf.filter_size * conv_conf.filter_size_y \
                  * conv_conf.filter_size_z)

    def set_cnn_layer(self,
                      input_layer_name,
                      depth,
                      height,
                      width,
                      channels,
                      is_print=True):
        size = depth * height * width * channels
        self.set_layer_size(size)
        self.set_layer_height_width(height, width)
        self.set_layer_depth(depth)
        if is_print:
            print("output for %s: c = %d, d = %d, h = %d, w = %d, size = %d" %
                  (input_layer_name, channels, depth, height, width, size))


@config_layer('conv3d')
class Conv3DLayer(Conv3DLayerBase):
    layer_type = 'conv3d'


@config_layer('deconv3d')
class Conv3DLayer(Conv3DLayerBase):
    layer_type = 'deconv3d'


@config_layer('norm')
class NormLayer(LayerBase):
    def __init__(self, name, inputs, **xargs):
        super(NormLayer, self).__init__(name, 'norm', 0, inputs=inputs, **xargs)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            norm_conf = self.config.inputs[input_index].norm_conf
            parse_norm(self.inputs[input_index].norm, input_layer.name,
                       norm_conf)
            self.set_cnn_layer(name, norm_conf.output_y, norm_conf.output_x,
                               norm_conf.channels, False)
            if norm_conf.norm_type == "cross-channel-norm":
                self.create_input_parameter(0, norm_conf.channels,
                                            [norm_conf.channels, 1])


@config_layer('pool')
class PoolLayer(LayerBase):
    def __init__(self, name, inputs, ceil_mode=True, **xargs):
        super(PoolLayer, self).__init__(name, 'pool', 0, inputs=inputs, **xargs)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            pool_conf = self.config.inputs[input_index].pool_conf
            parse_pool(self.inputs[input_index].pool, input_layer.name,
                       pool_conf, ceil_mode)
            self.set_cnn_layer(name, pool_conf.output_y, pool_conf.output_x,
                               pool_conf.channels)


@config_layer('spp')
class SpatialPyramidPoolLayer(LayerBase):
    def __init__(self, name, inputs, **xargs):
        super(SpatialPyramidPoolLayer, self).__init__(
            name, 'spp', 0, inputs=inputs, **xargs)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            spp_conf = self.config.inputs[input_index].spp_conf
            parse_spp(self.inputs[input_index].spp, input_layer.name, spp_conf)
            output_x = (pow(4, spp_conf.pyramid_height) - 1) / (4 - 1)
            self.set_cnn_layer(name, 1, output_x, spp_conf.image_conf.channels)


@config_layer('pad')
class PadLayer(LayerBase):
    def __init__(self, name, inputs, **xargs):
        super(PadLayer, self).__init__(name, 'pad', 0, inputs=inputs, **xargs)
        pad = self.inputs[0].pad
        self.config.inputs[0].pad_conf.pad_c.extend(pad.pad_c)
        self.config.inputs[0].pad_conf.pad_h.extend(pad.pad_h)
        self.config.inputs[0].pad_conf.pad_w.extend(pad.pad_w)

        input_layer = self.get_input_layer(0)
        image_conf = self.config.inputs[0].pad_conf.image_conf
        parse_image(pad, input_layer.name, image_conf)
        out_ch = pad.channels + pad.pad_c[0] + pad.pad_c[1]
        out_h = image_conf.img_size_y + pad.pad_h[0] + pad.pad_h[1]
        out_w = image_conf.img_size + pad.pad_w[0] + pad.pad_w[1]
        self.set_cnn_layer(name, out_h, out_w, out_ch)
        self.config.size = out_ch * out_h * out_w


@config_layer('crop')
class CropLayer(LayerBase):
    def __init__(self, name, inputs, axis, offset, shape, **xargs):
        super(CropLayer, self).__init__(name, 'crop', 0, inputs=inputs, **xargs)
        self.config.axis = axis
        self.config.offset.extend(offset)
        self.config.shape.extend(shape)

        # get channel, width and height from input_0 layer
        input_layer = self.get_input_layer(0)
        image_conf = self.config.inputs[0].image_conf
        image_conf.img_size = input_layer.width
        image_conf.img_size_y = input_layer.height
        image_conf.channels = input_layer.size / (input_layer.width *
                                                  input_layer.height)


@config_layer('batch_norm')
class BatchNormLayer(LayerBase):
    layer_type = 'batch_norm'

    def __init__(self,
                 name,
                 inputs,
                 bias=True,
                 use_global_stats=True,
                 moving_average_fraction=0.9,
                 batch_norm_type=None,
                 **xargs):
        if inputs is None:
            inputs = []
        elif not isinstance(inputs, list):
            inputs = [inputs]
        config_assert(
            len(inputs) == 1, "BatchNormLayer must have one and only one input")
        # Create Input for the moving mean and std
        # in the batch normalization layer.
        # These parameters need no updates, so is_static is set to true.
        # Without is_static, even with learning_rate = 0 and decay_rate = 0,
        # these parameters would still change if average_window were set
        # in the configuration.
        use_gpu = bool(int(g_command_config_args.get("use_gpu", 0)))
        is_shared = True if not use_gpu else False
        for i in xrange(2):
            inputs.append(
                Input(
                    inputs[0].input_layer_name,
                    initial_std=0.0,
                    initial_mean=0.0,
                    is_static=True,
                    is_shared=is_shared,
                    make_layer_name_in_submodel=False, ))

        parallel_nn = bool(int(g_command_config_args.get("parallel_nn", 0)))
        cudnn_version = int(g_command_config_args.get("cudnn_version", 0))
        # Automatically select cudnn_batch_norm for GPU and batch_norm for CPU.
        # Also based on cudnn version.
        use_cudnn = use_gpu and batch_norm_type != "batch_norm" and \
                ((not parallel_nn) or self.config.device > -1)
        self.layer_type = "cudnn_batch_norm" if use_cudnn else "batch_norm"
        super(BatchNormLayer, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)

        if use_global_stats is not None:
            self.config.use_global_stats = use_global_stats
        if moving_average_fraction is not None:
            self.config.moving_average_fraction = moving_average_fraction

        input_layer = self.get_input_layer(0)
        image_conf = self.config.inputs[0].image_conf
        parse_image(self.inputs[0].image, input_layer.name, image_conf)

        # Only pass the width and height of the input to the batch_norm layer
        # when either of them is non-zero.
        if input_layer.width != 0 or input_layer.height != 0:
            self.set_cnn_layer(name, image_conf.img_size_y, image_conf.img_size,
                               image_conf.channels, False)
        else:
            self.set_layer_size(input_layer.size)

        psize = self.calc_parameter_size(image_conf)
        dims = [1, psize]
        self.create_input_parameter(0, psize)
        self.create_input_parameter(1, psize, dims)
        self.create_input_parameter(2, psize, dims)

        self.create_bias_parameter(bias, psize)

    def calc_parameter_size(self, image_conf):
        return image_conf.channels


@config_layer('trans')
class TransLayer(LayerBase):
    def __init__(self, name, inputs, **xargs):
        super(TransLayer, self).__init__(
            name, 'trans', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1,
            'TransLayer must have one and only one input')
        self.set_layer_size(self.get_input_layer(0).size)


@config_layer('resize')
class ResizeLayer(LayerBase):
    def __init__(self, name, size, inputs, **xargs):
        super(ResizeLayer, self).__init__(
            name, 'resize', size=size, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1,
            'ResizeLayer must have one and only one input')


@config_layer('rotate')
class RotateLayer(LayerBase):
    def __init__(self, name, inputs, height, width, device=None):
        super(RotateLayer, self).__init__(
            name, 'rotate', 0, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) == 1,
            'RotateLayer must have one and only one input')
        self.set_layer_height_width(height, width)
        self.set_layer_size(self.get_input_layer(0).size)


@config_layer('blockexpand')
class BlockExpandLayer(LayerBase):
    def __init__(self, name, inputs, **xargs):
        super(BlockExpandLayer, self).__init__(
            name, 'blockexpand', 0, inputs=inputs, **xargs)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            parse_block_expand(
                self.inputs[input_index].block_expand, input_layer.name,
                self.config.inputs[input_index].block_expand_conf)
            block_expand_conf = self.config.inputs[
                input_index].block_expand_conf
            self.set_layer_size(block_expand_conf.block_x *
                                block_expand_conf.block_y *
                                block_expand_conf.channels)


@config_layer('maxout')
class MaxOutLayer(LayerBase):
    def __init__(self, name, inputs, **xargs):
        super(MaxOutLayer, self).__init__(
            name, 'maxout', 0, inputs=inputs, **xargs)
        input_layer = self.get_input_layer(0)
        maxout_conf = self.config.inputs[0].maxout_conf
        parse_maxout(self.inputs[0].maxout, input_layer.name, maxout_conf)
        out_channels = maxout_conf.image_conf.channels / maxout_conf.groups
        self.set_cnn_layer(name, g_layer_map[input_layer.name].height,
                           g_layer_map[input_layer.name].width, out_channels)


@config_layer('row_conv')
class RowConvLayer(LayerBase):
    def __init__(self, name, inputs, context_length, **xargs):
        super(RowConvLayer, self).__init__(
            name, 'row_conv', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1,
            'row convolution layer must have one and only one input.')
        input_layer = self.get_input_layer(0)
        row_conv_conf = self.config.inputs[0].row_conv_conf
        row_conv_conf.context_length = context_length
        self.set_layer_size(input_layer.size)
        psize = context_length * input_layer.size
        dims = [context_length, input_layer.size]
        self.create_input_parameter(0, psize, dims)


@config_layer('clip')
class ClipLayer(LayerBase):
    def __init__(self, name, inputs, min, max, **xargs):
        super(ClipLayer, self).__init__(name, 'clip', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1,
            'ClipLayer must have one and only one input.')
        config_assert(min < max, 'min must be less than max.')
        input_layer = self.get_input_layer(0)
        self.set_layer_size(input_layer.size)
        self.config.inputs[0].clip_conf.min = min
        self.config.inputs[0].clip_conf.max = max


# key: cost type
# value: cost class
g_cost_map = {}


# define a cost layer without any parameters
def define_cost(class_name, cost_type):
    def init(cls, name, inputs, device=None, coeff=1.):
        super(type(cls), cls).__init__(
            name, cost_type, 1, inputs, device=device, coeff=coeff)

    cls = type(class_name, (LayerBase, ), dict(__init__=init))
    global g_cost_map
    g_cost_map[cost_type] = cls


define_cost('MultiClassCrossEntropy', 'multi-class-cross-entropy')
define_cost('RankingCost', 'rank-cost')
define_cost('AucValidation', 'auc-validation')
define_cost('PnpairValidation', 'pnpair-validation')
define_cost('SumOfSquaresCostLayer', 'square_error')
define_cost('MultiBinaryLabelCrossEntropy', 'multi_binary_label_cross_entropy')
define_cost('SoftBinaryClassCrossEntropy', 'soft_binary_class_cross_entropy')
define_cost('HuberTwoClass', 'huber')
define_cost('SumCost', 'sum_cost')
define_cost('SmoothL1Cost', 'smooth_l1')
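
# e.g. define_cost('MyCost', 'my_cost') (a hypothetical name) would register a
# parameter-free cost layer usable from a config file as
#   Layer(name="cost", type="my_cost", inputs=["output", "label"])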


@config_layer('hsigmoid')
class HierarchicalSigmoidLayer(LayerBase):
    def __init__(self, name, num_classes, inputs, device=None, bias=True):
        super(HierarchicalSigmoidLayer, self).__init__(
            name, 'hsigmoid', 1, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) >= 2,
            'HierarchicalSigmoidLayer must have at least 2 inputs')
        self.config.num_classes = num_classes
        for input_index in xrange(len(self.inputs) - 1):
            input_layer = self.get_input_layer(input_index)
            psize = (num_classes - 1) * input_layer.size
            dims = [num_classes - 1, input_layer.size]
            self.create_input_parameter(input_index, psize, dims)
        self.create_bias_parameter(bias, num_classes - 1)
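
# e.g. num_classes = 10 with a single input of size 64 creates a 9 x 64
# weight matrix and a bias of size 9 (a worked instance of the sizing above).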


'''
lambdaCost for lambdaRank LTR approach

Usage:
  Example: Layer(name = "cost", type = "lambda_cost", NDCG_num = 8,
             max_sort_size = -1, inputs = ["output", "score"])

  Input data: Samples of the same query should be loaded as a sequence,
          by ProtoDataProvider, PyDataProvider, etc. The user should provide
          scores for each sample. The score slot should be the 2nd
          input of the lambdaRank layer.

  NDCG_num = the size of NDCG, e.g., 5 for NDCG@5.
    Note: NDCG_num must be less than or equal to the minimum
          size of lists.

  max_sort_size = the size of partial sorting in calculating gradient.
    Note: If max_sort_size = -1, then for each list, the algorithm will
          sort the entire list to get gradient.
          In other cases, max_sort_size must be greater than or equal
          to NDCG_num.
          max_sort_size can be greater than the size of a list, in which
          case the algorithm will sort the entire list to get gradient.
'''


@config_layer('lambda_cost')
class LambdaCost(LayerBase):
    def __init__(self, name, inputs, NDCG_num=5, max_sort_size=-1, device=None):
        super(LambdaCost, self).__init__(
            name, 'lambda_cost', 1, inputs=inputs, device=device)
        config_assert(len(self.inputs) == 2, 'lambdaCost must have 2 inputs')
        self.config.NDCG_num = NDCG_num
        if max_sort_size != -1:
            config_assert(
                NDCG_num <= max_sort_size,
                'NDCG_num must be less than or equal to max_sort_size')
        self.config.max_sort_size = max_sort_size


@config_layer('nce')
class NCELayer(LayerBase):
    def __init__(self,
                 name,
                 num_classes,
                 inputs,
                 num_neg_samples=10,
                 neg_sampling_dist=None,
                 bias=True,
                 **xargs):
        super(NCELayer, self).__init__(name, 'nce', 1, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) >= 2, 'NCELayer must have at least 2 inputs')
        self.config.num_classes = num_classes
        if neg_sampling_dist is not None:
            config_assert(
                len(neg_sampling_dist) == num_classes,
                'len(neg_sampling_dist)(%s) is not the same as num_classes (%s)' %
                (len(neg_sampling_dist), num_classes))
            s = sum(neg_sampling_dist)
            config_assert(
                abs(s - 1) < 1e-5,
                'The sum of neg_sampling_dist (%s) is not 1' % s)

            self.config.neg_sampling_dist.extend(neg_sampling_dist)

        self.config.num_neg_samples = num_neg_samples
        num_real_inputs = len(self.inputs) - 1
        input_layer = self.get_input_layer(num_real_inputs)
        config_assert(input_layer.type == 'data',
                      'Expecting the last input layer of an nce layer to be '
                      'a data layer')

        if (num_real_inputs > 1 and input_layer.size == 1 and
                self.get_input_layer(num_real_inputs - 1).type == 'data'):
            # This input layer is assumed to be a sample weight layer
            num_real_inputs -= 1

        for input_index in xrange(num_real_inputs):
            input_layer = self.get_input_layer(input_index)
            psize = num_classes * input_layer.size
            dims = [num_classes, input_layer.size]
            self.create_input_parameter(input_index, psize, dims)
        self.create_bias_parameter(bias, num_classes)


@config_layer('addto')
class AddToLayer(LayerBase):
    def __init__(self, name, inputs, bias=True, **xargs):
        super(AddToLayer, self).__init__(
            name, 'addto', 0, inputs=inputs, **xargs)
        config_assert(len(inputs) > 0, 'inputs cannot be empty for AddToLayer')
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            self.set_layer_size(input_layer.size)
        self.create_bias_parameter(bias, self.config.size)


@config_layer('agent')
class AgentLayer(LayerBase):
    def __init__(self, name, size, device=None):
        super(AgentLayer, self).__init__(
            name, 'agent', size, inputs=[], device=device)


@config_layer('gather_agent')
class GatherAgentLayer(LayerBase):
    def __init__(self, name, size, device=None):
        super(GatherAgentLayer, self).__init__(
            name, 'gather_agent', size, inputs=[], device=device)


@config_layer('scatter_agent')
class ScatterAgentLayer(LayerBase):
    def __init__(self, name, size, device=None):
        super(ScatterAgentLayer, self).__init__(
            name, 'scatter_agent', size, inputs=[], device=device)


@config_layer('multiplex')
class MultiplexLayer(LayerBase):
    def __init__(self, name, inputs, size, device=None):
        super(MultiplexLayer, self).__init__(
            name, 'multiplex', size, inputs=inputs, device=device)
        config_assert(
            len(inputs) > 2, 'MultiplexLayer should have more than 2 inputs.')
        for i in range(1, len(inputs)):
            config_assert(
                self.get_input_layer(i).size == size,
                "All the input layers except the first one should "
                "have the same size as the MultiplexLayer.")


@config_func
def Link(name, has_subseq=False):
    """
    Still keeping has_subseq for backward compatibility
    """
    link_config = LinkConfig()
    link_config.link_name = name
    return link_config


# Memory for recurrent layer group.
# *name* and *size* are the actual layer's name and size.
# If *name* is None, you need to provide *memory_name* and call
# SetMemoryInput() later to specify the layer which this memory remembers.
#
# Returns the name of the memory;
# use this name if you assign the memory as another layer's input.
#
# The boot frame of the memory is zeroed by default,
# or initialized by the boot layer output if *boot_layer* is set,
# or initialized by a trainable bias if *boot_bias* is set,
# or initialized by a constant id if *boot_with_const_id* is set.
#
# Memory can be a sequence if *is_sequence* is set; this type of memory
# can only be initialized by a *boot_layer* which is itself a sequence.
#
@config_func
def Memory(name,
           size,
           is_sequence=False,
           boot_layer=None,
           boot_bias=False,
           boot_bias_active_type="",
           boot_with_const_id=None,
           memory_name=None):
    if not memory_name:
        config_assert(name is not None, "name cannot be None")
        memory_name = name + "+delay1"
    agent_name = memory_name
    agent_layer = AgentLayer(agent_name, size)
    config_assert(g_current_submodel.is_recurrent_layer_group,
                  'Memory should be used in recurrent layer group only')
    memory = g_current_submodel.memories.add()
    if name is not None:
        memory.layer_name = MakeLayerNameInSubmodel(name)
    memory.link_name = MakeLayerNameInSubmodel(agent_name)
    options = sum((boot_layer is not None, bool(boot_bias),
                   boot_with_const_id is not None))
    config_assert(
        options <= 1,
        'take one option at most from boot_layer, boot_bias, or boot_with_const_id'
    )
    if boot_layer is not None:
        boot_layer = MakeLayerNameInParentSubmodel(boot_layer)
        config_assert(boot_layer in g_layer_map,
                      'boot_layer "%s" does not correspond to a layer name' %
                      boot_layer)
        memory.boot_layer_name = boot_layer
    elif boot_bias:
        memory.boot_bias_parameter_name = agent_layer.create_bias_parameter(
            boot_bias, size, for_self=False)
        memory.boot_bias_active_type = boot_bias_active_type
    elif boot_with_const_id is not None:
        memory.boot_with_const_id = boot_with_const_id
    return agent_name

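# A hedged usage sketch (layer names are hypothetical). Inside a recurrent
# layer group, remember the previous step of layer 'rnn_out' and use the
# returned agent name as another layer's input:
#
#   mem = Memory(name='rnn_out', size=128)
#
# Or create an unbound memory and attach its source layer later:
#
#   mem = Memory(name=None, size=128, memory_name='rnn_mem')
#   SetMemoryInput('rnn_mem', 'rnn_out')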

@config_func
def SetMemoryInput(memory_name, layer_name):
    memory_name = MakeLayerNameInSubmodel(memory_name)
    layer_name = MakeLayerNameInSubmodel(layer_name)
    for mem in g_current_submodel.memories:
        if mem.link_name == memory_name:
            mem.layer_name = layer_name
            return
    logger.fatal("Nonexistent memory name: " + memory_name)


# Generator for recurrent layer group, to use it:
#  1. define an id layer as the output of the layer group
#  2. define a memory of this id layer, and assign a boot id (begin of sequence)
#  3. define an eos check layer and fill its name in the generator's *eos_layer_name*
# Sequence generation stops when the eos check returns 1 or *max_num_frames* is reached.
# If *beam_size* is greater than one, the generator will use beam search.
#   In beam search, if *num_results_per_sample* is set, one sample sequence can output
#   multiple results, each with a probability.
@config_func
def Generator(
        max_num_frames,
        eos_layer_name="eos_check",
        num_results_per_sample=1,
        beam_size=1,
        log_prob=None, ):
    generator_config = GeneratorConfig()
    generator_config.max_num_frames = max_num_frames
    generator_config.eos_layer_name = eos_layer_name
    generator_config.num_results_per_sample = num_results_per_sample
    generator_config.beam_size = beam_size
    if log_prob is not None:
        generator_config.log_prob = log_prob
    return generator_config

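# A hedged example of a generator configuration (all values illustrative):
#
#   generator = Generator(max_num_frames=100, eos_layer_name='eos_check',
#                         beam_size=5, num_results_per_sample=3)
#
# With beam_size > 1 beam search is used, and each sample may emit up to
# num_results_per_sample candidate sequences.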

@config_layer('expand')
class ExpandLayer(LayerBase):
    def __init__(self, name, inputs, trans_type='non-seq', bias=False, **xargs):
        super(ExpandLayer, self).__init__(
            name, 'expand', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 2, 'ExpandLayer takes 2 and only 2 inputs')
        self.config.trans_type = trans_type
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
        self.set_layer_size(self.get_input_layer(0).size)
        self.create_bias_parameter(bias, self.config.size)


@config_layer('featmap_expand')
class FeatMapExpandLayer(LayerBase):
    def __init__(self,
                 name,
                 inputs,
                 num_filters=None,
                 as_row_vector=True,
                 bias=False,
                 **xargs):
        super(FeatMapExpandLayer, self).__init__(
            name, 'featmap_expand', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1, 'FeatMapExpandLayer takes 1 and only 1 input')
        if num_filters is not None:
            self.config.num_filters = num_filters
        else:
            logger.fatal("FeatMapExpandLayer must specify num_filters.")
        if not as_row_vector:
            self.config.user_arg = "as_col_vec"
        self.set_layer_size(self.get_input_layer(0).size * num_filters)


@config_layer('max')
class MaxLayer(LayerBase):
    def __init__(self,
                 name,
                 inputs,
                 trans_type='non-seq',
                 bias=False,
                 output_max_index=None,
                 stride=-1,
                 **xargs):
        super(MaxLayer, self).__init__(name, 'max', 0, inputs=inputs, **xargs)
        config_assert(len(self.inputs) == 1, 'MaxLayer must have 1 input')
        if trans_type == 'seq':
            config_assert(stride == -1, 'subseq does not support stride window')
        self.config.trans_type = trans_type
        self.config.seq_pool_stride = stride
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            self.set_layer_size(input_layer.size)
        self.create_bias_parameter(bias, self.config.size)
        if output_max_index is not None:
            self.config.output_max_index = output_max_index


@config_layer('maxid')
class MaxIdLayer(LayerBase):
    def __init__(self, name, inputs, beam_size=None, device=None):
        super(MaxIdLayer, self).__init__(
            name, 'maxid', 0, inputs=inputs, device=device)
        config_assert(len(self.inputs) == 1, 'MaxIdLayer must have 1 input')
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            self.set_layer_size(input_layer.size)

        if beam_size is None:
            global g_current_submodel
            if g_current_submodel.HasField("generator"):
                self.config.beam_size = g_current_submodel.generator.beam_size
        else:
            self.config.beam_size = beam_size


@config_layer('eos_id')
class EosIdLayer(LayerBase):
    def __init__(self, name, inputs, eos_id, device=None):
        super(EosIdLayer, self).__init__(
            name, 'eos_id', 0, inputs=inputs, device=device)
        config_assert(len(self.inputs) == 1, 'EosIdLayer must have 1 input')
        self.set_layer_size(2)  # boolean output
        self.config.eos_id = eos_id


@config_layer('seqlastins')
class SequenceLastInstanceLayer(LayerBase):
    def __init__(self,
                 name,
                 inputs,
                 trans_type='non-seq',
                 bias=False,
                 stride=-1,
                 **xargs):
        super(SequenceLastInstanceLayer, self).__init__(
            name, 'seqlastins', 0, inputs=inputs, **xargs)
        config_assert(
            len(inputs) == 1, 'SequenceLastInstanceLayer must have 1 input')
        if trans_type == 'seq':
            config_assert(stride == -1, 'subseq does not support stride window')
        self.config.trans_type = trans_type
        self.config.seq_pool_stride = stride
        self.set_layer_size(self.get_input_layer(0).size)
        self.create_bias_parameter(bias, self.config.size)


@config_layer('seqfirstins')
class SequenceFirstInstanceLayer(SequenceLastInstanceLayer):
    def __init__(self,
                 name,
                 inputs,
                 trans_type='non-seq',
                 bias=False,
                 stride=-1,
                 **xargs):
        super(SequenceFirstInstanceLayer, self).__init__(
            name,
            inputs=inputs,
            trans_type=trans_type,
            bias=bias,
            stride=stride,
            **xargs)
        self.config.select_first = True


@config_layer('seqconcat')
class SequenceConcatLayer(LayerBase):
    def __init__(self, name, inputs, bias=False, **xargs):
        super(SequenceConcatLayer, self).__init__(
            name, 'seqconcat', 0, inputs=inputs, **xargs)
        config_assert(
            len(inputs) == 2, 'SequenceConcatLayer must have 2 inputs')
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            self.set_layer_size(input_layer.size)
        self.create_bias_parameter(bias, self.config.size)


@config_layer('seqreshape')
class SequenceReshapeLayer(LayerBase):
    def __init__(self, name, size, inputs, bias=False, **xargs):
        super(SequenceReshapeLayer, self).__init__(
            name, 'seqreshape', size, inputs=inputs, **xargs)
        config_assert(
            len(inputs) == 1, 'SequenceReshapeLayer must have 1 input')
        self.set_layer_size(size)
        self.create_bias_parameter(bias, size)


@config_layer('subseq')
class SubSequenceLayer(LayerBase):
    def __init__(self, name, inputs, bias=False, **xargs):
        super(SubSequenceLayer, self).__init__(
            name, 'subseq', 0, inputs=inputs, **xargs)
        config_assert(len(inputs) == 3, 'SubSequenceLayer must have 3 inputs')
        input_layer0 = self.get_input_layer(0)
        size = input_layer0.size
        self.set_layer_size(size)
        self.create_bias_parameter(bias, size)


@config_layer('sub_nested_seq')
class SubNestedSequenceLayer(LayerBase):
    def __init__(self, name, inputs, selected_indices, bias=False, **xargs):
        if isinstance(inputs, list):
            assert len(inputs) == 1, ('the first input of sub_nested_seq '
                                      'layer is a single nested sequence.')
            inputs = inputs[0]
        if isinstance(selected_indices, list):
            assert len(selected_indices) == 1, (
                'the second input of '
                'sub_nested_seq layer is a single layer which is a '
                'set of selected indices.')
            selected_indices = selected_indices[0]

        super(SubNestedSequenceLayer, self).__init__(
            name,
            'sub_nested_seq',
            0,
            inputs=[inputs, selected_indices],
            **xargs)
        input_layer0 = self.get_input_layer(0)
        size = input_layer0.size
        self.set_layer_size(size)


@config_layer('out_prod')
class OuterProdLayer(LayerBase):
    def __init__(self, name, inputs, device=None):
        super(OuterProdLayer, self).__init__(
            name, 'out_prod', 0, inputs=inputs, device=device)
        config_assert(len(inputs) == 2, 'OuterProdLayer must have 2 inputs')
        input_layer0 = self.get_input_layer(0)
        input_layer1 = self.get_input_layer(1)
        self.set_layer_size(input_layer0.size * input_layer1.size)


@config_layer('power')
class PowerLayer(LayerBase):
    def __init__(self, name, inputs, device=None):
        super(PowerLayer, self).__init__(
            name, 'power', 0, inputs=inputs, device=device)
        config_assert(len(inputs) == 2, 'PowerLayer must have 2 inputs')
        input_layer1 = self.get_input_layer(1)
        self.set_layer_size(input_layer1.size)
        input_layer0 = self.get_input_layer(0)
        config_assert(1 == input_layer0.size,
                      'The left input is the exponent and should be of size 1')


@config_layer('slope_intercept')
class SlopeInterceptLayer(LayerBase):
    def __init__(self, name, inputs, slope=1.0, intercept=0.0, device=None):
        super(SlopeInterceptLayer, self).__init__(
            name, 'slope_intercept', 0, inputs=inputs, device=device)
        self.config.slope = slope
        self.config.intercept = intercept
        config_assert(len(inputs) == 1, 'SlopeInterceptLayer must have 1 input')
        input_layer0 = self.get_input_layer(0)
        self.set_layer_size(input_layer0.size)


@config_layer('scaling')
class ScalingLayer(LayerBase):
    def __init__(self, name, inputs, device=None):
        super(ScalingLayer, self).__init__(
            name, 'scaling', 0, inputs=inputs, device=device)
        config_assert(len(inputs) == 2, 'ScalingLayer must have 2 inputs')
        input_layer1 = self.get_input_layer(1)
        self.set_layer_size(input_layer1.size)
        input_layer0 = self.get_input_layer(0)
        config_assert(1 == input_layer0.size,
                      'The left input should be of size 1')


@config_layer('conv_shift')
class ConvShiftLayer(LayerBase):
    def __init__(self, name, inputs, device=None):
        super(ConvShiftLayer, self).__init__(
            name, 'conv_shift', 0, inputs=inputs, device=device)
        config_assert(len(inputs) == 2, 'ConvShiftLayer must have 2 inputs')
        input_layer0 = self.get_input_layer(0)
        self.set_layer_size(input_layer0.size)


@config_layer('convex_comb')
class ConvexCombinationLayer(LayerBase):
    def __init__(self, name, size, inputs, device=None):
        super(ConvexCombinationLayer, self).__init__(
            name, 'convex_comb', size, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) == 2, 'ConvexCombinationLayer must have 2 inputs')
        config_assert(
            size * self.get_input_layer(0).size == self.get_input_layer(1).size,
            'Wrong input size for ConvexCombinationLayer')
        self.set_layer_size(size)


@config_layer('interpolation')
class InterpolationLayer(LayerBase):
    def __init__(self, name, inputs, device=None):
        super(InterpolationLayer, self).__init__(
            name, 'interpolation', 0, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) == 3, 'InterpolationLayer must have 3 inputs')
        input_layer0 = self.get_input_layer(0)
        input_layer1 = self.get_input_layer(1)
        input_layer2 = self.get_input_layer(2)
        self.set_layer_size(input_layer1.size)
        config_assert(input_layer0.size == 1, 'weight should be of size 1')
        config_assert(input_layer1.size == input_layer2.size,
                      'the two vector inputs should be of the same size')


@config_layer('bilinear_interp')
class BilinearInterpLayer(LayerBase):
    def __init__(self, name, inputs, **xargs):
        super(BilinearInterpLayer, self).__init__(
            name, 'bilinear_interp', 0, inputs=inputs, **xargs)
        input_layer = self.get_input_layer(0)
        conf = self.config.inputs[0].bilinear_interp_conf
        parse_bilinear(self.inputs[0].bilinear_interp, input_layer.name, conf)
        self.set_cnn_layer(name, conf.out_size_y, conf.out_size_x,
                           conf.image_conf.channels)


@config_layer('sum_to_one_norm')
class SumToOneNormLayer(LayerBase):
    def __init__(self, name, inputs, device=None):
        super(SumToOneNormLayer, self).__init__(
            name, 'sum_to_one_norm', 0, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) == 1, 'SumToOneNormLayer must have 1 input')
        input_layer0 = self.get_input_layer(0)
        self.set_layer_size(input_layer0.size)


@config_layer('row_l2_norm')
class RowL2NormLayer(LayerBase):
    def __init__(self, name, inputs, **xargs):
        super(RowL2NormLayer, self).__init__(
            name, 'row_l2_norm', 0, inputs=inputs, **xargs)
        config_assert(len(self.inputs) == 1, 'RowL2NormLayer must have 1 input')
        input_layer = self.get_input_layer(0)
        self.set_layer_size(input_layer.size)


@config_layer('cos_vm')
class CosSimVecMatLayer(LayerBase):
    def __init__(self, name, size, inputs, cos_scale=1.0, device=None):
        super(CosSimVecMatLayer, self).__init__(
            name, 'cos_vm', size, inputs=inputs, device=device)
        self.config.cos_scale = cos_scale
        config_assert(
            len(self.inputs) == 2, 'CosSimVecMatLayer must have 2 inputs')
        config_assert(
            size * self.get_input_layer(0).size == self.get_input_layer(1).size,
            'Wrong input size for CosSimVecMatLayer')


@config_layer('sampling_id')
class SamplingIdLayer(LayerBase):
    def __init__(self, name, inputs, device=None):
        super(SamplingIdLayer, self).__init__(
            name, 'sampling_id', 0, inputs=inputs, device=device)
        config_assert(
            len(self.inputs) == 1, 'SamplingIdLayer must have 1 input')
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            self.set_layer_size(input_layer.size)


# AverageLayer: "average" for each sample within a sequence.
# average_strategy: set to one of the following:
# 'average': plain average.
# 'sum': sum each sample instead of averaging (i.e., do not divide by sample_num).
# 'squarerootn': sum each sample, but divide by sqrt(sample_num).
@config_layer('average')
class AverageLayer(LayerBase):
    def __init__(self,
                 name,
                 inputs,
                 average_strategy='average',
                 trans_type='non-seq',
                 bias=False,
                 stride=-1,
                 **xargs):
        super(AverageLayer, self).__init__(
            name, 'average', 0, inputs=inputs, **xargs)
        self.config.average_strategy = average_strategy
        if trans_type == 'seq':
            config_assert(stride == -1, 'subseq does not support stride window')
        self.config.trans_type = trans_type
        self.config.seq_pool_stride = stride
        config_assert(len(inputs) == 1, 'AverageLayer must have 1 input')
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            self.set_layer_size(input_layer.size)
        self.create_bias_parameter(bias, self.config.size)

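# A hedged example (the input name 'scores' is hypothetical): sum the
# instances of each sequence instead of averaging them:
#
#   Layer(name='seq_sum', type='average', average_strategy='sum',
#         inputs=['scores'])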

@config_layer('cos')
class CosSimLayer(LayerBase):
    def __init__(self, name, inputs, cos_scale=1, device=None):
        super(CosSimLayer, self).__init__(
            name, 'cos', 1, inputs=inputs, device=device)
        config_assert(len(self.inputs) == 2, 'CosSimLayer must have 2 inputs')
        config_assert(
            self.get_input_layer(0).size == self.get_input_layer(1).size,
            'inputs of CosSimLayer must have same dim')
        self.config.cos_scale = cos_scale


@config_layer('tensor')
class TensorLayer(LayerBase):
    def __init__(self, name, size, inputs, bias=True, **xargs):
        super(TensorLayer, self).__init__(
            name, 'tensor', size, inputs=inputs, **xargs)
        config_assert(len(self.inputs) == 2, 'TensorLayer must have 2 inputs')
        config_assert(size > 0, 'size must be positive')
        config_assert(inputs[1].parameter_name is None,
                      'second parameter should be None.')
        input_layer0 = self.get_input_layer(0)
        input_layer1 = self.get_input_layer(1)
        psize = size * input_layer0.size * input_layer1.size
        dims = [input_layer0.size, input_layer1.size, size]
        self.create_input_parameter(0, psize, dims)
        self.create_bias_parameter(bias, size)


@config_layer('mixed')
class MixedLayer(LayerBase):
    def __init__(self, name, inputs, size=0, bias=True, **xargs):
        config_assert(inputs, 'inputs cannot be empty')
        super(MixedLayer, self).__init__(
            name, 'mixed', size, inputs=inputs, **xargs)
        operator_input_index = []
        for operator in self.operators:
            operator_conf = operator.operator_conf
            for i in xrange(1, len(operator.input_layer_names)):
                input_index = len(self.config.inputs)
                operator_conf.input_indices.append(input_index)
                input_config = Input(operator.input_layer_names[i])
                self.inputs.append(input_config)
                layer_input = self.config.inputs.add()
                layer_input.input_layer_name = input_config.input_layer_name
            for input_index in operator_conf.input_indices:
                input_layer = self.get_input_layer(input_index)
                operator_conf.input_sizes.append(input_layer.size)
                operator_input_index.append(input_index)
            if self.config.size == 0:
                size = operator.calc_output_size(operator_conf.input_sizes)
                if size != 0:
                    self.set_layer_size(size)
            else:
                sz = operator.calc_output_size(operator_conf.input_sizes)
                if sz != 0:
                    config_assert(
                        sz == self.config.size,
                        "different inputs have different size: %s vs. %s" %
                        (sz, self.config.size))
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            input = self.inputs[input_index]
            if input_index not in operator_input_index:
                config_assert(
                    isinstance(input, Projection),
                    "input should be projection or operation")
            if self.config.size == 0 and isinstance(input, Projection):
                size = input.calc_output_size(input_layer)
                if size != 0:
                    self.set_layer_size(size)
            elif isinstance(input, Projection):
                sz = input.calc_output_size(input_layer)
                if sz != 0:
                    config_assert(
                        sz == self.config.size,
                        "different inputs have different size: %s vs. %s" %
                        (sz, self.config.size))
        config_assert(size != 0, "size is not set")

        for input_index in xrange(len(self.inputs)):
            input = self.inputs[input_index]
            if isinstance(input, Projection):
                input_layer = self.get_input_layer(input_index)
                input.proj_conf.input_size = input_layer.size
                input.proj_conf.output_size = size

                input_config = self.config.inputs[input_index]
                input_config.proj_conf.CopyFrom(input.proj_conf)
                input_config.proj_conf.name = gen_parameter_name(name,
                                                                 input_index)
                psize = input.calc_parameter_size(input_layer.size, size)
                dims = input.calc_parameter_dims(input_layer.size, size)
                self.create_input_parameter(input_index, psize, dims)

        for operator in self.operators:
            operator_conf = operator.operator_conf
            operator_conf.output_size = self.config.size
            operator.check_dims()
            record_operator_conf = self.config.operator_confs.add()
            record_operator_conf.CopyFrom(operator_conf)

        psize = self.config.size
        if isinstance(self.inputs[0], ConvProjection):
            self.config.shared_biases = True
            psize = 0
            for input in self.inputs:
                psize += input.calc_bias_size()

        if bias:
            self.config.bias_size = psize
            self.create_bias_parameter(bias, psize)

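# A hedged sketch of a mixed layer (layer names hypothetical): when size is
# left at 0 it is inferred from the projections, and every projection or
# operator must agree on that output size:
#
#   MixedLayer(name='mix',
#              inputs=[FullMatrixProjection('a'), IdentityProjection('b')])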

# like MixedLayer, but no bias parameter
@config_func
def ExpressionLayer(name, inputs, **xargs):
    MixedLayer(name, inputs, bias=False, **xargs)


@config_layer('concat')
class ConcatenateLayer(LayerBase):
    def __init__(self, name, inputs, bias=False, **xargs):
        config_assert(inputs, 'inputs cannot be empty')
        config_assert(not bias, 'ConcatenateLayer does not support bias.')
        super(ConcatenateLayer, self).__init__(
            name, 'concat', 0, inputs=inputs, **xargs)
        size = 0
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            input = self.inputs[input_index]
            if self.config.size == 0:
                size += input_layer.size

        self.set_layer_size(size)


# Like ConcatenateLayer, but each input layer is processed by a Projection.
@config_layer('concat2')
class ConcatenateLayer2(LayerBase):
    def __init__(self, name, inputs, bias=False, **xargs):
        config_assert(inputs, 'inputs cannot be empty')
        super(ConcatenateLayer2, self).__init__(
            name, 'concat2', 0, inputs=inputs, **xargs)

        if isinstance(self.inputs[0], ConvProjection):
            for input_index in xrange(len(self.inputs) - 1):
                input = self.inputs[input_index + 1]
                config_assert(
                    isinstance(input, ConvProjection),
                    "The first input of ConcatenateLayer2 is ConvProjection, "
                    "the other inputs should also be ConvProjection.")

        size = 0
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            input = self.inputs[input_index]
            output_size = input.calc_output_size(input_layer)
            config_assert(output_size != 0, "proj output size is not set")
            size += output_size

        self.set_layer_size(size)

        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            input = self.inputs[input_index]
            input.proj_conf.input_size = input_layer.size
            input.proj_conf.output_size = input.calc_output_size(input_layer)

            input_config = self.config.inputs[input_index]
            input_config.proj_conf.CopyFrom(input.proj_conf)
            input_config.proj_conf.name = gen_parameter_name(name, input_index)
            psize = input.calc_parameter_size(input.proj_conf.input_size,
                                              input.proj_conf.output_size)
            dims = input.calc_parameter_dims(input.proj_conf.input_size,
                                             input.proj_conf.output_size)
            self.create_input_parameter(input_index, psize, dims)

        psize = self.config.size
        if isinstance(self.inputs[0], ConvProjection):
            self.config.shared_biases = True
            psize = 0
            for input in self.inputs:
                psize += input.calc_bias_size()

        if bias:
            self.config.bias_size = psize
            self.create_bias_parameter(bias, psize)


@config_layer('recurrent')
class RecurrentLayer(LayerBase):
    def __init__(self, name, inputs, reversed=False, bias=True, **xargs):
        super(RecurrentLayer, self).__init__(name, 'recurrent', 0, inputs,
                                             **xargs)
        config_assert(len(self.inputs) == 1, 'RecurrentLayer must have 1 input')
        input_layer = self.get_input_layer(0)
        size = input_layer.size
        self.set_layer_size(size)
        self.config.reversed = reversed
        dims = [size, size]
        self.create_input_parameter(0, size * size, dims)
        self.create_bias_parameter(bias, self.config.size)


@config_layer('lstmemory')
class LstmLayer(LayerBase):
    def __init__(self,
                 name,
                 inputs,
                 reversed=False,
                 active_gate_type="sigmoid",
                 active_state_type="sigmoid",
                 bias=True,
                 **xargs):
        super(LstmLayer, self).__init__(name, 'lstmemory', 0, inputs, **xargs)
        config_assert(len(self.inputs) == 1, 'LstmLayer must have 1 input')
        input_layer = self.get_input_layer(0)
        # check that input_layer.size is divisible by 4
        config_assert(input_layer.size % 4 == 0, "size % 4 should be 0!")
        size = input_layer.size / 4
        self.set_layer_size(size)
        self.config.reversed = reversed
        self.config.active_gate_type = active_gate_type
        self.config.active_state_type = active_state_type
        self.create_input_parameter(0, size * size * 4, [size, size, 4])
        #bias includes 3 kinds of peephole, 4 + 3 = 7
        self.create_bias_parameter(bias, size * 7)


@config_layer('lstm_step')
class LstmStepLayer(LayerBase):
    def __init__(self,
                 name,
                 size,
                 inputs,
                 active_gate_type="sigmoid",
                 active_state_type="sigmoid",
                 bias=True,
                 **xargs):
        super(LstmStepLayer, self).__init__(name, 'lstm_step', size, inputs,
                                            **xargs)
        config_assert(len(inputs) == 2, 'LstmStepLayer must have 2 inputs')
        input_layer0 = self.get_input_layer(0)
        input_layer1 = self.get_input_layer(1)
        config_assert(input_layer0.size == 4 * size,
                      'input_layer0.size != 4 * layer.size')
        config_assert(input_layer1.size == size,
                      'input_layer1.size != layer.size')
        self.config.active_gate_type = active_gate_type
        self.config.active_state_type = active_state_type
        self.create_bias_parameter(bias, size * 3)


# get the specific output from the input layer.
@config_layer('get_output')
class GetOutputLayer(LayerBase):
    def __init__(self, name, size, inputs):
        super(GetOutputLayer, self).__init__(name, 'get_output', size, inputs)
        config_assert(
            len(self.inputs) == 1, 'GetOutputLayer must have 1 input')
        inputs = self.inputs[0]
        config_assert(inputs.input_layer_argument,
                      'input_layer_argument cannot be empty')


@config_layer('mdlstmemory')
class MDLstmLayer(LayerBase):
    def __init__(self,
                 name,
                 inputs,
                 directions=True,
                 active_gate_type="sigmoid",
                 active_state_type="sigmoid",
                 bias=True,
                 **xargs):
        super(MDLstmLayer, self).__init__(name, 'mdlstmemory', 0, inputs,
                                          **xargs)
        config_assert(len(self.inputs) == 1, 'MDLstmLayer must have 1 input')
        input_layer = self.get_input_layer(0)
        dim_num = len(directions)
        # check that input_layer.size is divisible by (3 + dim_num)
        config_assert(input_layer.size % (3 + dim_num) == 0,
                      "size % (3 + dim_num) should be 0!")
        size = input_layer.size / (3 + dim_num)
        self.set_layer_size(size)
        self.config.active_gate_type = active_gate_type
        self.config.active_state_type = active_state_type
        for i in xrange(len(directions)):
            self.config.directions.append(int(directions[i]))
        self.create_input_parameter(0, size * size * (3 + dim_num),
                                    [size, size, 3 + dim_num])
        #bias includes 3 kinds of peephole, 3+dim_num+2+dim_num
        self.create_bias_parameter(bias, size * (5 + 2 * dim_num))


@config_layer('gated_recurrent')
class GatedRecurrentLayer(LayerBase):
    def __init__(self,
                 name,
                 inputs,
                 reversed=False,
                 active_gate_type="sigmoid",
                 bias=True,
                 **xargs):
        super(GatedRecurrentLayer, self).__init__(name, 'gated_recurrent', 0,
                                                  inputs, **xargs)
        config_assert(
            len(self.inputs) == 1, 'GatedRecurrentLayer must have 1 input')
        input_layer = self.get_input_layer(0)
        # check that input_layer.size is divisible by 3
        config_assert(input_layer.size % 3 == 0, "size % 3 should be 0!")
        size = input_layer.size / 3
        self.set_layer_size(size)
        self.config.reversed = reversed
        self.config.active_gate_type = active_gate_type
        self.create_input_parameter(0, size * size * 3, [size, size * 3])
        self.create_bias_parameter(bias, size * 3)


@config_layer('gru_step')
class GruStepLayer(LayerBase):
    def __init__(self,
                 name,
                 size,
                 inputs,
                 active_gate_type="sigmoid",
                 bias=True,
                 **xargs):
        super(GruStepLayer, self).__init__(name, 'gru_step', size, inputs,
                                           **xargs)
        config_assert(len(self.inputs) == 2, 'GruStepLayer must have 2 inputs')
        input_layer0 = self.get_input_layer(0)
        input_layer1 = self.get_input_layer(1)
        config_assert(input_layer0.size == 3 * size,
                      'input_layer0.size != 3 * layer.size')
        config_assert(input_layer1.size == size,
                      'input_layer1.size != layer.size')
        self.config.active_gate_type = active_gate_type
        self.create_input_parameter(0, size * size * 3, [size, size * 3])
        self.create_bias_parameter(bias, size * 3)


'''
 A layer for calculating the cost of a sequential conditional random field model.
 Example: CRFLayer(name="crf_cost", size=label_num,
                   inputs=["output", "label", "weight"])
          where "weight" is optional, one weight for each sequence
 @param coeff: weight of the layer
'''


@config_layer('crf')
class CRFLayer(LayerBase):
    def __init__(self, name, size, inputs, coeff=1.0, device=None):
        super(CRFLayer, self).__init__(name, 'crf', size, inputs, device=device)
        config_assert(2 <= len(self.inputs) <= 3,
                      'CRFLayer must have 2 or 3 inputs')
        self.create_input_parameter(0, size * (size + 2), [size + 2, size])
        self.config.coeff = coeff


'''
 A layer for calculating the decoding sequence of a sequential conditional
 random field model.
 The decoding sequence is stored in output_.ids.
 If a second input is provided, it is treated as the ground-truth label, and
 this layer will also calculate error: output_.value[i] is 1 for incorrect
 decoding and 0 for correct decoding.
'''


@config_layer('crf_decoding')
class CRFDecodingLayer(LayerBase):
    def __init__(self, name, size, inputs, device=None):
        super(CRFDecodingLayer, self).__init__(
            name, 'crf_decoding', size, inputs, device=device)
        config_assert(
            len(self.inputs) <= 2,
            'CRFDecodingLayer cannot have more than 2 inputs')
        self.create_input_parameter(0, size * (size + 2), [size + 2, size])


@config_layer('ctc')
class CTCLayer(LayerBase):
    def __init__(self, name, size, inputs, norm_by_times=False, device=None):
        super(CTCLayer, self).__init__(name, 'ctc', size, inputs, device=device)
        self.config.norm_by_times = norm_by_times
        config_assert(len(self.inputs) == 2, 'CTCLayer must have 2 inputs')


@config_layer('kmax_seq_score')
class KmaxSeqScoreLayer(LayerBase):
    def __init__(self, name, inputs, beam_size, **xargs):
        super(KmaxSeqScoreLayer, self).__init__(
            name, 'kmax_seq_score', 0, inputs=inputs, **xargs)
        config_assert(
            len(self.inputs) == 1, 'KmaxSeqScoreLayer has only one input.')
        self.config.beam_size = beam_size


@config_layer('warp_ctc')
class WarpCTCLayer(LayerBase):
    def __init__(self,
                 name,
                 size,
                 inputs,
                 blank=0,
                 norm_by_times=False,
                 device=None):
        super(WarpCTCLayer, self).__init__(
            name, 'warp_ctc', size=size, inputs=inputs, device=device)
        self.config.blank = blank
        self.config.norm_by_times = norm_by_times
        config_assert(len(self.inputs) == 2, 'WarpCTCLayer must have 2 inputs')
        input_layer = self.get_input_layer(0)
        config_assert(
            (input_layer.active_type == '' or
             input_layer.active_type == 'linear'),
            "Expecting the active_type of input layer to be linear or null")


@config_layer('recurrent_layer_group')
class RecurrentLayerGroup(LayerBase):
    def __init__(self, name, device=None):
        super(RecurrentLayerGroup, self).__init__(
            name, 'recurrent_layer_group', 0, inputs=[], device=device)


# Deprecated, use a new layer specific class instead
@config_func
def Layer(name, type, **xargs):
    layers = {}
    layers.update(g_cost_map)
    layers.update(g_layer_type_map)
    layer_func = layers.get(type)
    config_assert(layer_func, "layer type '%s' not supported." % type)
    return layer_func(name, **xargs)


@config_func
def ParameterHook(type, **kwargs):
    if type == 'pruning':
        hook = ParameterUpdaterHookConfig()
        hook.type = type
        sparsity_ratio = kwargs.get('sparsity_ratio', None)
        if sparsity_ratio is not None:
            hook.sparsity_ratio = sparsity_ratio
        return hook
    elif type == 'dpruning':
        hook = ParameterUpdaterHookConfig()
        hook.type = type
        return hook
    else:
        return None

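# A hedged example (ratio illustrative): build a pruning hook config that can
# be passed to a parameter's update_hooks; unknown hook types yield None:
#
#   hook = ParameterHook('pruning', sparsity_ratio=0.8)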

@config_func
def Parameter(name,
              size,
              device,
              dims,
              learning_rate=None,
              momentum=None,
              decay_rate=None,
              decay_rate_l1=None,
              initial_mean=None,
              initial_std=None,
              initial_strategy=None,
              initial_smart=None,
              num_batches_regularization=None,
              sparse_remote_update=None,
              sparse_update=None,
              gradient_clipping_threshold=None,
              sparse=None,
              format=None,
              need_compact=None,
              is_static=None,
              is_shared=None,
              update_hooks=None,
              initializer=None):

    config_assert(name not in g_parameter_map,
                  'Duplicated parameter name: ' + name)

    para = g_config.model_config.parameters.add()
    para.name = name
    para.size = size
    if device is not None:
        para.device = int(device)
    para.dims.extend(dims)

    if learning_rate is not None:
        para.learning_rate = float(learning_rate)

    momentum = default(momentum, g_default_momentum)
    if momentum is not None:
        para.momentum = float(momentum)

    config_assert(not momentum or not decay_rate_l1,
                  "momentum and decay_rate_l1 cannot both be non-zero")

    decay_rate = default(decay_rate, g_default_decay_rate)
    if decay_rate is not None:
        para.decay_rate = decay_rate

    if decay_rate_l1 is not None:
        para.decay_rate_l1 = decay_rate_l1
    para.initial_std = default(initial_std, g_default_initial_std)
    para.initial_mean = default(initial_mean, g_default_initial_mean)

    num_batches_regularization = default(num_batches_regularization,
                                         g_default_num_batches_regularization)
    if num_batches_regularization is not None:
        para.num_batches_regularization = int(num_batches_regularization)

    if sparse_remote_update is not None:
        para.sparse_remote_update = sparse_remote_update
        if sparse_remote_update:
            g_config.opt_config.use_sparse_remote_updater = True
    if sparse_update is not None:
        para.sparse_update = sparse_update
    gradient_clipping_threshold = default(gradient_clipping_threshold,
                                          g_default_gradient_clipping_threshold)
    if gradient_clipping_threshold is not None:
        para.gradient_clipping_threshold = gradient_clipping_threshold
    para.initial_strategy = default(initial_strategy,
                                    g_default_initial_strategy)
    para.initial_smart = default(initial_smart, g_default_initial_smart)
    if para.initial_smart:
        para.initial_mean = 0.
        if len(para.dims) != 0:
            para.initial_std = 1. / math.sqrt(para.dims[0])
        else:
            print(
                "initial_smart is set, but dims is not set; initial_smart "
                "may not be used in this layer")
            traceback.print_exc()
            para.initial_std = 1. / math.sqrt(para.size)
    if g_default_compact_func is not None:
        sparse, format, need_compact = g_default_compact_func(para.name)

    if sparse is not None:
        para.is_sparse = sparse
    if format is not None:
        para.format = format
    if need_compact is not None:
        para.need_compact = need_compact
    if is_static is not None:
        para.is_static = is_static
    config_assert(not para.sparse_remote_update or not para.is_static,
                  "sparse_remote_update and is_static cannot both be true")
    if is_shared is not None:
        para.is_shared = is_shared

    update_hooks = default(update_hooks, g_default_update_hooks)

    if update_hooks is not None:
        if hasattr(update_hooks, '__call__'):
            update_hooks = update_hooks()

        if isinstance(update_hooks, list):
            for hook in update_hooks:
                para.update_hooks.extend([hook])
        else:
            para.update_hooks.extend([update_hooks])

    g_parameter_map[name] = para
    if initializer is not None:
        config_assert(
            callable(initializer),
            "parameter initializer should be a callable object")
        g_parameter_initializer_map[name] = initializer

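# A hedged example of a direct Parameter() call (all values illustrative; in
# practice parameters are usually created implicitly by layer classes):
#
#   Parameter(name='fc.w', size=1024, device=None, dims=[256, 4],
#             learning_rate=0.1, initial_smart=True)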

@config_func
def default_initial_std(val):
    global g_default_initial_std
    g_default_initial_std = val


@config_func
def default_initial_mean(val):
    global g_default_initial_mean
    g_default_initial_mean = val


@config_func
def default_initial_strategy(val):
    global g_default_initial_strategy
    g_default_initial_strategy = val


@config_func
def default_initial_smart(val):
    global g_default_initial_smart
    g_default_initial_smart = val


@config_func
def default_momentum(val):
    global g_default_momentum
    g_default_momentum = val


@config_func
def default_decay_rate(val):
    global g_default_decay_rate
    g_default_decay_rate = val


@config_func
def default_num_batches_regularization(val):
    global g_default_num_batches_regularization
    g_default_num_batches_regularization = val


@config_func
def default_gradient_clipping_threshold(val):
    global g_default_gradient_clipping_threshold
    g_default_gradient_clipping_threshold = val


@config_func
def default_device(val):
    global g_default_device
    g_default_device = val


@config_func
def default_update_hooks(val):
    global g_default_update_hooks
    g_default_update_hooks = val


@config_func
def default_compact_func(val):
    global g_default_compact_func
    g_default_compact_func = val


def make_importer(config_dir, config_args):
    def Import(config_file, local_args={}):
        if not config_file.startswith('/'):
            config_file = config_dir + '/' + config_file
            g_config.config_files.append(config_file)
        execfile(config_file,
                 make_config_environment(config_file, config_args), local_args)

    return Import

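# A hedged example of use inside a config file (file names hypothetical);
# relative paths are resolved against the directory of the current config:
#
#   Import('common_layers.py')
#   Import('overrides.py', dict(num_hidden=10))  # extra locals for the file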

DEFAULT_SETTING = dict(
    batch_size=None,
    mini_batch_size=None,
    algorithm='async_sgd',
    async_lagged_grad_discard_ratio=1.5,
    learning_method='momentum',
    gradient_clipping_threshold=None,
    num_batches_per_send_parameter=None,
    num_batches_per_get_parameter=None,
    center_parameter_update_method=None,
    learning_rate=1.,
    learning_rate_decay_a=0.,
    learning_rate_decay_b=0.,
    learning_rate_schedule='poly',
    learning_rate_args='',
    l1weight=0.1,
    l2weight=0.,
    l2weight_zero_iter=0,
    c1=0.0001,
    backoff=0.5,
    owlqn_steps=10,
    max_backoff=5,
    average_window=0,
    do_average_in_cpu=False,
    max_average_window=None,
    ada_epsilon=1e-6,
    ada_rou=0.95,
    delta_add_rate=1.0,
    shrink_parameter_value=0,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8, )

settings = copy.deepcopy(DEFAULT_SETTING)

settings_deprecated = dict(usage_ratio=1., )

trainer_settings = dict(
    save_dir="./output/model",
    init_model_path=None,
    start_pass=0, )


@config_func
def Settings(**args):
    for k, v in args.iteritems():
        if k == "usage_ratio":
            logger.warning(
                "Deprecated: define usage_ratio in DataConfig instead")
            if g_config.HasField("data_config"):
                g_config.data_config.__setattr__(k, v)
            settings_deprecated[k] = v
            continue
        elif k in settings:
            settings[k] = v
        elif k in trainer_settings:
            trainer_settings[k] = v
        else:
            logger.fatal('Unknown setting: %s' % k)
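
# Illustrative call from a config file (all values are made-up examples):
#
#     Settings(
#         batch_size=128,
#         learning_rate=1e-3,
#         learning_method='adam',
#         save_dir='./output/model')
#
# Keys are routed to settings or trainer_settings; unknown keys are fatal.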


@config_func
def cluster_config(**args):
    # Accepted so that config files supplying cluster options still parse;
    # the options themselves are ignored here.
    pass


@config_func
def EnableSubmodelSuffix(flag=True):
    """
    If enabled, layer and evaluator names in a submodel are automatically
    suffixed with @submodel_name.
    """
    global g_add_submodel_suffix
    g_add_submodel_suffix = flag
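
# For example (names are illustrative): after EnableSubmodelSuffix(), a layer
# "fc1" defined inside a submodel "rnn" is registered as "fc1@rnn".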


def make_config_environment(config_file, config_args):
    def make_setter(k):
        def setter(v):
            logger.fatal("Obsolete: use Settings(%s=%s, ...) instead" % (k, v))

        return setter

    funcs = {}
    funcs.update(g_config_funcs)

    for k in settings.iterkeys():
        funcs[k] = make_setter(k)
    for k in settings_deprecated.iterkeys():
        funcs[k] = make_setter(k)
    config_dir = os.path.dirname(config_file)
    if not config_dir:
        config_dir = '.'

    funcs.update(
        Import=make_importer(config_dir, config_args),
        get_config_arg=make_get_config_arg(config_args), )

    funcs.update(g_extended_config_funcs)

    return funcs
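
# The returned dict is used as the globals for executing a config file, so a
# config can directly call Import(...), get_config_arg(...), and every
# function registered via @config_func or supplied by an extension module.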


def make_get_config_arg(config_args):
    def get_config_arg(name, type, default=None):
        if type == bool:
            s = config_args.get(name)
            if not s:
                return default
            if s == 'True' or s == '1' or s == 'true':
                return True
            if s == 'False' or s == '0' or s == 'false':
                return False
            raise ValueError('Value of config_arg %s is not boolean' % name)
        else:
            return type(config_args.get(name, default))

    return get_config_arg
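
# Usage sketch ('with_cost' and 'hidden_dim' are hypothetical names): with
# --config_args=with_cost=false,hidden_dim=256 a config file could read
#
#     with_cost = get_config_arg('with_cost', bool, True)
#     hidden_dim = get_config_arg('hidden_dim', int, 128)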


def importlib(name):
    # Import a module by name and return the module object. Note that this
    # helper shadows the standard-library importlib module within this file.
    __import__(name)
    return sys.modules[name]


def find_caller():
    # Search the call stack from the most recent frames backwards for the
    # first frame outside config_parser.py, so that log messages point at the
    # user's config file rather than at this module.
    stack = traceback.extract_stack()
    for s in stack[-4::-1]:
        if not s[0].endswith('config_parser.py'):
            return s[0], s[1], s[2]
    return "(unknown file)", 0, "(unknown function)"


def my_fatal(s):
    # Installed as logger.fatal by begin_parse() so that a fatal config error
    # raises an exception after being logged.
    logger.critical(s)
    raise Exception()


_parse_config_hooks = set()


def register_parse_config_hook(f):
    """
    Register a hook function for parse_config. parse_config will invoke the
    hook at the beginning of parsing. This makes it possible to reset the
    global state used for constructing the model.
    """
    _parse_config_hooks.add(f)
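
# Illustrative registration (a sketch; _reset_my_globals is hypothetical):
#
#     def _reset_my_globals():
#         pass  # clear module-level state kept between parses
#
#     register_parse_config_hook(_reset_my_globals)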


def update_g_config():
    '''
    Update g_config after executing the config file or config functions.
    '''
    for k, v in settings.iteritems():
        if v is None:
            continue
        g_config.opt_config.__setattr__(k, v)

    for k, v in trainer_settings.iteritems():
        if v is None:
            continue
        g_config.__setattr__(k, v)

    for name in g_config.model_config.input_layer_names:
        assert name in g_layer_map, \
            'input name "%s" does not correspond to a layer name' % name
        assert (g_layer_map[name].type == "data" or g_layer_map[name].type == "data_trim"), \
            'The type of input layer "%s" is not "data"' % name
    for name in g_config.model_config.output_layer_names:
        assert name in g_layer_map, \
            'output name "%s" does not correspond to a layer name' % name
    return g_config


def begin_parse():
    init_config_environment()
    for hook in _parse_config_hooks:
        hook()

    logger.findCaller = find_caller
    logger.fatal = my_fatal

    g_config.model_config.type = "nn"

    global g_current_submodel, g_root_submodel
    g_root_submodel = g_config.model_config.sub_models.add()
    g_root_submodel.name = 'root'
    g_root_submodel.is_recurrent_layer_group = False
    g_current_submodel = g_root_submodel


def parse_config(trainer_config, config_arg_str):
    '''
    @param config_arg_str: a string of the form var1=val1,var2=val2. It will
    be passed to the config script as the dictionary CONFIG_ARGS.
    '''

    begin_parse()
    config_args = {}

    if config_arg_str:
        config_args = dict([f.split('=') for f in config_arg_str.split(',')])

    global g_command_config_args
    g_command_config_args.update(config_args)

    extension_module_name = config_args.get('extension_module_name')
    if extension_module_name:
        global g_extended_config_funcs
        extension_module = importlib(extension_module_name)
        g_extended_config_funcs = extension_module.get_config_funcs(g_config)

    if hasattr(trainer_config, '__call__'):
        trainer_config.func_globals.update(
            make_config_environment("", config_args))
        trainer_config()
    else:
        execfile(trainer_config,
                 make_config_environment(trainer_config, config_args))

    return update_g_config()
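
# Illustrative call (a sketch; the file name and arguments are made up):
#
#     config_proto = parse_config('trainer_config.py',
#                                 'batch_size=32,with_cost=true')
#
# config_arg_str can also name an extension module, e.g.
# 'extension_module_name=my_ext', whose get_config_funcs(g_config) results
# are merged into the config environment.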


def parse_config_and_serialize(trainer_config, config_arg_str):
    try:
        config = parse_config(trainer_config, config_arg_str)
        #logger.info(config)
        return config.SerializeToString()
    except:
        traceback.print_exc()
        raise


if __name__ == '__main__':
    try:
        config = parse_config(sys.argv[1], '')
        config.SerializeToString()
        __real_print__(str(config))
    except:
        traceback.print_exc()
        raise