From 9cb833cbd8e745b0b7ad5276d7211da74698df4f Mon Sep 17 00:00:00 2001
From: caoying03
Date: Fri, 1 Sep 2017 13:46:30 +0800
Subject: [PATCH] rename sequence into seq to keep a consistent style.

---
 .../paddle/trainer_config_helpers/layers.py   | 12 +++++-----
 .../tests/configs/file_list.sh                |  2 +-
 .../test_cross_entropy_over_beam.protostr     | 22 +++++++++----------
 .../test_kmax_seq_socre_layer.protostr        |  8 +++----
 .../configs/test_cross_entropy_over_beam.py   |  6 ++---
 .../configs/test_kmax_seq_socre_layer.py      |  2 +-
 ...py => test_sub_nested_seq_select_layer.py} |  0
 7 files changed, 26 insertions(+), 26 deletions(-)
 rename python/paddle/trainer_config_helpers/tests/configs/{test_seq_select_layers.py => test_sub_nested_seq_select_layer.py} (100%)

diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py
index fdf4136aa..2bd274fad 100644
--- a/python/paddle/trainer_config_helpers/layers.py
+++ b/python/paddle/trainer_config_helpers/layers.py
@@ -137,7 +137,7 @@ __all__ = [
     'clip_layer',
     'slice_projection',
     'seq_slice_layer',
-    'kmax_sequence_score_layer',
+    'kmax_seq_score_layer',
     'img_pool3d_layer',
     'scale_shift_layer',
     'img_conv3d_layer',
@@ -5994,7 +5994,7 @@ def cross_entropy_over_beam(input, name=None):
     Note that, if gold falls off the beam at search step t, then the cost is
     calculated over the beam at step t.

-    This cost layer always works together with kmax_sequence_score_layer,
+    This cost layer always works together with kmax_seq_score_layer,
     sub_nested_seq_layer, and sequence_slice_layer to trim the input to form a
     sub-search space.

@@ -6597,14 +6597,14 @@ def seq_slice_layer(input, starts, ends, name=None):

 @wrap_name_default()
 @layer_support()
-def kmax_sequence_score_layer(input, name=None, beam_size=1):
+def kmax_seq_score_layer(input, name=None, beam_size=1):
     """
     This layer accepts one input which are scores over a sequence or a nested
     sequence, and returns indices of beam_size sequences with highest scores.

     .. code-block:: python

-        kmax_indices = kmax_sequence_score_layer(input=input_layer, beam_size)
+        kmax_indices = kmax_seq_score_layer(input=input_layer, beam_size)


     :param name: The Layer Name.
@@ -6617,10 +6617,10 @@ def kmax_sequence_score_layer(input, name=None, beam_size=1):
     :return: LayerOutput object.
     :rtype: LayerOutput
     """
-    assert isinstance(input, LayerOutput), ("kmax_sequence_score_layer "
+    assert isinstance(input, LayerOutput), ("kmax_seq_score_layer "
                                             "accepts only one input.")
     assert input.size == 1, (
-        "input of kmax_sequence_score_layer is a score"
+        "input of kmax_seq_score_layer is a score "
         "over a sequence or a nested sequence, so its width must be 1.")

     Layer(
diff --git a/python/paddle/trainer_config_helpers/tests/configs/file_list.sh b/python/paddle/trainer_config_helpers/tests/configs/file_list.sh
index 7b132c23d..df872a90f 100755
--- a/python/paddle/trainer_config_helpers/tests/configs/file_list.sh
+++ b/python/paddle/trainer_config_helpers/tests/configs/file_list.sh
@@ -8,7 +8,7 @@ test_spp_layer test_bilinear_interp test_maxout test_bi_grumemory math_ops
 test_seq_concat_reshape test_pad test_smooth_l1 test_multiplex_layer
 test_prelu_layer test_row_conv test_detection_output_layer test_multibox_loss_layer
 test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer
-test_kmax_seq_socre_layer test_seq_select_layers test_scale_shift_layer
+test_kmax_seq_socre_layer test_sub_nested_seq_select_layer test_scale_shift_layer
 test_seq_slice_layer test_cross_entropy_over_beam test_pooling3D_layer
 test_conv3d_layer test_deconv3d_layer)

diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cross_entropy_over_beam.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cross_entropy_over_beam.protostr
index c43fc48e2..a60256969 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cross_entropy_over_beam.protostr
+++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cross_entropy_over_beam.protostr
@@ -12,7 +12,7 @@ layers {
   active_type: ""
 }
 layers {
-  name: "__kmax_sequence_score_layer_0__"
+  name: "__kmax_seq_score_layer_0__"
   type: "kmax_seq_score"
   active_type: ""
   inputs {
@@ -29,7 +29,7 @@ layers {
     input_layer_name: "sentence_states"
   }
   inputs {
-    input_layer_name: "__kmax_sequence_score_layer_0__"
+    input_layer_name: "__kmax_seq_score_layer_0__"
   }
 }
 layers {
@@ -44,7 +44,7 @@ layers {
   bias_parameter_name: "___fc_layer_0__.wbias"
 }
 layers {
-  name: "__kmax_sequence_score_layer_1__"
+  name: "__kmax_seq_score_layer_1__"
   type: "kmax_seq_score"
   active_type: ""
   inputs {
@@ -61,7 +61,7 @@ layers {
     input_layer_name: "__sub_nested_seq_layer_0__"
   }
   inputs {
-    input_layer_name: "__kmax_sequence_score_layer_1__"
+    input_layer_name: "__kmax_seq_score_layer_1__"
   }
   select_first: true
 }
@@ -77,7 +77,7 @@ layers {
   bias_parameter_name: "___fc_layer_1__.wbias"
 }
 layers {
-  name: "__kmax_sequence_score_layer_2__"
+  name: "__kmax_seq_score_layer_2__"
   type: "kmax_seq_score"
   active_type: ""
   inputs {
@@ -111,7 +111,7 @@ layers {
     input_layer_name: "sentence_scores"
   }
   inputs {
-    input_layer_name: "__kmax_sequence_score_layer_0__"
+    input_layer_name: "__kmax_seq_score_layer_0__"
   }
   inputs {
     input_layer_name: "sentences_ids"
@@ -120,7 +120,7 @@ layers {
     input_layer_name: "__fc_layer_0__"
   }
   inputs {
-    input_layer_name: "__kmax_sequence_score_layer_1__"
+    input_layer_name: "__kmax_seq_score_layer_1__"
   }
   inputs {
     input_layer_name: "start_ids"
@@ -129,7 +129,7 @@ layers {
     input_layer_name: "__fc_layer_1__"
  }
  inputs {
-    input_layer_name: "__kmax_sequence_score_layer_2__"
+    input_layer_name: "__kmax_seq_score_layer_2__"
   }
   inputs {
     input_layer_name: "end_ids"
@@ -185,13 +185,13 @@ sub_models {
   name: "root"
   layer_names: "sentence_states"
   layer_names: "sentence_scores"
-  layer_names: "__kmax_sequence_score_layer_0__"
+  layer_names: "__kmax_seq_score_layer_0__"
   layer_names: "__sub_nested_seq_layer_0__"
   layer_names: "__fc_layer_0__"
-  layer_names: "__kmax_sequence_score_layer_1__"
+  layer_names: "__kmax_seq_score_layer_1__"
   layer_names: "__seq_slice_layer_0__"
   layer_names: "__fc_layer_1__"
-  layer_names: "__kmax_sequence_score_layer_2__"
+  layer_names: "__kmax_seq_score_layer_2__"
   layer_names: "sentences_ids"
   layer_names: "start_ids"
   layer_names: "end_ids"
diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_kmax_seq_socre_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_kmax_seq_socre_layer.protostr
index 3d32220bf..f93d368c8 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_kmax_seq_socre_layer.protostr
+++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_kmax_seq_socre_layer.protostr
@@ -17,7 +17,7 @@ layers {
   bias_parameter_name: "___fc_layer_0__.wbias"
 }
 layers {
-  name: "__kmax_sequence_score_layer_0__"
+  name: "__kmax_seq_score_layer_0__"
   type: "kmax_seq_score"
   active_type: ""
   inputs {
@@ -46,14 +46,14 @@ parameters {
   initial_smart: false
 }
 input_layer_names: "input_seq"
-output_layer_names: "__kmax_sequence_score_layer_0__"
+output_layer_names: "__kmax_seq_score_layer_0__"
 sub_models {
   name: "root"
   layer_names: "input_seq"
   layer_names: "__fc_layer_0__"
-  layer_names: "__kmax_sequence_score_layer_0__"
+  layer_names: "__kmax_seq_score_layer_0__"
   input_layer_names: "input_seq"
-  output_layer_names: "__kmax_sequence_score_layer_0__"
+  output_layer_names: "__kmax_seq_score_layer_0__"
   is_recurrent_layer_group: false
 }

diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_cross_entropy_over_beam.py b/python/paddle/trainer_config_helpers/tests/configs/test_cross_entropy_over_beam.py
index 240e703dc..4a5bdf118 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_cross_entropy_over_beam.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_cross_entropy_over_beam.py
@@ -7,14 +7,14 @@ beam_size = 5
 # the first beam expansion.
 sentence_states = data_layer(name="sentence_states", size=32)
 sentence_scores = data_layer(name="sentence_scores", size=1)
-topk_sentence_ids = kmax_sequence_score_layer(
+topk_sentence_ids = kmax_seq_score_layer(
     input=sentence_scores, beam_size=beam_size)

 # the second beam expansion.
 topk_sen = sub_nested_seq_layer(
     input=sentence_states, selected_indices=topk_sentence_ids)
 start_pos_scores = fc_layer(input=topk_sen, size=1, act=LinearActivation())
-topk_start_pos_ids = kmax_sequence_score_layer(
+topk_start_pos_ids = kmax_seq_score_layer(
     input=sentence_scores, beam_size=beam_size)

 # the final beam expansion.
@@ -22,7 +22,7 @@ topk_start_spans = seq_slice_layer(
     input=topk_sen, starts=topk_start_pos_ids, ends=None)
 end_pos_scores = fc_layer(
     input=topk_start_spans, size=1, act=LinearActivation())
-topk_end_pos_ids = kmax_sequence_score_layer(
+topk_end_pos_ids = kmax_seq_score_layer(
     input=end_pos_scores, beam_size=beam_size)

 # define the cost
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_kmax_seq_socre_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_kmax_seq_socre_layer.py
index 48d0cd55d..171da10f7 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_kmax_seq_socre_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_kmax_seq_socre_layer.py
@@ -4,6 +4,6 @@ from paddle.trainer_config_helpers import *

 data = data_layer(name="input_seq", size=128)
 scores = fc_layer(input=data, size=1, act=ExpActivation())
-kmax_seq_id = kmax_sequence_score_layer(input=scores, beam_size=5)
+kmax_seq_id = kmax_seq_score_layer(input=scores, beam_size=5)

 outputs(kmax_seq_id)
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_seq_select_layers.py b/python/paddle/trainer_config_helpers/tests/configs/test_sub_nested_seq_select_layer.py
similarity index 100%
rename from python/paddle/trainer_config_helpers/tests/configs/test_seq_select_layers.py
rename to python/paddle/trainer_config_helpers/tests/configs/test_sub_nested_seq_select_layer.py
--
GitLab
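
A minimal usage sketch of the renamed layer, mirroring the
test_kmax_seq_socre_layer.py config touched by this patch; it assumes the v1
trainer_config_helpers API as shown above and is illustrative only, not part
of the change:

    from paddle.trainer_config_helpers import *

    # A (nested) sequence whose per-step score comes from a width-1 fc_layer;
    # kmax_seq_score_layer requires its input width to be exactly 1.
    data = data_layer(name="input_seq", size=128)
    scores = fc_layer(input=data, size=1, act=ExpActivation())

    # Renamed from kmax_sequence_score_layer: returns the indices of the
    # beam_size highest-scoring sequences.
    kmax_seq_id = kmax_seq_score_layer(input=scores, beam_size=5)

    outputs(kmax_seq_id)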