From b57a2b989bbee80a46d28792e55bbf9b784fc9b5 Mon Sep 17 00:00:00 2001
From: chengduoZH
Date: Mon, 17 Sep 2018 17:04:14 +0800
Subject: [PATCH] remove test_seq_concat_op

---
 paddle/fluid/API.spec                        |  11 +-
 paddle/fluid/operators/sequence_concat_op.cc |  62 ++++-----
 .../tests/unittests/test_seq_concat_op.py    | 124 ------------------
 3 files changed, 35 insertions(+), 162 deletions(-)
 delete mode 100644 python/paddle/fluid/tests/unittests/test_seq_concat_op.py

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index c4c336424e..8c4a4b6f45 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -61,11 +61,11 @@ paddle.fluid.DistributeTranspiler.get_pserver_programs ArgSpec(args=['self', 'en
 paddle.fluid.DistributeTranspiler.get_startup_program ArgSpec(args=['self', 'endpoint', 'pserver_program', 'startup_program'], varargs=None, keywords=None, defaults=(None, None))
 paddle.fluid.DistributeTranspiler.get_trainer_program ArgSpec(args=['self', 'wait_port'], varargs=None, keywords=None, defaults=(True,))
 paddle.fluid.DistributeTranspiler.transpile ArgSpec(args=['self', 'trainer_id', 'program', 'pservers', 'trainers', 'sync_mode', 'startup_program'], varargs=None, keywords=None, defaults=(None, '127.0.0.1:6174', 1, True, None))
-paddle.fluid.InferenceTranspiler.__init__ 
+paddle.fluid.InferenceTranspiler.__init__
 paddle.fluid.InferenceTranspiler.transpile ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.memory_optimize ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level'], varargs=None, keywords=None, defaults=(None, False, 0))
 paddle.fluid.release_memory ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.DistributeTranspilerConfig.__init__ 
+paddle.fluid.DistributeTranspilerConfig.__init__
 paddle.fluid.ParallelExecutor.__init__ ArgSpec(args=['self', 'use_cuda', 'loss_name', 'main_program', 'share_vars_from', 'exec_strategy', 'build_strategy', 'num_trainers', 'trainer_id', 'scope'], varargs=None, keywords='kwargs', defaults=(None, None, None, None, None, 1, 0, None))
 paddle.fluid.ParallelExecutor.run ArgSpec(args=['self', 'fetch_list', 'feed', 'feed_dict', 'return_numpy'], varargs=None, keywords=None, defaults=(None, None, True))
 paddle.fluid.ExecutionStrategy.__init__ __init__(self: paddle.fluid.core.ExecutionStrategy) -> None
@@ -174,6 +174,7 @@ paddle.fluid.layers.stack ArgSpec(args=['x', 'axis'], varargs=None, keywords=Non
 paddle.fluid.layers.pad2d ArgSpec(args=['input', 'paddings', 'mode', 'pad_value', 'data_format', 'name'], varargs=None, keywords=None, defaults=([0, 0, 0, 0], 'constant', 0.0, 'NCHW', None))
 paddle.fluid.layers.unstack ArgSpec(args=['x', 'axis', 'num'], varargs=None, keywords=None, defaults=(0, None))
 paddle.fluid.layers.sequence_enumerate ArgSpec(args=['input', 'win_size', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0, None))
+paddle.fluid.layers.sequence_concat ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
 paddle.fluid.layers.open_recordio_file ArgSpec(args=['filename', 'shapes', 'lod_levels', 'dtypes', 'pass_num', 'for_parallel'], varargs=None, keywords=None, defaults=(1, True))
 paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None))
@@ -348,7 +349,7 @@ paddle.fluid.transpiler.DistributeTranspiler.get_pserver_programs ArgSpec(args=[
 paddle.fluid.transpiler.DistributeTranspiler.get_startup_program ArgSpec(args=['self', 'endpoint', 'pserver_program', 'startup_program'], varargs=None, keywords=None, defaults=(None, None))
 paddle.fluid.transpiler.DistributeTranspiler.get_trainer_program ArgSpec(args=['self', 'wait_port'], varargs=None, keywords=None, defaults=(True,))
 paddle.fluid.transpiler.DistributeTranspiler.transpile ArgSpec(args=['self', 'trainer_id', 'program', 'pservers', 'trainers', 'sync_mode', 'startup_program'], varargs=None, keywords=None, defaults=(None, '127.0.0.1:6174', 1, True, None))
-paddle.fluid.transpiler.InferenceTranspiler.__init__ 
+paddle.fluid.transpiler.InferenceTranspiler.__init__
 paddle.fluid.transpiler.InferenceTranspiler.transpile ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.transpiler.memory_optimize ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level'], varargs=None, keywords=None, defaults=(None, False, 0))
 paddle.fluid.transpiler.release_memory ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,))
@@ -358,7 +359,7 @@ paddle.fluid.transpiler.HashName.reset ArgSpec(args=['self'], varargs=None, keyw
 paddle.fluid.transpiler.RoundRobin.__init__ ArgSpec(args=['self', 'pserver_endpoints'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.transpiler.RoundRobin.dispatch ArgSpec(args=['self', 'varlist'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.transpiler.RoundRobin.reset ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.transpiler.DistributeTranspilerConfig.__init__ 
+paddle.fluid.transpiler.DistributeTranspilerConfig.__init__
 paddle.fluid.nets.simple_img_conv_pool ArgSpec(args=['input', 'num_filters', 'filter_size', 'pool_size', 'pool_stride', 'pool_padding', 'pool_type', 'global_pooling', 'conv_stride', 'conv_padding', 'conv_dilation', 'conv_groups', 'param_attr', 'bias_attr', 'act', 'use_cudnn', 'use_mkldnn'], varargs=None, keywords=None, defaults=(0, 'max', False, 1, 0, 1, 1, None, None, None, True, False))
 paddle.fluid.nets.sequence_conv_pool ArgSpec(args=['input', 'num_filters', 'filter_size', 'param_attr', 'act', 'pool_type'], varargs=None, keywords=None, defaults=(None, 'sigmoid', 'max'))
 paddle.fluid.nets.glu ArgSpec(args=['input', 'dim'], varargs=None, keywords=None, defaults=(-1,))
@@ -425,4 +426,4 @@ paddle.fluid.Scope.__init__ __init__(self: paddle.fluid.core.Scope) -> None
 paddle.fluid.Scope.drop_kids drop_kids(self: paddle.fluid.core.Scope) -> None
 paddle.fluid.Scope.find_var find_var(self: paddle.fluid.core.Scope, arg0: unicode) -> paddle.fluid.core.Variable
 paddle.fluid.Scope.new_scope new_scope(self: paddle.fluid.core.Scope) -> paddle.fluid.core.Scope
-paddle.fluid.Scope.var var(self: paddle.fluid.core.Scope, arg0: unicode) -> paddle.fluid.core.Variable
\ No newline at end of file
+paddle.fluid.Scope.var var(self: paddle.fluid.core.Scope, arg0: unicode) -> paddle.fluid.core.Variable
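[Review note] Besides stripping trailing whitespace from the `__init__` entries, the API.spec hunks above register the newly exposed Python layer `paddle.fluid.layers.sequence_concat` with signature ArgSpec(args=['input', 'name']). A minimal usage sketch, assuming the usual fluid calling conventions for LoD (level-of-detail) sequence inputs; the variable names here are illustrative, not from this patch:

    import paddle.fluid as fluid

    # two 1-level LoD sequences with the same feature size
    a = fluid.layers.data(name='a', shape=[16], dtype='float32', lod_level=1)
    b = fluid.layers.data(name='b', shape=[16], dtype='float32', lod_level=1)
    # concatenates corresponding sequences; the output batch dim is the sum
    out = fluid.layers.sequence_concat(input=[a, b])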
diff --git a/paddle/fluid/operators/sequence_concat_op.cc b/paddle/fluid/operators/sequence_concat_op.cc
index c989c1cb38..397a318295 100644
--- a/paddle/fluid/operators/sequence_concat_op.cc
+++ b/paddle/fluid/operators/sequence_concat_op.cc
@@ -37,42 +37,38 @@ class SeqConcatOpMaker : public framework::OpProtoAndCheckerMaker {
 class SeqConcatShapeInferer : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
-    try {
-      PADDLE_ENFORCE(context->HasInputs("X"),
-                     "Input(X) of Sequence Concat Op should not be null.");
-      PADDLE_ENFORCE(context->HasOutput("Out"),
-                     "Output(Out) of Sequence Concat Op should not be null.");
+    PADDLE_ENFORCE(context->HasInputs("X"),
+                   "Input(X) of Sequence Concat Op should not be null.");
+    PADDLE_ENFORCE(context->HasOutput("Out"),
+                   "Output(Out) of Sequence Concat Op should not be null.");
 
-      PADDLE_ENFORCE_GT(context->HasInputs("X"), 1,
-                        "The number of input sequences is at least two.");
-      auto x_dims = context->GetInputsDim("X");
-      int64_t batch_size = 0;
-      int64_t feature_size = 0;
-      std::vector<int64_t> out_dims;
-      for (auto &x_dim : x_dims) {
-        if (out_dims.empty()) {
-          out_dims = framework::vectorize(x_dim);
-        }
-        batch_size += x_dim[0];
-        if (feature_size == 0) {
-          feature_size = framework::product(x_dim) / x_dim[0];
-        } else {
-          PADDLE_ENFORCE_EQ(
-              feature_size, framework::product(x_dim) / x_dim[0],
-              "Inputs of sequence concat must have same feature size");
-        }
+    PADDLE_ENFORCE_GT(context->Inputs("X").size(), 1,
+                      "The number of input sequences is at least two.");
+    auto x_dims = context->GetInputsDim("X");
+    int64_t batch_size = 0;
+    int64_t feature_size = 0;
+    std::vector<int64_t> out_dims;
+    for (auto &x_dim : x_dims) {
+      if (out_dims.empty()) {
+        out_dims = framework::vectorize(x_dim);
       }
-      if (batch_size < 0) {
-        batch_size = -1;  // Normalize batch size for compile time.
+      batch_size += x_dim[0];
+      if (feature_size == 0) {
+        feature_size = framework::product(x_dim) / x_dim[0];
+      } else {
+        PADDLE_ENFORCE_EQ(
+            feature_size, framework::product(x_dim) / x_dim[0],
+            "Inputs of sequence concat must have same feature size");
       }
-      out_dims[0] = batch_size;
-      context->SetOutputDim("Out", framework::make_ddim(out_dims));
-      if (!context->IsRuntime()) {  // Runtime LoD infershape will be computed
-                                    // in Kernel.
-        context->ShareLoD("X", "Out");
-      }
-    } catch (...) {
-      PADDLE_THROW("Unknown error");
+    }
+    if (batch_size < 0) {
+      batch_size = -1;  // Normalize batch size for compile time.
+    }
+    out_dims[0] = batch_size;
+    context->SetOutputDim("Out", framework::make_ddim(out_dims));
+    if (!context->IsRuntime()) {  // Runtime LoD infershape will be computed
+                                  // in Kernel.
+      context->ShareLoD("X", "Out");
     }
   }
 };
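[Review note] Two substantive fixes ride along with the re-indentation in this hunk: (1) the blanket try/catch that collapsed every failure into PADDLE_THROW("Unknown error") is dropped, so the specific PADDLE_ENFORCE messages reach the user; (2) the input-count check now reads context->Inputs("X").size(). HasInputs("X") returns a bool, so the old condition HasInputs("X") > 1 could never hold. The remaining logic sums the first (batch) dimension and requires one common feature size across all inputs. A Python sketch of that shape rule (a hypothetical helper for illustration, not code from this patch):

    import numpy as np

    def infer_seq_concat_shape(x_dims):
        # output keeps the trailing dims of the inputs;
        # the first dim is the summed batch size
        batch_size = 0
        feature_size = 0
        out_dims = list(x_dims[0])
        for d in x_dims:
            batch_size += d[0]
            fs = int(np.prod(d)) // d[0]  # product of dims after the first
            if feature_size == 0:
                feature_size = fs
            else:
                assert fs == feature_size, "inputs must share one feature size"
        # a negative sum means the batch is unknown at compile time
        out_dims[0] = batch_size if batch_size >= 0 else -1
        return out_dims

    print(infer_seq_concat_shape([(4, 6, 3), (7, 6, 3)]))  # -> [11, 6, 3]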
diff --git a/python/paddle/fluid/tests/unittests/test_seq_concat_op.py b/python/paddle/fluid/tests/unittests/test_seq_concat_op.py
deleted file mode 100644
index 9d1d139721..0000000000
--- a/python/paddle/fluid/tests/unittests/test_seq_concat_op.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import print_function
-
-import unittest
-import numpy as np
-import sys
-from op_test import OpTest
-
-
-def to_abs_offset_lod(lod):
-    offset_lod = [[0] for i in lod]
-    for i, level in enumerate(lod):
-        for seq_len in level:
-            offset_lod[i].append(offset_lod[i][-1] + seq_len)
-
-    if len(offset_lod) == 0 or len(offset_lod) == 1:
-        return offset_lod
-    import copy
-    new_offset_lod = copy.deepcopy(offset_lod)
-    for idx, val in enumerate(offset_lod[0]):
-        new_offset_lod[0][idx] = offset_lod[1][val]
-    return new_offset_lod
-
-
-def seq_concat(inputs, level):
-    lod0 = inputs['X'][0][1][1]
-    lod1 = inputs['X'][1][1][1]
-    x0 = inputs['X'][0][1][0]
-    x1 = inputs['X'][1][1][0]
-    level_idx = len(lod0) - level - 1
-    outs = []
-    for i in range(len(lod0[level_idx])):
-        sub_x0 = x0[to_abs_offset_lod(lod0)[level_idx][i]:to_abs_offset_lod(
-            lod0)[level_idx][i + 1], :]
-        sub_x1 = x1[to_abs_offset_lod(lod1)[level_idx][i]:to_abs_offset_lod(
-            lod1)[level_idx][i + 1], :]
-        outs.append(np.concatenate((sub_x0, sub_x1), axis=0))
-    return np.concatenate(outs, axis=0)
-
-
-class TestSeqConcatOp(OpTest):
-    def set_data(self):
-        # two level, batch size is 3
-        x0 = np.random.random((4, 6, 3)).astype('float32')
-        lod0 = [[2, 2], [1, 1, 1, 1]]
-        x1 = np.random.random((4, 8, 3)).astype('float32')
-        lod1 = [[2, 2], [1, 1, 1, 1]]
-        axis = 1
-        level = 1
-        self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
-        self.attrs = {'axis': axis, 'level': level}
-        self.outputs = {'Out': (np.concatenate([x0, x1], axis=1), lod0)}
-
-    def setUp(self):
-        self.op_type = "sequence_concat"
-        self.set_data()
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['x0'], 'Out')
-
-
-class TestSeqConcatOpLevelZeroNestedSequence(TestSeqConcatOp):
-    def set_data(self):
-        # two level, batch size is 3
-        x0 = np.random.random((4, 6, 3)).astype('float32')
-        lod0 = [[2, 2], [1, 1, 1, 1]]
-        x1 = np.random.random((7, 6, 3)).astype('float32')
-        lod1 = [[2, 2], [1, 2, 2, 2]]
-        axis = 0
-        level = 0
-        self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
-        self.attrs = {'axis': axis, 'level': level}
-        out_lod = [[2, 2], [2, 3, 3, 3]]
-        self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)}
-
-
-class TestSeqConcatOplevelOneNestedSequence(TestSeqConcatOp):
-    def set_data(self):
-        # two level, batch size is 3
-        x0 = np.random.random((4, 6, 3)).astype('float32')
-        lod0 = [[2, 2], [1, 1, 1, 1]]
-        x1 = np.random.random((7, 6, 3)).astype('float32')
-        lod1 = [[3, 1], [1, 2, 2, 2]]
-        axis = 0
-        level = 1
-        self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
-        self.attrs = {'axis': axis, 'level': level}
-        out_lod = [[5, 3], [1, 1, 1, 2, 2, 1, 1, 2]]
-        self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)}
-
-
-class TestSeqConcatOpLevelZeroSequence(TestSeqConcatOp):
-    def set_data(self):
-        # two level, batch size is 3
-        x0 = np.random.random((4, 3, 4)).astype('float32')
-        lod0 = [[1, 1, 1, 1]]
-        x1 = np.random.random((7, 3, 4)).astype('float32')
-        lod1 = [[1, 2, 2, 2]]
-        axis = 0
-        level = 0
-        self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
-        self.attrs = {'axis': axis, 'level': level}
-        out_lod = [[2, 3, 3, 3]]
-        self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)}
-
-
-if __name__ == '__main__':
-    unittest.main()
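[Review note] The deleted test exercised the earlier sequence_concat interface, which took `axis` and `level` attributes; neither appears in the new Python signature registered in API.spec above, which is presumably why the test is removed here. Its `to_abs_offset_lod` helper converted a length-based LoD into absolute offsets before slicing sequences; the core of that conversion, restated for reference (a standalone sketch, not code from this patch):

    # one LoD level: sequence lengths -> absolute start/end offsets
    def lengths_to_offsets(level):
        offsets = [0]
        for seq_len in level:
            offsets.append(offsets[-1] + seq_len)
        return offsets

    print(lengths_to_offsets([1, 2, 2, 2]))  # -> [0, 1, 3, 5, 7]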
-- 
GitLab