From 33fa2dfbdeb1f1a2f10b50960a914582bfcb9276 Mon Sep 17 00:00:00 2001
From: fengjiayi
Date: Mon, 27 Nov 2017 14:17:36 +0800
Subject: [PATCH] Complete max_sequence_len_op (#5913)

---
 paddle/operators/CMakeLists.txt               |  2 +
 paddle/operators/max_sequence_len_op.cc       | 66 +++++++++++++++++++
 python/paddle/v2/fluid/layers.py              | 14 ++++
 .../fluid/tests/test_lod_tensor_array_ops.py  | 47 ++++++++++---
 4 files changed, 121 insertions(+), 8 deletions(-)
 create mode 100644 paddle/operators/max_sequence_len_op.cc

diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt
index 05d4ea2606..a4c4374cf2 100644
--- a/paddle/operators/CMakeLists.txt
+++ b/paddle/operators/CMakeLists.txt
@@ -200,6 +200,7 @@ set(DEPS_OPS
     lod_rank_table_op
     lod_tensor_to_array_op
     array_to_lod_tensor_op
+    max_sequence_len_op
     lstm_op
     tensor_array_read_write_op
     gru_op
@@ -222,6 +223,7 @@ op_library(pool_with_index_op DEPS pooling)
 op_library(lod_rank_table_op SRCS lod_rank_table_op.cc DEPS lod_rank_table)
 op_library(lod_tensor_to_array_op SRCS lod_tensor_to_array_op.cc DEPS lod_rank_table_op)
 op_library(array_to_lod_tensor_op SRCS array_to_lod_tensor_op.cc DEPS lod_rank_table_op)
+op_library(max_sequence_len_op SRCS max_sequence_len_op.cc DEPS lod_rank_table)
 op_library(tensor_array_read_write_op SRCS tensor_array_read_write_op.cc)
 if(WITH_GPU)
   op_library(nccl_op DEPS nccl_common)
diff --git a/paddle/operators/max_sequence_len_op.cc b/paddle/operators/max_sequence_len_op.cc
new file mode 100644
index 0000000000..798022c9dd
--- /dev/null
+++ b/paddle/operators/max_sequence_len_op.cc
@@ -0,0 +1,66 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#include "paddle/framework/lod_rank_table.h"
+#include "paddle/framework/op_registry.h"
+#include "paddle/framework/operator.h"
+
+namespace paddle {
+namespace operators {
+
+class MaxSeqenceLenOp : public framework::OperatorBase {
+ public:
+  MaxSeqenceLenOp(const std::string &type,
+                  const framework::VariableNameMap &inputs,
+                  const framework::VariableNameMap &outputs,
+                  const framework::AttributeMap &attrs)
+      : OperatorBase(type, inputs, outputs, attrs) {}
+
+  void Run(const framework::Scope &scope,
+           const platform::DeviceContext &dev_ctx) const override {
+    auto &rank_table =
+        scope.FindVar(Input("RankTable"))->Get<framework::LoDRankTable>();
+    auto *out =
+        scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensor>();
+    int64_t *out_ptr = out->mutable_data<int64_t>({1}, platform::CPUPlace());
+    *out_ptr = rank_table.items()[0].length;
+  }
+};
+
+class MaxSeqenceLenOpProtoMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  MaxSeqenceLenOpProtoMaker(framework::OpProto *proto,
+                            framework::OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("RankTable", "The lod_rank_table.");
+    AddOutput("Out", "The max sequence length.");
+    AddComment(
+        R"DOC(Calculate the max sequence length through lod_rank_table.)DOC");
+  }
+};
+
+class MaxSeqenceLenInferShape : public framework::InferShapeBase {
+ public:
+  void operator()(framework::InferShapeContext *context) const override {
+    PADDLE_ENFORCE(context->HasInput("RankTable"));
+    context->SetOutputDim("Out", {1});
+  }
+};
+}  // namespace operators
+}  // namespace paddle
+
+REGISTER_OPERATOR(max_sequence_len, paddle::operators::MaxSeqenceLenOp,
+                  paddle::operators::MaxSeqenceLenOpProtoMaker,
+                  paddle::operators::MaxSeqenceLenInferShape,
+                  paddle::framework::EmptyGradOpMaker);
diff --git a/python/paddle/v2/fluid/layers.py b/python/paddle/v2/fluid/layers.py
index db388c142f..28bc3d214b 100644
--- a/python/paddle/v2/fluid/layers.py
+++ b/python/paddle/v2/fluid/layers.py
@@ -1354,6 +1354,20 @@ def lod_rank_table(x, level=0, main_program=None):
     return table
 
 
+def max_sequence_len(rank_table, main_program=None):
+    """
+    This function creates an operator to calculate the length of the
+    longest sequence in the given rank_table (a lod_rank_table).
+    """
+    helper = LayerHelper("max_seqence_len", **locals())
+    res = helper.create_tmp_variable(dtype="int64")
+    helper.append_op(
+        type="max_sequence_len",
+        inputs={"RankTable": rank_table},
+        outputs={"Out": res})
+    return res
+
+
 def topk(input, k, main_program=None, startup_program=None):
     helper = LayerHelper('topk', **locals())
     topk_out = helper.create_tmp_variable(dtype=input.data_type)
diff --git a/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py b/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py
index 032922a08a..0a916a55bc 100644
--- a/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py
+++ b/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py
@@ -18,7 +18,11 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
         tensor.set_lod([[0, 3, 9, 10]])
         expect = map(lambda x: numpy.array(x).astype('int32'),
                      [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]])
-        self.main(tensor=tensor, expect_array=expect, expect_lod=[] * 6)
+        self.main(
+            tensor=tensor,
+            expect_array=expect,
+            expect_lod=[] * 6,
+            expect_max_len=6)
 
     def test_lod_tensor_to_array_level_0_empty_seq(self):
         tensor = core.LoDTensor()
@@ -27,7 +31,11 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
         tensor.set_lod([[0, 3, 9, 9, 10]])
         expect = map(lambda x: numpy.array(x).astype('int32'),
                      [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]])
-        self.main(tensor=tensor, expect_array=expect, expect_lod=[] * 6)
+        self.main(
+            tensor=tensor,
+            expect_array=expect,
+            expect_lod=[] * 6,
+            expect_max_len=6)
 
     def test_lod_tensor_to_array_level_1(self):
         tensor = core.LoDTensor()
@@ -44,7 +52,11 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
         ]
         lod = [[[0, 2, 5]], [[0, 6, 12]], [[0, 3]]]
 
-        self.main(tensor=tensor, expect_array=expect, expect_lod=lod)
+        self.main(
+            tensor=tensor,
+            expect_array=expect,
+            expect_lod=lod,
+            expect_max_len=3)
 
     def test_lod_tensor_to_array_level_1_empty_seq(self):
         tensor = core.LoDTensor()
@@ -63,7 +75,11 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
         ]
         lod = [[[0, 5, 8, 8, 15]], [[0, 2, 6, 7, 8]], [[0, 2, 6]], [[0, 2]]]
 
-        self.main(tensor=tensor, expect_array=expect, expect_lod=lod)
+        self.main(
+            tensor=tensor,
+            expect_array=expect,
+            expect_lod=lod,
+            expect_max_len=4)
 
     def test_lod_tensor_to_array_level_2(self):
         tensor = core.LoDTensor()
@@ -80,7 +96,11 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
         ]
         lod = [[[0, 1, 3, 4], [0, 1, 4, 8, 12]],
                [[0, 4, 7], [0, 1, 5, 9, 17, 21, 27, 31]], [[0, 2], [0, 6, 7]]]
-        self.main(tensor=tensor, expect_array=expect, expect_lod=lod)
+        self.main(
+            tensor=tensor,
+            expect_array=expect,
+            expect_lod=lod,
+            expect_max_len=3)
 
     def test_lod_tensor_to_array_level_2_skip_level(self):
         tensor = core.LoDTensor()
@@ -88,14 +108,21 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
             numpy.arange(50).reshape(50, 1).astype('int32'), self.place())
         tensor.set_lod([[0, 2, 5, 6], [0, 2, 5, 6, 10, 12, 13],
                         [0, 3, 7, 11, 17, 21, 22, 23, 27, 31, 39, 45, 46, 50]])
-        self.main(tensor=tensor, expect_array=None, expect_lod=None, level=1)
-
-    def main(self, tensor, expect_array, expect_lod, level=0):
+        self.main(
+            tensor=tensor,
+            expect_array=None,
+            expect_lod=None,
+            expect_max_len=4,
+            level=1)
+
+    def main(self, tensor, expect_array, expect_lod, expect_max_len, level=0):
         place = self.place()
         program = Program()
         x = layers.data(name='x', shape=[10], main_program=program)
         x.persistable = True
         table = layers.lod_rank_table(x, level=level, main_program=program)
+        max_len = layers.max_sequence_len(table, main_program=program)
+        max_len.persistable = True
         array = layers.lod_tensor_to_array(x, table, main_program=program)
         array.persistable = True
 
@@ -110,6 +137,10 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
         self.check_array_same(array, expect_array, expect_lod)
         self.check_tensor_same(scope.find_var(result.name).get_tensor(), tensor)
 
+        self.assertEqual(
+            numpy.array(scope.find_var(max_len.name).get_tensor())[0],
+            expect_max_len)
+
     def check_array_same(self, array, expect_tensor, expect_lod):
         self.assertEqual(len(expect_tensor), len(array))
         for i, exp in enumerate(zip(expect_tensor, expect_lod)):
-- 
GitLab
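
For reference, below is a minimal sketch of how the new max_sequence_len layer can be driven end to end, following the pattern of main() in test_lod_tensor_array_ops.py above. The Executor/Scope plumbing mirrors the existing test body, which is not visible in these hunks, so treat that part as an assumption rather than something the patch defines; the LoD values are just an example.

# Sketch only: exercises max_sequence_len the same way the test's main() does.
# Assumes the paddle.v2.fluid API of this patch; the Executor/feed plumbing is
# copied from the surrounding test file and may differ in other versions.
import numpy
import paddle.v2.fluid.core as core
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.framework import Program

program = Program()
x = layers.data(name='x', shape=[10], main_program=program)
x.persistable = True
# Build a rank table over LoD level 0, then ask for the longest sequence.
table = layers.lod_rank_table(x, level=0, main_program=program)
max_len = layers.max_sequence_len(table, main_program=program)
max_len.persistable = True

# A LoDTensor holding sequences of lengths 3, 6 and 1 -> max length is 6.
place = core.CPUPlace()
tensor = core.LoDTensor()
tensor.set(numpy.arange(10).reshape(10, 1).astype('int32'), place)
tensor.set_lod([[0, 3, 9, 10]])

scope = core.Scope()
exe = Executor(place)
exe.run(program, feed={'x': tensor}, scope=scope)
print(numpy.array(scope.find_var(max_len.name).get_tensor())[0])  # expect 6

Because the operator simply reads rank_table.items()[0].length, the result is the length of the longest sequence at the chosen LoD level; the rank table is already sorted by descending length, which is why the first item suffices.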