From 1945b729b6f24ca66ddabb3b9b1ff720e08ef801 Mon Sep 17 00:00:00 2001
From: Abhinav Arora
Date: Wed, 2 May 2018 00:34:30 -0700
Subject: [PATCH] Fix CPPLint issues with math/sequence_padding (#10317)

* Fix cpplint issues in sequence_padding
* Fix typo in cu file
* Fix dependencies of sequence_padding
* Add include
---
 .../fluid/operators/math/sequence_padding.cc | 16 ++++++------
 .../fluid/operators/math/sequence_padding.cu | 25 ++++++++++---------
 .../fluid/operators/math/sequence_padding.h  |  5 ++--
 .../operators/math/sequence_padding_test.cc  |  4 +--
 paddle/fluid/operators/warpctc_op.h          |  4 +--
 5 files changed, 28 insertions(+), 26 deletions(-)

diff --git a/paddle/fluid/operators/math/sequence_padding.cc b/paddle/fluid/operators/math/sequence_padding.cc
index 38bd3b9975..d63c6c4ed5 100644
--- a/paddle/fluid/operators/math/sequence_padding.cc
+++ b/paddle/fluid/operators/math/sequence_padding.cc
@@ -22,7 +22,7 @@ template <typename T>
 class PaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
  public:
   void operator()(const platform::CPUDeviceContext& context,
-                  const framework::LoDTensor& seq, framework::Tensor& padding,
+                  const framework::LoDTensor& seq, framework::Tensor* padding,
                   bool norm_by_times) {
     auto lod = seq.lod();
     PADDLE_ENFORCE_GT(lod.size(), 0UL,
@@ -37,7 +37,7 @@ class PaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
                       "The first dimension of LoDTensor seq should be "
                       "equal to the sum of all sequences's length.");
 
-    auto padding_dims = padding.dims();
+    auto padding_dims = padding->dims();
     PADDLE_ENFORCE_EQ(padding_dims.size(), 3UL,
                       "The input padding should be a 3-D Tensor of shape "
                       "[max_sequence_length, num_sequences, sequence_width].");
@@ -58,7 +58,7 @@ class PaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
                       "width of sequence in LoDTensor seq.");
 
     const T* seq_data = seq.data<T>();
-    T* padding_data = padding.data<T>();
+    T* padding_data = padding->data<T>();
     for (int64_t i = 0; i < max_sequence_length; ++i) {
       for (int64_t j = 0; j < num_sequences; ++j) {
         int64_t start_pos = abs_offset_lod[level][j];
@@ -84,16 +84,16 @@ template <typename T>
 class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
  public:
   void operator()(const platform::CPUDeviceContext& context,
-                  framework::LoDTensor& seq, const framework::Tensor& padding,
+                  framework::LoDTensor* seq, const framework::Tensor& padding,
                   bool norm_by_times) {
-    auto lod = seq.lod();
+    auto lod = seq->lod();
     PADDLE_ENFORCE_GT(lod.size(), 0UL,
                       "The LoD of LoDTensor seq should not be null.");
     const size_t level = 0;
     framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);
 
-    auto seq_dims = seq.dims();
+    auto seq_dims = seq->dims();
     PADDLE_ENFORCE_EQ(seq_dims[0],
                       static_cast<int64_t>(abs_offset_lod[level].back()),
                       "The first dimension of LoDTensor seq should be "
                       "equal to the sum of all sequences's length.");
@@ -114,13 +114,13 @@ class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
                       "The second dimension of Tensor padding should be "
                       "the number of sequences in LoDTensor seq.");
 
-    const int64_t sequence_width = seq.numel() / seq_dims[0];
+    const int64_t sequence_width = seq->numel() / seq_dims[0];
     PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width,
                       "The third dimension of Tensor padding should be the "
                       "width of sequence in LoDTensor seq.");
 
     const T* padding_data = padding.data<T>();
-    T* seq_data = seq.data<T>();
+    T* seq_data = seq->data<T>();
     for (int64_t i = 0; i < num_sequences; ++i) {
       int64_t start_pos = abs_offset_lod[level][i];
       int64_t sequence_length = abs_offset_lod[level][i + 1] - start_pos;
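Note (not part of the patch): the lint warnings fixed above come from cpplint's
runtime/references check, which enforces the Google C++ Style Guide rule that
mutated parameters be passed by pointer rather than by non-const reference, so
the mutation is visible at the call site. A minimal standalone sketch of the
before/after shape, with hypothetical names:

    #include <vector>

    // Before (flagged by cpplint runtime/references): the caller writes
    // Fill(v) and nothing at the call site shows that `v` is modified.
    // void Fill(std::vector<int>& out);

    // After: a pointer parameter forces an explicit `&v` at the call site.
    void Fill(std::vector<int>* out) {
      out->assign(4, 0);  // mutate through the pointer, as the patch does
    }

    int main() {
      std::vector<int> v;
      Fill(&v);  // the explicit `&` documents the mutation to the reader
      return 0;
    }
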
diff --git a/paddle/fluid/operators/math/sequence_padding.cu b/paddle/fluid/operators/math/sequence_padding.cu
index c044e6fc32..0956a0c17d 100644
--- a/paddle/fluid/operators/math/sequence_padding.cu
+++ b/paddle/fluid/operators/math/sequence_padding.cu
@@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
+#include <algorithm>
 #include "paddle/fluid/operators/math/sequence_padding.h"
 
 namespace paddle {
@@ -61,7 +62,7 @@ template <typename T>
 class PaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
  public:
   void operator()(const platform::CUDADeviceContext& context,
-                  const framework::LoDTensor& seq, framework::Tensor& padding,
+                  const framework::LoDTensor& seq, framework::Tensor* padding,
                   bool norm_by_times) {
     auto lod = seq.lod();
     PADDLE_ENFORCE_GT(lod.size(), 0UL,
@@ -76,7 +77,7 @@ class PaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
                       "The first dimension of LoDTensor seq should be "
                       "equal to the sum of all sequences's length.");
 
-    auto padding_dims = padding.dims();
+    auto padding_dims = padding->dims();
     PADDLE_ENFORCE_EQ(padding_dims.size(), 3UL,
                       "The input padding should be a 3-D Tensor of shape "
                       "[max_sequence_length, num_sequences, sequence_width].");
@@ -97,8 +98,8 @@ class PaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
                       "width of sequence in LoDTensor seq.");
 
     if (!norm_by_times && num_sequences == 1UL) {
-      TensorCopy(seq, context.GetPlace(), context, &padding);
-      padding.Resize(padding_dims);
+      TensorCopy(seq, context.GetPlace(), context, padding);
+      padding->Resize(padding_dims);
       return;
     }
 
@@ -117,7 +118,7 @@ class PaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
     dim3 grid(grid_dim_x, grid_dim_y);
 
     const T* seq_data = seq.data<T>();
-    T* padding_data = padding.data<T>();
+    T* padding_data = padding->data<T>();
     if (norm_by_times) {
       SequencePaddingKernel<T, 1, 1><<<grid, threads, 0, context.stream()>>>(
           padding_data, const_cast<T*>(seq_data),
@@ -136,16 +137,16 @@ template <typename T>
 class UnpaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
  public:
   void operator()(const platform::CUDADeviceContext& context,
-                  framework::LoDTensor& seq, const framework::Tensor& padding,
+                  framework::LoDTensor* seq, const framework::Tensor& padding,
                   bool norm_by_times) {
-    auto lod = seq.lod();
+    auto lod = seq->lod();
     PADDLE_ENFORCE_GT(lod.size(), 0UL,
                       "The lod of LoDTensor seq should not be null.");
     const size_t level = 0;
     framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);
 
-    auto seq_dims = seq.dims();
+    auto seq_dims = seq->dims();
     PADDLE_ENFORCE_EQ(seq_dims[0],
                       static_cast<int64_t>(abs_offset_lod[level].back()),
                       "The first dimension of LoDTensor seq should be "
@@ -166,14 +167,14 @@ class UnpaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
                       "The second dimension of Tensor padding should be "
                       "the number of sequences in LoDTensor seq.");
 
-    const int64_t sequence_width = seq.numel() / seq_dims[0];
+    const int64_t sequence_width = seq->numel() / seq_dims[0];
     PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width,
                       "The third dimension of Tensor padding should be the "
                       "width of sequence in LoDTensor seq.");
 
     if (!norm_by_times && num_sequences == 1UL) {
-      TensorCopy(padding, context.GetPlace(), context, &seq);
-      seq.Resize(seq_dims);
+      TensorCopy(padding, context.GetPlace(), context, seq);
+      seq->Resize(seq_dims);
       return;
     }
 
@@ -192,7 +193,7 @@ class UnpaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
     dim3 grid(grid_dim_x, grid_dim_y);
 
     const T* padding_data = padding.data<T>();
-    T* seq_data = seq.data<T>();
+    T* seq_data = seq->data<T>();
     if (norm_by_times) {
       SequencePaddingKernel<T, 1, 0><<<grid, threads, 0, context.stream()>>>(
          const_cast<T*>(padding_data), seq_data,
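Note (not part of the patch): a mechanical side effect visible in the hunks
above is that once `padding`/`seq` arrive as pointers, member access switches
from `.` to `->`, and helpers such as TensorCopy that already take a `Tensor*`
destination receive the parameter directly instead of its address. A
simplified sketch with hypothetical stand-in types, not the real Paddle API:

    // Hypothetical stand-ins for framework::Tensor and TensorCopy, only to
    // show the mechanical rewrite.
    struct Tensor {
      int rows = 0;
      void Resize(int n) { rows = n; }
    };

    void TensorCopyLike(const Tensor& src, Tensor* dst) { dst->rows = src.rows; }

    // Before: void Pad(const Tensor& seq, Tensor& padding) {
    //           TensorCopyLike(seq, &padding);  // address taken explicitly
    //           padding.Resize(...);
    //         }
    void Pad(const Tensor& seq, Tensor* padding) {
      TensorCopyLike(seq, padding);  // already a pointer: pass it through
      padding->Resize(seq.rows);     // `.` becomes `->`
    }
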
diff --git a/paddle/fluid/operators/math/sequence_padding.h b/paddle/fluid/operators/math/sequence_padding.h
index 17f044b9d6..b56e6db1eb 100644
--- a/paddle/fluid/operators/math/sequence_padding.h
+++ b/paddle/fluid/operators/math/sequence_padding.h
@@ -14,6 +14,7 @@ limitations under the License. */
 
 #pragma once
 
+#include <algorithm>
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/platform/device_context.h"
 
@@ -64,13 +65,13 @@ template <typename DeviceContext, typename T>
 class PaddingLoDTensorFunctor {
  public:
   void operator()(const DeviceContext& context, const framework::LoDTensor& seq,
-                  framework::Tensor& padding, bool norm_by_times);
+                  framework::Tensor* padding, bool norm_by_times);
 };
 
 template <typename DeviceContext, typename T>
 class UnpaddingLoDTensorFunctor {
  public:
-  void operator()(const DeviceContext& context, framework::LoDTensor& seq,
+  void operator()(const DeviceContext& context, framework::LoDTensor* seq,
                   const framework::Tensor& padding, bool norm_by_times);
 };
 
diff --git a/paddle/fluid/operators/math/sequence_padding_test.cc b/paddle/fluid/operators/math/sequence_padding_test.cc
index e3d6214485..b9a1b9ae4d 100644
--- a/paddle/fluid/operators/math/sequence_padding_test.cc
+++ b/paddle/fluid/operators/math/sequence_padding_test.cc
@@ -54,12 +54,12 @@ void TestSequencePadding(const paddle::framework::LoD& lod,
                                static_cast<int64_t>(sequence_width)});
   padding.mutable_data<T>(padding_dims, *place);
   paddle::operators::math::PaddingLoDTensorFunctor<DeviceContext, T>()(
-      *context, seq, padding, false);
+      *context, seq, &padding, false);
 
   seq_back.set_lod(lod);
   seq_back.mutable_data<T>(seq_dims, *place);
   paddle::operators::math::UnpaddingLoDTensorFunctor<DeviceContext, T>()(
-      *context, seq_back, padding, false);
+      *context, &seq_back, padding, false);
 
   if (paddle::platform::is_cpu_place(*place)) {
     cpu_seq_back = seq_back;
diff --git a/paddle/fluid/operators/warpctc_op.h b/paddle/fluid/operators/warpctc_op.h
index 85131d0025..705cc894c0 100644
--- a/paddle/fluid/operators/warpctc_op.h
+++ b/paddle/fluid/operators/warpctc_op.h
@@ -162,7 +162,7 @@ class WarpCTCKernel : public framework::OpKernel<T> {
                                  static_cast<int64_t>(sequence_width)});
     warpctc_logits.mutable_data<T>(warpctc_logits_dims, ctx.GetPlace());
     math::PaddingLoDTensorFunctor<DeviceContext, T>()(
-        ctx.template device_context<DeviceContext>(), *logits, warpctc_logits,
+        ctx.template device_context<DeviceContext>(), *logits, &warpctc_logits,
         false);
 
     const T* warpctc_logits_data = warpctc_logits.data<T>();
@@ -217,7 +217,7 @@ class WarpCTCGradKernel : public framework::OpKernel<T> {
     logits_grad->mutable_data<T>(ctx.GetPlace());
     bool norm_by_times = ctx.Attr<bool>("norm_by_times");
     math::UnpaddingLoDTensorFunctor<DeviceContext, T>()(
-        ctx.template device_context<DeviceContext>(), *logits_grad,
+        ctx.template device_context<DeviceContext>(), logits_grad,
         *warpctc_grad, norm_by_times);
 
     const T* loss_grad_data = loss_grad->data<T>();
-- 
GitLab
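Note (not part of the patch): for reference, a compilable sketch of how the
updated functor interface is now invoked, mirroring the call sites in
sequence_padding_test.cc and warpctc_op.h. Types are reduced to empty shells,
so this is illustrative only, not the real API:

    struct CPUDeviceContext {};
    struct LoDTensor {};
    struct Tensor {};

    template <typename DeviceContext, typename T>
    struct PaddingLoDTensorFunctor {
      void operator()(const DeviceContext&, const LoDTensor& /*seq*/,
                      Tensor* /*padding*/, bool /*norm_by_times*/) {}
    };

    template <typename DeviceContext, typename T>
    struct UnpaddingLoDTensorFunctor {
      void operator()(const DeviceContext&, LoDTensor* /*seq*/,
                      const Tensor& /*padding*/, bool /*norm_by_times*/) {}
    };

    int main() {
      CPUDeviceContext context;
      LoDTensor seq, seq_back;
      Tensor padding;
      // Mutated arguments are now passed by address, as in the updated test:
      PaddingLoDTensorFunctor<CPUDeviceContext, float>()(context, seq,
                                                         &padding, false);
      UnpaddingLoDTensorFunctor<CPUDeviceContext, float>()(context, &seq_back,
                                                           padding, false);
      return 0;
    }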