From 90bd038d358ebcf30520da457d9672b0c4513b0e Mon Sep 17 00:00:00 2001
From: dengkaipeng
Date: Mon, 25 Mar 2019 19:58:18 +0800
Subject: [PATCH] fix format. test=develop

---
 paddle/fluid/API.spec                         |  2 +-
 paddle/fluid/operators/jit/more/mix/mix.cc    |  6 ++++--
 paddle/fluid/operators/jit/more/mkl/mkl.cc    | 14 ++++++++------
 paddle/fluid/operators/jit/more/mkl/mkl.h     |  2 +-
 paddle/fluid/operators/jit/refer/refer.h      |  4 ++--
 paddle/fluid/operators/jit/test.cc            |  8 ++++----
 paddle/fluid/operators/math/softmax.h         |  2 +-
 paddle/fluid/operators/math/softmax_impl.h    |  5 +++--
 paddle/fluid/operators/softmax_op.cc          |  6 ++----
 paddle/fluid/operators/softmax_op.h           | 10 ++++++----
 paddle/fluid/operators/warpctc_cudnn_op.cu.cc |  3 ++-
 11 files changed, 34 insertions(+), 28 deletions(-)

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 8849e31025..51c3c7bbf9 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -86,7 +86,7 @@ paddle.fluid.layers.conv2d (ArgSpec(args=['input', 'num_filters', 'filter_size',
 paddle.fluid.layers.conv3d (ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None)), ('document', '37042620f9bd3a2da6e5d3138b2f724b'))
 paddle.fluid.layers.sequence_pool (ArgSpec(args=['input', 'pool_type', 'is_test'], varargs=None, keywords=None, defaults=(False,)), ('document', 'a194fb80614023f543df3949fbd0d0b8'))
 paddle.fluid.layers.sequence_softmax (ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None)), ('document', '19ef6f9cdd27feac8a1ae060f19c10b4'))
-paddle.fluid.layers.softmax (ArgSpec(args=['input', 'use_cudnn', 'name', 'axis'], varargs=None, keywords=None, defaults=(False, None, -1)), ('document', '85f9690b1b285def19077a41d9dba36c'))
+paddle.fluid.layers.softmax (ArgSpec(args=['input', 'use_cudnn', 'name', 'axis'], varargs=None, keywords=None, defaults=(False, None, -1)), ('document', '502bad9e8bc7ef24817d0d4b20f61df3'))
 paddle.fluid.layers.pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True)), ('document', 'bbd84e855e660cd1084bb71a2fd0cdaa'))
 paddle.fluid.layers.pool3d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True)), ('document', '043de7333b79ee0ac55053c14ed81625'))
 paddle.fluid.layers.adaptive_pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None)), ('document', '859b887174d06f361658f69cb7c06d95'))
diff --git a/paddle/fluid/operators/jit/more/mix/mix.cc b/paddle/fluid/operators/jit/more/mix/mix.cc
index 4f309501b6..1a9fc9ed7b 100644
--- a/paddle/fluid/operators/jit/more/mix/mix.cc
+++ b/paddle/fluid/operators/jit/more/mix/mix.cc
@@ -54,8 +54,10 @@ void Softmax(const T* x, T* y, int n, int bs, int remain) {
   auto compute_hmax = KernelFuncs<HMaxTuple<T>, CPUPlace>::Cache().At(n);
   auto compute_hsum = KernelFuncs<HSumTuple<T>, CPUPlace>::Cache().At(n);
   auto compute_vscal = KernelFuncs<VScalTuple<T>, CPUPlace>::Cache().At(n);
-  auto compute_stridesum = KernelFuncs<StrideASumTuple<T>, CPUPlace>::Cache().At(n);
-  auto compute_stridescal = KernelFuncs<StrideScalTuple<T>, CPUPlace>::Cache().At(n);
+  auto compute_stridesum =
+      KernelFuncs<StrideASumTuple<T>, CPUPlace>::Cache().At(n);
+  auto compute_stridescal =
+      KernelFuncs<StrideScalTuple<T>, CPUPlace>::Cache().At(n);
   auto compute_vaddbias =
       KernelFuncs<VAddBiasTuple<T>, CPUPlace>::Cache().At(n);
   auto compute_vexp = KernelFuncs<VExpTuple<T>, CPUPlace>::Cache().At(n);
diff --git a/paddle/fluid/operators/jit/more/mkl/mkl.cc b/paddle/fluid/operators/jit/more/mkl/mkl.cc
index fc8800ec72..75ebddb125 100644
--- a/paddle/fluid/operators/jit/more/mkl/mkl.cc
+++ b/paddle/fluid/operators/jit/more/mkl/mkl.cc
@@ -79,18 +79,20 @@ void VScal<double>(const double* a, const double* x, double* y, int n) {
 }
 
 template <>
-void StrideScal<float>(const float* a, const float* x, float* y, int n, int stride) {
+void StrideScal<float>(const float* a, const float* x, float* y, int n,
+                       int stride) {
   if (x == y) {
-    platform::dynload::cblas_sscal(n/stride, *a, y, stride);
+    platform::dynload::cblas_sscal(n / stride, *a, y, stride);
   } else {
     refer::StrideScal<float>(a, x, y, n, stride);
   }
 }
 
 template <>
-void StrideScal<double>(const double* a, const double* x, double* y, int n, int stride) {
+void StrideScal<double>(const double* a, const double* x, double* y, int n,
+                        int stride) {
   if (x == y) {
-    platform::dynload::cblas_dscal(n/stride, *a, y, stride);
+    platform::dynload::cblas_dscal(n / stride, *a, y, stride);
   } else {
     refer::StrideScal<double>(a, x, y, n, stride);
   }
@@ -148,12 +150,12 @@ void ASum<double>(const double* x, double* res, int n) {
 
 template <>
 void StrideASum<float>(const float* x, float* res, int n, int stride) {
-  res[0] = platform::dynload::cblas_sasum(n/stride, x, stride);
+  res[0] = platform::dynload::cblas_sasum(n / stride, x, stride);
 }
 
 template <>
 void StrideASum<double>(const double* x, double* res, int n, int stride) {
-  res[0] = platform::dynload::cblas_dasum(n/stride, x, stride);
+  res[0] = platform::dynload::cblas_dasum(n / stride, x, stride);
 }
 
 // TODO(TJ): tuning me carefully on AVX, AVX2 and AVX512
diff --git a/paddle/fluid/operators/jit/more/mkl/mkl.h b/paddle/fluid/operators/jit/more/mkl/mkl.h
index 1fbb87b0cf..968895bb6f 100644
--- a/paddle/fluid/operators/jit/more/mkl/mkl.h
+++ b/paddle/fluid/operators/jit/more/mkl/mkl.h
@@ -135,7 +135,7 @@ template <typename T>
 void StrideScal(const T* a, const T* x, T* y, int n, int stride);
 
 template <typename T>
-void Softmax(const T* x, T* y, int n, int bs, int remain=1) {
+void Softmax(const T* x, T* y, int n, int bs, int remain = 1) {
   std::vector<T> entities(bs);
   for (int i = 0; i < bs; ++i) {
     entities[i] = x[i * n];
diff --git a/paddle/fluid/operators/jit/refer/refer.h b/paddle/fluid/operators/jit/refer/refer.h
index c62925232b..4aeb2fd628 100644
--- a/paddle/fluid/operators/jit/refer/refer.h
+++ b/paddle/fluid/operators/jit/refer/refer.h
@@ -414,13 +414,13 @@ void HSum(const T* x, T* res, int n) {
 template <typename T>
 void StrideASum(const T* x, T* res, int n, int stride) {
   res[0] = x[0];
-  for (int i = stride; i < n; i+=stride) {
+  for (int i = stride; i < n; i += stride) {
     res[0] += std::abs(x[i]);
   }
 }
 
 template <typename T>
-void StrideScal(const T* a, const T* x, T* y, int n , int stride) {
+void StrideScal(const T* a, const T* x, T* y, int n, int stride) {
   for (int i = 0; i < n; ++i) {
     if (i % stride == 0) {
       y[i] = x[i] * a[0];
diff --git a/paddle/fluid/operators/jit/test.cc b/paddle/fluid/operators/jit/test.cc
index 1397e5be18..d8a0b2cbf5 100644
--- a/paddle/fluid/operators/jit/test.cc
+++ b/paddle/fluid/operators/jit/test.cc
@@ -723,7 +723,7 @@ void TestKernelSoftmax() {
   VLOG(10) << "Test JITKernel: " << jit::to_string(KernelTuple::kernel_type);
   for (int bs : {1, 2, 10}) {
     for (int n : TestSizes()) {
-      for (int m : {1, 2, 3}) { // remain
+      for (int m : {1, 2, 3}) {  // remain
         if (m > n || n % m != 0) {
           continue;
         }
@@ -770,7 +770,7 @@ void TestKernelStrideASum() {
   using T = typename KernelTuple::data_type;
   VLOG(10) << "Test JITKernel: " << jit::to_string(KernelTuple::kernel_type);
   for (int d : TestSizes()) {
-    for (int m : {1, 2, 3}) { // stride
+    for (int m : {1, 2, 3}) {  // stride
       if (m > d || d % m != 0) {
         continue;
       }
@@ -782,7 +782,7 @@ void TestKernelStrideASum() {
      ref(x.data(), &ref_res, d, m);
 
      auto verifier = [](const typename KernelTuple::func_type tgt,
-                       const std::vector<T>& x, const T ref_res,
+                        const std::vector<T>& x, const T ref_res,
                         const int m) {
       EXPECT_TRUE(tgt != nullptr);
       T tgt_res;
@@ -801,7 +801,7 @@ void TestKernelStrideScal() {
   // for (int d : TestSizes()) {
   //   for (int m : {1, 2, 3}) {  // stride
   for (int d : {4}) {
-    for (int m : {2}) { // stride
+    for (int m : {2}) {  // stride
       if (m > d || d % m != 0) {
         continue;
       }
diff --git a/paddle/fluid/operators/math/softmax.h b/paddle/fluid/operators/math/softmax.h
index f8e250fa2e..a7a30a71e4 100644
--- a/paddle/fluid/operators/math/softmax.h
+++ b/paddle/fluid/operators/math/softmax.h
@@ -31,7 +31,7 @@ template <typename DeviceContext, typename T>
 class SoftmaxGradFunctor {
  public:
   void operator()(const DeviceContext& context, const int axis_dim,
-                 const framework::Tensor* y, const framework::Tensor* y_grad,
+                  const framework::Tensor* y, const framework::Tensor* y_grad,
                   framework::Tensor* x_grad);
 };
 
diff --git a/paddle/fluid/operators/math/softmax_impl.h b/paddle/fluid/operators/math/softmax_impl.h
index dea8142cc8..6f6f33345f 100644
--- a/paddle/fluid/operators/math/softmax_impl.h
+++ b/paddle/fluid/operators/math/softmax_impl.h
@@ -94,8 +94,9 @@ class SoftmaxFunctor> {
 
 template <typename DeviceContext, typename T>
 void SoftmaxGradFunctor<DeviceContext, T>::operator()(
-    const DeviceContext& context, const int axis_dim, const framework::Tensor* y,
-    const framework::Tensor* y_grad, framework::Tensor* x_grad) {
+    const DeviceContext& context, const int axis_dim,
+    const framework::Tensor* y, const framework::Tensor* y_grad,
+    framework::Tensor* x_grad) {
   auto softmax = EigenMatrix<T>::From(*y);
   auto softmax_grad = EigenMatrix<T>::From(*y_grad);
   auto logits_grad = EigenMatrix<T>::From(*x_grad);
diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc
index 9cbb6691f4..b812d2cdeb 100644
--- a/paddle/fluid/operators/softmax_op.cc
+++ b/paddle/fluid/operators/softmax_op.cc
@@ -49,10 +49,8 @@ class SoftmaxOp : public framework::OperatorWithKernel {
     auto use_cudnn = ctx->Attrs().Get<bool>("use_cudnn");
     auto use_mkldnn = ctx->Attrs().Get<bool>("use_mkldnn");
     if (axis != rank_x - 1 && axis != -1) {
-      PADDLE_ENFORCE(!use_cudnn,
-                     "CUDNN kernel only support axis as -1.");
-      PADDLE_ENFORCE(!use_mkldnn,
-                     "MKLDNN kernel only support axis as -1.");
+      PADDLE_ENFORCE(!use_cudnn, "CUDNN kernel only support axis as -1.");
+      PADDLE_ENFORCE(!use_mkldnn, "MKLDNN kernel only support axis as -1.");
     }
 
     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
diff --git a/paddle/fluid/operators/softmax_op.h b/paddle/fluid/operators/softmax_op.h
index bbea935101..a964c3b57a 100644
--- a/paddle/fluid/operators/softmax_op.h
+++ b/paddle/fluid/operators/softmax_op.h
@@ -66,10 +66,12 @@ class SoftmaxKernel : public framework::OpKernel<T> {
 
 #ifdef PADDLE_ON_INFERENCE
     math::SoftmaxFunctor<DeviceContext, T, true>()(
-        context.template device_context<DeviceContext>(), axis_dim, &X_2d, &Out_2d);
+        context.template device_context<DeviceContext>(), axis_dim, &X_2d,
+        &Out_2d);
 #else
     math::SoftmaxFunctor<DeviceContext, T, false>()(
-        context.template device_context<DeviceContext>(), axis_dim, &X_2d, &Out_2d);
+        context.template device_context<DeviceContext>(), axis_dim, &X_2d,
+        &Out_2d);
 #endif
   }
 };
@@ -96,8 +98,8 @@ class SoftmaxGradKernel : public framework::OpKernel<T> {
     dOut_2d.ShareDataWith(*dOut).Resize({n, d});
 
     math::SoftmaxGradFunctor<DeviceContext, T>()(
-        context.template device_context<DeviceContext>(), axis_dim, &Out_2d, &dOut_2d,
-        &dX_2d);
+        context.template device_context<DeviceContext>(), axis_dim, &Out_2d,
+        &dOut_2d, &dX_2d);
   }
 };
 
diff --git a/paddle/fluid/operators/warpctc_cudnn_op.cu.cc b/paddle/fluid/operators/warpctc_cudnn_op.cu.cc
index 716faf2995..8d97396fda 100644
--- a/paddle/fluid/operators/warpctc_cudnn_op.cu.cc
+++ b/paddle/fluid/operators/warpctc_cudnn_op.cu.cc
@@ -69,7 +69,8 @@ class CudnnCTCKernel : public framework::OpKernel<T> {
     int rank = logits->dims().size();
     Tensor in_2d = framework::ReshapeToMatrix(*logits, rank - 1);
    Tensor out_2d = framework::ReshapeToMatrix(softmax_logits, rank - 1);
-    math::SoftmaxFunctor<DeviceContext, T, false>()(dev_ctx, -1, &in_2d, &out_2d);
+    math::SoftmaxFunctor<DeviceContext, T, false>()(dev_ctx, -1, &in_2d,
+                                                    &out_2d);
 
     // ctc needs sequences data stored in transposed padding format
     // logits and grad using padding data of layout 'TNC'
--
GitLab