Commit 90bd038d authored by dengkaipeng

fix format. test=develop

Parent f45aced5
@@ -86,7 +86,7 @@ paddle.fluid.layers.conv2d (ArgSpec(args=['input', 'num_filters', 'filter_size',
 paddle.fluid.layers.conv3d (ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None)), ('document', '37042620f9bd3a2da6e5d3138b2f724b'))
 paddle.fluid.layers.sequence_pool (ArgSpec(args=['input', 'pool_type', 'is_test'], varargs=None, keywords=None, defaults=(False,)), ('document', 'a194fb80614023f543df3949fbd0d0b8'))
 paddle.fluid.layers.sequence_softmax (ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None)), ('document', '19ef6f9cdd27feac8a1ae060f19c10b4'))
-paddle.fluid.layers.softmax (ArgSpec(args=['input', 'use_cudnn', 'name', 'axis'], varargs=None, keywords=None, defaults=(False, None, -1)), ('document', '85f9690b1b285def19077a41d9dba36c'))
+paddle.fluid.layers.softmax (ArgSpec(args=['input', 'use_cudnn', 'name', 'axis'], varargs=None, keywords=None, defaults=(False, None, -1)), ('document', '502bad9e8bc7ef24817d0d4b20f61df3'))
 paddle.fluid.layers.pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True)), ('document', 'bbd84e855e660cd1084bb71a2fd0cdaa'))
 paddle.fluid.layers.pool3d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True)), ('document', '043de7333b79ee0ac55053c14ed81625'))
 paddle.fluid.layers.adaptive_pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None)), ('document', '859b887174d06f361658f69cb7c06d95'))
......
@@ -54,8 +54,10 @@ void Softmax(const T* x, T* y, int n, int bs, int remain) {
   auto compute_hmax = KernelFuncs<HMaxTuple<T>, CPUPlace>::Cache().At(n);
   auto compute_hsum = KernelFuncs<HSumTuple<T>, CPUPlace>::Cache().At(n);
   auto compute_vscal = KernelFuncs<VScalTuple<T>, CPUPlace>::Cache().At(n);
-  auto compute_stridesum = KernelFuncs<StrideASumTuple<T>, CPUPlace>::Cache().At(n);
-  auto compute_stridescal = KernelFuncs<StrideScalTuple<T>, CPUPlace>::Cache().At(n);
+  auto compute_stridesum =
+      KernelFuncs<StrideASumTuple<T>, CPUPlace>::Cache().At(n);
+  auto compute_stridescal =
+      KernelFuncs<StrideScalTuple<T>, CPUPlace>::Cache().At(n);
   auto compute_vaddbias =
       KernelFuncs<VAddBiasTuple<T>, CPUPlace>::Cache().At(n);
   auto compute_vexp = KernelFuncs<VExpTuple<T>, CPUPlace>::Cache().At(n);
......
@@ -79,18 +79,20 @@ void VScal<double>(const double* a, const double* x, double* y, int n) {
 }
 template <>
-void StrideScal<float>(const float* a, const float* x, float* y, int n, int stride) {
+void StrideScal<float>(const float* a, const float* x, float* y, int n,
+                       int stride) {
   if (x == y) {
-    platform::dynload::cblas_sscal(n/stride, *a, y, stride);
+    platform::dynload::cblas_sscal(n / stride, *a, y, stride);
   } else {
     refer::StrideScal<float>(a, x, y, n, stride);
   }
 }
 template <>
-void StrideScal<double>(const double* a, const double* x, double* y, int n, int stride) {
+void StrideScal<double>(const double* a, const double* x, double* y, int n,
+                        int stride) {
   if (x == y) {
-    platform::dynload::cblas_dscal(n/stride, *a, y, stride);
+    platform::dynload::cblas_dscal(n / stride, *a, y, stride);
   } else {
     refer::StrideScal<double>(a, x, y, n, stride);
   }
@@ -148,12 +150,12 @@ void ASum<double>(const double* x, double* res, int n) {
 template <>
 void StrideASum<float>(const float* x, float* res, int n, int stride) {
-  res[0] = platform::dynload::cblas_sasum(n/stride, x, stride);
+  res[0] = platform::dynload::cblas_sasum(n / stride, x, stride);
 }
 template <>
 void StrideASum<double>(const double* x, double* res, int n, int stride) {
-  res[0] = platform::dynload::cblas_dasum(n/stride, x, stride);
+  res[0] = platform::dynload::cblas_dasum(n / stride, x, stride);
 }
 // TODO(TJ): tuning me carefully on AVX, AVX2 and AVX512
......
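As context for the MKL wrappers above, here is a minimal standalone sketch (plain C++ only; the function name and driver are hypothetical and not part of the Paddle sources) of what a strided BLAS scal call such as cblas_sscal(n / stride, *a, y, stride) does: it scales n / stride elements of y in place, stepping by `stride` between them.

```cpp
#include <cstdio>

// Loop equivalent of cblas_sscal(n / stride, *a, y, stride): scale
// y[0], y[stride], y[2 * stride], ... (n / stride elements in total).
// Assumes stride > 0.
void StrideScalLoop(const float* a, float* y, int n, int stride) {
  for (int k = 0; k < n / stride; ++k) {
    y[k * stride] *= *a;
  }
}

int main() {
  float a = 2.0f;
  float y[6] = {1, 1, 1, 1, 1, 1};
  StrideScalLoop(&a, y, 6, 3);              // touches y[0] and y[3] only
  for (float v : y) std::printf("%g ", v);  // prints: 2 1 1 2 1 1
  std::printf("\n");
  return 0;
}
```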
@@ -135,7 +135,7 @@ template <typename T>
 void StrideScal(const T* a, const T* x, T* y, int n, int stride);
 template <typename T>
-void Softmax(const T* x, T* y, int n, int bs, int remain=1) {
+void Softmax(const T* x, T* y, int n, int bs, int remain = 1) {
   std::vector<T> entities(bs);
   for (int i = 0; i < bs; ++i) {
     entities[i] = x[i * n];
......
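For readers unfamiliar with the kernel above: it implements a batched softmax, where `remain == 1` corresponds to taking softmax over the innermost axis. Below is an illustrative sketch of that base case (not the Paddle kernel; the naive function and driver are hypothetical), using the standard max-subtract, exponentiate, normalize recipe.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>

// Naive reference softmax over n elements per row, for bs rows
// (the remain == 1 case, i.e. softmax over the last axis).
template <typename T>
void NaiveSoftmax(const T* x, T* y, int n, int bs) {
  for (int b = 0; b < bs; ++b) {
    const T* row_x = x + b * n;
    T* row_y = y + b * n;
    // Subtract the row maximum first for numerical stability.
    T max_v = *std::max_element(row_x, row_x + n);
    T sum = 0;
    for (int i = 0; i < n; ++i) {
      row_y[i] = std::exp(row_x[i] - max_v);
      sum += row_y[i];
    }
    for (int i = 0; i < n; ++i) row_y[i] /= sum;
  }
}

int main() {
  float x[4] = {1.f, 2.f, 3.f, 4.f};
  float y[4];
  NaiveSoftmax(x, y, 4, 1);
  for (float v : y) std::printf("%.4f ", v);  // the row sums to 1
  std::printf("\n");
  return 0;
}
```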
@@ -414,13 +414,13 @@ void HSum(const T* x, T* res, int n) {
 template <typename T>
 void StrideASum(const T* x, T* res, int n, int stride) {
   res[0] = x[0];
-  for (int i = stride; i < n; i+=stride) {
+  for (int i = stride; i < n; i += stride) {
     res[0] += std::abs(x[i]);
   }
 }
 template <typename T>
-void StrideScal(const T* a, const T* x, T* y, int n , int stride) {
+void StrideScal(const T* a, const T* x, T* y, int n, int stride) {
   for (int i = 0; i < n; ++i) {
     if (i % stride == 0) {
       y[i] = x[i] * a[0];
......
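To make the reference semantics above concrete, here is a self-contained sketch of StrideASum as shown in the hunk, plus a hypothetical driver (not part of the Paddle sources): the result is seeded with x[0] and then accumulates |x[i]| for every `stride`-th element.

```cpp
#include <cmath>
#include <cstdio>

// Same body as the StrideASum reference in the diff above.
template <typename T>
void StrideASumSketch(const T* x, T* res, int n, int stride) {
  res[0] = x[0];
  for (int i = stride; i < n; i += stride) {
    res[0] += std::abs(x[i]);
  }
}

int main() {
  float x[6] = {1.f, -2.f, 3.f, -4.f, 5.f, -6.f};
  float res = 0.f;
  StrideASumSketch(x, &res, 6, 2);  // 1 + |3| + |5| = 9
  std::printf("%g\n", res);
  return 0;
}
```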
@@ -94,8 +94,9 @@ class SoftmaxFunctor<DeviceContext, float, true, enable_if_CPU<DeviceContext>> {
 template <typename DeviceContext, typename T>
 void SoftmaxGradFunctor<DeviceContext, T>::operator()(
-    const DeviceContext& context, const int axis_dim, const framework::Tensor* y,
-    const framework::Tensor* y_grad, framework::Tensor* x_grad) {
+    const DeviceContext& context, const int axis_dim,
+    const framework::Tensor* y, const framework::Tensor* y_grad,
+    framework::Tensor* x_grad) {
   auto softmax = EigenMatrix<T>::From(*y);
   auto softmax_grad = EigenMatrix<T>::From(*y_grad);
   auto logits_grad = EigenMatrix<T>::From(*x_grad);
......
@@ -49,10 +49,8 @@ class SoftmaxOp : public framework::OperatorWithKernel {
     auto use_cudnn = ctx->Attrs().Get<bool>("use_cudnn");
     auto use_mkldnn = ctx->Attrs().Get<bool>("use_mkldnn");
     if (axis != rank_x - 1 && axis != -1) {
-      PADDLE_ENFORCE(!use_cudnn,
-                     "CUDNN kernel only support axis as -1.");
-      PADDLE_ENFORCE(!use_mkldnn,
-                     "MKLDNN kernel only support axis as -1.");
+      PADDLE_ENFORCE(!use_cudnn, "CUDNN kernel only support axis as -1.");
+      PADDLE_ENFORCE(!use_mkldnn, "MKLDNN kernel only support axis as -1.");
     }
     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
......
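The condition `axis != rank_x - 1 && axis != -1` above is how the op detects that softmax is not taken over the last axis, which the cuDNN and MKL-DNN kernels do not support. A tiny illustrative sketch of that reasoning (plain C++, not Paddle code; the helper name is hypothetical): a negative axis counts from the end, so -1 and rank - 1 both name the innermost dimension.

```cpp
#include <cstdio>

// Canonicalize a possibly negative axis and test whether it is the last one.
bool IsLastAxis(int axis, int rank) {
  int canonical = axis < 0 ? axis + rank : axis;  // e.g. -1 -> rank - 1
  return canonical == rank - 1;
}

int main() {
  std::printf("%d %d %d\n", IsLastAxis(-1, 4), IsLastAxis(3, 4),
              IsLastAxis(1, 4));  // prints: 1 1 0
  return 0;
}
```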
@@ -66,10 +66,12 @@ class SoftmaxKernel : public framework::OpKernel<T> {
 #ifdef PADDLE_ON_INFERENCE
     math::SoftmaxFunctor<DeviceContext, T, true>()(
-        context.template device_context<DeviceContext>(), axis_dim, &X_2d, &Out_2d);
+        context.template device_context<DeviceContext>(), axis_dim, &X_2d,
+        &Out_2d);
 #else
     math::SoftmaxFunctor<DeviceContext, T, false>()(
-        context.template device_context<DeviceContext>(), axis_dim, &X_2d, &Out_2d);
+        context.template device_context<DeviceContext>(), axis_dim, &X_2d,
+        &Out_2d);
 #endif
   }
 };
@@ -96,8 +98,8 @@ class SoftmaxGradKernel : public framework::OpKernel<T> {
     dOut_2d.ShareDataWith(*dOut).Resize({n, d});
     math::SoftmaxGradFunctor<DeviceContext, T>()(
-        context.template device_context<DeviceContext>(), axis_dim, &Out_2d, &dOut_2d,
-        &dX_2d);
+        context.template device_context<DeviceContext>(), axis_dim, &Out_2d,
+        &dOut_2d, &dX_2d);
   }
 };
......
@@ -69,7 +69,8 @@ class CudnnCTCKernel : public framework::OpKernel<T> {
     int rank = logits->dims().size();
     Tensor in_2d = framework::ReshapeToMatrix(*logits, rank - 1);
     Tensor out_2d = framework::ReshapeToMatrix(softmax_logits, rank - 1);
-    math::SoftmaxFunctor<DeviceContext, T, false>()(dev_ctx, -1, &in_2d, &out_2d);
+    math::SoftmaxFunctor<DeviceContext, T, false>()(dev_ctx, -1, &in_2d,
+                                                    &out_2d);
     // ctc needs sequences data stored in transposed padding format
     // logits and grad using padding data of layout 'TNC'
......