diff --git a/paddle/operators/math/maxouting.cu b/paddle/operators/math/maxouting.cu
index 1a8fc465cc3ffef6ebf204384536290faa80a1de..336a1bd8b5b0c93bab2c7cbe0e50ac0aa6005b23 100644
--- a/paddle/operators/math/maxouting.cu
+++ b/paddle/operators/math/maxouting.cu
@@ -37,7 +37,7 @@ __global__ void KernelMaxOut(const int nthreads, const T* input_data,
         (batch_idx * size + channel_idx * feat_len) * groups + feat_idx;
     T ele = static_cast<T>(-FLT_MAX);
     for (int g = 0; g < groups; ++g) {
-      T x=input_data[data_idx + g * feat_len];
+      T x = input_data[data_idx + g * feat_len];
       ele = ele > x ? ele : x;
     }
     output_data[i] = ele;
diff --git a/python/paddle/v2/framework/tests/test_maxout_op.py b/python/paddle/v2/framework/tests/test_maxout_op.py
deleted file mode 100644
index a7c47108f114fd9dc45eeae64a0dd17d11ce2c0d..0000000000000000000000000000000000000000
--- a/python/paddle/v2/framework/tests/test_maxout_op.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import unittest
-import numpy as np
-from op_test import OpTest
-
-
-def maxout_forward_naive(input, groups,num_channels):
-    s0, s1, s2, s3 = input.shape
-    return np.ndarray([s0, s1 / groups, groups, s2, s3], \
-        buffer = input, dtype=input.dtype).max(axis=(2))
-
-
-class TestMaxOutOp(OpTest):
-    def setUp(self):
-        self.op_type = "maxout"
-        self.init_test_case()
-        input = np.random.random(self.shape).astype("float32")
-        output = self.MaxOut_forward_naive(input, self.groups,
-            self.num_channels).astype("float32")
-
-        self.inputs = {'X': input}
-        self.attrs = {'groups': self.groups, 'num_channels': self.num_channels}
-
-        self.outputs = {'Out': output.astype('float32')}
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
-
-    def init_test_case(self):
-        self.MaxOut_forward_naive = maxout_forward_naive
-        self.shape = [100, 6, 2, 2]
-        self.groups=2
-        self.num_channels=6
-
-
-
-
-if __name__ == '__main__':
-    unittest.main()
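
Note on the operation: the CUDA kernel touched above and the deleted `maxout_forward_naive` compute the same reduction. The C input channels are viewed as C / groups blocks of `groups` channels each, and every output channel is the elementwise max over its block. Below is a minimal NumPy sketch of that forward pass, not part of the patch: `maxout_forward` is a hypothetical helper name, and integer division `//` stands in for the deleted test's Python 2 `/`.

```python
import numpy as np

def maxout_forward(x, groups):
    # Hypothetical helper for illustration only: maxout over the channel
    # axis of an NCHW tensor, mirroring KernelMaxOut's loop over g and
    # the deleted test's reshape-and-max.
    n, c, h, w = x.shape
    assert c % groups == 0, "channels must be divisible by groups"
    # (N, C, H, W) -> (N, C // groups, groups, H, W), max over the group axis
    return x.reshape(n, c // groups, groups, h, w).max(axis=2)

# Shapes taken from the deleted test: 6 channels, 2 groups -> 3 output channels
x = np.random.random((100, 6, 2, 2)).astype("float32")
out = maxout_forward(x, groups=2)
assert out.shape == (100, 3, 2, 2)
```

The reshape matches the kernel's indexing: the `groups` members of each output channel sit at stride `feat_len` in the flat buffer, so the max over axis 2 of the reshaped view selects exactly the elements `input_data[data_idx + g * feat_len]` for g in [0, groups).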