Commit 836d6138 authored by Yu Yang

Update pre-commit-config

* Check all files with pre-commit hooks
Parent f2392ada
@@ -6,4 +6,19 @@
sha: v0.13.2
hooks:
- id: yapf
- repo: https://github.com/pre-commit/pre-commit-hooks
sha: 4ef03c4223ad322c7adaa6c6c0efb26b57df3b71
hooks:
- id: check-added-large-files
- id: check-merge-conflict
- id: check-symlinks
- id: detect-private-key
- id: end-of-file-fixer
# TODO(yuyang): trailing whitespace has some bugs on markdown
# files for now, please do not add it to the pre-commit hooks yet
# - id: trailing-whitespace
#
# TODO(yuyang): debug-statements does not fit Paddle, because
# not all of our Python code is runnable. Some of it is used for
# documentation
# - id: debug-statements
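For reference, a minimal way to exercise these hooks locally, assuming the standard pre-commit CLI (the commands below are not part of this diff):

```bash
pip install pre-commit       # install the hook runner
pre-commit install           # register it as a git pre-commit hook
pre-commit run --all-files   # check every file, as this commit intends
```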
This folder contains scripts used in the PaddlePaddle introduction demo.
- use `bash train.sh` to train a simple linear regression model
- use `python evaluate_model.py` to read the model parameters. You can see that `w` and `b` are very close to [2, 0.3]; a sketch of such a parameter reader follows below.
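A minimal sketch of what a reader like `evaluate_model.py` might do, assuming Paddle's dense parameter files consist of a 16-byte header followed by raw float32 values (the header layout and the `output/pass-00029/...` paths are assumptions, not taken from this diff):

```python
import numpy as np

def load_parameter(file_name):
    """Read a Paddle dense parameter file into a flat float32 array."""
    with open(file_name, 'rb') as f:
        f.read(16)  # assumed 16-byte header; the values follow as raw float32
        return np.fromfile(f, dtype=np.float32)

# Hypothetical paths for the trained introduction model.
print('w =', load_parameter('output/pass-00029/w'))
print('b =', load_parameter('output/pass-00029/b'))
```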
@@ -19,4 +19,3 @@ done
cd $DIR
rm -f *.list
python generate_list.py
@@ -14,4 +14,3 @@
"fields": ["id", "title", "genres"]
}
}
@@ -37,4 +37,3 @@ paddle train \
--use_gpu=false \
--config_args=is_test=1 \
2>&1 | tee 'test.log'
@@ -24,4 +24,3 @@ paddle train \
--show_parameter_stats_period=10 \
--test_all_data_in_one_period=1 \
2>&1 | tee 'train.log'
@@ -98,4 +98,3 @@ There, you have recovered the underlying pattern between `X` and `Y` only from observations
- <a href="../build/index.html"> Build and Installation </a>
- <a href="../demo/quick_start/index_en.html">Quick Start</a>
- <a href="../demo/index.html">Example and Demo</a>
@@ -17,5 +17,3 @@ endif()
if(WITH_SWIG_PY)
add_subdirectory(api)
endif()
@@ -65,4 +65,3 @@ struct ArgumentsPrivate {
return *(std::shared_ptr<T>*)(rawPtr);
}
};
@@ -69,8 +69,8 @@ class TestMatrix(unittest.TestCase):
def test_numpy(self):
numpy_mat = np.matrix([[1, 2], [3, 4], [5, 6]], dtype="float32")
m = swig_paddle.Matrix.createCpuDenseFromNumpy(numpy_mat)
self.assertEqual(
(int(m.getHeight()), int(m.getWidth())), numpy_mat.shape)
self.assertEqual((int(m.getHeight()), int(m.getWidth())),
numpy_mat.shape)
# the numpy matrix and the paddle matrix share the same memory.
numpy_mat[0, 1] = 342.23
......
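The shared-memory point in the test above hinges on `createCpuDenseFromNumpy` wrapping the numpy buffer rather than copying it. The same view-versus-copy distinction in plain numpy (illustrative only; no Paddle API is assumed):

```python
import numpy as np

a = np.matrix([[1, 2], [3, 4], [5, 6]], dtype="float32")
view = a.A         # .A returns an ndarray view over the same buffer
copy = a.A.copy()  # an independent buffer

a[0, 1] = 342.23
print(view[0, 1])  # 342.23 -- the view observes the write
print(copy[0, 1])  # 2.0    -- the copy does not
```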
@@ -254,4 +254,3 @@ extern __thread cudaStream_t default_stream;
#endif /* __NVCC__ */
#endif /* HL_BASE_H_ */
@@ -199,4 +199,3 @@ inline void hl_batch_norm_backward(hl_tensor_descriptor inputDesc,
real *savedInvVar) {}
#endif // HL_CUDA_CUDNN_STUB_H_
@@ -718,4 +718,3 @@ void sincos256_ps(v8sf x, v8sf *s, v8sf *c) {
*s = _mm256_xor_ps(xmm1, sign_bit_sin);
*c = _mm256_xor_ps(xmm2, sign_bit_cos);
}
@@ -48,4 +48,3 @@ public:
};
} // namespace paddle
@@ -80,4 +80,3 @@ void vTanh(const int n, const T* a, T* r);
} // namespace paddle
#endif // MATHFUNCTIONS_H_
This diff is collapsed.
@@ -33,5 +33,3 @@ cmake .. -DWITH_GPU=ON -DWITH_SWIG_PY=ON -DWITH_AVX=OFF -DCUDNN_ROOT=/usr/
make -j `nproc`
cpack -D CPACK_GENERATOR='DEB' ..
mv *.deb ~/dist/gpu-noavx
@@ -58,4 +58,3 @@ m4 -DPADDLE_WITH_GPU=ON -DPADDLE_IS_DEVEL=ON -DPADDLE_WITH_DEMO=ON \
-DPADDLE_BASE_IMAGE=nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 \
-DPADDLE_WITH_AVX=OFF \
Dockerfile.m4 > Dockerfile.gpu-noavx-demo
@@ -2,4 +2,3 @@
set -e
mkdir -p ../../../build
cd ../../../build
@@ -998,4 +998,3 @@ from IN B-PP
Friday NNP B-NP
's POS B-NP
Tokyo NNP I-NP
@@ -4998,4 +4998,3 @@ However RB B-ADVP
the DT B-NP
disclosure NN I-NP
of IN B-PP
@@ -109,4 +109,3 @@ int main(int argc, char** argv) {
}
#endif
@@ -410,8 +410,8 @@ def RecurrentLayerGroupEnd(name):
"RecurrentLayerGroup not begin")
for pair in g_current_submodel.memories: #check exist
layer = g_layer_map[pair.layer_name]
config_assert(layer is not None, "memory declare wrong name:%s" %
pair.layer_name)
config_assert(layer is not None,
"memory declare wrong name:%s" % pair.layer_name)
memory_link = g_layer_map[pair.link_name]
config_assert(layer.size == memory_link.size,
"memory declare wrong size:%d" % memory_link.size)
@@ -672,8 +672,8 @@ class ConvProjection(Projection):
parse_conv(conv_conf, input_layer_name, self.proj_conf.conv_conf,
num_filters)
# TODO: support rectangular input
self.proj_conf.output_size = (self.proj_conf.conv_conf.output_x**
2) * num_filters
self.proj_conf.output_size = (self.proj_conf.conv_conf.output_x
**2) * num_filters
def calc_output_size(self, input_layer_config):
return self.proj_conf.output_size
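The output-size formula above assumes a square convolution output (hence the TODO). A worked example with illustrative numbers:

```python
# Illustrative numbers only, following ConvProjection.output_size above.
output_x = 14          # side length of the (square) conv output
num_filters = 32
output_size = (output_x ** 2) * num_filters  # one value per filter per pixel
print(output_size)     # 6272
```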
@@ -2779,8 +2779,8 @@ class ConcatenateLayer2(LayerBase):
@config_layer('recurrent')
class RecurrentLayer(LayerBase):
def __init__(self, name, inputs, reversed=False, bias=True, **xargs):
super(RecurrentLayer, self).__init__(name, 'recurrent', 0, inputs, **
xargs)
super(RecurrentLayer, self).__init__(name, 'recurrent', 0, inputs,
**xargs)
config_assert(len(self.inputs) == 1, 'RecurrentLayer must have 1 input')
input_layer = self.get_input_layer(0)
size = input_layer.size
@@ -2862,22 +2862,22 @@ class MDLstmLayer(LayerBase):
active_state_type="sigmoid",
bias=True,
**xargs):
super(MDLstmLayer, self).__init__(name, 'mdlstmemory', 0, inputs, **
xargs)
super(MDLstmLayer, self).__init__(name, 'mdlstmemory', 0, inputs,
**xargs)
config_assert(len(self.inputs) == 1, 'MDLstmLayer must have 1 input')
input_layer = self.get_input_layer(0)
dim_num = len(directions)
#check input_layer.size is divisible by (3 + dim_num)
config_assert(input_layer.size %
(3 + dim_num) == 0, "size % (dim_num) should be 0!")
config_assert(input_layer.size % (3 + dim_num) == 0,
"size % (dim_num) should be 0!")
size = input_layer.size / (3 + dim_num)
self.set_layer_size(size)
self.config.active_gate_type = active_gate_type
self.config.active_state_type = active_state_type
for i in xrange(len(directions)):
self.config.directions.append(int(directions[i]))
self.create_input_parameter(0, size * size *
(3 + dim_num), [size, size, 3 + dim_num])
self.create_input_parameter(0, size * size * (3 + dim_num),
[size, size, 3 + dim_num])
#bias includes 3 kinds of peephole, 3+dim_num+2+dim_num
self.create_bias_parameter(bias, size * (5 + 2 * dim_num))
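The size bookkeeping in MDLstmLayer is easy to trace with concrete numbers. A worked example (all values illustrative, following the arithmetic above with dim_num = 2):

```python
# Illustrative numbers, mirroring the MDLstmLayer arithmetic above.
dim_num = 2                                   # len(directions)
input_size = 500
assert input_size % (3 + dim_num) == 0        # the config_assert above
size = input_size // (3 + dim_num)            # 100
weight_params = size * size * (3 + dim_num)   # 50000, shape [100, 100, 5]
bias_size = size * (5 + 2 * dim_num)          # 900: gates, state, 3 peepholes
print(size, weight_params, bias_size)
```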
@@ -2915,8 +2915,8 @@ class GruStepLayer(LayerBase):
active_gate_type="sigmoid",
bias=True,
**xargs):
super(GruStepLayer, self).__init__(name, 'gru_step', size, inputs, **
xargs)
super(GruStepLayer, self).__init__(name, 'gru_step', size, inputs,
**xargs)
config_assert(len(self.inputs) == 2, 'GruStepLayer must have 2 input')
input_layer0 = self.get_input_layer(0)
input_layer1 = self.get_input_layer(1)
......