BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit d50c71f3
Authored on Jul 19, 2017 by xzl

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into mobilenet_gpu

Parents: 66520af9, c1ebb314

Showing 7 changed files with 146 additions and 9 deletions (+146, -9)
paddle/gserver/layers/Layer.cpp                  +4   -5
paddle/scripts/travis/check_style.sh             +3   -2
python/paddle/trainer/config_parser.py           +9   -1
python/paddle/v2/dataset/__init__.py             +2   -1
python/paddle/v2/dataset/tests/voc2012_test.py   +42  -0
python/paddle/v2/dataset/voc2012.py              +85  -0
python/setup.py.in                               +1   -0
paddle/gserver/layers/Layer.cpp

@@ -359,12 +359,11 @@ void Layer::backwardActivation() {
   /* Do error clipping */
   if (config_.error_clipping_threshold() > 0.0f) {
     if (FLAGS_log_error_clipping) {
-      CpuVector outGradVec(0, nullptr);
-      outGradVec.subVecFrom(
-          output_.grad->getData(), 0, output_.grad->getElementCnt());
-      real maxAbsGrad = outGradVec.getAbsMax();
+      VectorPtr outGradVec = Vector::create(
+          output_.grad->getData(), output_.grad->getElementCnt(), useGpu_);
+      real maxAbsGrad = outGradVec->getAbsMax();
       if (maxAbsGrad > config_.error_clipping_threshold()) {
-        real avgAbsGrad = outGradVec.getAbsSum() / outGradVec.getSize();
+        real avgAbsGrad = outGradVec->getAbsSum() / outGradVec->getSize();
         LOG(INFO) << " layer=" << config_.name() << " need clipping,"
                   << " max error=" << maxAbsGrad << " avg error=" << avgAbsGrad;
       }
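The change replaces a CPU-only CpuVector view of the gradient with Vector::create(..., useGpu_), apparently so this logging branch also works when the output gradient buffer lives in GPU memory. The check itself only reports gradients that exceed the clipping threshold; a rough NumPy sketch of the same logic, with illustrative names (grad for the flattened output gradient, threshold for error_clipping_threshold):

    # Rough sketch of the logging check above; names are illustrative.
    import numpy as np

    def log_error_clipping(grad, threshold):
        max_abs = np.abs(grad).max()
        if max_abs > threshold:
            avg_abs = np.abs(grad).sum() / grad.size
            print("need clipping, max error=%f avg error=%f" % (max_abs, avg_abs))

    log_error_clipping(np.array([0.5, -3.0, 1.2]), threshold=1.0)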
paddle/scripts/travis/check_style.sh

 #!/bin/bash
 function abort(){
     echo "Your change doesn't follow PaddlePaddle's code style." 1>&2
-    echo "Please use pre-commit to reformat your code and git push again." 1>&2
+    echo "Please use pre-commit to check what is wrong." 1>&2
     exit 1
 }

@@ -19,7 +19,8 @@ ln -sf $TRAVIS_BUILD_DIR $GOPATH/src/github.com/PaddlePaddle/Paddle
 cd $GOPATH/src/github.com/PaddlePaddle/Paddle/go; glide install; cd -

 if ! pre-commit run -a ; then
-    git diff --exit-code
+    git diff
+    exit 1
 fi

 trap : 0
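The old `git diff --exit-code` only returned non-zero when pre-commit had actually modified files, so a pre-commit failure that changed nothing could still pass. The new form prints whatever pre-commit rewrote and then exits 1 unconditionally. A rough Python rendering of the new failure path, for illustration only (the CI runs the bash above; pre-commit and git are assumed to be on PATH):

    # Illustration of the updated failure path, not part of the CI itself.
    import subprocess
    import sys

    if subprocess.call(["pre-commit", "run", "-a"]) != 0:
        subprocess.call(["git", "diff"])  # show anything pre-commit rewrote
        sys.exit(1)                       # fail the job unconditionally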
python/paddle/trainer/config_parser.py

@@ -1575,7 +1575,13 @@ class MultiClassCrossEntropySelfNormCostLayer(LayerBase):
 @config_layer('fc')
 class FCLayer(LayerBase):
-    def __init__(self, name, size, inputs, bias=True, **xargs):
+    def __init__(self,
+                 name,
+                 size,
+                 inputs,
+                 bias=True,
+                 error_clipping_threshold=None,
+                 **xargs):
         super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs)
         for input_index in xrange(len(self.inputs)):
             input_layer = self.get_input_layer(input_index)

@@ -1592,6 +1598,8 @@ class FCLayer(LayerBase):
             self.create_input_parameter(input_index, psize, dims, sparse,
                                         format)
         self.create_bias_parameter(bias, self.config.size)
+        if error_clipping_threshold is not None:
+            self.config.error_clipping_threshold = error_clipping_threshold


 @config_layer('selective_fc')
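The new keyword follows a copy-if-set pattern: the threshold is written into the layer's config only when the caller passes a value, so existing configs keep the default (clipping disabled). A minimal, self-contained sketch of that pattern; the class and attribute names below are illustrative stand-ins, not the Paddle API:

    # Copy-if-set pattern added to FCLayer.__init__ (illustrative names).
    class LayerConfigSketch(object):
        def __init__(self):
            self.error_clipping_threshold = 0.0  # default: clipping disabled

    class FCLayerSketch(object):
        def __init__(self, name, size, error_clipping_threshold=None):
            self.config = LayerConfigSketch()
            self.name = name
            self.size = size
            if error_clipping_threshold is not None:
                self.config.error_clipping_threshold = error_clipping_threshold

    layer = FCLayerSketch('fc1', 128, error_clipping_threshold=10.0)
    assert layer.config.error_clipping_threshold == 10.0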
python/paddle/v2/dataset/__init__.py

@@ -26,8 +26,9 @@ import sentiment
 import wmt14
 import mq2007
 import flowers
+import voc2012

 __all__ = [
     'mnist', 'imikolov', 'imdb', 'cifar', 'movielens', 'conll05', 'sentiment'
-    'uci_housing', 'wmt14', 'mq2007', 'flowers'
+    'uci_housing', 'wmt14', 'mq2007', 'flowers', 'voc2012'
 ]
python/paddle/v2/dataset/tests/voc2012_test.py (new file, mode 100644)

# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2.dataset.voc2012
import unittest


class TestVOC(unittest.TestCase):
    def check_reader(self, reader):
        sum = 0
        label = 0
        for l in reader():
            self.assertEqual(l[0].size, 3 * l[1].size)
            sum += 1
        return sum

    def test_train(self):
        count = self.check_reader(paddle.v2.dataset.voc_seg.train())
        self.assertEqual(count, 2913)

    def test_test(self):
        count = self.check_reader(paddle.v2.dataset.voc_seg.test())
        self.assertEqual(count, 1464)

    def test_val(self):
        count = self.check_reader(paddle.v2.dataset.voc_seg.val())
        self.assertEqual(count, 1449)


if __name__ == '__main__':
    unittest.main()
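The check_reader helper relies on each sample pairing an RGB image of shape (H, W, 3) with a single-channel (H, W) segmentation map, so the image holds exactly three times as many elements as its label. A standalone NumPy illustration of that invariant:

    # Why l[0].size == 3 * l[1].size holds for each (image, label) sample.
    import numpy as np

    image = np.zeros((4, 5, 3), dtype=np.uint8)  # RGB image in HWC layout
    label = np.zeros((4, 5), dtype=np.uint8)     # per-pixel class indices
    assert image.size == 3 * label.size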
python/paddle/v2/dataset/voc2012.py (new file, mode 100644)

# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image dataset for segmentation.
The 2012 dataset contains images from 2008-2011 for which additional
segmentations have been prepared. As in previous years the assignment
to training/test sets has been maintained. The total number of images
with segmentation has been increased from 7,062 to 9,993.
"""

import tarfile
import io
import numpy as np
from paddle.v2.dataset.common import download
from paddle.v2.image import *
from PIL import Image

__all__ = ['train', 'test', 'val']

VOC_URL = 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/\
VOCtrainval_11-May-2012.tar'

VOC_MD5 = '6cd6e144f989b92b3379bac3b3de84fd'
SET_FILE = 'VOCdevkit/VOC2012/ImageSets/Segmentation/{}.txt'
DATA_FILE = 'VOCdevkit/VOC2012/JPEGImages/{}.jpg'
LABEL_FILE = 'VOCdevkit/VOC2012/SegmentationClass/{}.png'

CACHE_DIR = 'voc2012'


def reader_creator(filename, sub_name):
    tarobject = tarfile.open(filename)
    name2mem = {}
    for ele in tarobject.getmembers():
        name2mem[ele.name] = ele

    def reader():
        set_file = SET_FILE.format(sub_name)
        sets = tarobject.extractfile(name2mem[set_file])
        for line in sets:
            line = line.strip()
            data_file = DATA_FILE.format(line)
            label_file = LABEL_FILE.format(line)
            data = tarobject.extractfile(name2mem[data_file]).read()
            label = tarobject.extractfile(name2mem[label_file]).read()
            data = Image.open(io.BytesIO(data))
            label = Image.open(io.BytesIO(label))
            data = np.array(data)
            label = np.array(label)
            yield data, label

    return reader


def train():
    """
    Create a train dataset reader containing 2913 images in HWC order.
    """
    return reader_creator(download(VOC_URL, CACHE_DIR, VOC_MD5), 'trainval')


def test():
    """
    Create a test dataset reader containing 1464 images in HWC order.
    """
    return reader_creator(download(VOC_URL, CACHE_DIR, VOC_MD5), 'train')


def val():
    """
    Create a val dataset reader containing 1449 images in HWC order.
    """
    return reader_creator(download(VOC_URL, CACHE_DIR, VOC_MD5), 'val')
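A usage sketch for the new module: on first use, download() fetches the large VOCtrainval tarball into the 'voc2012' cache directory and verifies it against VOC_MD5; the paddle.v2 package from this tree is assumed to be importable.

    # Iterate a few samples from the train split (sketch, assumptions above).
    import paddle.v2.dataset.voc2012 as voc2012

    reader = voc2012.train()       # reader_creator(...) over the 'trainval' set
    for image, label in reader():  # image: HWC uint8 array; label: HW class map
        print(image.shape, label.shape)
        break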
python/setup.py.in

@@ -20,6 +20,7 @@ setup_requires=["requests",
                 "matplotlib",
                 "rarfile",
                 "scipy>=0.19.0",
+                "Pillow",
                 "nltk"]

 if '${CMAKE_SYSTEM_PROCESSOR}' not in ['arm', 'armv7-a', 'aarch64']:
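Pillow is added because the new voc2012 module imports it ("from PIL import Image" above). A quick post-install sanity check:

    # Verifies the Pillow dependency added above is importable and usable.
    from PIL import Image

    Image.new('RGB', (1, 1))  # succeeds if Pillow is installed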