BaiXuePrincess / Paddle
Forked from PaddlePaddle / Paddle (in sync with the fork source project)
Commit 3ec6d60c
Authored Aug 08, 2018 by minqiyang

Fix write bytes in dataset download

Parent: e6ae1e4f
Showing 5 changed files with 24 additions and 12 deletions (+24 -12):

python/paddle/dataset/cifar.py (+5 -0)
python/paddle/dataset/common.py (+10 -4)
python/paddle/dataset/conll05.py (+7 -6)
python/paddle/fluid/executor.py (+1 -1)
python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py (+1 -1)
python/paddle/dataset/cifar.py

@@ -59,6 +59,11 @@ def reader_creator(filename, sub_name, cycle=False):
             while True:
                 for name in names:
+                    import sys
+                    print(name)
+                    sys.stdout.flush()
+                    print(f.extractfile(name))
+                    sys.stdout.flush()
                     batch = pickle.load(f.extractfile(name))
                     for item in read_batch(batch):
                         yield item
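The five added lines are print/flush debugging statements around the existing unpickling loop. For context, a minimal sketch of the reader pattern this hunk instruments, i.e. streaming pickled CIFAR batches out of a tar archive (iter_batches and its arguments are illustrative names, not Paddle's API):

```python
import pickle
import tarfile

def iter_batches(archive_path, sub_name):
    # Yield every pickled batch whose member name contains sub_name
    # (e.g. "data_batch" for the CIFAR-10 training files).
    with tarfile.open(archive_path, mode='r') as f:
        names = (m.name for m in f if sub_name in m.name)
        for name in names:
            # extractfile() returns a binary file-like object; pickle reads
            # directly from it.  On Python 3, batches pickled by Python 2
            # may need pickle.load(fp, encoding='latin1').
            yield pickle.load(f.extractfile(name))
```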
python/paddle/dataset/common.py

@@ -86,15 +86,21 @@ def download(url, module_name, md5sum, save_name=None):
         total_length = r.headers.get('content-length')

         if total_length is None:
-            with open(filename, 'w') as f:
-                shutil.copyfileobj(r.raw, f)
+            with open(filename, 'wb') as f:
+                import sys
+                print("write follow block")
+                sys.stdout.flush()
+                shutil.copyfileobj(cpt.to_bytes(r.raw), f)
         else:
-            with open(filename, 'w') as f:
+            with open(filename, 'wb') as f:
+                import sys
+                print("write follow length")
+                sys.stdout.flush()
                 dl = 0
                 total_length = int(total_length)
                 for data in r.iter_content(chunk_size=4096):
                     dl += len(data)
-                    f.write(cpt.to_literal_str(data))
+                    f.write(cpt.to_bytes(data))
                     done = int(50 * dl / total_length)
                     sys.stdout.write("\r[%s%s]" % ('=' * done,
                                                    ' ' * (50 - done)))
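The substantive fix here is opening the target file in binary mode ('wb') and writing the downloaded chunks as bytes instead of passing them through a to-text conversion, which corrupts binary archives on Python 3; the added print/flush lines look like temporary debug output, and cpt refers to Paddle's compat helpers. A minimal, self-contained sketch of the same streaming-download-with-progress-bar pattern, using only requests and the standard library (fetch is an illustrative name, not Paddle's download API):

```python
import shutil
import sys

import requests

def fetch(url, filename, chunk_size=4096):
    # Stream the response body and write raw bytes; binary mode avoids
    # any implicit text encoding, which is what this commit fixes.
    r = requests.get(url, stream=True)
    total_length = r.headers.get('content-length')
    if total_length is None:
        # Unknown size: copy the raw socket stream straight to disk.
        with open(filename, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        total_length = int(total_length)
        written = 0
        with open(filename, 'wb') as f:
            for data in r.iter_content(chunk_size=chunk_size):
                written += len(data)
                f.write(data)  # iter_content already yields bytes
                done = int(50 * written / total_length)
                sys.stdout.write("\r[%s%s]" % ('=' * done,
                                               ' ' * (50 - done)))
                sys.stdout.flush()
```

On Python 3, iter_content() yields bytes, so a binary-mode file handle can write them directly; decoding them to text first (the removed cpt.to_literal_str call) appears to be what the commit message refers to as the broken dataset download.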
python/paddle/dataset/conll05.py

@@ -24,19 +24,20 @@ import tarfile
 import gzip
 import itertools
 import paddle.dataset.common
+import paddle.fluid.compat as cpt
 from six.moves import zip, range

 __all__ = ['test, get_dict', 'get_embedding', 'convert']

 DATA_URL = 'http://www.cs.upc.edu/~srlconll/conll05st-tests.tar.gz'
 DATA_MD5 = '387719152ae52d60422c016e92a742fc'
-WORDDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/wordDict.txt'
+WORDDICT_URL = 'http://paddlemodels.bj.bcebos.com/conll05st/wordDict.txt'
 WORDDICT_MD5 = 'ea7fb7d4c75cc6254716f0177a506baa'
-VERBDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/verbDict.txt'
+VERBDICT_URL = 'http://paddlemodels.bj.bcebos.com/conll05st/verbDict.txt'
 VERBDICT_MD5 = '0d2977293bbb6cbefab5b0f97db1e77c'
-TRGDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/targetDict.txt'
+TRGDICT_URL = 'http://paddlemodels.bj.bcebos.com/conll05st/targetDict.txt'
 TRGDICT_MD5 = 'd8c7f03ceb5fc2e5a0fa7503a4353751'
-EMB_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/emb'
+EMB_URL = 'http://paddlemodels.bj.bcebos.com/conll05st/emb'
 EMB_MD5 = 'bf436eb0faa1f6f9103017f8be57cdb7'

 UNK_IDX = 0

@@ -89,8 +90,8 @@ def corpus_reader(data_path, words_name, props_name):
             labels = []
             one_seg = []
             for word, label in zip(words_file, props_file):
-                word = word.strip()
-                label = label.strip().split()
+                word = cpt.to_literal_str(word.strip())
+                label = cpt.to_literal_str(label.strip().split())

                 if len(label) == 0:  # end of sentence
                     for i in range(len(one_seg[0])):
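Besides pointing the dictionary and embedding constants at different URLs, this change decodes each line read from the dataset files into native text via Paddle's cpt.to_literal_str helper before further processing. A rough sketch of the same decode-on-read idea without the compat module (to_text is an illustrative stand-in; the real helper also appears to accept containers such as the split label list):

```python
def to_text(value, encoding='utf-8'):
    # Return native text for a value that may arrive as bytes, e.g. lines
    # read from gzip or tar members on Python 3.
    if isinstance(value, bytes):
        return value.decode(encoding)
    return value

# Mirroring the corpus_reader hunk above (word/label come from file lines):
word = to_text(b"the ").strip()
label = to_text(b"B-A0 *\n").strip().split()
print(word, label)  # -> the ['B-A0', '*']
```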
python/paddle/fluid/executor.py

@@ -320,7 +320,7 @@ class Executor(object):
         # append fetch_operators
         if not has_fetch_operators(global_block, fetch_list, fetch_var_name):
             for i, var in enumerate(fetch_list):
-                assert isinstance(var, Variable) or isinstance(var, str), (
+                assert isinstance(var, Variable) or isinstance(var, six.text_type), (
                     "Wrong type for fetch_list[%s]: %s" % (i, type(var)))
                 global_block.append_op(
                     type='fetch',
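six.text_type is unicode on Python 2 and str on Python 3, so the assertion accepts text fetch names consistently under both interpreters. A small illustration of the distinction (check_fetch_name is a hypothetical helper, not Paddle's API; it assumes six is installed):

```python
import six

def check_fetch_name(var):
    # Accept only text: unicode on Python 2, str on Python 3.
    assert isinstance(var, six.text_type), (
        "Wrong type for fetch_list entry: %s" % type(var))

check_fetch_name(u"mean_loss")   # text: passes on both Python 2 and 3
# check_fetch_name(b"mean_loss") # bytes: would fail the assertion
```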
python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py

@@ -55,7 +55,7 @@ def resnet_cifar10(input, depth=32):
         return tmp

     assert (depth - 2) % 6 == 0
-    n = (depth - 2) / 6
+    n = (depth - 2) // 6
     conv1 = conv_bn_layer(
         input=input, ch_out=16, filter_size=3, stride=1, padding=1)
     res1 = layer_warp(basicblock, conv1, 16, 16, n, 1)
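This is a Python 3 integer-division fix: / is true division and returns a float on Python 3, while // is floor division and keeps the result an integer, which is what a block-repetition count like n must be. For example:

```python
depth = 32
assert (depth - 2) % 6 == 0
print((depth - 2) / 6)   # 5.0 on Python 3 (true division -> float)
print((depth - 2) // 6)  # 5 on both Python 2 and 3 (floor division -> int)
```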