PaddlePaddle / PaddleClas · Commit b0b9ca0d
Unverified commit b0b9ca0d, authored on Sep 07, 2020 by littletomatodonkey; committed by GitHub on Sep 07, 2020
Merge pull request #265 from littletomatodonkey/dyg/fix_pypath
remove python path config and support cpu train/val/infer
Parents: c7a8c89f, 670eaf31
Showing 9 changed files with 130 additions and 79 deletions
tools/download.py        +6   -2
tools/eval.py            +22  -14
tools/eval.sh            +3   -3
tools/infer/infer.py     +44  -20
tools/infer/predict.py   +8   -9
tools/infer/py_infer.py  +31  -17
tools/run.sh             +0   -2
tools/run_download.sh    +0   -2
tools/train.py           +16  -10
tools/download.py
...
@@ -12,9 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ppcls import model_zoo
import argparse
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '..')))
def parse_args():
...
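The same path bootstrap recurs in every entry script touched by this commit, replacing the PYTHONPATH export that the shell scripts used to perform. A minimal standalone sketch of the pattern (the '..' assumes the script sits one level below the repository root, as tools/ does here; the model_zoo import is the one download.py itself uses):

import os
import sys

# Resolve the directory containing this script and add it, plus the
# repository root one level up, to the import search path so that the
# ppcls package resolves without exporting PYTHONPATH beforehand.
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '..')))

from ppcls import model_zoo  # imported after the path setup, as in tools/download.py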
tools/eval.py
...
@@ -13,19 +13,22 @@
# limitations under the License.
from __future__ import absolute_import
import program
from ppcls.utils import logger
from ppcls.utils.save_load import init_model
from ppcls.utils.config import get_config
from ppcls.data import Reader
import paddle.fluid as fluid
import paddle
import argparse
from __future__ import division
from __future__ import print_function
import os
import argparse
from ppcls.data import Reader
from ppcls.utils.config import get_config
from ppcls.utils.save_load import init_model
from ppcls.utils import logger
from paddle.fluid.incubate.fleet.collective import fleet
from paddle.fluid.incubate.fleet.base import role_maker
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '..')))
def parse_args():
...
@@ -47,21 +50,26 @@ def parse_args():
def main(args):
    # assign the place
    gpu_id = fluid.dygraph.parallel.Env().dev_id
    place = fluid.CUDAPlace(gpu_id)
    config = get_config(args.config, overrides=args.override, show=True)
    # assign place
    use_gpu = config.get("use_gpu", True)
    if use_gpu:
        gpu_id = fluid.dygraph.ParallelEnv().dev_id
        place = fluid.CUDAPlace(gpu_id)
    else:
        place = fluid.CPUPlace()

    with fluid.dygraph.guard(place):
        pre_weights_dict = fluid.dygraph.load_dygraph(config.pretrained_model)[0]
        strategy = fluid.dygraph.parallel.prepare_context()
        net = program.create_model(config.ARCHITECTURE, config.classes_num)
        net = fluid.dygraph.parallel.DataParallel(net, strategy)
        net.set_dict(pre_weights_dict)
        init_model(config, net, optimizer=None)
        valid_dataloader = program.create_dataloader()
        valid_reader = Reader(config, 'valid')()
        valid_dataloader.set_sample_list_generator(valid_reader, place)
        net.eval()
        top1_acc = program.run(valid_dataloader, config, net, None, 0, 'valid')


if __name__ == '__main__':
    args = parse_args()
    main(args)
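The place-selection logic added above is what enables the CPU eval path. A hedged standalone sketch of the same pattern with the Paddle 1.x fluid API used throughout this commit (the config dict below is an illustrative stand-in for the object returned by get_config):

import paddle.fluid as fluid

config = {"use_gpu": False}  # illustrative stand-in for get_config(...)

use_gpu = config.get("use_gpu", True)
if use_gpu:
    # one device id per trainer process when launched via paddle.distributed.launch
    gpu_id = fluid.dygraph.ParallelEnv().dev_id
    place = fluid.CUDAPlace(gpu_id)
else:
    place = fluid.CPUPlace()

with fluid.dygraph.guard(place):
    pass  # build the model and run evaluation here, as eval.py does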
tools/eval.sh
export PYTHONPATH=$PWD:$PYTHONPATH
python -m paddle.distributed.launch \
    --selected_gpus="0" \
    tools/eval.py \
        -c ./configs/eval.yaml
        -c ./configs/eval.yaml \
        -o load_static_weights=True \
        -o use_gpu=False
tools/infer/infer.py
...
@@ -12,13 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import utils
import argparse
import numpy as np
import paddle.fluid as fluid
from ppcls.modeling import architectures
from ppcls.utils.save_load import load_dygraph_pretrain
from ppcls.modeling import architectures
import paddle.fluid as fluid
import numpy as np
import argparse
import utils
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '../..')))
def parse_args():
...
@@ -66,6 +70,23 @@ def postprocess(outputs, topk=5):
    return zip(index, prob[index])


def get_image_list(img_file):
    imgs_lists = []
    if img_file is None or not os.path.exists(img_file):
        raise Exception("not found any img file in {}".format(img_file))

    img_end = ['jpg', 'png', 'jpeg', 'JPEG', 'JPG', 'bmp']
    if os.path.isfile(img_file) and img_file.split('.')[-1] in img_end:
        imgs_lists.append(img_file)
    elif os.path.isdir(img_file):
        for single_file in os.listdir(img_file):
            if single_file.split('.')[-1] in img_end:
                imgs_lists.append(os.path.join(img_file, single_file))
    if len(imgs_lists) == 0:
        raise Exception("not found any img file in {}".format(img_file))
    return imgs_lists


def main():
    args = parse_args()
    operators = create_operators()
...
@@ -78,22 +99,25 @@ def main():
    with fluid.dygraph.guard(place):
        net = architectures.__dict__[args.model]()
        data = preprocess(args.image_file, operators)
        data = np.expand_dims(data, axis=0)
        data = fluid.dygraph.to_variable(data)
        load_dygraph_pretrain(net, args.pretrained_model, args.load_static_weights)
        net.eval()
        outputs = net(data)
        outputs = fluid.layers.softmax(outputs)
        outputs = outputs.numpy()
        probs = postprocess(outputs)
        rank = 1
        for idx, prob in probs:
            print("top{:d}, class id: {:d}, probability: {:.4f}".format(rank, idx, prob))
            rank += 1
        image_list = get_image_list(args.image_file)
        for idx, filename in enumerate(image_list):
            data = preprocess(filename, operators)
            data = np.expand_dims(data, axis=0)
            data = fluid.dygraph.to_variable(data)
            net.eval()
            outputs = net(data)
            outputs = fluid.layers.softmax(outputs)
            outputs = outputs.numpy()
            probs = postprocess(outputs)
            rank = 1
            print("Current image file: {}".format(filename))
            for idx, prob in probs:
                print("\ttop{:d}, class id: {:d}, probability: {:.4f}".format(rank, idx, prob))
                rank += 1
    return
...
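The new get_image_list helper is what turns infer.py from single-image into batch inference over a file or a directory. A hedged usage sketch (the directory path below is purely illustrative):

# get_image_list accepts either a single image path or a directory and
# returns a flat list of files whose extensions match img_end; it raises
# an Exception when nothing matching is found.
image_list = get_image_list("./demo_images")  # hypothetical directory
for filename in image_list:
    print(filename)  # each entry is then preprocessed and fed to the net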
tools/infer/predict.py
...
@@ -15,13 +15,10 @@
import argparse
import utils
import numpy as np
import logging
import time

from paddle.fluid.core import AnalysisConfig
from paddle.fluid.core import create_paddle_predictor

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def parse_args():
...
@@ -101,7 +98,6 @@ def main():
    else:
        assert args.use_gpu is True
        assert args.model_name is not None
        assert args.use_tensorrt is True
    # HALF precission predict only work when using tensorrt
    if args.use_fp16 is True:
        assert args.use_tensorrt is True
...
@@ -130,8 +126,9 @@ def main():
        output = output.flatten()
        cls = np.argmax(output)
        score = output[cls]
        logger.info("class: {0}".format(cls))
        logger.info("score: {0}".format(score))
        print("Current image file: {}".format(args.image_file))
        print("\ttop-1 class: {0}".format(cls))
        print("\ttop-1 score: {0}".format(score))
    else:
        for i in range(0, test_num + 10):
            inputs = np.random.rand(args.batch_size, 3, 224,
...
@@ -145,11 +142,13 @@ def main():
            output = output.flatten()
            if i >= 10:
                test_time += time.time() - start_time
            time.sleep(0.01)  # sleep for T4 GPU
        fp_message = "FP16" if args.use_fp16 else "FP32"
        logger.info("{0}\t{1}\tbatch size: {2}\ttime(ms): {3}".format(
            args.model_name, fp_message, args.batch_size, 1000 * test_time / test_num))
        trt_msg = "using tensorrt" if args.use_tensorrt else "not using tensorrt"
        print("{0}\t{1}\t{2}\tbatch size: {3}\ttime(ms): {4}".format(
            args.model_name, trt_msg, fp_message, args.batch_size, 1000 * test_time / test_num))


if __name__ == "__main__":
...
tools/infer/py_infer.py
...
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import utils
import argparse
import numpy as np
...
@@ -26,8 +27,6 @@ def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--image_file", type=str)
    parser.add_argument("-d", "--model_dir", type=str)
    parser.add_argument("-m", "--model_file", type=str)
    parser.add_argument("-p", "--params_file", type=str)
    parser.add_argument("--use_gpu", type=str2bool, default=True)
    return parser.parse_args()
...
@@ -41,10 +40,7 @@ def create_predictor(args):
    exe = fluid.Executor(place)
    [program, feed_names, fetch_lists] = fluid.io.load_inference_model(
        args.model_dir,
        exe,
        model_filename=args.model_file,
        params_filename=args.params_file)
        args.model_dir, exe, model_filename="model", params_filename="params")
    compiled_program = fluid.compiler.CompiledProgram(program)
    return exe, compiled_program, feed_names, fetch_lists
...
@@ -70,7 +66,6 @@ def preprocess(fname, ops):
    data = open(fname, 'rb').read()
    for op in ops:
        data = op(data)
    return data
...
@@ -81,21 +76,40 @@ def postprocess(outputs, topk=5):
    return zip(index, prob[index])


def get_image_list(img_file):
    imgs_lists = []
    if img_file is None or not os.path.exists(img_file):
        raise Exception("not found any img file in {}".format(img_file))

    img_end = ['jpg', 'png', 'jpeg', 'JPEG', 'JPG', 'bmp']
    if os.path.isfile(img_file) and img_file.split('.')[-1] in img_end:
        imgs_lists.append(img_file)
    elif os.path.isdir(img_file):
        for single_file in os.listdir(img_file):
            if single_file.split('.')[-1] in img_end:
                imgs_lists.append(os.path.join(img_file, single_file))
    if len(imgs_lists) == 0:
        raise Exception("not found any img file in {}".format(img_file))
    return imgs_lists


def main():
    args = parse_args()
    operators = create_operators()
    exe, program, feed_names, fetch_lists = create_predictor(args)
    data = preprocess(args.image_file, operators)
    data = np.expand_dims(data, axis=0)
    outputs = exe.run(program,
                      feed={feed_names[0]: data},
                      fetch_list=fetch_lists,
                      return_numpy=False)
    probs = postprocess(outputs)
    for idx, prob in probs:
        print("class id: {:d}, probability: {:.4f}".format(idx, prob))
    image_list = get_image_list(args.image_file)
    for idx, filename in enumerate(image_list):
        data = preprocess(filename, operators)
        data = np.expand_dims(data, axis=0)
        outputs = exe.run(program,
                          feed={feed_names[0]: data},
                          fetch_list=fetch_lists,
                          return_numpy=False)
        probs = postprocess(outputs)
        print("Current image file: {}".format(filename))
        for idx, prob in probs:
            print("\tclass id: {:d}, probability: {:.4f}".format(idx, prob))


if __name__ == "__main__":
...
tools/run.sh
#!/usr/bin/env bash
export PYTHONPATH=$PWD:$PYTHONPATH
python -m paddle.distributed.launch \
    --selected_gpus="0,1,2,3" \
    tools/train.py \
...
tools/run_download.sh
#!/usr/bin/env bash
export PYTHONPATH=$PWD:$PYTHONPATH
python tools/download.py -a ResNet34 -p ./pretrained/ -d 1
tools/train.py
...
@@ -13,19 +13,21 @@
# limitations under the License.
from __future__ import absolute_import
import program
from ppcls.utils import logger
from ppcls.utils.save_load import init_model, save_model
from ppcls.utils.config import get_config
from ppcls.data import Reader
import paddle.fluid as fluid
from __future__ import division
from __future__ import print_function
import argparse
import os
import paddle.fluid as fluid
from ppcls.data import Reader
from ppcls.utils.config import get_config
from ppcls.utils.save_load import init_model, save_model
from ppcls.utils import logger
import program
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '..')))
def parse_args():
...
@@ -49,8 +51,12 @@ def parse_args():
def main(args):
    config = get_config(args.config, overrides=args.override, show=True)
    # assign the place
    gpu_id = fluid.dygraph.parallel.Env().dev_id
    place = fluid.CUDAPlace(gpu_id)
    use_gpu = config.get("use_gpu", True)
    if use_gpu:
        gpu_id = fluid.dygraph.ParallelEnv().dev_id
        place = fluid.CUDAPlace(gpu_id)
    else:
        place = fluid.CPUPlace()

    use_data_parallel = int(os.getenv("PADDLE_TRAINERS_NUM", 1)) != 1
    config["use_data_parallel"] = use_data_parallel
...
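The last two lines decide whether train.py later wraps the model in DataParallel: the distributed launcher sets PADDLE_TRAINERS_NUM for each worker, so a plain single-process run (including the new CPU path) leaves it at the default and data parallelism stays off. A hedged illustration of that check in isolation:

import os

# When started via `python -m paddle.distributed.launch` with several workers,
# each process sees PADDLE_TRAINERS_NUM > 1; a direct `python tools/train.py`
# run does not, so use_data_parallel evaluates to False.
use_data_parallel = int(os.getenv("PADDLE_TRAINERS_NUM", 1)) != 1
print("use_data_parallel:", use_data_parallel)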