PaddlePaddle / PaddleHub
Commit fb495c0b
Authored Apr 18, 2019 by wuzewu

    update finetune api and demo

Parent: 1023c320

Showing 30 changed files, with 440 additions and 1941 deletions (+440, -1941).
Changed files:

demo/image-classification/create_module.py (+0, -80)
demo/image-classification/create_module.sh (+0, -38)
demo/image-classification/finetune.sh (+0, -9)
demo/image-classification/img_classifier.py (+122, -0)
demo/image-classification/infer.sh (+0, -1)
demo/image-classification/nets/__init__.py (+0, -4)
demo/image-classification/nets/mobilenet_v2.py (+0, -170)
demo/image-classification/nets/resnet.py (+0, -162)
demo/image-classification/processor.py (+0, -128)
demo/image-classification/resources/download.sh (+0, -35)
demo/image-classification/resources/label_list.txt (+0, -1000)
demo/image-classification/resources/module_info.yml (+0, -6)
demo/image-classification/retrain.py (+0, -31)
demo/image-classification/run_classifier.sh (+31, -0)
demo/image-classification/run_predict.sh (+25, -0)
demo/image-classification/test/test.txt (+0, -0)
demo/image-classification/test/test.yml (+0, -0)
demo/image-classification/test/test_img_bird.jpg (+0, -0)
demo/image-classification/test/test_img_cat.jpg (+0, -0)
demo/image-classification/test/test_img_daisy.jpg (+0, -0)
demo/image-classification/test/test_img_roses.jpg (+0, -0)
demo/image-classification/test/test_img_sheep.jpg (+0, -0)
demo/image-classification/utility.py (+0, -63)
demo/sequence-labeling/sequence_label.py (+36, -40)
demo/text-classification/predict.py (+5, -7)
demo/text-classification/text_classifier.py (+30, -34)
paddlehub/finetune/evaluate.py (+4, -4)
paddlehub/finetune/task.py (+147, -104)
paddlehub/reader/cv_reader.py (+39, -24)
paddlehub/version.py (+1, -1)
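Taken together, these changes implement one idea: a finetune task now builds its own label variable (and, for sequence labeling, a seq_len variable) and keeps a separate forward-only inference program, so callers fetch those tensors back from the task instead of constructing them. Below is a minimal sketch of the resulting flow, assembled from img_classifier.py in this commit; the module, dataset, and reader names are the ones the demo uses, and the config values are illustrative.

```python
import paddle.fluid as fluid
import paddlehub as hub

# Sketch of the post-commit Finetune API (see img_classifier.py below).
module = hub.Module(name="resnet_v2_50_imagenet")
input_dict, output_dict, program = module.context(trainable=True)
dataset = hub.dataset.Flowers()

with fluid.program_guard(program):
    # The task now builds the label layer itself; callers no longer pass one in.
    task = hub.create_img_cls_task(
        feature=output_dict["feature_map"], num_classes=dataset.num_labels)

# The label tensor is fetched back from the task for the feed list.
feed_list = [input_dict["image"].name, task.variable("label").name]

reader = hub.reader.ImageClassificationReader(
    image_width=module.get_expected_image_width(),
    image_height=module.get_expected_image_height(),
    images_mean=module.get_pretrained_images_mean(),
    images_std=module.get_pretrained_images_std(),
    dataset=dataset)

config = hub.RunConfig(
    use_cuda=False,
    num_epoch=1,
    batch_size=16,
    strategy=hub.finetune.strategy.DefaultFinetuneStrategy())
hub.finetune_and_eval(task, feed_list=feed_list, data_reader=reader, config=config)
```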
demo/image-classification/create_module.py (deleted, 100644 → 0)
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import functools
import argparse

import paddle
import paddle.fluid as fluid
import nets
import paddlehub as hub
import processor
from utility import add_arguments, print_arguments

parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('model',            str, "ResNet50", "Set the network to use.")
add_arg('pretrained_model', str, None,       "Whether to use pretrained model.")
# yapf: enable


def build_program(args):
    image_shape = [3, 224, 224]
    model_name = args.model
    model = nets.__dict__[model_name]()
    image = fluid.layers.data(name="image", shape=image_shape, dtype="float32")
    predition, feature_map = model.net(input=image, class_dim=1000)

    return image, predition, feature_map


def create_module(args):
    # parameters from arguments
    model_name = args.model
    pretrained_model = args.pretrained_model

    image, predition, feature_map = build_program(args)

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # load pretrained model param
    def if_exist(var):
        return os.path.exists(os.path.join(pretrained_model, var.name))

    fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)

    # create paddle hub module
    assets = ["resources/label_list.txt"]
    sign1 = hub.create_signature(
        "classification", inputs=[image], outputs=[predition], for_predict=True)
    sign2 = hub.create_signature(
        "feature_map", inputs=[image], outputs=[feature_map])
    hub.create_module(
        sign_arr=[sign1, sign2],
        module_dir=args.model + ".hub_module",
        module_info="resources/module_info.yml",
        processor=processor.Processor,
        assets=assets,
        extra_info={
            'excepted_image_width': 224,
            'excepted_image_height': 224,
            'pretrained_images_mean': [0.485, 0.456, 0.406],
            'pretrained_images_std': [0.229, 0.224, 0.225],
            'image_channel_order': 'RGB'
        })


def main():
    args = parser.parse_args()
    assert args.model in nets.__all__, "model is not in list %s" % nets.__all__
    print_arguments(args)
    create_module(args)


if __name__ == '__main__':
    main()
```
demo/image-classification/create_module.sh (deleted, 100644 → 0)
```bash
#!/bin/bash
set -o nounset
set -o errexit

model_name="ResNet50"

while getopts "m:" options
do
    case "$options" in
        m)
            model_name=$OPTARG;;
        ?)
            echo "unknown options"
            exit 1;;
    esac
done

script_path=$(cd `dirname $0`; pwd)
module_path=${model_name}.hub_module

if [ -d $script_path/$module_path ]
then
    echo "$module_path already existed!"
    exit 0
fi

cd $script_path/resources/

if [ ! -d ${model_name}_pretrained ]
then
    sh download.sh $model_name
fi

cd $script_path/

python create_module.py --pretrained_model=resources/${model_name}_pretrained --model ${model_name}

echo "Successfully create $module_path"
```
demo/image-classification/finetune.sh (deleted, 100644 → 0)
```bash
#!/bin/bash
set -o nounset
set -o errexit

script_path=$(cd `dirname $0`; pwd)
cd $script_path

sh create_module.sh
python retrain.py
```
demo/image-classification/img_classifier.py (new file, 0 → 100644)
```python
import argparse
import os

import paddle.fluid as fluid
import paddlehub as hub
import numpy as np

# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--target",         type=str,  default="finetune",                help="Which action to run: finetune or predict.")
parser.add_argument("--num_epoch",      type=int,  default=3,                         help="Number of epoches for fine-tuning.")
parser.add_argument("--use_gpu",        type=bool, default=False,                     help="Whether use GPU for finetuning or predict")
parser.add_argument("--checkpoint_dir", type=str,  default="paddlehub_finetune_ckpt", help="Directory to save/load model checkpoints.")
parser.add_argument("--batch_size",     type=int,  default=16,                        help="Total examples' number in batch for training.")
parser.add_argument("--module",         type=str,  default="resnet50",                help="Module used for finetuning or prediction.")
# yapf: enable.

module_map = {
    "resnet50": "resnet_v2_50_imagenet",
    "resnet101": "resnet_v2_101_imagenet",
    "resnet152": "resnet_v2_152_imagenet",
    "mobilenet": "mobilenet_v2_imagenet",
    "nasnet": "nasnet_imagenet",
    "pnasnet": "pnasnet_imagenet"
}


def get_reader(module, dataset=None):
    return hub.reader.ImageClassificationReader(
        image_width=module.get_expected_image_width(),
        image_height=module.get_expected_image_height(),
        images_mean=module.get_pretrained_images_mean(),
        images_std=module.get_pretrained_images_std(),
        dataset=dataset)


def get_task(module, num_classes):
    input_dict, output_dict, program = module.context(trainable=True)
    with fluid.program_guard(program):
        img = input_dict["image"]
        feature_map = output_dict["feature_map"]
        task = hub.create_img_cls_task(
            feature=feature_map, num_classes=num_classes)
    return task


def finetune(args):
    module = hub.Module(name=args.module)
    input_dict, output_dict, program = module.context(trainable=True)
    dataset = hub.dataset.Flowers()
    data_reader = get_reader(module, dataset)
    task = get_task(module, dataset.num_labels)
    img = input_dict["image"]
    feed_list = [img.name, task.variable('label').name]

    config = hub.RunConfig(
        use_cuda=args.use_gpu,
        num_epoch=args.num_epoch,
        batch_size=args.batch_size,
        enable_memory_optim=False,
        checkpoint_dir=args.checkpoint_dir,
        strategy=hub.finetune.strategy.DefaultFinetuneStrategy())

    hub.finetune_and_eval(
        task, feed_list=feed_list, data_reader=data_reader, config=config)


def predict(args):
    module = hub.Module(name=args.module)
    input_dict, output_dict, program = module.context(trainable=True)
    data_reader = get_reader(module)
    task = get_task(module, 5)
    img = input_dict["image"]
    feed_list = [img.name]

    label_map = {
        0: "roses",
        1: "tulips",
        2: "daisy",
        3: "sunflowers",
        4: "dandelion"
    }

    with fluid.program_guard(task.inference_program()):
        place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
        exe = fluid.Executor(place)
        pretrained_model_dir = os.path.join(args.checkpoint_dir, "best_model")
        if not os.path.exists(pretrained_model_dir):
            hub.logger.error(
                "pretrained model dir %s didn't exist" % pretrained_model_dir)
            exit(1)
        fluid.io.load_persistables(exe, pretrained_model_dir)
        feeder = fluid.DataFeeder(feed_list=feed_list, place=place)
        data = ["test/test_img_roses.jpg", "test/test_img_daisy.jpg"]
        predict_reader = data_reader.data_generator(
            phase="predict", batch_size=1, data=data)
        for index, batch in enumerate(predict_reader()):
            result, = exe.run(
                feed=feeder.feed(batch), fetch_list=[task.variable('probs')])
            predict_result = label_map[np.argsort(result[0])[::-1][0]]
            print("input %i is %s, and the predict result is %s" %
                  (index, data[index], predict_result))


def main(args):
    if args.target == "finetune":
        finetune(args)
    elif args.target == "predict":
        predict(args)
    else:
        hub.logger.error("target should in %s" % ["finetune", "predict"])
        exit(1)


if __name__ == "__main__":
    args = parser.parse_args()
    if not args.module in module_map:
        hub.logger.error("module should in %s" % module_map.keys())
        exit(1)
    args.module = module_map[args.module]
    main(args)
```
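The run_classifier.sh and run_predict.sh wrappers added later in this commit drive this script; their getopts flags map one-to-one onto the argparse options above.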
demo/image-classification/infer.sh (deleted, 100644 → 0)
```bash
python ../../paddlehub/commands/hub.py run ResNet50.hub_module/ \
    --signature classification \
    --config resources/test/test.yml \
    --dataset resources/test/test.csv
```
demo/image-classification/nets/__init__.py (deleted, 100644 → 0)
```python
from .mobilenet_v2 import MobileNetV2
from .resnet import ResNet50, ResNet101, ResNet152

__all__ = ["MobileNetV2", "ResNet50", "ResNet101", "ResNet152"]
```
demo/image-classification/nets/mobilenet_v2.py (deleted, 100644 → 0)
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr

__all__ = ["MobileNetV2"]

train_parameters = {
    "input_size": [3, 224, 224],
    "input_mean": [0.485, 0.456, 0.406],
    "input_std": [0.229, 0.224, 0.225],
    "learning_strategy": {
        "name": "piecewise_decay",
        "batch_size": 256,
        "epochs": [30, 60, 90],
        "steps": [0.1, 0.01, 0.001, 0.0001]
    }
}


class MobileNetV2():
    def __init__(self):
        self.params = train_parameters

    def net(self, input, class_dim=1000, scale=1.0):
        bottleneck_params_list = [
            (1, 16, 1, 1),
            (6, 24, 2, 2),
            (6, 32, 3, 2),
            (6, 64, 4, 2),
            (6, 96, 3, 1),
            (6, 160, 3, 2),
            (6, 320, 1, 1),
        ]

        input = self.conv_bn_layer(
            input,
            num_filters=int(32 * scale),
            filter_size=3,
            stride=2,
            padding=1,
            if_act=True)

        in_c = int(32 * scale)
        for layer_setting in bottleneck_params_list:
            t, c, n, s = layer_setting
            input = self.invresi_blocks(
                input=input, in_c=in_c, t=t, c=int(c * scale), n=n, s=s)
            in_c = int(c * scale)

        input = self.conv_bn_layer(
            input=input,
            num_filters=int(1280 * scale) if scale > 1.0 else 1280,
            filter_size=1,
            stride=1,
            padding=0,
            if_act=True)

        input = fluid.layers.pool2d(
            input=input,
            pool_size=7,
            pool_stride=1,
            pool_type='avg',
            global_pooling=True)

        output = fluid.layers.fc(
            input=input, size=class_dim, param_attr=ParamAttr(initializer=MSRA()))
        return output, input

    def conv_bn_layer(self,
                      input,
                      filter_size,
                      num_filters,
                      stride,
                      padding,
                      channels=None,
                      num_groups=1,
                      use_cudnn=True,
                      if_act=True):
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            act=None,
            use_cudnn=use_cudnn,
            param_attr=ParamAttr(initializer=MSRA()),
            bias_attr=False)
        bn = fluid.layers.batch_norm(input=conv)
        if if_act:
            return fluid.layers.relu6(bn)
        else:
            return bn

    def shortcut(self, input, data_residual):
        return fluid.layers.elementwise_add(input, data_residual)

    def inverted_residual_unit(self, input, num_in_filter, num_filters,
                               ifshortcut, stride, filter_size, padding,
                               expansion_factor):
        num_expfilter = int(round(num_in_filter * expansion_factor))
        channel_expand = self.conv_bn_layer(
            input=input,
            num_filters=num_expfilter,
            filter_size=1,
            stride=1,
            padding=0,
            num_groups=1,
            if_act=True)
        bottleneck_conv = self.conv_bn_layer(
            input=channel_expand,
            num_filters=num_expfilter,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            num_groups=num_expfilter,
            if_act=True,
            use_cudnn=False)
        linear_out = self.conv_bn_layer(
            input=bottleneck_conv,
            num_filters=num_filters,
            filter_size=1,
            stride=1,
            padding=0,
            num_groups=1,
            if_act=False)
        if ifshortcut:
            out = self.shortcut(input=input, data_residual=linear_out)
            return out
        else:
            return linear_out

    def invresi_blocks(self, input, in_c, t, c, n, s):
        first_block = self.inverted_residual_unit(
            input=input,
            num_in_filter=in_c,
            num_filters=c,
            ifshortcut=False,
            stride=s,
            filter_size=3,
            padding=1,
            expansion_factor=t)

        last_residual_block = first_block
        last_c = c

        for i in range(1, n):
            last_residual_block = self.inverted_residual_unit(
                input=last_residual_block,
                num_in_filter=last_c,
                num_filters=c,
                ifshortcut=True,
                stride=1,
                filter_size=3,
                padding=1,
                expansion_factor=t)
        return last_residual_block
```
demo/image-classification/nets/resnet.py (deleted, 100644 → 0)
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
import paddle.fluid as fluid
import math
from paddle.fluid.param_attr import ParamAttr

__all__ = ["ResNet", "ResNet50", "ResNet101", "ResNet152"]

train_parameters = {
    "input_size": [3, 224, 224],
    "input_mean": [0.485, 0.456, 0.406],
    "input_std": [0.229, 0.224, 0.225],
    "learning_strategy": {
        "name": "piecewise_decay",
        "batch_size": 256,
        "epochs": [30, 60, 90],
        "steps": [0.1, 0.01, 0.001, 0.0001]
    }
}


class ResNet():
    def __init__(self, layers=50):
        self.params = train_parameters
        self.layers = layers

    def net(self, input, class_dim=1000):
        layers = self.layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)

        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_filters = [64, 128, 256, 512]

        conv = self.conv_bn_layer(
            input=input,
            num_filters=64,
            filter_size=7,
            stride=2,
            act='relu',
            name="conv1")
        conv = fluid.layers.pool2d(
            input=conv,
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')

        for block in range(len(depth)):
            for i in range(depth[block]):
                if layers in [101, 152] and block == 2:
                    if i == 0:
                        conv_name = "res" + str(block + 2) + "a"
                    else:
                        conv_name = "res" + str(block + 2) + "b" + str(i)
                else:
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                conv = self.bottleneck_block(
                    input=conv,
                    num_filters=num_filters[block],
                    stride=2 if i == 0 and block != 0 else 1,
                    name=conv_name)

        pool = fluid.layers.pool2d(
            input=conv, pool_size=7, pool_type='avg', global_pooling=True)
        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
        out = fluid.layers.fc(
            input=pool,
            size=class_dim,
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)))
        return out, pool

    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride=1,
                      groups=1,
                      act=None,
                      name=None):
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False,
            name=name + '.conv2d.output.1')
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            name=bn_name + '.output.1',
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance',
        )

    def shortcut(self, input, ch_out, stride, name):
        ch_in = input.shape[1]
        if ch_in != ch_out or stride != 1:
            return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
        else:
            return input

    def bottleneck_block(self, input, num_filters, stride, name):
        conv0 = self.conv_bn_layer(
            input=input,
            num_filters=num_filters,
            filter_size=1,
            act='relu',
            name=name + "_branch2a")
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act='relu',
            name=name + "_branch2b")
        conv2 = self.conv_bn_layer(
            input=conv1,
            num_filters=num_filters * 4,
            filter_size=1,
            act=None,
            name=name + "_branch2c")

        short = self.shortcut(
            input, num_filters * 4, stride, name=name + "_branch1")

        return fluid.layers.elementwise_add(
            x=short, y=conv2, act='relu', name=name + ".add.output.5")


def ResNet50():
    model = ResNet(layers=50)
    return model


def ResNet101():
    model = ResNet(layers=101)
    return model


def ResNet152():
    model = ResNet(layers=152)
    return model
```
demo/image-classification/processor.py (deleted, 100644 → 0)
```python
import os

import paddle
import numpy as np
from PIL import Image

from paddlehub import BaseProcessor
import paddlehub as hub

DATA_DIM = 224

img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))


def softmax(x):
    orig_shape = x.shape
    if len(x.shape) > 1:
        tmp = np.max(x, axis=1)
        x -= tmp.reshape((x.shape[0], 1))
        x = np.exp(x)
        tmp = np.sum(x, axis=1)
        x /= tmp.reshape((x.shape[0], 1))
    else:
        tmp = np.max(x)
        x -= tmp
        x = np.exp(x)
        tmp = np.sum(x)
        x /= tmp
    return x


def resize_short(img, target_size):
    percent = float(target_size) / min(img.size[0], img.size[1])
    resized_width = int(round(img.size[0] * percent))
    resized_height = int(round(img.size[1] * percent))
    img = img.resize((resized_width, resized_height), Image.LANCZOS)
    return img


def crop_image(img, target_size, center):
    width, height = img.size
    size = target_size
    if center == True:
        w_start = (width - size) / 2
        h_start = (height - size) / 2
    else:
        w_start = np.random.randint(0, width - size + 1)
        h_start = np.random.randint(0, height - size + 1)
    w_end = w_start + size
    h_end = h_start + size
    img = img.crop((w_start, h_start, w_end, h_end))
    return img


def process_image(img):
    img = resize_short(img, target_size=256)
    img = crop_image(img, target_size=DATA_DIM, center=True)
    if img.mode != 'RGB':
        img = img.convert('RGB')
    img = np.array(img).astype('float32').transpose((2, 0, 1)) / 255
    img -= img_mean
    img /= img_std
    return img


class Processor(BaseProcessor):
    def __init__(self, module):
        self.module = module
        label_list_file = os.path.join(self.module.helper.assets_path(),
                                       "label_list.txt")
        with open(label_list_file, "r") as file:
            content = file.read()
        self.label_list = content.split("\n")

    def build_config(self, **kwargs):
        self.top_only = kwargs.get("top_only", None)
        try:
            self.top_only = bool(self.top_only)
        except:
            self.top_only = False

    def preprocess(self, sign_name, data_dict):
        result = {'image': []}
        for path in data_dict['image']:
            result_i = {}
            result_i['processed'] = process_image(Image.open(path))
            result['image'].append(result_i)
        return result

    def postprocess(self, sign_name, data_out, data_info, **kwargs):
        self.build_config(**kwargs)
        if sign_name == "classification":
            results = np.array(data_out[0])
            output = []
            for index, result in enumerate(results):
                result_i = softmax(result)
                if self.top_only:
                    index = np.argsort(result_i)[::-1][:1][0]
                    label = self.label_list[index]
                    output.append({label: result_i[index]})
                else:
                    output.append({
                        self.label_list[index]: value
                        for index, value in enumerate(result_i)
                    })
            return [output]
        elif sign_name == "feature_map":
            return np.array(data_out)

    def data_format(self, sign_name):
        if sign_name == "classification":
            return {
                "image": {
                    'type': hub.DataType.IMAGE,
                    'feed_key': self.module.signatures[sign_name].inputs[0].name
                }
            }
        elif sign_name == "feature_map":
            return {
                "image": {
                    'type': hub.DataType.IMAGE,
                    'feed_key': self.module.signatures[sign_name].inputs[0].name
                }
            }
```
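Since this processor was part of the deleted module packaging, here is a small, hedged sketch of what its preprocessing pipeline did to one of the demo images. It assumes the deleted processor.py is importable and that the demo test image path exists; both are assumptions for illustration only.

```python
# Hedged sketch: exercising the deleted preprocessing helpers above.
# Assumes processor.py is on sys.path and the demo test image exists.
from PIL import Image
from processor import process_image

img = process_image(Image.open("test/test_img_cat.jpg"))
# resize_short to 256, center-crop to 224, convert to RGB, scale to [0, 1],
# then mean/std normalize; result is a (3, 224, 224) float32 array.
print(img.shape)
```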
demo/image-classification/resources/download.sh (deleted, 100644 → 0)
```bash
#!/bin/bash
set -o nounset
set -o errexit

script_path=$(cd `dirname $0`; pwd)

if [ $# -ne 1 ]
then
    echo "usage: sh $0 {PRETRAINED_MODEL_NAME}"
    exit 1
fi

if [ $1 != "ResNet50" -a $1 != "ResNet101" -a $1 != "ResNet152" -a $1 != "MobileNetV2" ]
then
    echo "only support pretrained model in {ResNet50, ResNet101, ResNet152, MobileNetV2}"
    exit 1
fi

model_name=${1}_pretrained
model=${model_name}.zip

cd ${script_path}

if [ -d ${model_name} ]
then
    echo "model file ${model_name} is already existed"
    exit 0
fi

if [ ! -f ${model} ]
then
    wget http://paddle-imagenet-models-name.bj.bcebos.com/${model}
fi

unzip ${model}
rm ${model}
rm -rf __MACOSX
```
demo/image-classification/resources/label_list.txt (deleted, 100644 → 0): the 1000 ImageNet class labels listed below.
tench
goldfish
great white shark
tiger shark
hammerhead
electric ray
stingray
cock
hen
ostrich
brambling
goldfinch
house finch
junco
indigo bunting
robin
bulbul
jay
magpie
chickadee
water ouzel
kite
bald eagle
vulture
great grey owl
European fire salamander
common newt
eft
spotted salamander
axolotl
bullfrog
tree frog
tailed frog
loggerhead
leatherback turtle
mud turtle
terrapin
box turtle
banded gecko
common iguana
American chameleon
whiptail
agama
frilled lizard
alligator lizard
Gila monster
green lizard
African chameleon
Komodo dragon
African crocodile
American alligator
triceratops
thunder snake
ringneck snake
hognose snake
green snake
king snake
garter snake
water snake
vine snake
night snake
boa constrictor
rock python
Indian cobra
green mamba
sea snake
horned viper
diamondback
sidewinder
trilobite
harvestman
scorpion
black and gold garden spider
barn spider
garden spider
black widow
tarantula
wolf spider
tick
centipede
black grouse
ptarmigan
ruffed grouse
prairie chicken
peacock
quail
partridge
African grey
macaw
sulphur-crested cockatoo
lorikeet
coucal
bee eater
hornbill
hummingbird
jacamar
toucan
drake
red-breasted merganser
goose
black swan
tusker
echidna
platypus
wallaby
koala
wombat
jellyfish
sea anemone
brain coral
flatworm
nematode
conch
snail
slug
sea slug
chiton
chambered nautilus
Dungeness crab
rock crab
fiddler crab
king crab
American lobster
spiny lobster
crayfish
hermit crab
isopod
white stork
black stork
spoonbill
flamingo
little blue heron
American egret
bittern
crane
limpkin
European gallinule
American coot
bustard
ruddy turnstone
red-backed sandpiper
redshank
dowitcher
oystercatcher
pelican
king penguin
albatross
grey whale
killer whale
dugong
sea lion
Chihuahua
Japanese spaniel
Maltese dog
Pekinese
Shih-Tzu
Blenheim spaniel
papillon
toy terrier
Rhodesian ridgeback
Afghan hound
basset
beagle
bloodhound
bluetick
black-and-tan coonhound
Walker hound
English foxhound
redbone
borzoi
Irish wolfhound
Italian greyhound
whippet
Ibizan hound
Norwegian elkhound
otterhound
Saluki
Scottish deerhound
Weimaraner
Staffordshire bullterrier
American Staffordshire terrier
Bedlington terrier
Border terrier
Kerry blue terrier
Irish terrier
Norfolk terrier
Norwich terrier
Yorkshire terrier
wire-haired fox terrier
Lakeland terrier
Sealyham terrier
Airedale
cairn
Australian terrier
Dandie Dinmont
Boston bull
miniature schnauzer
giant schnauzer
standard schnauzer
Scotch terrier
Tibetan terrier
silky terrier
soft-coated wheaten terrier
West Highland white terrier
Lhasa
flat-coated retriever
curly-coated retriever
golden retriever
Labrador retriever
Chesapeake Bay retriever
German short-haired pointer
vizsla
English setter
Irish setter
Gordon setter
Brittany spaniel
clumber
English springer
Welsh springer spaniel
cocker spaniel
Sussex spaniel
Irish water spaniel
kuvasz
schipperke
groenendael
malinois
briard
kelpie
komondor
Old English sheepdog
Shetland sheepdog
collie
Border collie
Bouvier des Flandres
Rottweiler
German shepherd
Doberman
miniature pinscher
Greater Swiss Mountain dog
Bernese mountain dog
Appenzeller
EntleBucher
boxer
bull mastiff
Tibetan mastiff
French bulldog
Great Dane
Saint Bernard
Eskimo dog
malamute
Siberian husky
dalmatian
affenpinscher
basenji
pug
Leonberg
Newfoundland
Great Pyrenees
Samoyed
Pomeranian
chow
keeshond
Brabancon griffon
Pembroke
Cardigan
toy poodle
miniature poodle
standard poodle
Mexican hairless
timber wolf
white wolf
red wolf
coyote
dingo
dhole
African hunting dog
hyena
red fox
kit fox
Arctic fox
grey fox
tabby
tiger cat
Persian cat
Siamese cat
Egyptian cat
cougar
lynx
leopard
snow leopard
jaguar
lion
tiger
cheetah
brown bear
American black bear
ice bear
sloth bear
mongoose
meerkat
tiger beetle
ladybug
ground beetle
long-horned beetle
leaf beetle
dung beetle
rhinoceros beetle
weevil
fly
bee
ant
grasshopper
cricket
walking stick
cockroach
mantis
cicada
leafhopper
lacewing
dragonfly
damselfly
admiral
ringlet
monarch
cabbage butterfly
sulphur butterfly
lycaenid
starfish
sea urchin
sea cucumber
wood rabbit
hare
Angora
hamster
porcupine
fox squirrel
marmot
beaver
guinea pig
sorrel
zebra
hog
wild boar
warthog
hippopotamus
ox
water buffalo
bison
ram
bighorn
ibex
hartebeest
impala
gazelle
Arabian camel
llama
weasel
mink
polecat
black-footed ferret
otter
skunk
badger
armadillo
three-toed sloth
orangutan
gorilla
chimpanzee
gibbon
siamang
guenon
patas
baboon
macaque
langur
colobus
proboscis monkey
marmoset
capuchin
howler monkey
titi
spider monkey
squirrel monkey
Madagascar cat
indri
Indian elephant
African elephant
lesser panda
giant panda
barracouta
eel
coho
rock beauty
anemone fish
sturgeon
gar
lionfish
puffer
abacus
abaya
academic gown
accordion
acoustic guitar
aircraft carrier
airliner
airship
altar
ambulance
amphibian
analog clock
apiary
apron
ashcan
assault rifle
backpack
bakery
balance beam
balloon
ballpoint
Band Aid
banjo
bannister
barbell
barber chair
barbershop
barn
barometer
barrel
barrow
baseball
basketball
bassinet
bassoon
bathing cap
bath towel
bathtub
beach wagon
beacon
beaker
bearskin
beer bottle
beer glass
bell cote
bib
bicycle-built-for-two
bikini
binder
binoculars
birdhouse
boathouse
bobsled
bolo tie
bonnet
bookcase
bookshop
bottlecap
bow
bow tie
brass
brassiere
breakwater
breastplate
broom
bucket
buckle
bulletproof vest
bullet train
butcher shop
cab
caldron
candle
cannon
canoe
can opener
cardigan
car mirror
carousel
carpenters kit
carton
car wheel
cash machine
cassette
cassette player
castle
catamaran
CD player
cello
cellular telephone
chain
chainlink fence
chain mail
chain saw
chest
chiffonier
chime
china cabinet
Christmas stocking
church
cinema
cleaver
cliff dwelling
cloak
clog
cocktail shaker
coffee mug
coffeepot
coil
combination lock
computer keyboard
confectionery
container ship
convertible
corkscrew
cornet
cowboy boot
cowboy hat
cradle
crane
crash helmet
crate
crib
Crock Pot
croquet ball
crutch
cuirass
dam
desk
desktop computer
dial telephone
diaper
digital clock
digital watch
dining table
dishrag
dishwasher
disk brake
dock
dogsled
dome
doormat
drilling platform
drum
drumstick
dumbbell
Dutch oven
electric fan
electric guitar
electric locomotive
entertainment center
envelope
espresso maker
face powder
feather boa
file
fireboat
fire engine
fire screen
flagpole
flute
folding chair
football helmet
forklift
fountain
fountain pen
four-poster
freight car
French horn
frying pan
fur coat
garbage truck
gasmask
gas pump
goblet
go-kart
golf ball
golfcart
gondola
gong
gown
grand piano
greenhouse
grille
grocery store
guillotine
hair slide
hair spray
half track
hammer
hamper
hand blower
hand-held computer
handkerchief
hard disc
harmonica
harp
harvester
hatchet
holster
home theater
honeycomb
hook
hoopskirt
horizontal bar
horse cart
hourglass
iPod
iron
jack-o-lantern
jean
jeep
jersey
jigsaw puzzle
jinrikisha
joystick
kimono
knee pad
knot
lab coat
ladle
lampshade
laptop
lawn mower
lens cap
letter opener
library
lifeboat
lighter
limousine
liner
lipstick
Loafer
lotion
loudspeaker
loupe
lumbermill
magnetic compass
mailbag
mailbox
maillot
maillot
manhole cover
maraca
marimba
mask
matchstick
maypole
maze
measuring cup
medicine chest
megalith
microphone
microwave
military uniform
milk can
minibus
miniskirt
minivan
missile
mitten
mixing bowl
mobile home
Model T
modem
monastery
monitor
moped
mortar
mortarboard
mosque
mosquito net
motor scooter
mountain bike
mountain tent
mouse
mousetrap
moving van
muzzle
nail
neck brace
necklace
nipple
notebook
obelisk
oboe
ocarina
odometer
oil filter
organ
oscilloscope
overskirt
oxcart
oxygen mask
packet
paddle
paddlewheel
padlock
paintbrush
pajama
palace
panpipe
paper towel
parachute
parallel bars
park bench
parking meter
passenger car
patio
pay-phone
pedestal
pencil box
pencil sharpener
perfume
Petri dish
photocopier
pick
pickelhaube
picket fence
pickup
pier
piggy bank
pill bottle
pillow
ping-pong ball
pinwheel
pirate
pitcher
plane
planetarium
plastic bag
plate rack
plow
plunger
Polaroid camera
pole
police van
poncho
pool table
pop bottle
pot
potters wheel
power drill
prayer rug
printer
prison
projectile
projector
puck
punching bag
purse
quill
quilt
racer
racket
radiator
radio
radio telescope
rain barrel
recreational vehicle
reel
reflex camera
refrigerator
remote control
restaurant
revolver
rifle
rocking chair
rotisserie
rubber eraser
rugby ball
rule
running shoe
safe
safety pin
saltshaker
sandal
sarong
sax
scabbard
scale
school bus
schooner
scoreboard
screen
screw
screwdriver
seat belt
sewing machine
shield
shoe shop
shoji
shopping basket
shopping cart
shovel
shower cap
shower curtain
ski
ski mask
sleeping bag
slide rule
sliding door
slot
snorkel
snowmobile
snowplow
soap dispenser
soccer ball
sock
solar dish
sombrero
soup bowl
space bar
space heater
space shuttle
spatula
speedboat
spider web
spindle
sports car
spotlight
stage
steam locomotive
steel arch bridge
steel drum
stethoscope
stole
stone wall
stopwatch
stove
strainer
streetcar
stretcher
studio couch
stupa
submarine
suit
sundial
sunglass
sunglasses
sunscreen
suspension bridge
swab
sweatshirt
swimming trunks
swing
switch
syringe
table lamp
tank
tape player
teapot
teddy
television
tennis ball
thatch
theater curtain
thimble
thresher
throne
tile roof
toaster
tobacco shop
toilet seat
torch
totem pole
tow truck
toyshop
tractor
trailer truck
tray
trench coat
tricycle
trimaran
tripod
triumphal arch
trolleybus
trombone
tub
turnstile
typewriter keyboard
umbrella
unicycle
upright
vacuum
vase
vault
velvet
vending machine
vestment
viaduct
violin
volleyball
waffle iron
wall clock
wallet
wardrobe
warplane
washbasin
washer
water bottle
water jug
water tower
whiskey jug
whistle
wig
window screen
window shade
Windsor tie
wine bottle
wing
wok
wooden spoon
wool
worm fence
wreck
yawl
yurt
web site
comic book
crossword puzzle
street sign
traffic light
book jacket
menu
plate
guacamole
consomme
hot pot
trifle
ice cream
ice lolly
French loaf
bagel
pretzel
cheeseburger
hotdog
mashed potato
head cabbage
broccoli
cauliflower
zucchini
spaghetti squash
acorn squash
butternut squash
cucumber
artichoke
bell pepper
cardoon
mushroom
Granny Smith
strawberry
orange
lemon
fig
pineapple
banana
jackfruit
custard apple
pomegranate
hay
carbonara
chocolate sauce
dough
meat loaf
pizza
potpie
burrito
red wine
espresso
cup
eggnog
alp
bubble
cliff
coral reef
geyser
lakeside
promontory
sandbar
seashore
valley
volcano
ballplayer
groom
scuba diver
rapeseed
daisy
yellow ladys slipper
corn
acorn
hip
buckeye
coral fungus
agaric
gyromitra
stinkhorn
earthstar
hen-of-the-woods
bolete
ear
toilet tissue
demo/image-classification/resources/module_info.yml (deleted, 100644 → 0)
```yaml
name: resnet_v2_50_imagenet
type: CV/classification
author: paddlepaddle
author_email: paddle-dev@baidu.com
summary: "ResNet50 is a model used for image classification; we trained this model on the ImageNet-2012 dataset."
version: 1.0.0
```
demo/image-classification/retrain.py (deleted, 100644 → 0)
```python
import paddle.fluid as fluid
import paddlehub as hub

if __name__ == "__main__":
    resnet_module = hub.Module(module_dir="ResNet50.hub_module")
    input_dict, output_dict, program = resnet_module.context(trainable=True)
    dataset = hub.dataset.Flowers()
    data_reader = hub.reader.ImageClassificationReader(
        image_width=resnet_module.get_excepted_image_width(),
        image_height=resnet_module.get_excepted_image_height(),
        images_mean=resnet_module.get_pretrained_images_mean(),
        images_std=resnet_module.get_pretrained_images_std(),
        dataset=dataset)

    with fluid.program_guard(program):
        label = fluid.layers.data(name="label", dtype="int64", shape=[1])
        img = input_dict[0]
        feature_map = output_dict[0]

        config = hub.RunConfig(
            use_cuda=True,
            num_epoch=10,
            batch_size=32,
            enable_memory_optim=False,
            strategy=hub.finetune.strategy.DefaultFinetuneStrategy())

        feed_list = [img.name, label.name]

        task = hub.create_img_cls_task(
            feature=feature_map, label=label, num_classes=dataset.num_labels)
        hub.finetune_and_eval(
            task, feed_list=feed_list, data_reader=data_reader, config=config)
```
demo/image-classification/run_classifier.sh (new file, 0 → 100644)
```bash
cuda_visible_devices=0
module=resnet50
num_epoch=1
batch_size=16
use_gpu=False
checkpoint_dir=paddlehub_finetune_ckpt

while getopts "gm:n:b:c:d:" options
do
    case "$options" in
        m)
            module=$OPTARG;;
        n)
            num_epoch=$OPTARG;;
        b)
            batch_size=$OPTARG;;
        c)
            checkpoint_dir=$OPTARG;;
        d)
            cuda_visible_devices=$OPTARG;;
        g)
            use_gpu=True;;
        ?)
            echo "unknown options"
            exit 1;;
    esac
done

export CUDA_VISIBLE_DEVICES=${cuda_visible_devices}

python -u img_classifier.py \
    --target finetune \
    --use_gpu ${use_gpu} \
    --batch_size ${batch_size} \
    --checkpoint_dir ${checkpoint_dir} \
    --num_epoch ${num_epoch} \
    --module ${module}
```
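Assuming a GPU machine, a typical invocation might look like `sh run_classifier.sh -g -m resnet50 -n 3 -b 32 -c my_ckpt -d 0`; the flag letters come from the getopts string above, and the values are illustrative.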
demo/image-classification/run_predict.sh (new file, 0 → 100644)
```bash
cuda_visible_devices=0
module=resnet50
use_gpu=False
checkpoint_dir=paddlehub_finetune_ckpt

while getopts "gm:c:d:" options
do
    case "$options" in
        m)
            module=$OPTARG;;
        c)
            checkpoint_dir=$OPTARG;;
        d)
            cuda_visible_devices=$OPTARG;;
        g)
            use_gpu=True;;
        ?)
            echo "unknown options"
            exit 1;;
    esac
done

export CUDA_VISIBLE_DEVICES=${cuda_visible_devices}

python -u img_classifier.py \
    --target predict \
    --use_gpu ${use_gpu} \
    --checkpoint_dir ${checkpoint_dir} \
    --module ${module}
```
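Prediction reuses the same flags minus the epoch and batch options, e.g. `sh run_predict.sh -m resnet50 -c my_ckpt` (illustrative values; it runs on CPU by default when -g is omitted).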
demo/image-classification/resources/test/test.csv → demo/image-classification/test/test.txt (file moved)

demo/image-classification/resources/test/test.yml → demo/image-classification/test/test.yml (file moved)

demo/image-classification/resources/test/test_img_bird.jpg → demo/image-classification/test/test_img_bird.jpg (file moved)

demo/image-classification/resources/test/test_img_cat.jpg → demo/image-classification/test/test_img_cat.jpg (file moved)

demo/image-classification/test/test_img_daisy.jpg (new file, 0 → 100644, 48.0 KB)

demo/image-classification/test/test_img_roses.jpg (new file, 0 → 100644, 92.1 KB)

demo/image-classification/resources/test/test_img_sheep.jpg → demo/image-classification/test/test_img_sheep.jpg (file moved)
demo/image-classification/utility.py (deleted, 100644 → 0)
"""Contains common utility functions."""
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from
__future__
import
absolute_import
from
__future__
import
division
from
__future__
import
print_function
import
distutils.util
import
numpy
as
np
import
six
from
paddle.fluid
import
core
def
print_arguments
(
args
):
"""Print argparse's arguments.
Usage:
.. code-block:: python
parser = argparse.ArgumentParser()
parser.add_argument("name", default="Jonh", type=str, help="User name.")
args = parser.parse_args()
print_arguments(args)
:param args: Input argparse.Namespace for printing.
:type args: argparse.Namespace
"""
print
(
"----------- Configuration Arguments -----------"
)
for
arg
,
value
in
sorted
(
six
.
iteritems
(
vars
(
args
))):
print
(
"%s: %s"
%
(
arg
,
value
))
print
(
"------------------------------------------------"
)
def
add_arguments
(
argname
,
type
,
default
,
help
,
argparser
,
**
kwargs
):
"""Add argparse's argument.
Usage:
.. code-block:: python
parser = argparse.ArgumentParser()
add_argument("name", str, "Jonh", "User name.", parser)
args = parser.parse_args()
"""
type
=
distutils
.
util
.
strtobool
if
type
==
bool
else
type
argparser
.
add_argument
(
"--"
+
argname
,
default
=
default
,
type
=
type
,
help
=
help
+
' Default: %(default)s.'
,
**
kwargs
)
demo/sequence-labeling/sequence_label.py (modified)
```diff
@@ -46,48 +46,44 @@ if __name__ == '__main__':
         max_seq_len=args.max_seq_len)
 
     # Step3: construct transfer learning network
     with fluid.program_guard(program):
-        label = fluid.layers.data(
-            name="label", shape=[args.max_seq_len, 1], dtype='int64')
-        seq_len = fluid.layers.data(name="seq_len", shape=[1], dtype='int64')
-
         # Use "sequence_output" for token-level output.
         sequence_output = outputs["sequence_output"]
 
+        # Define a sequence labeling finetune task by PaddleHub's API
+        seq_label_task = hub.create_seq_label_task(
+            feature=sequence_output,
+            max_seq_len=args.max_seq_len,
+            num_classes=dataset.num_labels)
+
         # Setup feed list for data feeder
         # Must feed all the tensor of ERNIE's module need
         # Compared to classification task, we need add seq_len tensor to feedlist
         feed_list = [
-            inputs["input_ids"].name, inputs["position_ids"].name,
-            inputs["segment_ids"].name, inputs["input_mask"].name, label.name,
-            seq_len
+            inputs["input_ids"].name,
+            inputs["position_ids"].name,
+            inputs["segment_ids"].name,
+            inputs["input_mask"].name,
+            seq_label_task.variable('label').name,
+            seq_label_task.variable('seq_len').name
         ]
-        # Define a sequence labeling finetune task by PaddleHub's API
-        seq_label_task = hub.create_seq_label_task(
-            feature=sequence_output,
-            labels=label,
-            seq_len=seq_len,
-            num_classes=dataset.num_labels)
 
         # Select a finetune strategy
         strategy = hub.AdamWeightDecayStrategy(
             weight_decay=args.weight_decay,
             learning_rate=args.learning_rate,
             lr_scheduler="linear_warmup_decay",
         )
 
         # Setup runing config for PaddleHub Finetune API
         config = hub.RunConfig(
             use_cuda=args.use_gpu,
             num_epoch=args.num_epoch,
             batch_size=args.batch_size,
             checkpoint_dir=args.checkpoint_dir,
             strategy=strategy)
 
         # Finetune and evaluate model by PaddleHub's API
         # will finish training, evaluation, testing, save model automatically
         hub.finetune_and_eval(
             task=seq_label_task,
             data_reader=reader,
             feed_list=feed_list,
             config=config)
```
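The net effect of this hunk: create_seq_label_task now receives max_seq_len and constructs the label and seq_len data layers itself, exposing them through task.variable('label') and task.variable('seq_len'), so the caller-side fluid.layers.data definitions disappear. The same pattern is applied to the text- and image-classification demos below.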
demo/text-classification/predict.py (modified)
```diff
@@ -51,24 +51,22 @@ if __name__ == '__main__':
     place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
 
     with fluid.program_guard(program):
-        label = fluid.layers.data(name="label", shape=[1], dtype='int64')
-
         # Use "pooled_output" for classification tasks on an entire sentence.
         # Use "sequence_outputs" for token-level output.
         pooled_output = output_dict["pooled_output"]
 
+        # Define a classfication finetune task by PaddleHub's API
+        cls_task = hub.create_text_cls_task(
+            feature=pooled_output, num_classes=dataset.num_labels)
+
         # Setup feed list for data feeder
         # Must feed all the tensor of ERNIE's module need
         feed_list = [
             input_dict["input_ids"].name, input_dict["position_ids"].name,
             input_dict["segment_ids"].name, input_dict["input_mask"].name,
-            label.name
+            cls_task.variable('label').name
         ]
 
-        # Define a classfication finetune task by PaddleHub's API
-        cls_task = hub.create_text_cls_task(
-            feature=pooled_output, label=label, num_classes=dataset.num_labels)
-
         # classificatin probability tensor
         probs = cls_task.variable("probs")
```
demo/text-classification/text_classifier.py (modified)
```diff
@@ -58,42 +58,38 @@ if __name__ == '__main__':
         max_seq_len=args.max_seq_len)
 
     # Step3: construct transfer learning network
     with fluid.program_guard(program):
-        label = fluid.layers.data(name="label", shape=[1], dtype='int64')
-
         # Use "pooled_output" for classification tasks on an entire sentence.
         # Use "sequence_output" for token-level output.
         pooled_output = outputs["pooled_output"]
 
+        # Define a classfication finetune task by PaddleHub's API
+        cls_task = hub.create_text_cls_task(
+            feature=pooled_output, num_classes=dataset.num_labels)
+
         # Setup feed list for data feeder
         # Must feed all the tensor of ERNIE's module need
         feed_list = [
             inputs["input_ids"].name, inputs["position_ids"].name,
-            inputs["segment_ids"].name, inputs["input_mask"].name, label.name
+            inputs["segment_ids"].name, inputs["input_mask"].name,
+            cls_task.variable('label').name
         ]
-        # Define a classfication finetune task by PaddleHub's API
-        cls_task = hub.create_text_cls_task(
-            feature=pooled_output, label=label, num_classes=dataset.num_labels)
 
     # Step4: Select finetune strategy, setup config and finetune
     strategy = hub.AdamWeightDecayStrategy(
         weight_decay=args.weight_decay,
         learning_rate=args.learning_rate,
         lr_scheduler="linear_warmup_decay",
     )
 
     # Setup runing config for PaddleHub Finetune API
     config = hub.RunConfig(
         use_cuda=args.use_gpu,
         num_epoch=args.num_epoch,
         batch_size=args.batch_size,
         checkpoint_dir=args.checkpoint_dir,
         strategy=strategy)
 
     # Finetune and evaluate by PaddleHub's API
     # will finish training, evaluation, testing, save model automatically
     hub.finetune_and_eval(
         task=cls_task, data_reader=reader, feed_list=feed_list, config=config)
```
paddlehub/finetune/evaluate.py (modified)
```diff
@@ -27,14 +27,14 @@ import paddlehub as hub
 
 def evaluate_cls_task(task, data_reader, feed_list, phase="test", config=None):
     logger.info("Evaluation on {} dataset start".format(phase))
-    inference_program = task.inference_program()
+    test_program = task.test_program()
     main_program = task.main_program()
     loss = task.variable("loss")
     accuracy = task.variable("accuracy")
     batch_size = config.batch_size
     place, dev_count = hub.common.get_running_device_info(config)
     exe = fluid.Executor(place=place)
-    with fluid.program_guard(inference_program):
+    with fluid.program_guard(test_program):
         data_feeder = fluid.DataFeeder(feed_list=feed_list, place=place)
         num_eval_examples = acc_sum = loss_sum = 0
         test_reader = data_reader.data_generator(
@@ -77,13 +77,13 @@ def evaluate_seq_label_task(task,
         task.variable("loss").name
     ]
     logger.info("Evaluation on {} dataset start".format(phase))
-    inference_program = task.inference_program()
+    test_program = task.test_program()
     batch_size = config.batch_size
     place, dev_count = hub.common.get_running_device_info(config)
     exe = fluid.Executor(place=place)
     # calculate the num of label from probs variable shape
     num_labels = task.variable("probs").shape[1]
-    with fluid.program_guard(inference_program):
+    with fluid.program_guard(test_program):
         data_feeder = fluid.DataFeeder(feed_list=feed_list, place=place)
         num_eval_examples = acc_sum = loss_sum = 0
         test_reader = data_reader.data_generator(
```
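The rename is not cosmetic. As the task.py diff below shows, inference_program is now a clone taken before the label, loss, and accuracy layers are appended, while test_program is the old for_test clone of the full main program. Here is a hedged sketch of how the two are meant to be used, with the module and task constructors as in this commit (the specific module name is the one the demos use):

```python
import paddle.fluid as fluid
import paddlehub as hub

# Sketch under this commit's API: build a task, then pick the right program.
module = hub.Module(name="resnet_v2_50_imagenet")
input_dict, output_dict, program = module.context(trainable=True)
with fluid.program_guard(program):
    task = hub.create_img_cls_task(
        feature=output_dict["feature_map"], num_classes=5)

# Evaluation: test_program() still contains the label/loss/accuracy layers,
# which is why evaluate.py now guards on it.
with fluid.program_guard(task.test_program()):
    loss = task.variable("loss")
    accuracy = task.variable("accuracy")

# Prediction: inference_program() was cloned before the label layer was
# added, so it is a forward-only graph (used by predict() in the demo).
with fluid.program_guard(task.inference_program()):
    probs = task.variable("probs")
```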
paddlehub/finetune/task.py (modified)
...
@@ -31,13 +31,18 @@ class Task(object):
...
@@ -31,13 +31,18 @@ class Task(object):
including Paddle's main_program, startup_program and inference program
including Paddle's main_program, startup_program and inference program
"""
"""
def
__init__
(
self
,
task_type
,
graph_var_dict
,
main_program
,
def
__init__
(
self
,
startup_program
):
task_type
,
graph_var_dict
,
main_program
,
startup_program
,
inference_program
=
None
):
self
.
task_type
=
task_type
self
.
task_type
=
task_type
self
.
graph_var_dict
=
graph_var_dict
self
.
graph_var_dict
=
graph_var_dict
self
.
_main_program
=
main_program
self
.
_main_program
=
main_program
self
.
_startup_program
=
startup_program
self
.
_startup_program
=
startup_program
self
.
_inference_program
=
main_program
.
clone
(
for_test
=
True
)
self
.
_inference_program
=
inference_program
self
.
_test_program
=
main_program
.
clone
(
for_test
=
True
)
def
variable
(
self
,
var_name
):
def
variable
(
self
,
var_name
):
if
var_name
in
self
.
graph_var_dict
:
if
var_name
in
self
.
graph_var_dict
:
...
@@ -54,6 +59,9 @@ class Task(object):
...
@@ -54,6 +59,9 @@ class Task(object):
def
inference_program
(
self
):
def
inference_program
(
self
):
return
self
.
_inference_program
return
self
.
_inference_program
def
test_program
(
self
):
return
self
.
_test_program
def
metric_variable_names
(
self
):
def
metric_variable_names
(
self
):
metric_variable_names
=
[]
metric_variable_names
=
[]
for
var_name
in
self
.
graph_var_dict
:
for
var_name
in
self
.
graph_var_dict
:
...
@@ -62,50 +70,61 @@ class Task(object):
...
@@ -62,50 +70,61 @@ class Task(object):
return
metric_variable_names
return
metric_variable_names
def
create_text_cls_task
(
feature
,
label
,
num_classes
,
hidden_units
=
None
):
def
create_text_cls_task
(
feature
,
num_classes
,
hidden_units
=
None
):
"""
"""
Append a multi-layer perceptron classifier for binary classification base
Append a multi-layer perceptron classifier for binary classification base
on input feature
on input feature
"""
"""
cls_feats
=
fluid
.
layers
.
dropout
(
program
=
feature
.
block
.
program
x
=
feature
,
dropout_prob
=
0.1
,
dropout_implementation
=
"upscale_in_train"
)
with
fluid
.
program_guard
(
program
):
cls_feats
=
fluid
.
layers
.
dropout
(
# append fully connected layer according to hidden_units
x
=
feature
,
if
hidden_units
is
not
None
:
dropout_prob
=
0.1
,
for
n_hidden
in
hidden_units
:
dropout_implementation
=
"upscale_in_train"
)
cls_feats
=
fluid
.
layers
.
fc
(
input
=
cls_feats
,
size
=
n_hidden
)
# append fully connected layer according to hidden_units
logits
=
fluid
.
layers
.
fc
(
if
hidden_units
is
not
None
:
input
=
cls_feats
,
for
n_hidden
in
hidden_units
:
size
=
num_classes
,
cls_feats
=
fluid
.
layers
.
fc
(
input
=
cls_feats
,
size
=
n_hidden
)
param_attr
=
fluid
.
ParamAttr
(
name
=
"cls_out_w"
,
logits
=
fluid
.
layers
.
fc
(
initializer
=
fluid
.
initializer
.
TruncatedNormal
(
scale
=
0.02
)),
input
=
cls_feats
,
bias_attr
=
fluid
.
ParamAttr
(
size
=
num_classes
,
name
=
"cls_out_b"
,
initializer
=
fluid
.
initializer
.
Constant
(
0.
)))
param_attr
=
fluid
.
ParamAttr
(
name
=
"cls_out_w"
,
ce_loss
,
probs
=
fluid
.
layers
.
softmax_with_cross_entropy
(
initializer
=
fluid
.
initializer
.
TruncatedNormal
(
scale
=
0.02
)),
logits
=
logits
,
label
=
label
,
return_softmax
=
True
)
bias_attr
=
fluid
.
ParamAttr
(
loss
=
fluid
.
layers
.
mean
(
x
=
ce_loss
)
name
=
"cls_out_b"
,
initializer
=
fluid
.
initializer
.
Constant
(
0.
)),
act
=
"softmax"
)
num_example
=
fluid
.
layers
.
create_tensor
(
dtype
=
'int64'
)
accuracy
=
fluid
.
layers
.
accuracy
(
inference_program
=
fluid
.
default_main_program
().
clone
(
for_test
=
True
)
input
=
probs
,
label
=
label
,
total
=
num_example
)
label
=
fluid
.
layers
.
data
(
name
=
"label"
,
dtype
=
"int64"
,
shape
=
[
1
])
ce_loss
=
fluid
.
layers
.
cross_entropy
(
input
=
logits
,
label
=
label
)
graph_var_dict
=
{
loss
=
fluid
.
layers
.
mean
(
x
=
ce_loss
)
"loss"
:
loss
,
"probs"
:
probs
,
num_example
=
fluid
.
layers
.
create_tensor
(
dtype
=
'int64'
)
"accuracy"
:
accuracy
,
accuracy
=
fluid
.
layers
.
accuracy
(
"num_example"
:
num_example
input
=
logits
,
label
=
label
,
total
=
num_example
)
}
graph_var_dict
=
{
task
=
Task
(
"text_classification"
,
graph_var_dict
,
"loss"
:
loss
,
fluid
.
default_main_program
(),
fluid
.
default_startup_program
())
"accuracy"
:
accuracy
,
"num_example"
:
num_example
,
"label"
:
label
,
"probs"
:
logits
}
task
=
Task
(
"text_classification"
,
graph_var_dict
,
fluid
.
default_main_program
(),
fluid
.
default_startup_program
(),
inference_program
=
inference_program
)
return
task
return
task
-def create_img_cls_task(feature, label, num_classes, hidden_units=None):
+def create_img_cls_task(feature, num_classes, hidden_units=None):
     """
     Create the transfer learning task for image classification.
     Args:
@@ -117,74 +136,98 @@ def create_img_cls_task(feature, label, num_classes, hidden_units=None):
     Raise:
         None
     """
-    cls_feats = feature
-    # append fully connected layer according to hidden_units
-    if hidden_units is not None:
-        for n_hidden in hidden_units:
-            cls_feats = fluid.layers.fc(input=cls_feats, size=n_hidden)
-
-    logits = fluid.layers.fc(
-        input=cls_feats,
-        size=num_classes,
-        param_attr=fluid.ParamAttr(
-            name="cls_out_w",
-            initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
-        bias_attr=fluid.ParamAttr(
-            name="cls_out_b",
-            initializer=fluid.initializer.Constant(0.)))
-
-    ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
-        logits=logits, label=label, return_softmax=True)
-    loss = fluid.layers.mean(x=ce_loss)
-
-    num_example = fluid.layers.create_tensor(dtype='int64')
-    accuracy = fluid.layers.accuracy(
-        input=probs, label=label, total=num_example)
-
-    graph_var_dict = {
-        "loss": loss,
-        "probs": probs,
-        "accuracy": accuracy,
-        "num_example": num_example
-    }
-
-    task = Task("image_classification", graph_var_dict,
-                fluid.default_main_program(),
-                fluid.default_startup_program())
+    program = feature.block.program
+    with fluid.program_guard(program):
+        cls_feats = feature
+        # append fully connected layer according to hidden_units
+        if hidden_units is not None:
+            for n_hidden in hidden_units:
+                cls_feats = fluid.layers.fc(
+                    input=cls_feats, size=n_hidden)
+
+        probs = fluid.layers.fc(
+            input=cls_feats,
+            size=num_classes,
+            param_attr=fluid.ParamAttr(
+                name="cls_out_w",
+                initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
+            bias_attr=fluid.ParamAttr(
+                name="cls_out_b",
+                initializer=fluid.initializer.Constant(0.)),
+            act="softmax")
+
+        inference_program = fluid.default_main_program().clone(for_test=True)
+
+        label = fluid.layers.data(name="label", dtype="int64", shape=[1])
+        ce_loss = fluid.layers.cross_entropy(input=probs, label=label)
+        loss = fluid.layers.mean(x=ce_loss)
+
+        num_example = fluid.layers.create_tensor(dtype='int64')
+        accuracy = fluid.layers.accuracy(
+            input=probs, label=label, total=num_example)
+
+        graph_var_dict = {
+            "loss": loss,
+            "probs": probs,
+            "accuracy": accuracy,
+            "num_example": num_example,
+            "label": label
+        }
+
+        task = Task(
+            "image_classification",
+            graph_var_dict,
+            fluid.default_main_program(),
+            fluid.default_startup_program(),
+            inference_program=inference_program)

     return task
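hidden_units keeps its old meaning: each entry appends one intermediate fc layer, in order, between the backbone feature and the final classifier. A hedged usage sketch of the new signature, where the feature_map placeholder stands in for the output of an image module's module.context():

import paddle.fluid as fluid
import paddlehub as hub

# Placeholder for the image feature a pretrained CNN module would emit.
feature_map = fluid.layers.data(
    name="feature", shape=[2048], dtype="float32")

# hidden_units=[128, 64] inserts fc(128) then fc(64) ahead of the final
# num_classes-way softmax layer; the label layer is created internally.
task = hub.create_img_cls_task(
    feature=feature_map, num_classes=5, hidden_units=[128, 64])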
-def create_seq_label_task(feature, labels, seq_len, num_classes):
-    logits = fluid.layers.fc(
-        input=feature,
-        size=num_classes,
-        num_flatten_dims=2,
-        param_attr=fluid.ParamAttr(
-            name="cls_seq_label_out_w",
-            initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
-        bias_attr=fluid.ParamAttr(
-            name="cls_seq_label_out_b",
-            initializer=fluid.initializer.Constant(0.)))
-
-    ret_labels = fluid.layers.reshape(x=labels, shape=[-1, 1])
-    ret_infers = fluid.layers.reshape(
-        x=fluid.layers.argmax(logits, axis=2), shape=[-1, 1])
-
-    labels = fluid.layers.flatten(labels, axis=2)
-    ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
-        logits=fluid.layers.flatten(logits, axis=2),
-        label=labels,
-        return_softmax=True)
-    loss = fluid.layers.mean(x=ce_loss)
-
-    graph_var_dict = {
-        "loss": loss,
-        "probs": probs,
-        "labels": ret_labels,
-        "infers": ret_infers,
-        "seq_len": seq_len
-    }
-
-    task = Task("sequence_labeling", graph_var_dict,
-                fluid.default_main_program(),
-                fluid.default_startup_program())
+def create_seq_label_task(feature, max_seq_len, num_classes):
+    program = feature.block.program
+    with fluid.program_guard(program):
+        logits = fluid.layers.fc(
+            input=feature,
+            size=num_classes,
+            num_flatten_dims=2,
+            param_attr=fluid.ParamAttr(
+                name="cls_seq_label_out_w",
+                initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
+            bias_attr=fluid.ParamAttr(
+                name="cls_seq_label_out_b",
+                initializer=fluid.initializer.Constant(0.)))
+
+        ret_infers = fluid.layers.reshape(
+            x=fluid.layers.argmax(logits, axis=2), shape=[-1, 1])
+
+        logits = fluid.layers.flatten(logits, axis=2)
+        logits = fluid.layers.softmax(logits)
+
+        inference_program = fluid.default_main_program().clone(for_test=True)
+
+        seq_len = fluid.layers.data(name="seq_len", shape=[1], dtype='int64')
+        label = fluid.layers.data(
+            name="label", shape=[max_seq_len, 1], dtype='int64')
+        ret_labels = fluid.layers.reshape(x=label, shape=[-1, 1])
+
+        labels = fluid.layers.flatten(label, axis=2)
+        ce_loss = fluid.layers.cross_entropy(input=logits, label=labels)
+        loss = fluid.layers.mean(x=ce_loss)
+
+        graph_var_dict = {
+            "loss": loss,
+            "probs": logits,
+            "labels": ret_labels,
+            "infers": ret_infers,
+            "seq_len": seq_len,
+            "label": label
+        }
+
+        task = Task(
+            "sequence_labeling",
+            graph_var_dict,
+            fluid.default_main_program(),
+            fluid.default_startup_program(),
+            inference_program=inference_program)

     return task
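Shape bookkeeping in the new sequence-labeling task: the fc with num_flatten_dims=2 emits logits of shape [batch, max_seq_len, num_classes]; flatten(..., axis=2) folds the first two dimensions so softmax and cross_entropy run per token, while ret_infers and ret_labels are reshaped to [-1, 1] so the evaluator can pair them with seq_len to mask padding positions. A sketch under the new signature, where sequence_output is a placeholder for a token-level feature from a module such as ERNIE:

import paddle.fluid as fluid
import paddlehub as hub

max_seq_len = 128
# Placeholder token-level feature: [batch, max_seq_len, hidden].
sequence_output = fluid.layers.data(
    name="sequence_output", shape=[max_seq_len, 768], dtype="float32")

# The task now declares its own "label" ([max_seq_len, 1]) and "seq_len"
# inputs instead of receiving them from the caller.
seq_task = hub.create_seq_label_task(
    feature=sequence_output, max_seq_len=max_seq_len, num_classes=7)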
paddlehub/reader/cv_reader.py
@@ -70,7 +70,11 @@ class ImageClassificationReader(object):
         if self.image_width <= 0 or self.image_height <= 0:
             raise ValueError("Image width and height should not be negative.")

-    def data_generator(self, batch_size, phase="train", shuffle=False):
+    def data_generator(self,
+                       batch_size,
+                       phase="train",
+                       shuffle=False,
+                       data=None):
         if phase == "train":
             data = self.dataset.train_data(shuffle)
         elif phase == "test":
@@ -79,30 +83,41 @@ class ImageClassificationReader(object):
         elif phase == "val" or phase == "dev":
             shuffle = False
             data = self.dataset.validate_data(shuffle)
+        elif phase == "predict":
+            data = data
+
+        def preprocess(image_path):
+            image = Image.open(image_path)
+            image = image_augmentation.image_resize(image, self.image_width,
+                                                    self.image_height)
+            if self.data_augmentation:
+                image = image_augmentation.image_random_process(
+                    image, enable_resize=False)
+
+            # only support RGB
+            image = image.convert('RGB')
+
+            # HWC to CHW
+            image = np.array(image).astype('float32')
+            if len(image.shape) == 3:
+                image = np.swapaxes(image, 1, 2)
+                image = np.swapaxes(image, 1, 0)
+
+            # standardization
+            image /= 255
+            image -= self.images_mean
+            image /= self.images_std
+            image = image[channel_order_dict[self.channel_order], :, :]
+            return image

         def _data_reader():
-            for image_path, label in data:
-                image = Image.open(image_path)
-                image = image_augmentation.image_resize(
-                    image, self.image_width, self.image_height)
-                if self.data_augmentation:
-                    image = image_augmentation.image_random_process(
-                        image, enable_resize=False)
-
-                # only support RGB
-                image = image.convert('RGB')
-
-                # HWC to CHW
-                image = np.array(image).astype('float32')
-                if len(image.shape) == 3:
-                    image = np.swapaxes(image, 1, 2)
-                    image = np.swapaxes(image, 1, 0)
-
-                # standardization
-                image /= 255
-                image -= self.images_mean
-                image /= self.images_std
-                image = image[channel_order_dict[self.channel_order], :, :]
-
-                yield ((image, label))
+            if phase == "predict":
+                for image_path in data:
+                    image = preprocess(image_path)
+                    yield (image, )
+            else:
+                for image_path, label in data:
+                    image = preprocess(image_path)
+                    yield (image, label)

         return paddle.batch(_data_reader, batch_size=batch_size)
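With the new data parameter, the reader can serve raw image paths at prediction time instead of a labeled dataset split, and the shared preprocess closure guarantees predict batches go through the same resize/RGB/CHW/standardization pipeline as training. A hedged sketch; the constructor arguments shown are only the fields visible in this diff, and flowers_dataset is a placeholder for any PaddleHub image dataset:

from paddlehub.reader.cv_reader import ImageClassificationReader

# flowers_dataset is assumed to be a hub image dataset; other
# constructor options (mean/std, channel order) are left at defaults.
reader = ImageClassificationReader(
    image_width=224, image_height=224, dataset=flowers_dataset)

# phase="predict" consumes bare image paths via the new `data` argument
# and yields (image,) tuples with no label attached.
predict_gen = reader.data_generator(
    batch_size=1, phase="predict", data=["test/test_img_daisy.jpg"])
for batch in predict_gen():
    print(batch[0][0].shape)  # preprocessed CHW float32 array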
paddlehub/version.py
@@ -12,5 +12,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """ PaddleHub version string """
-hub_version = "0.4.2.alpha"
+hub_version = "0.4.5.beta"
 module_proto_version = "1.0.0"