PaddlePaddle / hapi
Commit 411664bd ("adapt import")
Author: LielinJiang
Date: May 14, 2020
Parent: 74f19d78

Showing 41 changed files with 234 additions and 201 deletions (+234, -201).
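The substance of the commit is one mechanical rename: modules that the examples previously imported from the standalone hapi package are now imported from the paddle.incubate.hapi namespace (one file, examples/yolov3/modeling.py, instead switches darknet53 to a local module, and a few hunks are pure re-wrapping of call sites). A minimal sketch of the new import form follows, with the pre-commit locations kept in comments; the set_device call at the end is illustrative only and assumes a Paddle build that ships paddle.incubate.hapi, which is what these examples target.

# Post-commit import locations; the old standalone-package form is shown in comments.
# old: from hapi.model import set_device, Model, Input
from paddle.incubate.hapi.model import set_device, Model, Input
# old: from hapi.metrics import Accuracy
from paddle.incubate.hapi.metrics import Accuracy
# old: from hapi.loss import CrossEntropy
from paddle.incubate.hapi.loss import CrossEntropy

# Illustrative usage, mirroring the examples' pattern
# (they pass "gpu" or "cpu", e.g. "gpu" if args.use_cuda else "cpu").
device = set_device("cpu")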
Changed files (41):

examples/bert/bert_classifier.py                            +9   -8
examples/bert_leveldb/bert_classifier.py                    +9   -8
examples/bmn/bmn_metric.py                                  +1   -1
examples/bmn/eval.py                                        +1   -1
examples/bmn/modeling.py                                    +3   -3
examples/bmn/predict.py                                     +1   -1
examples/bmn/reader.py                                      +1   -1
examples/bmn/train.py                                       +1   -1
examples/cyclegan/cyclegan.py                               +2   -2
examples/cyclegan/infer.py                                  +1   -1
examples/cyclegan/test.py                                   +1   -1
examples/cyclegan/train.py                                  +7   -7
examples/handwritten_number_recognition/mnist.py            +5   -5
examples/image_classification/imagenet_dataset.py           +2   -2
examples/image_classification/main.py                       +5   -5
examples/ocr/eval.py                                        +2   -2
examples/ocr/predict.py                                     +3   -3
examples/ocr/seq2seq_attn.py                                +3   -3
examples/ocr/train.py                                       +2   -2
examples/ocr/utility.py                                     +2   -2
examples/sentiment_classification/models.py                 +39  -27
examples/sentiment_classification/sentiment_classifier.py   +35  -37
examples/seq2seq/predict.py                                 +1   -1
examples/seq2seq/seq2seq_attn.py                            +2   -2
examples/seq2seq/seq2seq_base.py                            +2   -2
examples/seq2seq/train.py                                   +1   -1
examples/seq2seq/utility.py                                 +2   -2
examples/sequence_tagging/eval.py                           +7   -6
examples/sequence_tagging/predict.py                        +5   -5
examples/sequence_tagging/train.py                          +5   -5
examples/style-transfer/README.md                           +3   -3
examples/style-transfer/style_transfer.py                   +3   -3
examples/transformer/predict.py                             +1   -1
examples/transformer/train.py                               +2   -2
examples/transformer/transformer.py                         +2   -2
examples/tsm/infer.py                                       +20  -18
examples/tsm/main.py                                        +4   -4
examples/tsm/modeling.py                                    +2   -2
examples/yolov3/infer.py                                    +33  -15
examples/yolov3/main.py                                     +3   -3
examples/yolov3/modeling.py                                 +1   -1
examples/bert/bert_classifier.py

@@ -14,14 +14,14 @@
 """BERT fine-tuning in Paddle Dygraph Mode."""
 import paddle.fluid as fluid
-from hapi.metrics import Accuracy
+from paddle.incubate.hapi.metrics import Accuracy
-from hapi.configure import Config
+from paddle.incubate.hapi.configure import Config
-from hapi.text.bert import BertEncoder
+from paddle.incubate.hapi.text.bert import BertEncoder
 from paddle.fluid.dygraph import Linear, Layer
-from hapi.loss import SoftmaxWithCrossEntropy
+from paddle.incubate.hapi.loss import SoftmaxWithCrossEntropy
-from hapi.model import set_device, Model, Input
+from paddle.incubate.hapi.model import set_device, Model, Input
-import hapi.text.tokenizer.tokenization as tokenization
+import paddle.incubate.hapi.text.tokenizer.tokenization as tokenization
-from hapi.text.bert import BertConfig, BertDataLoader, BertInputExample, make_optimizer
+from paddle.incubate.hapi.text.bert import BertConfig, BertDataLoader, BertInputExample, make_optimizer
 class ClsModelLayer(Model):

@@ -157,7 +157,8 @@ def main():
         labels,
         device=device)
     cls_model.bert_layer.load("./bert_uncased_L-12_H-768_A-12/bert", reset_optimizer=True)
     # do train
     cls_model.fit(train_data=train_dataloader.dataloader,

examples/bert_leveldb/bert_classifier.py

@@ -14,14 +14,14 @@
 """BERT fine-tuning in Paddle Dygraph Mode."""
 import paddle.fluid as fluid
-from hapi.metrics import Accuracy
+from paddle.incubate.hapi.metrics import Accuracy
-from hapi.configure import Config
+from paddle.incubate.hapi.configure import Config
-from hapi.text.bert import BertEncoder
+from paddle.incubate.hapi.text.bert import BertEncoder
 from paddle.fluid.dygraph import Linear, Layer
-from hapi.loss import SoftmaxWithCrossEntropy
+from paddle.incubate.hapi.loss import SoftmaxWithCrossEntropy
-from hapi.model import set_device, Model, Input
+from paddle.incubate.hapi.model import set_device, Model, Input
-import hapi.text.tokenizer.tokenization as tokenization
+import paddle.incubate.hapi.text.tokenizer.tokenization as tokenization
-from hapi.text.bert import BertConfig, BertDataLoader, BertInputExample, make_optimizer
+from paddle.incubate.hapi.text.bert import BertConfig, BertDataLoader, BertInputExample, make_optimizer
 class ClsModelLayer(Model):

@@ -159,7 +159,8 @@ def main():
         labels,
         device=device)
     cls_model.bert_layer.load("./bert_uncased_L-12_H-768_A-12/bert", reset_optimizer=True)
     # do train
     cls_model.fit(train_data=train_dataloader.dataloader,

examples/bmn/bmn_metric.py

@@ -20,7 +20,7 @@ import json
 sys.path.append('../')
-from hapi.metrics import Metric
+from paddle.incubate.hapi.metrics import Metric
 from bmn_utils import boundary_choose, bmn_post_processing

examples/bmn/eval.py

@@ -18,7 +18,7 @@ import sys
 import logging
 import paddle.fluid as fluid
-from hapi.model import set_device, Input
+from paddle.incubate.hapi.model import set_device, Input
 from modeling import bmn, BmnLoss
 from bmn_metric import BmnMetric

examples/bmn/modeling.py

@@ -17,9 +17,9 @@ from paddle.fluid import ParamAttr
 import numpy as np
 import math
-from hapi.model import Model
+from paddle.incubate.hapi.model import Model
-from hapi.loss import Loss
+from paddle.incubate.hapi.loss import Loss
-from hapi.download import get_weights_path_from_url
+from paddle.incubate.hapi.download import get_weights_path_from_url
 __all__ = ["BMN", "BmnLoss", "bmn"]

examples/bmn/predict.py

@@ -18,7 +18,7 @@ import os
 import logging
 import paddle.fluid as fluid
-from hapi.model import set_device, Input
+from paddle.incubate.hapi.model import set_device, Input
 from modeling import bmn, BmnLoss
 from bmn_metric import BmnMetric

examples/bmn/reader.py

@@ -21,7 +21,7 @@ import sys
 sys.path.append('../')
-from hapi.distributed import DistributedBatchSampler
+from paddle.incubate.hapi.distributed import DistributedBatchSampler
 from paddle.io import Dataset, DataLoader
 logger = logging.getLogger(__name__)

examples/bmn/train.py

@@ -18,7 +18,7 @@ import logging
 import sys
 import os
-from hapi.model import set_device, Input
+from paddle.incubate.hapi.model import set_device, Input
 from reader import BmnDataset
 from config_utils import *

examples/cyclegan/cyclegan.py

@@ -19,8 +19,8 @@ from __future__ import print_function
 import numpy as np
 import paddle.fluid as fluid
-from hapi.model import Model
+from paddle.incubate.hapi.model import Model
-from hapi.loss import Loss
+from paddle.incubate.hapi.loss import Loss
 from layers import ConvBN, DeConvBN

examples/cyclegan/infer.py

@@ -25,7 +25,7 @@ from PIL import Image
 from scipy.misc import imsave
 import paddle.fluid as fluid
-from hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.model import Model, Input, set_device
 from check import check_gpu, check_version
 from cyclegan import Generator, GeneratorCombine

examples/cyclegan/test.py

@@ -22,7 +22,7 @@ import numpy as np
 from scipy.misc import imsave
 import paddle.fluid as fluid
-from hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.model import Model, Input, set_device
 from check import check_gpu, check_version
 from cyclegan import Generator, GeneratorCombine
examples/cyclegan/train.py

@@ -24,7 +24,7 @@ import time
 import paddle
 import paddle.fluid as fluid
-from hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.model import Model, Input, set_device
 from check import check_gpu, check_version
 from cyclegan import Generator, Discriminator, GeneratorCombine, GLoss, DLoss

@@ -78,12 +78,12 @@ def main():
     g_AB.prepare(inputs=[input_A], device=FLAGS.device)
     g_BA.prepare(inputs=[input_B], device=FLAGS.device)
-    g.prepare(g_optimizer, GLoss(), inputs=[input_A, input_B],
-              device=FLAGS.device)
+    g.prepare(
+        g_optimizer, GLoss(), inputs=[input_A, input_B], device=FLAGS.device)
-    d_A.prepare(da_optimizer, DLoss(), inputs=[input_B, fake_B],
-                device=FLAGS.device)
+    d_A.prepare(
+        da_optimizer, DLoss(), inputs=[input_B, fake_B], device=FLAGS.device)
-    d_B.prepare(db_optimizer, DLoss(), inputs=[input_A, fake_A],
-                device=FLAGS.device)
+    d_B.prepare(
+        db_optimizer, DLoss(), inputs=[input_A, fake_A], device=FLAGS.device)
     if FLAGS.resume:
         g.load(FLAGS.resume)
examples/handwritten_number_recognition/mnist.py

@@ -19,12 +19,12 @@ import argparse
 from paddle import fluid
 from paddle.fluid.optimizer import Momentum
-from hapi.datasets.mnist import MNIST as MnistDataset
+from paddle.incubate.hapi.datasets.mnist import MNIST as MnistDataset
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
-from hapi.loss import CrossEntropy
+from paddle.incubate.hapi.loss import CrossEntropy
-from hapi.metrics import Accuracy
+from paddle.incubate.hapi.metrics import Accuracy
-from hapi.vision.models import LeNet
+from paddle.incubate.hapi.vision.models import LeNet
 def main():

examples/image_classification/imagenet_dataset.py

@@ -18,8 +18,8 @@ import math
 import random
 import numpy as np
-from hapi.datasets import DatasetFolder
+from paddle.incubate.hapi.datasets import DatasetFolder
-from hapi.vision.transforms import transforms
+from paddle.incubate.hapi.vision.transforms import transforms
 from paddle import fluid

examples/image_classification/main.py

@@ -27,11 +27,11 @@ import paddle.fluid as fluid
 from paddle.fluid.dygraph.parallel import ParallelEnv
 from paddle.io import BatchSampler, DataLoader
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
-from hapi.loss import CrossEntropy
+from paddle.incubate.hapi.loss import CrossEntropy
-from hapi.distributed import DistributedBatchSampler
+from paddle.incubate.hapi.distributed import DistributedBatchSampler
-from hapi.metrics import Accuracy
+from paddle.incubate.hapi.metrics import Accuracy
-import hapi.vision.models as models
+import paddle.incubate.hapi.vision.models as models
 from imagenet_dataset import ImageNetDataset

examples/ocr/eval.py

@@ -19,8 +19,8 @@ import functools
 import paddle.fluid.profiler as profiler
 import paddle.fluid as fluid
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
-from hapi.vision.transforms import BatchCompose
+from paddle.incubate.hapi.vision.transforms import BatchCompose
 from utility import add_arguments, print_arguments
 from utility import SeqAccuracy, LoggerCallBack, SeqBeamAccuracy

examples/ocr/predict.py

@@ -25,9 +25,9 @@ from PIL import Image
 import paddle.fluid.profiler as profiler
 import paddle.fluid as fluid
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
-from hapi.datasets.folder import ImageFolder
+from paddle.incubate.hapi.datasets.folder import ImageFolder
-from hapi.vision.transforms import BatchCompose
+from paddle.incubate.hapi.vision.transforms import BatchCompose
 from utility import add_arguments, print_arguments
 from utility import postprocess, index2word

examples/ocr/seq2seq_attn.py

@@ -19,9 +19,9 @@ import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 from paddle.fluid.layers import BeamSearchDecoder
-from hapi.text import RNNCell, RNN, DynamicDecode
+from paddle.incubate.hapi.text import RNNCell, RNN, DynamicDecode
-from hapi.model import Model
+from paddle.incubate.hapi.model import Model
-from hapi.loss import Loss
+from paddle.incubate.hapi.loss import Loss
 class ConvBNPool(fluid.dygraph.Layer):

examples/ocr/train.py

@@ -24,8 +24,8 @@ import functools
 import paddle.fluid.profiler as profiler
 import paddle.fluid as fluid
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
-from hapi.vision.transforms import BatchCompose
+from paddle.incubate.hapi.vision.transforms import BatchCompose
 from utility import add_arguments, print_arguments
 from utility import SeqAccuracy, LoggerCallBack

examples/ocr/utility.py

@@ -21,8 +21,8 @@ import numpy as np
 import paddle.fluid as fluid
 import six
-from hapi.metrics import Metric
+from paddle.incubate.hapi.metrics import Metric
-from hapi.callbacks import ProgBarLogger
+from paddle.incubate.hapi.callbacks import ProgBarLogger
 def print_arguments(args):
examples/sentiment_classification/models.py

@@ -15,9 +15,9 @@ import paddle.fluid as fluid
 from paddle.fluid.dygraph.nn import Linear, Embedding
 from paddle.fluid.dygraph.base import to_variable
 import numpy as np
-from hapi.model import Model
+from paddle.incubate.hapi.model import Model
-from hapi.text.text import GRUEncoderLayer as BiGRUEncoder
+from paddle.incubate.hapi.text.text import GRUEncoderLayer as BiGRUEncoder
-from hapi.text.test import BOWEncoder, CNNEncoder, GRUEncoder
+from paddle.incubate.hapi.text.test import BOWEncoder, CNNEncoder, GRUEncoder
 class CNN(Model):

@@ -36,14 +36,18 @@ class CNN(Model):
             dict_size=self.dict_dim + 1,
             emb_dim=self.emb_dim,
             seq_len=self.seq_len,
             filter_size=self.win_size,
             num_filters=self.hid_dim,
             hidden_dim=self.hid_dim,
             padding_idx=None,
             act='tanh')
-        self._fc1 = Linear(input_dim=self.hid_dim * self.seq_len, output_dim=self.fc_hid_dim, act="softmax")
-        self._fc_prediction = Linear(input_dim=self.fc_hid_dim,
-                                     output_dim=self.class_dim,
-                                     act="softmax")
+        self._fc1 = Linear(
+            input_dim=self.hid_dim * self.seq_len,
+            output_dim=self.fc_hid_dim,
+            act="softmax")
+        self._fc_prediction = Linear(
+            input_dim=self.fc_hid_dim,
+            output_dim=self.class_dim,
+            act="softmax")
     def forward(self, inputs):

@@ -69,10 +73,13 @@ class BOW(Model):
             padding_idx=None,
             bow_dim=self.hid_dim,
             seq_len=self.seq_len)
-        self._fc1 = Linear(input_dim=self.hid_dim, output_dim=self.hid_dim, act="tanh")
-        self._fc2 = Linear(input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh")
-        self._fc_prediction = Linear(input_dim=self.fc_hid_dim,
-                                     output_dim=self.class_dim,
-                                     act="softmax")
+        self._fc1 = Linear(
+            input_dim=self.hid_dim, output_dim=self.hid_dim, act="tanh")
+        self._fc2 = Linear(
+            input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh")
+        self._fc_prediction = Linear(
+            input_dim=self.fc_hid_dim,
+            output_dim=self.class_dim,
+            act="softmax")
     def forward(self, inputs):

@@ -94,8 +101,10 @@ class GRU(Model):
         self.class_dim = 2
         self.batch_size = batch_size
         self.seq_len = seq_len
-        self._fc1 = Linear(input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh")
-        self._fc_prediction = Linear(input_dim=self.fc_hid_dim,
-                                     output_dim=self.class_dim,
-                                     act="softmax")
+        self._fc1 = Linear(
+            input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh")
+        self._fc_prediction = Linear(
+            input_dim=self.fc_hid_dim,
+            output_dim=self.class_dim,
+            act="softmax")
         self._encoder = GRUEncoder(

@@ -130,9 +139,11 @@ class BiGRU(Model):
             is_sparse=False)
         h_0 = np.zeros((self.batch_size, self.hid_dim), dtype="float32")
         h_0 = to_variable(h_0)
         self._fc1 = Linear(input_dim=self.hid_dim, output_dim=self.hid_dim * 3)
-        self._fc2 = Linear(input_dim=self.hid_dim * 2, output_dim=self.fc_hid_dim, act="tanh")
-        self._fc_prediction = Linear(input_dim=self.fc_hid_dim,
-                                     output_dim=self.class_dim,
-                                     act="softmax")
+        self._fc2 = Linear(
+            input_dim=self.hid_dim * 2, output_dim=self.fc_hid_dim, act="tanh")
+        self._fc_prediction = Linear(
+            input_dim=self.fc_hid_dim,
+            output_dim=self.class_dim,
+            act="softmax")
         self._encoder = BiGRUEncoder(

@@ -144,7 +155,8 @@ class BiGRU(Model):
     def forward(self, inputs):
         emb = self.embedding(inputs)
         emb = fluid.layers.reshape(emb, shape=[self.batch_size, -1, self.hid_dim])
         fc_1 = self._fc1(emb)
         encoded_vector = self._encoder(fc_1)
         encoded_vector = fluid.layers.tanh(encoded_vector)
examples/sentiment_classification/sentiment_classifier.py

@@ -13,14 +13,13 @@
 # limitations under the License.
 """Sentiment Classification in Paddle Dygraph Mode. """
 from __future__ import print_function
 import numpy as np
 import paddle.fluid as fluid
-from hapi.model import set_device, Model, CrossEntropy, Input
+from paddle.incubate.hapi.model import set_device, Model, CrossEntropy, Input
-from hapi.configure import Config
+from paddle.incubate.hapi.configure import Config
-from hapi.text.senta import SentaProcessor
+from paddle.incubate.hapi.text.senta import SentaProcessor
-from hapi.metrics import Accuracy
+from paddle.incubate.hapi.metrics import Accuracy
 from models import CNN, BOW, GRU, BiGRU
 import json
 import os

@@ -32,12 +31,14 @@ args.Print()
 device = set_device("gpu" if args.use_cuda else "cpu")
 dev_count = fluid.core.get_cuda_device_count() if args.use_cuda else 1
 def main():
     if args.do_train:
         train()
     elif args.do_infer:
         infer()
 def train():
     fluid.enable_dygraph(device)
     processor = SentaProcessor(

@@ -66,19 +67,16 @@ def train():
         epoch=args.epoch,
         shuffle=False)
     if args.model_type == 'cnn_net':
         model = CNN(args.vocab_size, args.batch_size,
                     args.padding_size)
     elif args.model_type == 'bow_net':
         model = BOW(args.vocab_size, args.batch_size,
                     args.padding_size)
     elif args.model_type == 'gru_net':
         model = GRU(args.vocab_size, args.batch_size,
                     args.padding_size)
     elif args.model_type == 'bigru_net':
         model = BiGRU(args.vocab_size, args.batch_size,
                       args.padding_size)
     optimizer = fluid.optimizer.Adagrad(learning_rate=args.lr, parameter_list=model.parameters())
     inputs = [Input([None, None], 'int64', name='doc')]
     labels = [Input([None, 1], 'int64', name='label')]

@@ -86,7 +84,7 @@ def train():
     model.prepare(
         optimizer,
         CrossEntropy(),
-        Accuracy(topk=(1,)),
+        Accuracy(topk=(1, )),
         inputs,
         labels,
         device=device)

@@ -99,6 +97,7 @@ def train():
         eval_freq=args.eval_freq,
         save_freq=args.save_freq)
 def infer():
     fluid.enable_dygraph(device)
     processor = SentaProcessor(

@@ -114,26 +113,19 @@ def infer():
         epoch=1,
         shuffle=False)
     if args.model_type == 'cnn_net':
         model_infer = CNN(args.vocab_size, args.batch_size,
                           args.padding_size)
     elif args.model_type == 'bow_net':
         model_infer = BOW(args.vocab_size, args.batch_size,
                           args.padding_size)
     elif args.model_type == 'gru_net':
         model_infer = GRU(args.vocab_size, args.batch_size,
                           args.padding_size)
     elif args.model_type == 'bigru_net':
         model_infer = BiGRU(args.vocab_size, args.batch_size,
                             args.padding_size)
     print('Do inferring ...... ')
     inputs = [Input([None, None], 'int64', name='doc')]
     model_infer.prepare(
         None,
-        CrossEntropy(), Accuracy(topk=(1, )), inputs, device=device)
+        CrossEntropy(), Accuracy(topk=(1,)), inputs, device=device)
     model_infer.load(args.checkpoints, reset_optimizer=True)
     preds = model_infer.predict(test_data=infer_data_generator)
     preds = np.array(preds[0]).reshape((-1, 2))

@@ -143,9 +135,15 @@ def infer():
     for p in range(len(preds)):
         label = np.argmax(preds[p])
-        result = json.dumps({'index': p, 'label': label, 'probs': preds[p].tolist()})
+        result = json.dumps({
+            'index': p,
+            'label': label,
+            'probs': preds[p].tolist()
+        })
         w.write(result + '\n')
     print('Predictions saved at ' + os.path.join(args.output_dir, 'predictions.json'))
 if __name__ == '__main__':
     main()
examples/seq2seq/predict.py

@@ -23,7 +23,7 @@ import paddle.fluid as fluid
 from paddle.fluid.layers.utils import flatten
 from paddle.fluid.io import DataLoader
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
 from args import parse_args
 from seq2seq_base import BaseInferModel
 from seq2seq_attn import AttentionInferModel

examples/seq2seq/seq2seq_attn.py

@@ -19,8 +19,8 @@ from paddle.fluid.initializer import UniformInitializer
 from paddle.fluid.dygraph import Embedding, Linear, Layer
 from paddle.fluid.layers import BeamSearchDecoder
-from hapi.model import Model, Loss
+from paddle.incubate.hapi.model import Model, Loss
-from hapi.text import DynamicDecode, RNN, BasicLSTMCell, RNNCell
+from paddle.incubate.hapi.text import DynamicDecode, RNN, BasicLSTMCell, RNNCell
 from seq2seq_base import Encoder

examples/seq2seq/seq2seq_base.py

@@ -19,8 +19,8 @@ from paddle.fluid.initializer import UniformInitializer
 from paddle.fluid.dygraph import Embedding, Linear, Layer
 from paddle.fluid.layers import BeamSearchDecoder
-from hapi.model import Model, Loss
+from paddle.incubate.hapi.model import Model, Loss
-from hapi.text import DynamicDecode, RNN, BasicLSTMCell, RNNCell
+from paddle.incubate.hapi.text import DynamicDecode, RNN, BasicLSTMCell, RNNCell
 class CrossEntropyCriterion(Loss):

examples/seq2seq/train.py

@@ -21,7 +21,7 @@ import numpy as np
 import paddle.fluid as fluid
 from paddle.fluid.io import DataLoader
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
 from args import parse_args
 from seq2seq_base import BaseModel, CrossEntropyCriterion
 from seq2seq_attn import AttentionModel

examples/seq2seq/utility.py

@@ -16,8 +16,8 @@ import math
 import paddle.fluid as fluid
-from hapi.metrics import Metric
+from paddle.incubate.hapi.metrics import Metric
-from hapi.callbacks import ProgBarLogger
+from paddle.incubate.hapi.callbacks import ProgBarLogger
 class TrainCallback(ProgBarLogger):

examples/sequence_tagging/eval.py

@@ -28,11 +28,11 @@ import numpy as np
 work_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 sys.path.append(os.path.join(work_dir, "../"))
-from hapi.model import set_device, Input
+from paddle.incubate.hapi.model import set_device, Input
-from hapi.text.sequence_tagging import SeqTagging, ChunkEval, LacLoss
+from paddle.incubate.hapi.text.sequence_tagging import SeqTagging, ChunkEval, LacLoss
-from hapi.text.sequence_tagging import LacDataset, LacDataLoader
+from paddle.incubate.hapi.text.sequence_tagging import LacDataset, LacDataLoader
-from hapi.text.sequence_tagging import check_gpu, check_version
+from paddle.incubate.hapi.text.sequence_tagging import check_gpu, check_version
-from hapi.text.sequence_tagging import PDConfig
+from paddle.incubate.hapi.text.sequence_tagging import PDConfig
 import paddle.fluid as fluid
 from paddle.fluid.layers.utils import flatten

@@ -65,7 +65,8 @@ def main(args):
         device=place)
     model.load(args.init_from_checkpoint, skip_mismatch=True)
     eval_result = model.evaluate(eval_dataset.dataloader, batch_size=args.batch_size)
     print("precison: %.5f" % (eval_result["precision"][0]))
     print("recall: %.5f" % (eval_result["recall"][0]))
     print("F1: %.5f" % (eval_result["F1"][0]))

examples/sequence_tagging/predict.py

@@ -29,11 +29,11 @@ import numpy as np
 work_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 sys.path.append(os.path.join(work_dir, "../"))
-from hapi.text.sequence_tagging import SeqTagging
+from paddle.incubate.hapi.text.sequence_tagging import SeqTagging
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
-from hapi.text.sequence_tagging import LacDataset, LacDataLoader
+from paddle.incubate.hapi.text.sequence_tagging import LacDataset, LacDataLoader
-from hapi.text.sequence_tagging import check_gpu, check_version
+from paddle.incubate.hapi.text.sequence_tagging import check_gpu, check_version
-from hapi.text.sequence_tagging import PDConfig
+from paddle.incubate.hapi.text.sequence_tagging import PDConfig
 import paddle.fluid as fluid
 from paddle.fluid.layers.utils import flatten

examples/sequence_tagging/train.py

@@ -28,11 +28,11 @@ import numpy as np
 work_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 sys.path.append(os.path.join(work_dir, "../"))
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
-from hapi.text.sequence_tagging import SeqTagging, LacLoss, ChunkEval
+from paddle.incubate.hapi.text.sequence_tagging import SeqTagging, LacLoss, ChunkEval
-from hapi.text.sequence_tagging import LacDataset, LacDataLoader
+from paddle.incubate.hapi.text.sequence_tagging import LacDataset, LacDataLoader
-from hapi.text.sequence_tagging import check_gpu, check_version
+from paddle.incubate.hapi.text.sequence_tagging import check_gpu, check_version
-from hapi.text.sequence_tagging import PDConfig
+from paddle.incubate.hapi.text.sequence_tagging import PDConfig
 import paddle.fluid as fluid
 from paddle.fluid.optimizer import AdamOptimizer

examples/style-transfer/README.md

@@ -32,10 +32,10 @@ gram_matrix = fluid.layers.matmul(tensor, fluid.layers.transpose(tensor, [1, 0])
 import numpy as np
 import matplotlib.pyplot as plt
-from hapi.model import Model, Loss
+from paddle.incubate.hapi.model import Model, Loss
-from hapi.vision.models import vgg16
+from paddle.incubate.hapi.vision.models import vgg16
-from hapi.vision.transforms import transforms
+from paddle.incubate.hapi.vision.transforms import transforms
 from paddle import fluid
 from paddle.fluid.io import Dataset

examples/style-transfer/style_transfer.py

@@ -3,10 +3,10 @@ import argparse
 import numpy as np
 import matplotlib.pyplot as plt
-from hapi.model import Model, Loss
+from paddle.incubate.hapi.model import Model, Loss
-from hapi.vision.models import vgg16
+from paddle.incubate.hapi.vision.models import vgg16
-from hapi.vision.transforms import transforms
+from paddle.incubate.hapi.vision.transforms import transforms
 from paddle import fluid
 from paddle.fluid.io import Dataset

examples/transformer/predict.py

@@ -25,7 +25,7 @@ from paddle.fluid.layers.utils import flatten
 from utils.configure import PDConfig
 from utils.check import check_gpu, check_version
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
 from reader import prepare_infer_input, Seq2SeqDataset, Seq2SeqBatchSampler
 from transformer import InferTransformer

examples/transformer/train.py

@@ -23,8 +23,8 @@ from paddle.io import DataLoader
 from utils.configure import PDConfig
 from utils.check import check_gpu, check_version
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
-from hapi.callbacks import ProgBarLogger
+from paddle.incubate.hapi.callbacks import ProgBarLogger
 from reader import create_data_loader
 from transformer import Transformer, CrossEntropyCriterion

examples/transformer/transformer.py

@@ -20,8 +20,8 @@ import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, Layer, to_variable
 from paddle.fluid.dygraph.learning_rate_scheduler import LearningRateDecay
-from hapi.model import Model, CrossEntropy, Loss
+from paddle.incubate.hapi.model import Model, CrossEntropy, Loss
-from hapi.text import TransformerBeamSearchDecoder, DynamicDecode
+from paddle.incubate.hapi.text import TransformerBeamSearchDecoder, DynamicDecode
 def position_encoding_init(n_position, d_pos_vec):
examples/tsm/infer.py

@@ -19,8 +19,8 @@ import os
 import argparse
 import numpy as np
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
-from hapi.vision.transforms import Compose
+from paddle.incubate.hapi.vision.transforms import Compose
 from check import check_gpu, check_version
 from modeling import tsm_resnet50

@@ -36,9 +36,7 @@ def main():
     device = set_device(FLAGS.device)
     fluid.enable_dygraph(device) if FLAGS.dynamic else None
-    transform = Compose([GroupScale(),
-                         GroupCenterCrop(),
-                         NormalizeImage()])
+    transform = Compose([GroupScale(),
+                         GroupCenterCrop(), NormalizeImage()])
     dataset = KineticsDataset(
         pickle_file=FLAGS.infer_file,
         label_list=FLAGS.label_list,

@@ -46,8 +44,8 @@ def main():
         transform=transform)
     labels = dataset.label_list
-    model = tsm_resnet50(num_classes=len(labels),
-                         pretrained=FLAGS.weights is None)
+    model = tsm_resnet50(
+        num_classes=len(labels), pretrained=FLAGS.weights is None)
     inputs = [Input([None, 8, 3, 224, 224], 'float32', name='image')]

@@ -66,19 +64,23 @@
 if __name__ == '__main__':
     parser = argparse.ArgumentParser("CNN training on TSM")
     parser.add_argument(
         "--data", type=str, default='dataset/kinetics',
         help="path to dataset root directory")
     parser.add_argument(
         "--device", type=str, default='gpu',
         help="device to use, gpu or cpu")
     parser.add_argument(
         "-d", "--dynamic", action='store_true',
         help="enable dygraph mode")
     parser.add_argument(
         "--label_list", type=str, default=None,
         help="path to category index label list file")
     parser.add_argument(
         "--infer_file", type=str, default=None,
         help="path to pickle file for inference")
     parser.add_argument(
         "-w",
examples/tsm/main.py

@@ -22,10 +22,10 @@ import numpy as np
 from paddle import fluid
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.model import Model, Input, set_device
-from hapi.loss import CrossEntropy
+from paddle.incubate.hapi.loss import CrossEntropy
-from hapi.metrics import Accuracy
+from paddle.incubate.hapi.metrics import Accuracy
-from hapi.vision.transforms import Compose
+from paddle.incubate.hapi.vision.transforms import Compose
 from modeling import tsm_resnet50
 from check import check_gpu, check_version

examples/tsm/modeling.py

@@ -17,8 +17,8 @@ import paddle.fluid as fluid
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
-from hapi.model import Model
+from paddle.incubate.hapi.model import Model
-from hapi.download import get_weights_path_from_url
+from paddle.incubate.hapi.download import get_weights_path_from_url
 __all__ = ["TSM_ResNet", "tsm_resnet50"]
examples/yolov3/infer.py

@@ -24,7 +24,7 @@ from paddle import fluid
 from paddle.fluid.optimizer import Momentum
 from paddle.io import DataLoader
-from hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.model import Model, Input, set_device
 from modeling import yolov3_darknet53, YoloLoss
 from transforms import *

@@ -65,13 +65,17 @@ def main():
     device = set_device(FLAGS.device)
     fluid.enable_dygraph(device) if FLAGS.dynamic else None
-    inputs = [Input([None, 1], 'int64', name='img_id'),
-              Input([None, 2], 'int32', name='img_shape'),
-              Input([None, 3, None, None], 'float32', name='image')]
+    inputs = [
+        Input(
+            [None, 1], 'int64', name='img_id'), Input(
+            [None, 2], 'int32', name='img_shape'), Input(
+            [None, 3, None, None], 'float32', name='image')
+    ]
     cat2name = load_labels(FLAGS.label_list, with_background=False)
     model = yolov3_darknet53(num_classes=len(cat2name),
                              model_mode='test',
                              pretrained=FLAGS.weights is None)

@@ -106,19 +110,33 @@ if __name__ == '__main__':
     parser.add_argument(
         "-d", "--dynamic", action='store_true', help="enable dygraph mode")
     parser.add_argument(
         "--label_list", type=str, default=None,
         help="path to category label list file")
     parser.add_argument(
         "-t", "--draw_threshold", type=float, default=0.5,
         help="threshold to reserve the result for visualization")
     parser.add_argument(
         "-i", "--infer_image", type=str, default=None,
         help="image path for inference")
     parser.add_argument(
         "-o", "--output_dir", type=str, default='output',
         help="directory to save inference result if --visualize is set")
     parser.add_argument(
         "-w", "--weights", default=None, type=str,
         help="path to weights for inference")
     FLAGS = parser.parse_args()
     print_arguments(FLAGS)
examples/yolov3/main.py

@@ -25,9 +25,9 @@ from paddle import fluid
 from paddle.fluid.optimizer import Momentum
 from paddle.io import DataLoader
-from hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.model import Model, Input, set_device
-from hapi.distributed import DistributedBatchSampler
+from paddle.incubate.hapi.distributed import DistributedBatchSampler
-from hapi.vision.transforms import Compose, BatchCompose
+from paddle.incubate.hapi.vision.transforms import Compose, BatchCompose
 from modeling import yolov3_darknet53, YoloLoss
 from coco import COCODataset

examples/yolov3/modeling.py

@@ -23,7 +23,7 @@ from paddle.fluid.regularizer import L2Decay
 from hapi.model import Model
 from hapi.loss import Loss
 from hapi.download import get_weights_path_from_url
-from hapi.vision.models import darknet53
+from darknet import darknet53
 __all__ = ['YoloLoss', 'YOLOv3', 'yolov3_darknet53']