Unverified · Commit b337063c authored by LielinJiang, committed by GitHub

Merge pull request #77 from LielinJiang/import-from-paddle

adapt import
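
This commit mechanically relocates the high-level API: every `hapi.*` import in the examples is rewritten to the package's new home under `paddle.incubate.hapi.*`, and the touched files are reformatted in the repository's code style. A minimal sketch of the rename, using modules that actually appear in the diff below (the standalone top-level `hapi` package is assumed to be gone after this change):

# Before: hapi as a standalone top-level package.
from hapi.model import set_device, Model, Input
from hapi.metrics import Accuracy

# After: identical modules, now nested under paddle.incubate.
from paddle.incubate.hapi.model import set_device, Model, Input
from paddle.incubate.hapi.metrics import Accuracy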
@@ -14,14 +14,14 @@
 """BERT fine-tuning in Paddle Dygraph Mode."""
 import paddle.fluid as fluid
-from hapi.metrics import Accuracy
-from hapi.configure import Config
-from hapi.text.bert import BertEncoder
+from paddle.incubate.hapi.metrics import Accuracy
+from paddle.incubate.hapi.configure import Config
+from paddle.incubate.hapi.text.bert import BertEncoder
 from paddle.fluid.dygraph import Linear, Layer
-from hapi.loss import SoftmaxWithCrossEntropy
-from hapi.model import set_device, Model, Input
-import hapi.text.tokenizer.tokenization as tokenization
-from hapi.text.bert import BertConfig, BertDataLoader, BertInputExample, make_optimizer
+from paddle.incubate.hapi.loss import SoftmaxWithCrossEntropy
+from paddle.incubate.hapi.model import set_device, Model, Input
+import paddle.incubate.hapi.text.tokenizer.tokenization as tokenization
+from paddle.incubate.hapi.text.bert import BertConfig, BertDataLoader, BertInputExample, make_optimizer
 class ClsModelLayer(Model):
@@ -157,7 +157,8 @@ def main():
         labels,
         device=device)
-    cls_model.bert_layer.load("./bert_uncased_L-12_H-768_A-12/bert", reset_optimizer=True)
+    cls_model.bert_layer.load(
+        "./bert_uncased_L-12_H-768_A-12/bert", reset_optimizer=True)
     # do train
     cls_model.fit(train_data=train_dataloader.dataloader,
...
@@ -14,14 +14,14 @@
 """BERT fine-tuning in Paddle Dygraph Mode."""
 import paddle.fluid as fluid
-from hapi.metrics import Accuracy
-from hapi.configure import Config
-from hapi.text.bert import BertEncoder
+from paddle.incubate.hapi.metrics import Accuracy
+from paddle.incubate.hapi.configure import Config
+from paddle.incubate.hapi.text.bert import BertEncoder
 from paddle.fluid.dygraph import Linear, Layer
-from hapi.loss import SoftmaxWithCrossEntropy
-from hapi.model import set_device, Model, Input
-import hapi.text.tokenizer.tokenization as tokenization
-from hapi.text.bert import BertConfig, BertDataLoader, BertInputExample, make_optimizer
+from paddle.incubate.hapi.loss import SoftmaxWithCrossEntropy
+from paddle.incubate.hapi.model import set_device, Model, Input
+import paddle.incubate.hapi.text.tokenizer.tokenization as tokenization
+from paddle.incubate.hapi.text.bert import BertConfig, BertDataLoader, BertInputExample, make_optimizer
 class ClsModelLayer(Model):
@@ -159,7 +159,8 @@ def main():
         labels,
         device=device)
-    cls_model.bert_layer.load("./bert_uncased_L-12_H-768_A-12/bert", reset_optimizer=True)
+    cls_model.bert_layer.load(
+        "./bert_uncased_L-12_H-768_A-12/bert", reset_optimizer=True)
     # do train
     cls_model.fit(train_data=train_dataloader.dataloader,
...
@@ -20,7 +20,7 @@ import json
 sys.path.append('../')
-from hapi.metrics import Metric
+from paddle.incubate.hapi.metrics import Metric
 from bmn_utils import boundary_choose, bmn_post_processing
...
@@ -18,7 +18,7 @@ import sys
 import logging
 import paddle.fluid as fluid
-from hapi.model import set_device, Input
+from paddle.incubate.hapi.model import set_device, Input
 from modeling import bmn, BmnLoss
 from bmn_metric import BmnMetric
...
@@ -17,9 +17,9 @@ from paddle.fluid import ParamAttr
 import numpy as np
 import math
-from hapi.model import Model
-from hapi.loss import Loss
-from hapi.download import get_weights_path_from_url
+from paddle.incubate.hapi.model import Model
+from paddle.incubate.hapi.loss import Loss
+from paddle.incubate.hapi.download import get_weights_path_from_url
 __all__ = ["BMN", "BmnLoss", "bmn"]
...
@@ -18,7 +18,7 @@ import os
 import logging
 import paddle.fluid as fluid
-from hapi.model import set_device, Input
+from paddle.incubate.hapi.model import set_device, Input
 from modeling import bmn, BmnLoss
 from bmn_metric import BmnMetric
...
@@ -21,7 +21,7 @@ import sys
 sys.path.append('../')
-from hapi.distributed import DistributedBatchSampler
+from paddle.incubate.hapi.distributed import DistributedBatchSampler
 from paddle.io import Dataset, DataLoader
 logger = logging.getLogger(__name__)
...
@@ -18,7 +18,7 @@ import logging
 import sys
 import os
-from hapi.model import set_device, Input
+from paddle.incubate.hapi.model import set_device, Input
 from reader import BmnDataset
 from config_utils import *
...
@@ -19,8 +19,8 @@ from __future__ import print_function
 import numpy as np
 import paddle.fluid as fluid
-from hapi.model import Model
-from hapi.loss import Loss
+from paddle.incubate.hapi.model import Model
+from paddle.incubate.hapi.loss import Loss
 from layers import ConvBN, DeConvBN
...
@@ -25,7 +25,7 @@ from PIL import Image
 from scipy.misc import imsave
 import paddle.fluid as fluid
-from hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.model import Model, Input, set_device
 from check import check_gpu, check_version
 from cyclegan import Generator, GeneratorCombine
...
@@ -22,7 +22,7 @@ import numpy as np
 from scipy.misc import imsave
 import paddle.fluid as fluid
-from hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.model import Model, Input, set_device
 from check import check_gpu, check_version
 from cyclegan import Generator, GeneratorCombine
...
@@ -24,7 +24,7 @@ import time
 import paddle
 import paddle.fluid as fluid
-from hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.model import Model, Input, set_device
 from check import check_gpu, check_version
 from cyclegan import Generator, Discriminator, GeneratorCombine, GLoss, DLoss
@@ -78,12 +78,12 @@ def main():
     g_AB.prepare(inputs=[input_A], device=FLAGS.device)
     g_BA.prepare(inputs=[input_B], device=FLAGS.device)
-    g.prepare(g_optimizer, GLoss(), inputs=[input_A, input_B],
-              device=FLAGS.device)
-    d_A.prepare(da_optimizer, DLoss(), inputs=[input_B, fake_B],
-                device=FLAGS.device)
-    d_B.prepare(db_optimizer, DLoss(), inputs=[input_A, fake_A],
-                device=FLAGS.device)
+    g.prepare(
+        g_optimizer, GLoss(), inputs=[input_A, input_B], device=FLAGS.device)
+    d_A.prepare(
+        da_optimizer, DLoss(), inputs=[input_B, fake_B], device=FLAGS.device)
+    d_B.prepare(
+        db_optimizer, DLoss(), inputs=[input_A, fake_A], device=FLAGS.device)
     if FLAGS.resume:
         g.load(FLAGS.resume)
...
@@ -19,12 +19,12 @@ import argparse
 from paddle import fluid
 from paddle.fluid.optimizer import Momentum
-from hapi.datasets.mnist import MNIST as MnistDataset
-from hapi.model import Input, set_device
-from hapi.loss import CrossEntropy
-from hapi.metrics import Accuracy
-from hapi.vision.models import LeNet
+from paddle.incubate.hapi.datasets.mnist import MNIST as MnistDataset
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.loss import CrossEntropy
+from paddle.incubate.hapi.metrics import Accuracy
+from paddle.incubate.hapi.vision.models import LeNet
 def main():
...
@@ -18,8 +18,8 @@ import math
 import random
 import numpy as np
-from hapi.datasets import DatasetFolder
-from hapi.vision.transforms import transforms
+from paddle.incubate.hapi.datasets import DatasetFolder
+from paddle.incubate.hapi.vision.transforms import transforms
 from paddle import fluid
...
@@ -27,11 +27,11 @@ import paddle.fluid as fluid
 from paddle.fluid.dygraph.parallel import ParallelEnv
 from paddle.io import BatchSampler, DataLoader
-from hapi.model import Input, set_device
-from hapi.loss import CrossEntropy
-from hapi.distributed import DistributedBatchSampler
-from hapi.metrics import Accuracy
-import hapi.vision.models as models
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.loss import CrossEntropy
+from paddle.incubate.hapi.distributed import DistributedBatchSampler
+from paddle.incubate.hapi.metrics import Accuracy
+import paddle.incubate.hapi.vision.models as models
 from imagenet_dataset import ImageNetDataset
...
@@ -19,8 +19,8 @@ import functools
 import paddle.fluid.profiler as profiler
 import paddle.fluid as fluid
-from hapi.model import Input, set_device
-from hapi.vision.transforms import BatchCompose
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.vision.transforms import BatchCompose
 from utility import add_arguments, print_arguments
 from utility import SeqAccuracy, LoggerCallBack, SeqBeamAccuracy
...
@@ -25,9 +25,9 @@ from PIL import Image
 import paddle.fluid.profiler as profiler
 import paddle.fluid as fluid
-from hapi.model import Input, set_device
-from hapi.datasets.folder import ImageFolder
-from hapi.vision.transforms import BatchCompose
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.datasets.folder import ImageFolder
+from paddle.incubate.hapi.vision.transforms import BatchCompose
 from utility import add_arguments, print_arguments
 from utility import postprocess, index2word
...
@@ -19,9 +19,9 @@ import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 from paddle.fluid.layers import BeamSearchDecoder
-from hapi.text import RNNCell, RNN, DynamicDecode
-from hapi.model import Model
-from hapi.loss import Loss
+from paddle.incubate.hapi.text import RNNCell, RNN, DynamicDecode
+from paddle.incubate.hapi.model import Model
+from paddle.incubate.hapi.loss import Loss
 class ConvBNPool(fluid.dygraph.Layer):
...
@@ -24,8 +24,8 @@ import functools
 import paddle.fluid.profiler as profiler
 import paddle.fluid as fluid
-from hapi.model import Input, set_device
-from hapi.vision.transforms import BatchCompose
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.vision.transforms import BatchCompose
 from utility import add_arguments, print_arguments
 from utility import SeqAccuracy, LoggerCallBack
...
@@ -21,8 +21,8 @@ import numpy as np
 import paddle.fluid as fluid
 import six
-from hapi.metrics import Metric
-from hapi.callbacks import ProgBarLogger
+from paddle.incubate.hapi.metrics import Metric
+from paddle.incubate.hapi.callbacks import ProgBarLogger
 def print_arguments(args):
...
@@ -15,9 +15,9 @@ import paddle.fluid as fluid
 from paddle.fluid.dygraph.nn import Linear, Embedding
 from paddle.fluid.dygraph.base import to_variable
 import numpy as np
-from hapi.model import Model
-from hapi.text.text import GRUEncoderLayer as BiGRUEncoder
-from hapi.text.test import BOWEncoder, CNNEncoder, GRUEncoder
+from paddle.incubate.hapi.model import Model
+from paddle.incubate.hapi.text.text import GRUEncoderLayer as BiGRUEncoder
+from paddle.incubate.hapi.text.test import BOWEncoder, CNNEncoder, GRUEncoder
 class CNN(Model):
@@ -36,14 +36,18 @@ class CNN(Model):
             dict_size=self.dict_dim + 1,
             emb_dim=self.emb_dim,
             seq_len=self.seq_len,
-            filter_size= self.win_size,
-            num_filters= self.hid_dim,
-            hidden_dim= self.hid_dim,
+            filter_size=self.win_size,
+            num_filters=self.hid_dim,
+            hidden_dim=self.hid_dim,
             padding_idx=None,
             act='tanh')
-        self._fc1 = Linear(input_dim = self.hid_dim*self.seq_len, output_dim=self.fc_hid_dim, act="softmax")
-        self._fc_prediction = Linear(input_dim = self.fc_hid_dim,
-                               output_dim = self.class_dim,
+        self._fc1 = Linear(
+            input_dim=self.hid_dim * self.seq_len,
+            output_dim=self.fc_hid_dim,
+            act="softmax")
+        self._fc_prediction = Linear(
+            input_dim=self.fc_hid_dim,
+            output_dim=self.class_dim,
             act="softmax")
     def forward(self, inputs):
@@ -69,10 +73,13 @@ class BOW(Model):
             padding_idx=None,
             bow_dim=self.hid_dim,
             seq_len=self.seq_len)
-        self._fc1 = Linear(input_dim = self.hid_dim, output_dim=self.hid_dim, act="tanh")
-        self._fc2 = Linear(input_dim = self.hid_dim, output_dim=self.fc_hid_dim, act="tanh")
-        self._fc_prediction = Linear(input_dim = self.fc_hid_dim,
-                                output_dim = self.class_dim,
+        self._fc1 = Linear(
+            input_dim=self.hid_dim, output_dim=self.hid_dim, act="tanh")
+        self._fc2 = Linear(
+            input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh")
+        self._fc_prediction = Linear(
+            input_dim=self.fc_hid_dim,
+            output_dim=self.class_dim,
             act="softmax")
     def forward(self, inputs):
@@ -94,8 +101,10 @@ class GRU(Model):
         self.class_dim = 2
         self.batch_size = batch_size
         self.seq_len = seq_len
-        self._fc1 = Linear(input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh")
-        self._fc_prediction = Linear(input_dim=self.fc_hid_dim,
+        self._fc1 = Linear(
+            input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh")
+        self._fc_prediction = Linear(
+            input_dim=self.fc_hid_dim,
             output_dim=self.class_dim,
             act="softmax")
         self._encoder = GRUEncoder(
@@ -130,9 +139,11 @@ class BiGRU(Model):
             is_sparse=False)
         h_0 = np.zeros((self.batch_size, self.hid_dim), dtype="float32")
         h_0 = to_variable(h_0)
-        self._fc1 = Linear(input_dim = self.hid_dim, output_dim=self.hid_dim*3)
-        self._fc2 = Linear(input_dim = self.hid_dim*2, output_dim=self.fc_hid_dim, act="tanh")
-        self._fc_prediction = Linear(input_dim=self.fc_hid_dim,
+        self._fc1 = Linear(input_dim=self.hid_dim, output_dim=self.hid_dim * 3)
+        self._fc2 = Linear(
+            input_dim=self.hid_dim * 2, output_dim=self.fc_hid_dim, act="tanh")
+        self._fc_prediction = Linear(
+            input_dim=self.fc_hid_dim,
             output_dim=self.class_dim,
             act="softmax")
         self._encoder = BiGRUEncoder(
@@ -144,7 +155,8 @@ class BiGRU(Model):
     def forward(self, inputs):
         emb = self.embedding(inputs)
-        emb = fluid.layers.reshape(emb, shape=[self.batch_size, -1, self.hid_dim])
+        emb = fluid.layers.reshape(
+            emb, shape=[self.batch_size, -1, self.hid_dim])
         fc_1 = self._fc1(emb)
         encoded_vector = self._encoder(fc_1)
         encoded_vector = fluid.layers.tanh(encoded_vector)
...
@@ -13,14 +13,13 @@
 # limitations under the License.
 """Sentiment Classification in Paddle Dygraph Mode. """
 from __future__ import print_function
 import numpy as np
 import paddle.fluid as fluid
-from hapi.model import set_device, Model, CrossEntropy, Input
-from hapi.configure import Config
-from hapi.text.senta import SentaProcessor
-from hapi.metrics import Accuracy
+from paddle.incubate.hapi.model import set_device, Model, CrossEntropy, Input
+from paddle.incubate.hapi.configure import Config
+from paddle.incubate.hapi.text.senta import SentaProcessor
+from paddle.incubate.hapi.metrics import Accuracy
 from models import CNN, BOW, GRU, BiGRU
 import json
 import os
@@ -32,12 +31,14 @@ args.Print()
 device = set_device("gpu" if args.use_cuda else "cpu")
 dev_count = fluid.core.get_cuda_device_count() if args.use_cuda else 1
 def main():
     if args.do_train:
         train()
     elif args.do_infer:
         infer()
 def train():
     fluid.enable_dygraph(device)
     processor = SentaProcessor(
@@ -66,19 +67,16 @@ def train():
         epoch=args.epoch,
         shuffle=False)
     if args.model_type == 'cnn_net':
-        model = CNN( args.vocab_size, args.batch_size,
-                     args.padding_size)
+        model = CNN(args.vocab_size, args.batch_size, args.padding_size)
     elif args.model_type == 'bow_net':
-        model = BOW( args.vocab_size, args.batch_size,
-                     args.padding_size)
+        model = BOW(args.vocab_size, args.batch_size, args.padding_size)
     elif args.model_type == 'gru_net':
-        model = GRU( args.vocab_size, args.batch_size,
-                     args.padding_size)
+        model = GRU(args.vocab_size, args.batch_size, args.padding_size)
     elif args.model_type == 'bigru_net':
-        model = BiGRU( args.vocab_size, args.batch_size,
-                       args.padding_size)
-    optimizer = fluid.optimizer.Adagrad(learning_rate=args.lr, parameter_list=model.parameters())
+        model = BiGRU(args.vocab_size, args.batch_size, args.padding_size)
+    optimizer = fluid.optimizer.Adagrad(
+        learning_rate=args.lr, parameter_list=model.parameters())
     inputs = [Input([None, None], 'int64', name='doc')]
     labels = [Input([None, 1], 'int64', name='label')]
@@ -86,7 +84,7 @@ def train():
     model.prepare(
         optimizer,
         CrossEntropy(),
-        Accuracy(topk=(1,)),
+        Accuracy(topk=(1, )),
         inputs,
         labels,
         device=device)
@@ -99,6 +97,7 @@ def train():
         eval_freq=args.eval_freq,
         save_freq=args.save_freq)
 def infer():
     fluid.enable_dygraph(device)
     processor = SentaProcessor(
@@ -114,26 +113,19 @@ def infer():
         epoch=1,
         shuffle=False)
     if args.model_type == 'cnn_net':
-        model_infer = CNN( args.vocab_size, args.batch_size,
-                           args.padding_size)
+        model_infer = CNN(args.vocab_size, args.batch_size, args.padding_size)
     elif args.model_type == 'bow_net':
-        model_infer = BOW( args.vocab_size, args.batch_size,
-                           args.padding_size)
+        model_infer = BOW(args.vocab_size, args.batch_size, args.padding_size)
     elif args.model_type == 'gru_net':
-        model_infer = GRU( args.vocab_size, args.batch_size,
-                           args.padding_size)
+        model_infer = GRU(args.vocab_size, args.batch_size, args.padding_size)
     elif args.model_type == 'bigru_net':
-        model_infer = BiGRU( args.vocab_size, args.batch_size,
-                             args.padding_size)
+        model_infer = BiGRU(args.vocab_size, args.batch_size,
+                            args.padding_size)
     print('Do inferring ...... ')
     inputs = [Input([None, None], 'int64', name='doc')]
     model_infer.prepare(
-        None,
-        CrossEntropy(),
-        Accuracy(topk=(1,)),
-        inputs,
-        device=device)
+        None, CrossEntropy(), Accuracy(topk=(1, )), inputs, device=device)
     model_infer.load(args.checkpoints, reset_optimizer=True)
     preds = model_infer.predict(test_data=infer_data_generator)
     preds = np.array(preds[0]).reshape((-1, 2))
@@ -143,9 +135,15 @@ def infer():
     for p in range(len(preds)):
         label = np.argmax(preds[p])
-        result = json.dumps({'index': p, 'label': label, 'probs': preds[p].tolist()})
-        w.write(result+'\n')
-    print('Predictions saved at '+os.path.join(args.output_dir, 'predictions.json'))
+        result = json.dumps({
+            'index': p,
+            'label': label,
+            'probs': preds[p].tolist()
+        })
+        w.write(result + '\n')
+    print('Predictions saved at ' + os.path.join(args.output_dir,
+                                                 'predictions.json'))
 if __name__ == '__main__':
     main()
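
The training script above is a compact tour of the hapi high-level loop, which this commit re-imports but does not change: declare `Input` specs, `prepare` the model with an optimizer, loss, and metric, then `fit`, or `load` a checkpoint and `predict`. A condensed sketch under the new import paths (`MyModel`, `train_loader`, and `eval_loader` are placeholders, not names from the diff):

import paddle.fluid as fluid
from paddle.incubate.hapi.model import set_device, CrossEntropy, Input
from paddle.incubate.hapi.metrics import Accuracy

device = set_device("gpu")      # or "cpu"
fluid.enable_dygraph(device)

model = MyModel()               # any hapi Model subclass, e.g. CNN above
inputs = [Input([None, None], 'int64', name='doc')]
labels = [Input([None, 1], 'int64', name='label')]

optimizer = fluid.optimizer.Adagrad(
    learning_rate=0.002, parameter_list=model.parameters())
model.prepare(
    optimizer, CrossEntropy(), Accuracy(topk=(1, )), inputs, labels,
    device=device)
model.fit(train_data=train_loader, eval_data=eval_loader)  # train/eval loop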
@@ -23,7 +23,7 @@ import paddle.fluid as fluid
 from paddle.fluid.layers.utils import flatten
 from paddle.fluid.io import DataLoader
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
 from args import parse_args
 from seq2seq_base import BaseInferModel
 from seq2seq_attn import AttentionInferModel
...
@@ -19,8 +19,8 @@ from paddle.fluid.initializer import UniformInitializer
 from paddle.fluid.dygraph import Embedding, Linear, Layer
 from paddle.fluid.layers import BeamSearchDecoder
-from hapi.model import Model, Loss
-from hapi.text import DynamicDecode, RNN, BasicLSTMCell, RNNCell
+from paddle.incubate.hapi.model import Model, Loss
+from paddle.incubate.hapi.text import DynamicDecode, RNN, BasicLSTMCell, RNNCell
 from seq2seq_base import Encoder
...
@@ -19,8 +19,8 @@ from paddle.fluid.initializer import UniformInitializer
 from paddle.fluid.dygraph import Embedding, Linear, Layer
 from paddle.fluid.layers import BeamSearchDecoder
-from hapi.model import Model, Loss
-from hapi.text import DynamicDecode, RNN, BasicLSTMCell, RNNCell
+from paddle.incubate.hapi.model import Model, Loss
+from paddle.incubate.hapi.text import DynamicDecode, RNN, BasicLSTMCell, RNNCell
 class CrossEntropyCriterion(Loss):
...
@@ -21,7 +21,7 @@ import numpy as np
 import paddle.fluid as fluid
 from paddle.fluid.io import DataLoader
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
 from args import parse_args
 from seq2seq_base import BaseModel, CrossEntropyCriterion
 from seq2seq_attn import AttentionModel
...
@@ -16,8 +16,8 @@ import math
 import paddle.fluid as fluid
-from hapi.metrics import Metric
-from hapi.callbacks import ProgBarLogger
+from paddle.incubate.hapi.metrics import Metric
+from paddle.incubate.hapi.callbacks import ProgBarLogger
 class TrainCallback(ProgBarLogger):
...
@@ -28,11 +28,11 @@ import numpy as np
 work_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 sys.path.append(os.path.join(work_dir, "../"))
-from hapi.model import set_device, Input
-from hapi.text.sequence_tagging import SeqTagging, ChunkEval, LacLoss
-from hapi.text.sequence_tagging import LacDataset, LacDataLoader
-from hapi.text.sequence_tagging import check_gpu, check_version
-from hapi.text.sequence_tagging import PDConfig
+from paddle.incubate.hapi.model import set_device, Input
+from paddle.incubate.hapi.text.sequence_tagging import SeqTagging, ChunkEval, LacLoss
+from paddle.incubate.hapi.text.sequence_tagging import LacDataset, LacDataLoader
+from paddle.incubate.hapi.text.sequence_tagging import check_gpu, check_version
+from paddle.incubate.hapi.text.sequence_tagging import PDConfig
 import paddle.fluid as fluid
 from paddle.fluid.layers.utils import flatten
@@ -65,7 +65,8 @@ def main(args):
         device=place)
     model.load(args.init_from_checkpoint, skip_mismatch=True)
-    eval_result = model.evaluate(eval_dataset.dataloader, batch_size=args.batch_size)
+    eval_result = model.evaluate(
+        eval_dataset.dataloader, batch_size=args.batch_size)
     print("precison: %.5f" % (eval_result["precision"][0]))
     print("recall: %.5f" % (eval_result["recall"][0]))
     print("F1: %.5f" % (eval_result["F1"][0]))
...
@@ -29,11 +29,11 @@ import numpy as np
 work_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 sys.path.append(os.path.join(work_dir, "../"))
-from hapi.text.sequence_tagging import SeqTagging
-from hapi.model import Input, set_device
-from hapi.text.sequence_tagging import LacDataset, LacDataLoader
-from hapi.text.sequence_tagging import check_gpu, check_version
-from hapi.text.sequence_tagging import PDConfig
+from paddle.incubate.hapi.text.sequence_tagging import SeqTagging
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.text.sequence_tagging import LacDataset, LacDataLoader
+from paddle.incubate.hapi.text.sequence_tagging import check_gpu, check_version
+from paddle.incubate.hapi.text.sequence_tagging import PDConfig
 import paddle.fluid as fluid
 from paddle.fluid.layers.utils import flatten
...
@@ -28,11 +28,11 @@ import numpy as np
 work_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 sys.path.append(os.path.join(work_dir, "../"))
-from hapi.model import Input, set_device
-from hapi.text.sequence_tagging import SeqTagging, LacLoss, ChunkEval
-from hapi.text.sequence_tagging import LacDataset, LacDataLoader
-from hapi.text.sequence_tagging import check_gpu, check_version
-from hapi.text.sequence_tagging import PDConfig
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.text.sequence_tagging import SeqTagging, LacLoss, ChunkEval
+from paddle.incubate.hapi.text.sequence_tagging import LacDataset, LacDataLoader
+from paddle.incubate.hapi.text.sequence_tagging import check_gpu, check_version
+from paddle.incubate.hapi.text.sequence_tagging import PDConfig
 import paddle.fluid as fluid
 from paddle.fluid.optimizer import AdamOptimizer
...
@@ -32,10 +32,10 @@ gram_matrix = fluid.layers.matmul(tensor, fluid.layers.transpose(tensor, [1, 0])
 import numpy as np
 import matplotlib.pyplot as plt
-from hapi.model import Model, Loss
-from hapi.vision.models import vgg16
-from hapi.vision.transforms import transforms
+from paddle.incubate.hapi.model import Model, Loss
+from paddle.incubate.hapi.vision.models import vgg16
+from paddle.incubate.hapi.vision.transforms import transforms
 from paddle import fluid
 from paddle.fluid.io import Dataset
...
@@ -3,10 +3,10 @@ import argparse
 import numpy as np
 import matplotlib.pyplot as plt
-from hapi.model import Model, Loss
-from hapi.vision.models import vgg16
-from hapi.vision.transforms import transforms
+from paddle.incubate.hapi.model import Model, Loss
+from paddle.incubate.hapi.vision.models import vgg16
+from paddle.incubate.hapi.vision.transforms import transforms
 from paddle import fluid
 from paddle.fluid.io import Dataset
...
@@ -25,7 +25,7 @@ from paddle.fluid.layers.utils import flatten
 from utils.configure import PDConfig
 from utils.check import check_gpu, check_version
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
 from reader import prepare_infer_input, Seq2SeqDataset, Seq2SeqBatchSampler
 from transformer import InferTransformer
...
@@ -23,8 +23,8 @@ from paddle.io import DataLoader
 from utils.configure import PDConfig
 from utils.check import check_gpu, check_version
-from hapi.model import Input, set_device
-from hapi.callbacks import ProgBarLogger
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.callbacks import ProgBarLogger
 from reader import create_data_loader
 from transformer import Transformer, CrossEntropyCriterion
...
@@ -20,8 +20,8 @@ import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, Layer, to_variable
 from paddle.fluid.dygraph.learning_rate_scheduler import LearningRateDecay
-from hapi.model import Model, CrossEntropy, Loss
-from hapi.text import TransformerBeamSearchDecoder, DynamicDecode
+from paddle.incubate.hapi.model import Model, CrossEntropy, Loss
+from paddle.incubate.hapi.text import TransformerBeamSearchDecoder, DynamicDecode
 def position_encoding_init(n_position, d_pos_vec):
...
@@ -19,8 +19,8 @@ import os
 import argparse
 import numpy as np
-from hapi.model import Input, set_device
-from hapi.vision.transforms import Compose
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.vision.transforms import Compose
 from check import check_gpu, check_version
 from modeling import tsm_resnet50
@@ -36,9 +36,7 @@ def main():
     device = set_device(FLAGS.device)
     fluid.enable_dygraph(device) if FLAGS.dynamic else None
-    transform = Compose([GroupScale(),
-                         GroupCenterCrop(),
-                         NormalizeImage()])
+    transform = Compose([GroupScale(), GroupCenterCrop(), NormalizeImage()])
     dataset = KineticsDataset(
         pickle_file=FLAGS.infer_file,
         label_list=FLAGS.label_list,
@@ -46,8 +44,8 @@ def main():
         transform=transform)
     labels = dataset.label_list
-    model = tsm_resnet50(num_classes=len(labels),
-                         pretrained=FLAGS.weights is None)
+    model = tsm_resnet50(
+        num_classes=len(labels), pretrained=FLAGS.weights is None)
     inputs = [Input([None, 8, 3, 224, 224], 'float32', name='image')]
@@ -66,19 +64,23 @@ def main():
 if __name__ == '__main__':
     parser = argparse.ArgumentParser("CNN training on TSM")
     parser.add_argument(
-        "--data", type=str, default='dataset/kinetics',
+        "--data",
+        type=str,
+        default='dataset/kinetics',
         help="path to dataset root directory")
     parser.add_argument(
-        "--device", type=str, default='gpu',
-        help="device to use, gpu or cpu")
+        "--device", type=str, default='gpu', help="device to use, gpu or cpu")
     parser.add_argument(
-        "-d", "--dynamic", action='store_true',
-        help="enable dygraph mode")
+        "-d", "--dynamic", action='store_true', help="enable dygraph mode")
     parser.add_argument(
-        "--label_list", type=str, default=None,
+        "--label_list",
+        type=str,
+        default=None,
         help="path to category index label list file")
     parser.add_argument(
-        "--infer_file", type=str, default=None,
+        "--infer_file",
+        type=str,
+        default=None,
         help="path to pickle file for inference")
     parser.add_argument(
         "-w",
...
@@ -22,10 +22,10 @@ import numpy as np
 from paddle import fluid
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from hapi.model import Model, Input, set_device
-from hapi.loss import CrossEntropy
-from hapi.metrics import Accuracy
-from hapi.vision.transforms import Compose
+from paddle.incubate.hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.loss import CrossEntropy
+from paddle.incubate.hapi.metrics import Accuracy
+from paddle.incubate.hapi.vision.transforms import Compose
 from modeling import tsm_resnet50
 from check import check_gpu, check_version
...
@@ -17,8 +17,8 @@ import paddle.fluid as fluid
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
-from hapi.model import Model
-from hapi.download import get_weights_path_from_url
+from paddle.incubate.hapi.model import Model
+from paddle.incubate.hapi.download import get_weights_path_from_url
 __all__ = ["TSM_ResNet", "tsm_resnet50"]
...
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay
from paddle.fluid.dygraph.nn import Conv2D, BatchNorm, Pool2D, Linear
from paddle.incubate.hapi.model import Model
from paddle.incubate.hapi.download import get_weights_path_from_url
__all__ = ['DarkNet', 'darknet53']
# {num_layers: (url, md5)}
model_urls = {
'darknet53':
('https://paddle-hapi.bj.bcebos.com/models/darknet53.pdparams',
'ca506a90e2efecb9a2093f8ada808708')
}
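# Conv2D followed by BatchNorm; act='leaky' applies a leaky ReLU (alpha=0.1) in forward.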
class ConvBNLayer(fluid.dygraph.Layer):
def __init__(self,
ch_in,
ch_out,
filter_size=3,
stride=1,
groups=1,
padding=0,
act="leaky"):
super(ConvBNLayer, self).__init__()
self.conv = Conv2D(
num_channels=ch_in,
num_filters=ch_out,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=groups,
param_attr=ParamAttr(
initializer=fluid.initializer.Normal(0., 0.02)),
bias_attr=False,
act=None)
self.batch_norm = BatchNorm(
num_channels=ch_out,
param_attr=ParamAttr(
initializer=fluid.initializer.Normal(0., 0.02),
regularizer=L2Decay(0.)),
bias_attr=ParamAttr(
initializer=fluid.initializer.Constant(0.0),
regularizer=L2Decay(0.)))
self.act = act
def forward(self, inputs):
out = self.conv(inputs)
out = self.batch_norm(out)
if self.act == 'leaky':
out = fluid.layers.leaky_relu(x=out, alpha=0.1)
return out
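# Strided ConvBNLayer (3x3, stride=2 by default) that halves the spatial resolution.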
class DownSample(fluid.dygraph.Layer):
def __init__(self, ch_in, ch_out, filter_size=3, stride=2, padding=1):
super(DownSample, self).__init__()
self.conv_bn_layer = ConvBNLayer(
ch_in=ch_in,
ch_out=ch_out,
filter_size=filter_size,
stride=stride,
padding=padding)
self.ch_out = ch_out
def forward(self, inputs):
out = self.conv_bn_layer(inputs)
return out
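# Residual unit: 1x1 conv to ch_out, then 3x3 conv back to ch_out * 2, added to the input.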
class BasicBlock(fluid.dygraph.Layer):
def __init__(self, ch_in, ch_out):
super(BasicBlock, self).__init__()
self.conv1 = ConvBNLayer(
ch_in=ch_in, ch_out=ch_out, filter_size=1, stride=1, padding=0)
self.conv2 = ConvBNLayer(
ch_in=ch_out,
ch_out=ch_out * 2,
filter_size=3,
stride=1,
padding=1)
def forward(self, inputs):
conv1 = self.conv1(inputs)
conv2 = self.conv2(conv1)
out = fluid.layers.elementwise_add(x=inputs, y=conv2, act=None)
return out
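# One DarkNet stage: an initial BasicBlock followed by (count - 1) repeated blocks.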
class LayerWarp(fluid.dygraph.Layer):
def __init__(self, ch_in, ch_out, count):
super(LayerWarp, self).__init__()
self.basicblock0 = BasicBlock(ch_in, ch_out)
self.res_out_list = []
for i in range(1, count):
res_out = self.add_sublayer("basic_block_%d" % (i),
BasicBlock(ch_out * 2, ch_out))
self.res_out_list.append(res_out)
self.ch_out = ch_out
def forward(self, inputs):
y = self.basicblock0(inputs)
for basic_block_i in self.res_out_list:
y = basic_block_i(y)
return y
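# Residual block counts for the five stages of DarkNet-53.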
DarkNet_cfg = {53: ([1, 2, 8, 8, 4])}
class DarkNet(Model):
"""DarkNet model from
`"YOLOv3: An Incremental Improvement" <https://arxiv.org/abs/1804.02767>`_
Args:
        num_layers (int): number of layers; only 53 is supported currently. Default: 53.
num_classes (int): output dim of last fc layer. If num_classes <=0, last fc layer
will not be defined. Default: 1000.
with_pool (bool): use pool before the last fc layer or not. Default: True.
classifier_activation (str): activation for the last fc layer. Default: 'softmax'.
"""
def __init__(self,
num_layers=53,
num_classes=1000,
with_pool=True,
classifier_activation='softmax'):
super(DarkNet, self).__init__()
assert num_layers in DarkNet_cfg.keys(), \
"only support num_layers in {} currently" \
.format(DarkNet_cfg.keys())
self.stages = DarkNet_cfg[num_layers]
self.stages = self.stages[0:5]
self.num_classes = num_classes
        self.with_pool = with_pool
ch_in = 3
self.conv0 = ConvBNLayer(
ch_in=ch_in, ch_out=32, filter_size=3, stride=1, padding=1)
self.downsample0 = DownSample(ch_in=32, ch_out=32 * 2)
self.darknet53_conv_block_list = []
self.downsample_list = []
ch_in = [64, 128, 256, 512, 1024]
for i, stage in enumerate(self.stages):
conv_block = self.add_sublayer("stage_%d" % (i),
LayerWarp(
int(ch_in[i]), 32 * (2**i),
stage))
self.darknet53_conv_block_list.append(conv_block)
for i in range(len(self.stages) - 1):
downsample = self.add_sublayer(
"stage_%d_downsample" % i,
DownSample(
ch_in=32 * (2**(i + 1)), ch_out=32 * (2**(i + 2))))
self.downsample_list.append(downsample)
if self.with_pool:
self.global_pool = Pool2D(
pool_size=7, pool_type='avg', global_pooling=True)
if self.num_classes > 0:
stdv = 1.0 / math.sqrt(32 * (2**(i + 2)))
self.fc_input_dim = 32 * (2**(i + 2))
self.fc = Linear(
self.fc_input_dim,
num_classes,
act='softmax',
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv)))
def forward(self, inputs):
out = self.conv0(inputs)
out = self.downsample0(out)
for i, conv_block_i in enumerate(self.darknet53_conv_block_list):
out = conv_block_i(out)
if i < len(self.stages) - 1:
out = self.downsample_list[i](out)
if self.with_pool:
out = self.global_pool(out)
if self.num_classes > 0:
out = fluid.layers.reshape(out, shape=[-1, self.fc_input_dim])
out = self.fc(out)
return out
def _darknet(arch, num_layers=53, pretrained=False, **kwargs):
model = DarkNet(num_layers, **kwargs)
if pretrained:
        assert arch in model_urls, \
            "{} model does not have pretrained weights yet, you should set pretrained=False".format(
                arch)
weight_path = get_weights_path_from_url(*(model_urls[arch]))
assert weight_path.endswith('.pdparams'), \
"suffix of weight must be .pdparams"
model.load(weight_path)
return model
def darknet53(pretrained=False, **kwargs):
    """DarkNet 53-layer model

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
            Default: False.
    """
    return _darknet('darknet53', 53, pretrained, **kwargs)
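
A quick smoke test of the builder above (a sketch only: random input, no pretrained download; the 224x224 input follows the ImageNet convention implied by the `num_classes=1000` head):

import numpy as np
import paddle.fluid as fluid

fluid.enable_dygraph()
model = darknet53(pretrained=False)
x = fluid.dygraph.to_variable(
    np.random.rand(1, 3, 224, 224).astype('float32'))
out = model.forward(x)  # softmax scores, shape [1, 1000]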
@@ -24,7 +24,7 @@ from paddle import fluid
 from paddle.fluid.optimizer import Momentum
 from paddle.io import DataLoader
-from hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.model import Model, Input, set_device
 from modeling import yolov3_darknet53, YoloLoss
 from transforms import *
@@ -65,13 +65,17 @@ def main():
     device = set_device(FLAGS.device)
     fluid.enable_dygraph(device) if FLAGS.dynamic else None
-    inputs = [Input([None, 1], 'int64', name='img_id'),
-              Input([None, 2], 'int32', name='img_shape'),
-              Input([None, 3, None, None], 'float32', name='image')]
+    inputs = [
+        Input(
+            [None, 1], 'int64', name='img_id'), Input(
+                [None, 2], 'int32', name='img_shape'), Input(
+                    [None, 3, None, None], 'float32', name='image')
+    ]
     cat2name = load_labels(FLAGS.label_list, with_background=False)
-    model = yolov3_darknet53(num_classes=len(cat2name),
+    model = yolov3_darknet53(
+        num_classes=len(cat2name),
         model_mode='test',
         pretrained=FLAGS.weights is None)
@@ -106,19 +110,33 @@ if __name__ == '__main__':
     parser.add_argument(
         "-d", "--dynamic", action='store_true', help="enable dygraph mode")
     parser.add_argument(
-        "--label_list", type=str, default=None,
+        "--label_list",
+        type=str,
+        default=None,
         help="path to category label list file")
     parser.add_argument(
-        "-t", "--draw_threshold", type=float, default=0.5,
+        "-t",
+        "--draw_threshold",
+        type=float,
+        default=0.5,
         help="threshold to reserve the result for visualization")
     parser.add_argument(
-        "-i", "--infer_image", type=str, default=None,
+        "-i",
+        "--infer_image",
+        type=str,
+        default=None,
         help="image path for inference")
     parser.add_argument(
-        "-o", "--output_dir", type=str, default='output',
+        "-o",
+        "--output_dir",
+        type=str,
+        default='output',
         help="directory to save inference result if --visualize is set")
     parser.add_argument(
-        "-w", "--weights", default=None, type=str,
+        "-w",
+        "--weights",
+        default=None,
+        type=str,
         help="path to weights for inference")
     FLAGS = parser.parse_args()
     print_arguments(FLAGS)
...
@@ -25,9 +25,9 @@ from paddle import fluid
 from paddle.fluid.optimizer import Momentum
 from paddle.io import DataLoader
-from hapi.model import Model, Input, set_device
-from hapi.distributed import DistributedBatchSampler
-from hapi.vision.transforms import Compose, BatchCompose
+from paddle.incubate.hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.distributed import DistributedBatchSampler
+from paddle.incubate.hapi.vision.transforms import Compose, BatchCompose
 from modeling import yolov3_darknet53, YoloLoss
 from coco import COCODataset
...
@@ -23,7 +23,7 @@ from paddle.fluid.regularizer import L2Decay
 from hapi.model import Model
 from hapi.loss import Loss
 from hapi.download import get_weights_path_from_url
-from hapi.vision.models import darknet53
+from darknet import darknet53
 __all__ = ['YoloLoss', 'YOLOv3', 'yolov3_darknet53']
...