diff --git a/examples/bert/bert_classifier.py b/examples/bert/bert_classifier.py
index e3c9726c9a1eb257748e7a8d2fb35e316b7bd80b..3d47ecb5d6b5b38f4a691ef0268ffc301bcfb3eb 100644
--- a/examples/bert/bert_classifier.py
+++ b/examples/bert/bert_classifier.py
@@ -14,14 +14,14 @@
 """BERT fine-tuning in Paddle Dygraph Mode."""
 
 import paddle.fluid as fluid
-from hapi.metrics import Accuracy
-from hapi.configure import Config
-from hapi.text.bert import BertEncoder
+from paddle.incubate.hapi.metrics import Accuracy
+from paddle.incubate.hapi.configure import Config
+from paddle.incubate.hapi.text.bert import BertEncoder
 from paddle.fluid.dygraph import Linear, Layer
-from hapi.loss import SoftmaxWithCrossEntropy
-from hapi.model import set_device, Model, Input
-import hapi.text.tokenizer.tokenization as tokenization
-from hapi.text.bert import BertConfig, BertDataLoader, BertInputExample, make_optimizer
+from paddle.incubate.hapi.loss import SoftmaxWithCrossEntropy
+from paddle.incubate.hapi.model import set_device, Model, Input
+import paddle.incubate.hapi.text.tokenizer.tokenization as tokenization
+from paddle.incubate.hapi.text.bert import BertConfig, BertDataLoader, BertInputExample, make_optimizer
 
 
 class ClsModelLayer(Model):
@@ -157,7 +157,8 @@ def main():
         labels,
         device=device)
 
-    cls_model.bert_layer.load("./bert_uncased_L-12_H-768_A-12/bert", reset_optimizer=True)
+    cls_model.bert_layer.load(
+        "./bert_uncased_L-12_H-768_A-12/bert", reset_optimizer=True)
 
     # do train
     cls_model.fit(train_data=train_dataloader.dataloader,
diff --git a/examples/bert_leveldb/bert_classifier.py b/examples/bert_leveldb/bert_classifier.py
index 51b1192f83a89de6491d82f1748bbead345df742..02d112fe7009eab8230ef51bbbdd5d8ed9958aab 100644
--- a/examples/bert_leveldb/bert_classifier.py
+++ b/examples/bert_leveldb/bert_classifier.py
@@ -14,14 +14,14 @@
 """BERT fine-tuning in Paddle Dygraph Mode."""
 
 import paddle.fluid as fluid
-from hapi.metrics import Accuracy
-from hapi.configure import Config
-from hapi.text.bert import BertEncoder
+from paddle.incubate.hapi.metrics import Accuracy
+from paddle.incubate.hapi.configure import Config
+from paddle.incubate.hapi.text.bert import BertEncoder
 from paddle.fluid.dygraph import Linear, Layer
-from hapi.loss import SoftmaxWithCrossEntropy
-from hapi.model import set_device, Model, Input
-import hapi.text.tokenizer.tokenization as tokenization
-from hapi.text.bert import BertConfig, BertDataLoader, BertInputExample, make_optimizer
+from paddle.incubate.hapi.loss import SoftmaxWithCrossEntropy
+from paddle.incubate.hapi.model import set_device, Model, Input
+import paddle.incubate.hapi.text.tokenizer.tokenization as tokenization
+from paddle.incubate.hapi.text.bert import BertConfig, BertDataLoader, BertInputExample, make_optimizer
 
 
 class ClsModelLayer(Model):
@@ -159,7 +159,8 @@ def main():
         labels,
         device=device)
 
-    cls_model.bert_layer.load("./bert_uncased_L-12_H-768_A-12/bert", reset_optimizer=True)
+    cls_model.bert_layer.load(
+        "./bert_uncased_L-12_H-768_A-12/bert", reset_optimizer=True)
 
     # do train
     cls_model.fit(train_data=train_dataloader.dataloader,
diff --git a/examples/bmn/bmn_metric.py b/examples/bmn/bmn_metric.py
index cbcad9a1e15d5356127c748194ac907bcfda5967..e4ba8aec1c2c7d4673e89b706a6e830204879bff 100644
--- a/examples/bmn/bmn_metric.py
+++ b/examples/bmn/bmn_metric.py
@@ -20,7 +20,7 @@ import json
 
 sys.path.append('../')
 
-from hapi.metrics import Metric
+from paddle.incubate.hapi.metrics import Metric
 from bmn_utils import boundary_choose, bmn_post_processing
diff --git a/examples/bmn/eval.py b/examples/bmn/eval.py
index c8d4f26caf4b7d4e56ee88623f90b6d40a6e02dd..3fd8aea7792eefba0ee0096db703628fef77bcd9 100644
--- a/examples/bmn/eval.py
+++ b/examples/bmn/eval.py
@@ -18,7 +18,7 @@ import sys
 import logging
 
 import paddle.fluid as fluid
-from hapi.model import set_device, Input
+from paddle.incubate.hapi.model import set_device, Input
 
 from modeling import bmn, BmnLoss
 from bmn_metric import BmnMetric
diff --git a/examples/bmn/modeling.py b/examples/bmn/modeling.py
index bfd65b318ad5f0c8a3ea2191f9a7c2a4d18b691e..5fac438879b61d42e2adad7114dd80cd9693c61c 100644
--- a/examples/bmn/modeling.py
+++ b/examples/bmn/modeling.py
@@ -17,9 +17,9 @@ from paddle.fluid import ParamAttr
 import numpy as np
 import math
 
-from hapi.model import Model
-from hapi.loss import Loss
-from hapi.download import get_weights_path_from_url
+from paddle.incubate.hapi.model import Model
+from paddle.incubate.hapi.loss import Loss
+from paddle.incubate.hapi.download import get_weights_path_from_url
 
 __all__ = ["BMN", "BmnLoss", "bmn"]
diff --git a/examples/bmn/predict.py b/examples/bmn/predict.py
index b984c06fa8d4863b437e22e91b9d633638994c77..e96e837ccfbbb6e5c56fb12f4353bd2cca69a89e 100644
--- a/examples/bmn/predict.py
+++ b/examples/bmn/predict.py
@@ -18,7 +18,7 @@ import os
 import logging
 
 import paddle.fluid as fluid
-from hapi.model import set_device, Input
+from paddle.incubate.hapi.model import set_device, Input
 
 from modeling import bmn, BmnLoss
 from bmn_metric import BmnMetric
diff --git a/examples/bmn/reader.py b/examples/bmn/reader.py
index d8f125e10337a6e3fb9b241d4893ec96c86eba62..36780b2f3e1044a22626374ba1fa5d1730af0301 100644
--- a/examples/bmn/reader.py
+++ b/examples/bmn/reader.py
@@ -21,7 +21,7 @@ import sys
 
 sys.path.append('../')
 
-from hapi.distributed import DistributedBatchSampler
+from paddle.incubate.hapi.distributed import DistributedBatchSampler
 from paddle.io import Dataset, DataLoader
 
 logger = logging.getLogger(__name__)
diff --git a/examples/bmn/train.py b/examples/bmn/train.py
index 1e2bf449113656060e759c50fc933fd15572ef0b..2a25c1ac76a5dcc0515232756a03b28b611e28ce 100644
--- a/examples/bmn/train.py
+++ b/examples/bmn/train.py
@@ -18,7 +18,7 @@ import logging
 import sys
 import os
 
-from hapi.model import set_device, Input
+from paddle.incubate.hapi.model import set_device, Input
 
 from reader import BmnDataset
 from config_utils import *
diff --git a/examples/cyclegan/cyclegan.py b/examples/cyclegan/cyclegan.py
index 076e13d3ebe079fbb639d6ec80d432403af03cfd..f6a2dae378eb3d97f0345329dc8b7ecb2bb6585e 100644
--- a/examples/cyclegan/cyclegan.py
+++ b/examples/cyclegan/cyclegan.py
@@ -19,8 +19,8 @@ from __future__ import print_function
 import numpy as np
 
 import paddle.fluid as fluid
-from hapi.model import Model
-from hapi.loss import Loss
+from paddle.incubate.hapi.model import Model
+from paddle.incubate.hapi.loss import Loss
 
 from layers import ConvBN, DeConvBN
diff --git a/examples/cyclegan/infer.py b/examples/cyclegan/infer.py
index 2fb2b35eefc8ffb1b4fc10f58ce519e998415055..bbefaf6db4b131d68ac4d3cb84d5a658a5c61bac 100644
--- a/examples/cyclegan/infer.py
+++ b/examples/cyclegan/infer.py
@@ -25,7 +25,7 @@ from PIL import Image
 from scipy.misc import imsave
 
 import paddle.fluid as fluid
-from hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.model import Model, Input, set_device
 
 from check import check_gpu, check_version
 from cyclegan import Generator, GeneratorCombine
diff --git a/examples/cyclegan/test.py b/examples/cyclegan/test.py
index 67f7183e2229ec9509e23e9ac81dc54122290056..ba7d5c5889be77d483d5321edba7c2ac8265cf53 100644
--- a/examples/cyclegan/test.py
+++ b/examples/cyclegan/test.py
@@ -22,7 +22,7 @@ import numpy as np
 from scipy.misc import imsave
 
 import paddle.fluid as fluid
-from hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.model import Model, Input, set_device
 
 from check import check_gpu, check_version
 from cyclegan import Generator, GeneratorCombine
diff --git a/examples/cyclegan/train.py b/examples/cyclegan/train.py
index 4ca77dd66996afcc00218eafaafefa25a2a7c771..de9ed63b5cf4a16bdf2dbc607f74f61f208ba733 100644
--- a/examples/cyclegan/train.py
+++ b/examples/cyclegan/train.py
@@ -24,7 +24,7 @@ import time
 
 import paddle
 import paddle.fluid as fluid
-from hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.model import Model, Input, set_device
 
 from check import check_gpu, check_version
 from cyclegan import Generator, Discriminator, GeneratorCombine, GLoss, DLoss
@@ -78,12 +78,12 @@ def main():
     g_AB.prepare(inputs=[input_A], device=FLAGS.device)
     g_BA.prepare(inputs=[input_B], device=FLAGS.device)
 
-    g.prepare(g_optimizer, GLoss(), inputs=[input_A, input_B],
-              device=FLAGS.device)
-    d_A.prepare(da_optimizer, DLoss(), inputs=[input_B, fake_B],
-                device=FLAGS.device)
-    d_B.prepare(db_optimizer, DLoss(), inputs=[input_A, fake_A],
-                device=FLAGS.device)
+    g.prepare(
+        g_optimizer, GLoss(), inputs=[input_A, input_B], device=FLAGS.device)
+    d_A.prepare(
+        da_optimizer, DLoss(), inputs=[input_B, fake_B], device=FLAGS.device)
+    d_B.prepare(
+        db_optimizer, DLoss(), inputs=[input_A, fake_A], device=FLAGS.device)
 
     if FLAGS.resume:
         g.load(FLAGS.resume)
diff --git a/examples/handwritten_number_recognition/mnist.py b/examples/handwritten_number_recognition/mnist.py
index 36db96ca3715e877c443480bbda598a7545dc1b7..a3b77dab56ea549e07f7e071afab19eb18d08e51 100644
--- a/examples/handwritten_number_recognition/mnist.py
+++ b/examples/handwritten_number_recognition/mnist.py
@@ -19,12 +19,12 @@ import argparse
 
 from paddle import fluid
 from paddle.fluid.optimizer import Momentum
-from hapi.datasets.mnist import MNIST as MnistDataset
+from paddle.incubate.hapi.datasets.mnist import MNIST as MnistDataset
 
-from hapi.model import Input, set_device
-from hapi.loss import CrossEntropy
-from hapi.metrics import Accuracy
-from hapi.vision.models import LeNet
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.loss import CrossEntropy
+from paddle.incubate.hapi.metrics import Accuracy
+from paddle.incubate.hapi.vision.models import LeNet
 
 
 def main():
diff --git a/examples/image_classification/imagenet_dataset.py b/examples/image_classification/imagenet_dataset.py
index 27c41d6fb4cfe752f311d2e2c65380aa29bc4323..27a90091e301ce039b129f6f74518a2bd5779557 100644
--- a/examples/image_classification/imagenet_dataset.py
+++ b/examples/image_classification/imagenet_dataset.py
@@ -18,8 +18,8 @@ import math
 import random
 import numpy as np
 
-from hapi.datasets import DatasetFolder
-from hapi.vision.transforms import transforms
+from paddle.incubate.hapi.datasets import DatasetFolder
+from paddle.incubate.hapi.vision.transforms import transforms
 from paddle import fluid
diff --git a/examples/image_classification/main.py b/examples/image_classification/main.py
index e5aea412fbeb619d90002f3c9f817788b021821a..ff0c95b534c6ec74792ff4f9962f1563fa4698dd 100644
--- a/examples/image_classification/main.py
+++ b/examples/image_classification/main.py
@@ -27,11 +27,11 @@ import paddle.fluid as fluid
 from paddle.fluid.dygraph.parallel import ParallelEnv
 from paddle.io import BatchSampler, DataLoader
 
-from hapi.model import Input, set_device
-from hapi.loss import CrossEntropy
-from hapi.distributed import DistributedBatchSampler
-from hapi.metrics import Accuracy
-import hapi.vision.models as models
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.loss import CrossEntropy
+from paddle.incubate.hapi.distributed import DistributedBatchSampler
+from paddle.incubate.hapi.metrics import Accuracy
+import paddle.incubate.hapi.vision.models as models
 
 from imagenet_dataset import ImageNetDataset
diff --git a/examples/ocr/eval.py b/examples/ocr/eval.py
index 1adffa5401679ab0d49cc586c0238ce1c01fa1b8..6b5fd48bc9ee000d7a4c4e19818931f87af4a47c 100644
--- a/examples/ocr/eval.py
+++ b/examples/ocr/eval.py
@@ -19,8 +19,8 @@ import functools
 
 import paddle.fluid.profiler as profiler
 import paddle.fluid as fluid
-from hapi.model import Input, set_device
-from hapi.vision.transforms import BatchCompose
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.vision.transforms import BatchCompose
 
 from utility import add_arguments, print_arguments
 from utility import SeqAccuracy, LoggerCallBack, SeqBeamAccuracy
diff --git a/examples/ocr/predict.py b/examples/ocr/predict.py
index 242d4f80b9bbdbade61b0cc086196482ffa588e9..f1eac38751a19eb1cf494f673e9680a4ece1f369 100644
--- a/examples/ocr/predict.py
+++ b/examples/ocr/predict.py
@@ -25,9 +25,9 @@ from PIL import Image
 
 import paddle.fluid.profiler as profiler
 import paddle.fluid as fluid
-from hapi.model import Input, set_device
-from hapi.datasets.folder import ImageFolder
-from hapi.vision.transforms import BatchCompose
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.datasets.folder import ImageFolder
+from paddle.incubate.hapi.vision.transforms import BatchCompose
 
 from utility import add_arguments, print_arguments
 from utility import postprocess, index2word
diff --git a/examples/ocr/seq2seq_attn.py b/examples/ocr/seq2seq_attn.py
index 66da91ce7d84c458bd6424da8c364a9ed25776c4..117749cb27d2b41c7e308e5540433465475a7368 100644
--- a/examples/ocr/seq2seq_attn.py
+++ b/examples/ocr/seq2seq_attn.py
@@ -19,9 +19,9 @@ import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 from paddle.fluid.layers import BeamSearchDecoder
 
-from hapi.text import RNNCell, RNN, DynamicDecode
-from hapi.model import Model
-from hapi.loss import Loss
+from paddle.incubate.hapi.text import RNNCell, RNN, DynamicDecode
+from paddle.incubate.hapi.model import Model
+from paddle.incubate.hapi.loss import Loss
 
 
 class ConvBNPool(fluid.dygraph.Layer):
diff --git a/examples/ocr/train.py b/examples/ocr/train.py
index d72173dfde7791b53af80f04697f8e3defd01445..b69c90e752e125da69028d50b02a83edc06b09f3 100644
--- a/examples/ocr/train.py
+++ b/examples/ocr/train.py
@@ -24,8 +24,8 @@ import functools
 
 import paddle.fluid.profiler as profiler
 import paddle.fluid as fluid
-from hapi.model import Input, set_device
-from hapi.vision.transforms import BatchCompose
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.vision.transforms import BatchCompose
 
 from utility import add_arguments, print_arguments
 from utility import SeqAccuracy, LoggerCallBack
diff --git a/examples/ocr/utility.py b/examples/ocr/utility.py
index d47b3f17d16452c1292402abc15b534eec4b3459..59b7f4206b23af6cdfe162596c4dc2f78e9b3a06 100644
--- a/examples/ocr/utility.py
+++ b/examples/ocr/utility.py
@@ -21,8 +21,8 @@ import numpy as np
 import paddle.fluid as fluid
 import six
 
-from hapi.metrics import Metric
-from hapi.callbacks import ProgBarLogger
+from paddle.incubate.hapi.metrics import Metric
+from paddle.incubate.hapi.callbacks import ProgBarLogger
 
 
 def print_arguments(args):
diff --git a/examples/sentiment_classification/models.py b/examples/sentiment_classification/models.py
index 313b928778f64001c5b37888bc546d6ff33bd970..7efaff520725fc41ff0ed928d1e260111e7103c8 100644
--- a/examples/sentiment_classification/models.py
+++ b/examples/sentiment_classification/models.py
@@ -15,13 +15,13 @@ import paddle.fluid as fluid
 from paddle.fluid.dygraph.nn import Linear, Embedding
 from paddle.fluid.dygraph.base import to_variable
 import numpy as np
-from hapi.model import Model
-from hapi.text.text import GRUEncoderLayer as BiGRUEncoder
-from hapi.text.test import BOWEncoder, CNNEncoder, GRUEncoder
+from paddle.incubate.hapi.model import Model
+from paddle.incubate.hapi.text.text import GRUEncoderLayer as BiGRUEncoder
+from paddle.incubate.hapi.text.test import BOWEncoder, CNNEncoder, GRUEncoder
 
 
 class CNN(Model):
-    def __init__(self, dict_dim, batch_size, seq_len):
+    def __init__(self, dict_dim, batch_size, seq_len):
         super(CNN, self).__init__()
         self.dict_dim = dict_dim
         self.emb_dim = 128
@@ -36,15 +36,19 @@ class CNN(Model):
             dict_size=self.dict_dim + 1,
             emb_dim=self.emb_dim,
             seq_len=self.seq_len,
-            filter_size= self.win_size,
-            num_filters= self.hid_dim,
-            hidden_dim= self.hid_dim,
+            filter_size=self.win_size,
+            num_filters=self.hid_dim,
+            hidden_dim=self.hid_dim,
             padding_idx=None,
             act='tanh')
-        self._fc1 = Linear(input_dim = self.hid_dim*self.seq_len, output_dim=self.fc_hid_dim, act="softmax")
-        self._fc_prediction = Linear(input_dim = self.fc_hid_dim,
-                                     output_dim = self.class_dim,
-                                     act="softmax")
+        self._fc1 = Linear(
+            input_dim=self.hid_dim * self.seq_len,
+            output_dim=self.fc_hid_dim,
+            act="softmax")
+        self._fc_prediction = Linear(
+            input_dim=self.fc_hid_dim,
+            output_dim=self.class_dim,
+            act="softmax")
 
     def forward(self, inputs):
         conv_3 = self._encoder(inputs)
@@ -69,11 +73,14 @@ class BOW(Model):
             padding_idx=None,
             bow_dim=self.hid_dim,
             seq_len=self.seq_len)
-        self._fc1 = Linear(input_dim = self.hid_dim, output_dim=self.hid_dim, act="tanh")
-        self._fc2 = Linear(input_dim = self.hid_dim, output_dim=self.fc_hid_dim, act="tanh")
-        self._fc_prediction = Linear(input_dim = self.fc_hid_dim,
-                                     output_dim = self.class_dim,
-                                     act="softmax")
+        self._fc1 = Linear(
+            input_dim=self.hid_dim, output_dim=self.hid_dim, act="tanh")
+        self._fc2 = Linear(
+            input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh")
+        self._fc_prediction = Linear(
+            input_dim=self.fc_hid_dim,
+            output_dim=self.class_dim,
+            act="softmax")
 
     def forward(self, inputs):
         bow_1 = self._encoder(inputs)
@@ -94,10 +101,12 @@ class GRU(Model):
         self.class_dim = 2
         self.batch_size = batch_size
         self.seq_len = seq_len
-        self._fc1 = Linear(input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh")
-        self._fc_prediction = Linear(input_dim=self.fc_hid_dim,
-                                     output_dim=self.class_dim,
-                                     act="softmax")
+        self._fc1 = Linear(
+            input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh")
+        self._fc_prediction = Linear(
+            input_dim=self.fc_hid_dim,
+            output_dim=self.class_dim,
+            act="softmax")
         self._encoder = GRUEncoder(
             dict_size=self.dict_dim + 1,
             emb_dim=self.emb_dim,
@@ -112,7 +121,7 @@ class GRU(Model):
         prediction = self._fc_prediction(fc_1)
         return prediction
 
-    
+
 class BiGRU(Model):
     def __init__(self, dict_dim, batch_size, seq_len):
         super(BiGRU, self).__init__()
@@ -130,11 +139,13 @@ class BiGRU(Model):
             is_sparse=False)
         h_0 = np.zeros((self.batch_size, self.hid_dim), dtype="float32")
         h_0 = to_variable(h_0)
-        self._fc1 = Linear(input_dim = self.hid_dim, output_dim=self.hid_dim*3)
-        self._fc2 = Linear(input_dim = self.hid_dim*2, output_dim=self.fc_hid_dim, act="tanh")
-        self._fc_prediction = Linear(input_dim=self.fc_hid_dim,
-                                     output_dim=self.class_dim,
-                                     act="softmax")
+        self._fc1 = Linear(input_dim=self.hid_dim, output_dim=self.hid_dim * 3)
+        self._fc2 = Linear(
+            input_dim=self.hid_dim * 2, output_dim=self.fc_hid_dim, act="tanh")
+        self._fc_prediction = Linear(
+            input_dim=self.fc_hid_dim,
+            output_dim=self.class_dim,
+            act="softmax")
         self._encoder = BiGRUEncoder(
             grnn_hidden_dim=self.hid_dim,
             input_dim=self.hid_dim * 3,
@@ -144,7 +155,8 @@ class BiGRU(Model):
 
     def forward(self, inputs):
         emb = self.embedding(inputs)
-        emb = fluid.layers.reshape(emb, shape=[self.batch_size, -1, self.hid_dim])
+        emb = fluid.layers.reshape(
+            emb, shape=[self.batch_size, -1, self.hid_dim])
         fc_1 = self._fc1(emb)
         encoded_vector = self._encoder(fc_1)
         encoded_vector = fluid.layers.tanh(encoded_vector)
diff --git a/examples/sentiment_classification/sentiment_classifier.py b/examples/sentiment_classification/sentiment_classifier.py
index b5f6a0d9845420f3d77e97b989cbdd44fc0b38a0..7bd2d6a974f1cd3c01fa7fa909eff7babba258b3 100644
--- a/examples/sentiment_classification/sentiment_classifier.py
+++ b/examples/sentiment_classification/sentiment_classifier.py
@@ -13,14 +13,13 @@
 # limitations under the License.
 """Sentiment Classification in Paddle Dygraph Mode. """
-
 from __future__ import print_function
 import numpy as np
 import paddle.fluid as fluid
-from hapi.model import set_device, Model, CrossEntropy, Input
-from hapi.configure import Config
-from hapi.text.senta import SentaProcessor
-from hapi.metrics import Accuracy
+from paddle.incubate.hapi.model import set_device, Model, CrossEntropy, Input
+from paddle.incubate.hapi.configure import Config
+from paddle.incubate.hapi.text.senta import SentaProcessor
+from paddle.incubate.hapi.metrics import Accuracy
 from models import CNN, BOW, GRU, BiGRU
 import json
 import os
@@ -32,12 +31,14 @@ args.Print()
 device = set_device("gpu" if args.use_cuda else "cpu")
 dev_count = fluid.core.get_cuda_device_count() if args.use_cuda else 1
 
+
 def main():
     if args.do_train:
         train()
     elif args.do_infer:
         infer()
 
+
 def train():
     fluid.enable_dygraph(device)
     processor = SentaProcessor(
@@ -66,31 +67,28 @@ def train():
         epoch=args.epoch,
         shuffle=False)
     if args.model_type == 'cnn_net':
-        model = CNN( args.vocab_size, args.batch_size,
-                     args.padding_size)
+        model = CNN(args.vocab_size, args.batch_size, args.padding_size)
     elif args.model_type == 'bow_net':
-        model = BOW( args.vocab_size, args.batch_size,
-                     args.padding_size)
+        model = BOW(args.vocab_size, args.batch_size, args.padding_size)
     elif args.model_type == 'gru_net':
-        model = GRU( args.vocab_size, args.batch_size,
-                     args.padding_size)
+        model = GRU(args.vocab_size, args.batch_size, args.padding_size)
     elif args.model_type == 'bigru_net':
-        model = BiGRU( args.vocab_size, args.batch_size,
-                       args.padding_size)
-
-    optimizer = fluid.optimizer.Adagrad(learning_rate=args.lr, parameter_list=model.parameters())
-
+        model = BiGRU(args.vocab_size, args.batch_size, args.padding_size)
+
+    optimizer = fluid.optimizer.Adagrad(
+        learning_rate=args.lr, parameter_list=model.parameters())
+
     inputs = [Input([None, None], 'int64', name='doc')]
     labels = [Input([None, 1], 'int64', name='label')]
-
+
     model.prepare(
         optimizer,
         CrossEntropy(),
-        Accuracy(topk=(1,)),
+        Accuracy(topk=(1, )),
         inputs,
         labels,
         device=device)
-
+
     model.fit(train_data=train_data_generator,
               eval_data=eval_data_generator,
               batch_size=args.batch_size,
@@ -99,6 +97,7 @@ def train():
               eval_freq=args.eval_freq,
               save_freq=args.save_freq)
 
+
 def infer():
     fluid.enable_dygraph(device)
     processor = SentaProcessor(
@@ -114,38 +113,37 @@ def infer():
         epoch=1,
         shuffle=False)
     if args.model_type == 'cnn_net':
-        model_infer = CNN( args.vocab_size, args.batch_size,
-                           args.padding_size)
+        model_infer = CNN(args.vocab_size, args.batch_size, args.padding_size)
     elif args.model_type == 'bow_net':
-        model_infer = BOW( args.vocab_size, args.batch_size,
-                           args.padding_size)
+        model_infer = BOW(args.vocab_size, args.batch_size, args.padding_size)
     elif args.model_type == 'gru_net':
-        model_infer = GRU( args.vocab_size, args.batch_size,
-                           args.padding_size)
+        model_infer = GRU(args.vocab_size, args.batch_size, args.padding_size)
     elif args.model_type == 'bigru_net':
-        model_infer = BiGRU( args.vocab_size, args.batch_size,
-                             args.padding_size)
-
+        model_infer = BiGRU(args.vocab_size, args.batch_size,
+                            args.padding_size)
+
     print('Do inferring ...... ')
     inputs = [Input([None, None], 'int64', name='doc')]
     model_infer.prepare(
-        None,
-        CrossEntropy(),
-        Accuracy(topk=(1,)),
-        inputs,
-        device=device)
+        None, CrossEntropy(), Accuracy(topk=(1, )), inputs, device=device)
     model_infer.load(args.checkpoints, reset_optimizer=True)
     preds = model_infer.predict(test_data=infer_data_generator)
     preds = np.array(preds[0]).reshape((-1, 2))
 
     if args.output_dir:
         with open(os.path.join(args.output_dir, 'predictions.json'), 'w') as w:
-
+
             for p in range(len(preds)):
                 label = np.argmax(preds[p])
-                result = json.dumps({'index': p, 'label': label, 'probs': preds[p].tolist()})
-                w.write(result+'\n')
-    print('Predictions saved at '+os.path.join(args.output_dir, 'predictions.json'))
+                result = json.dumps({
+                    'index': p,
+                    'label': label,
+                    'probs': preds[p].tolist()
+                })
+                w.write(result + '\n')
+    print('Predictions saved at ' + os.path.join(args.output_dir,
+                                                 'predictions.json'))
+
 
 if __name__ == '__main__':
     main()
diff --git a/examples/seq2seq/predict.py b/examples/seq2seq/predict.py
index 930c2e5189469ed174da02e9cf5d6e6e8c2b5a05..db8aef1330fb8a33934c17c789563a606fcd5350 100644
--- a/examples/seq2seq/predict.py
+++ b/examples/seq2seq/predict.py
@@ -23,7 +23,7 @@ import paddle.fluid as fluid
 from paddle.fluid.layers.utils import flatten
 from paddle.fluid.io import DataLoader
 
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
 from args import parse_args
 from seq2seq_base import BaseInferModel
 from seq2seq_attn import AttentionInferModel
diff --git a/examples/seq2seq/seq2seq_attn.py b/examples/seq2seq/seq2seq_attn.py
index ce9cc089ca2133549fbdd08ed600e69d4235e08c..37c84cbcd76381b13ff066fd97f600d7b347ab07 100644
--- a/examples/seq2seq/seq2seq_attn.py
+++ b/examples/seq2seq/seq2seq_attn.py
@@ -19,8 +19,8 @@ from paddle.fluid.initializer import UniformInitializer
 from paddle.fluid.dygraph import Embedding, Linear, Layer
 from paddle.fluid.layers import BeamSearchDecoder
 
-from hapi.model import Model, Loss
-from hapi.text import DynamicDecode, RNN, BasicLSTMCell, RNNCell
+from paddle.incubate.hapi.model import Model, Loss
+from paddle.incubate.hapi.text import DynamicDecode, RNN, BasicLSTMCell, RNNCell
 
 from seq2seq_base import Encoder
diff --git a/examples/seq2seq/seq2seq_base.py b/examples/seq2seq/seq2seq_base.py
index c28e2dc52935526d69d78ae73bfd48c92528b93c..6342ca112df6b92baed052fd17eaba215400440f 100644
--- a/examples/seq2seq/seq2seq_base.py
+++ b/examples/seq2seq/seq2seq_base.py
@@ -19,8 +19,8 @@ from paddle.fluid.initializer import UniformInitializer
 from paddle.fluid.dygraph import Embedding, Linear, Layer
 from paddle.fluid.layers import BeamSearchDecoder
 
-from hapi.model import Model, Loss
-from hapi.text import DynamicDecode, RNN, BasicLSTMCell, RNNCell
+from paddle.incubate.hapi.model import Model, Loss
+from paddle.incubate.hapi.text import DynamicDecode, RNN, BasicLSTMCell, RNNCell
 
 
 class CrossEntropyCriterion(Loss):
diff --git a/examples/seq2seq/train.py b/examples/seq2seq/train.py
index 55a31d39ad74686728593824151ac4bdf7b1b1ba..cd4fab9c932714e1da630c2d32a6b1243f03e7a0 100644
--- a/examples/seq2seq/train.py
+++ b/examples/seq2seq/train.py
@@ -21,7 +21,7 @@ import numpy as np
 import paddle.fluid as fluid
 from paddle.fluid.io import DataLoader
 
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
 from args import parse_args
 from seq2seq_base import BaseModel, CrossEntropyCriterion
 from seq2seq_attn import AttentionModel
diff --git a/examples/seq2seq/utility.py b/examples/seq2seq/utility.py
index aa0dd4a461d24d8a7799ff47c9d63a65bf87d401..95a38ff05d577a48947e6a71b14f6e6bf7c6894a 100644
--- a/examples/seq2seq/utility.py
+++ b/examples/seq2seq/utility.py
@@ -16,8 +16,8 @@ import math
 
 import paddle.fluid as fluid
 
-from hapi.metrics import Metric
-from hapi.callbacks import ProgBarLogger
+from paddle.incubate.hapi.metrics import Metric
+from paddle.incubate.hapi.callbacks import ProgBarLogger
 
 
 class TrainCallback(ProgBarLogger):
diff --git a/examples/sequence_tagging/eval.py b/examples/sequence_tagging/eval.py
index f58337be83b49978494bb1e9a9634ecb9256b909..2520d95a42c64468d8206015a4c5991174c679ee 100644
--- a/examples/sequence_tagging/eval.py
+++ b/examples/sequence_tagging/eval.py
@@ -28,11 +28,11 @@ import numpy as np
 work_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 sys.path.append(os.path.join(work_dir, "../"))
 
-from hapi.model import set_device, Input
-from hapi.text.sequence_tagging import SeqTagging, ChunkEval, LacLoss
-from hapi.text.sequence_tagging import LacDataset, LacDataLoader
-from hapi.text.sequence_tagging import check_gpu, check_version
-from hapi.text.sequence_tagging import PDConfig
+from paddle.incubate.hapi.model import set_device, Input
+from paddle.incubate.hapi.text.sequence_tagging import SeqTagging, ChunkEval, LacLoss
+from paddle.incubate.hapi.text.sequence_tagging import LacDataset, LacDataLoader
+from paddle.incubate.hapi.text.sequence_tagging import check_gpu, check_version
+from paddle.incubate.hapi.text.sequence_tagging import PDConfig
 
 import paddle.fluid as fluid
 from paddle.fluid.layers.utils import flatten
@@ -65,7 +65,8 @@ def main(args):
         device=place)
 
     model.load(args.init_from_checkpoint, skip_mismatch=True)
-    eval_result = model.evaluate(eval_dataset.dataloader, batch_size=args.batch_size)
+    eval_result = model.evaluate(
+        eval_dataset.dataloader, batch_size=args.batch_size)
     print("precison: %.5f" % (eval_result["precision"][0]))
     print("recall: %.5f" % (eval_result["recall"][0]))
     print("F1: %.5f" % (eval_result["F1"][0]))
diff --git a/examples/sequence_tagging/predict.py b/examples/sequence_tagging/predict.py
index e8802e4a91ee4fbd339be29a83914cf24b9839c1..5b1620d45df6cd490da0fe6ab15d81229c3ed304 100644
--- a/examples/sequence_tagging/predict.py
+++ b/examples/sequence_tagging/predict.py
@@ -29,11 +29,11 @@ import numpy as np
 work_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 sys.path.append(os.path.join(work_dir, "../"))
 
-from hapi.text.sequence_tagging import SeqTagging
-from hapi.model import Input, set_device
-from hapi.text.sequence_tagging import LacDataset, LacDataLoader
-from hapi.text.sequence_tagging import check_gpu, check_version
-from hapi.text.sequence_tagging import PDConfig
+from paddle.incubate.hapi.text.sequence_tagging import SeqTagging
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.text.sequence_tagging import LacDataset, LacDataLoader
+from paddle.incubate.hapi.text.sequence_tagging import check_gpu, check_version
+from paddle.incubate.hapi.text.sequence_tagging import PDConfig
 
 import paddle.fluid as fluid
 from paddle.fluid.layers.utils import flatten
diff --git a/examples/sequence_tagging/train.py b/examples/sequence_tagging/train.py
index 56507ad05b96baab8d7d262da3b7655e82df6f23..69b76b1510c58e66dc423aad457167af5fa0fa93 100644
--- a/examples/sequence_tagging/train.py
+++ b/examples/sequence_tagging/train.py
@@ -28,11 +28,11 @@ import numpy as np
 work_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 sys.path.append(os.path.join(work_dir, "../"))
 
-from hapi.model import Input, set_device
-from hapi.text.sequence_tagging import SeqTagging, LacLoss, ChunkEval
-from hapi.text.sequence_tagging import LacDataset, LacDataLoader
-from hapi.text.sequence_tagging import check_gpu, check_version
-from hapi.text.sequence_tagging import PDConfig
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.text.sequence_tagging import SeqTagging, LacLoss, ChunkEval
+from paddle.incubate.hapi.text.sequence_tagging import LacDataset, LacDataLoader
+from paddle.incubate.hapi.text.sequence_tagging import check_gpu, check_version
+from paddle.incubate.hapi.text.sequence_tagging import PDConfig
 
 import paddle.fluid as fluid
 from paddle.fluid.optimizer import AdamOptimizer
diff --git a/examples/style-transfer/README.md b/examples/style-transfer/README.md
index 84b57bd6a6faabb72c89382fd083f51a7a7ab767..46b2b300a2ce6ad066019602e16ffe451cc2ddbf 100644
--- a/examples/style-transfer/README.md
+++ b/examples/style-transfer/README.md
@@ -32,10 +32,10 @@ gram_matrix = fluid.layers.matmul(tensor, fluid.layers.transpose(tensor, [1, 0])
 import numpy as np
 import matplotlib.pyplot as plt
 
-from hapi.model import Model, Loss
+from paddle.incubate.hapi.model import Model, Loss
 
-from hapi.vision.models import vgg16
-from hapi.vision.transforms import transforms
+from paddle.incubate.hapi.vision.models import vgg16
+from paddle.incubate.hapi.vision.transforms import transforms
 from paddle import fluid
 from paddle.fluid.io import Dataset
diff --git a/examples/style-transfer/style_transfer.py b/examples/style-transfer/style_transfer.py
index ad0716f7a164a6961df224e2528a13b96194cf6f..854ca9c77b009d8254822556eb56f2967c8efbf7 100644
--- a/examples/style-transfer/style_transfer.py
+++ b/examples/style-transfer/style_transfer.py
@@ -3,10 +3,10 @@ import argparse
 import numpy as np
 import matplotlib.pyplot as plt
 
-from hapi.model import Model, Loss
+from paddle.incubate.hapi.model import Model, Loss
 
-from hapi.vision.models import vgg16
-from hapi.vision.transforms import transforms
+from paddle.incubate.hapi.vision.models import vgg16
+from paddle.incubate.hapi.vision.transforms import transforms
 from paddle import fluid
 from paddle.fluid.io import Dataset
diff --git a/examples/transformer/predict.py b/examples/transformer/predict.py
index f99bf774cb2c9d6ceaa5b4cf69b941f9b2558358..5521d6c78478cf72a7a78d91d508a061b4f39cc3 100644
--- a/examples/transformer/predict.py
+++ b/examples/transformer/predict.py
@@ -25,7 +25,7 @@ from paddle.fluid.layers.utils import flatten
 
 from utils.configure import PDConfig
 from utils.check import check_gpu, check_version
 
-from hapi.model import Input, set_device
+from paddle.incubate.hapi.model import Input, set_device
 from reader import prepare_infer_input, Seq2SeqDataset, Seq2SeqBatchSampler
 from transformer import InferTransformer
diff --git a/examples/transformer/train.py b/examples/transformer/train.py
index 39bee1dea46ce459c5f9388ce1d0e08fce914ac4..90f54d01a85e811bee7c5cc6411277466639f4be 100644
--- a/examples/transformer/train.py
+++ b/examples/transformer/train.py
@@ -23,8 +23,8 @@ from paddle.io import DataLoader
 
 from utils.configure import PDConfig
 from utils.check import check_gpu, check_version
 
-from hapi.model import Input, set_device
-from hapi.callbacks import ProgBarLogger
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.callbacks import ProgBarLogger
 from reader import create_data_loader
 from transformer import Transformer, CrossEntropyCriterion
diff --git a/examples/transformer/transformer.py b/examples/transformer/transformer.py
index 30bb931d28c3b52467f484f4cb14b5d5601c76d9..02405d0aa3bf048b3c9442f27a4d5fceac5a7639 100644
--- a/examples/transformer/transformer.py
+++ b/examples/transformer/transformer.py
@@ -20,8 +20,8 @@ import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, Layer, to_variable
 from paddle.fluid.dygraph.learning_rate_scheduler import LearningRateDecay
-from hapi.model import Model, CrossEntropy, Loss
-from hapi.text import TransformerBeamSearchDecoder, DynamicDecode
+from paddle.incubate.hapi.model import Model, CrossEntropy, Loss
+from paddle.incubate.hapi.text import TransformerBeamSearchDecoder, DynamicDecode
 
 
 def position_encoding_init(n_position, d_pos_vec):
diff --git a/examples/tsm/infer.py b/examples/tsm/infer.py
index cac9745fc8ddc1ad33dc3e95dd82e8f2dbe24277..2351eb2890d9588f40a2c5f9b7e9be7f5f958c6e 100644
--- a/examples/tsm/infer.py
+++ b/examples/tsm/infer.py
@@ -19,8 +19,8 @@ import os
 import argparse
 import numpy as np
 
-from hapi.model import Input, set_device
-from hapi.vision.transforms import Compose
+from paddle.incubate.hapi.model import Input, set_device
+from paddle.incubate.hapi.vision.transforms import Compose
 
 from check import check_gpu, check_version
 from modeling import tsm_resnet50
@@ -36,18 +36,16 @@ def main():
     device = set_device(FLAGS.device)
     fluid.enable_dygraph(device) if FLAGS.dynamic else None
 
-    transform = Compose([GroupScale(),
-                         GroupCenterCrop(),
-                         NormalizeImage()])
+    transform = Compose([GroupScale(), GroupCenterCrop(), NormalizeImage()])
     dataset = KineticsDataset(
-            pickle_file=FLAGS.infer_file,
-            label_list=FLAGS.label_list,
-            mode='test',
-            transform=transform)
+        pickle_file=FLAGS.infer_file,
+        label_list=FLAGS.label_list,
+        mode='test',
+        transform=transform)
     labels = dataset.label_list
 
-    model = tsm_resnet50(num_classes=len(labels),
-                         pretrained=FLAGS.weights is None)
+    model = tsm_resnet50(
+        num_classes=len(labels), pretrained=FLAGS.weights is None)
 
     inputs = [Input([None, 8, 3, 224, 224], 'float32', name='image')]
 
@@ -66,19 +64,23 @@ def main():
 if __name__ == '__main__':
     parser = argparse.ArgumentParser("CNN training on TSM")
     parser.add_argument(
-        "--data", type=str, default='dataset/kinetics',
+        "--data",
+        type=str,
+        default='dataset/kinetics',
         help="path to dataset root directory")
     parser.add_argument(
-        "--device", type=str, default='gpu',
-        help="device to use, gpu or cpu")
+        "--device", type=str, default='gpu', help="device to use, gpu or cpu")
     parser.add_argument(
-        "-d", "--dynamic", action='store_true',
-        help="enable dygraph mode")
+        "-d", "--dynamic", action='store_true', help="enable dygraph mode")
     parser.add_argument(
-        "--label_list", type=str, default=None,
+        "--label_list",
+        type=str,
+        default=None,
         help="path to category index label list file")
     parser.add_argument(
-        "--infer_file", type=str, default=None,
+        "--infer_file",
+        type=str,
+        default=None,
        help="path to pickle file for inference")
     parser.add_argument(
         "-w",
diff --git a/examples/tsm/main.py b/examples/tsm/main.py
index deef9f6b8349033f2d7ce83b40f583ca24a56a7e..d292d7d99d2f5114f81116d302cd3af0478617f3 100644
--- a/examples/tsm/main.py
+++ b/examples/tsm/main.py
@@ -22,10 +22,10 @@ import numpy as np
 from paddle import fluid
 from paddle.fluid.dygraph.parallel import ParallelEnv
 
-from hapi.model import Model, Input, set_device
-from hapi.loss import CrossEntropy
-from hapi.metrics import Accuracy
-from hapi.vision.transforms import Compose
+from paddle.incubate.hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.loss import CrossEntropy
+from paddle.incubate.hapi.metrics import Accuracy
+from paddle.incubate.hapi.vision.transforms import Compose
 
 from modeling import tsm_resnet50
 from check import check_gpu, check_version
diff --git a/examples/tsm/modeling.py b/examples/tsm/modeling.py
index aafd6a4557e01222bf672f6bde47dcdbeee06ce3..a42f4265e09d8f53d207013d801dab11a94ef992 100644
--- a/examples/tsm/modeling.py
+++ b/examples/tsm/modeling.py
@@ -17,8 +17,8 @@ import paddle.fluid as fluid
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
 
-from hapi.model import Model
-from hapi.download import get_weights_path_from_url
+from paddle.incubate.hapi.model import Model
+from paddle.incubate.hapi.download import get_weights_path_from_url
 
 __all__ = ["TSM_ResNet", "tsm_resnet50"]
diff --git a/examples/yolov3/infer.py b/examples/yolov3/infer.py
index 7b6976c89dcd5d6efb39d7143656761dffb03350..a2640fec1f1bb1d9844a10df79910b8e62408091 100644
--- a/examples/yolov3/infer.py
+++ b/examples/yolov3/infer.py
@@ -18,13 +18,13 @@ from __future__ import print_function
 import os
 import argparse
 import numpy as np
-from PIL import Image 
+from PIL import Image
 
 from paddle import fluid
 from paddle.fluid.optimizer import Momentum
 from paddle.io import DataLoader
 
-from hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.model import Model, Input, set_device
 from modeling import yolov3_darknet53, YoloLoss
 from transforms import *
@@ -64,16 +64,20 @@ def load_labels(label_list, with_background=True):
 def main():
     device = set_device(FLAGS.device)
     fluid.enable_dygraph(device) if FLAGS.dynamic else None
-    
-    inputs = [Input([None, 1], 'int64', name='img_id'),
-              Input([None, 2], 'int32', name='img_shape'),
-              Input([None, 3, None, None], 'float32', name='image')]
+
+    inputs = [
+        Input(
+            [None, 1], 'int64', name='img_id'), Input(
+                [None, 2], 'int32', name='img_shape'), Input(
+                    [None, 3, None, None], 'float32', name='image')
+    ]
 
     cat2name = load_labels(FLAGS.label_list, with_background=False)
 
-    model = yolov3_darknet53(num_classes=len(cat2name),
-                             model_mode='test',
-                             pretrained=FLAGS.weights is None)
+    model = yolov3_darknet53(
+        num_classes=len(cat2name),
+        model_mode='test',
+        pretrained=FLAGS.weights is None)
 
     model.prepare(inputs=inputs, device=FLAGS.device)
 
@@ -82,7 +86,7 @@ def main():
 
     # image preprocess
     orig_img = Image.open(FLAGS.infer_image).convert('RGB')
-    w, h = orig_img.size 
+    w, h = orig_img.size
     img = orig_img.resize((608, 608), Image.BICUBIC)
     img = np.array(img).astype('float32') / 255.0
     img -= np.array(IMAGE_MEAN)
@@ -106,19 +110,33 @@ if __name__ == '__main__':
     parser.add_argument(
         "-d", "--dynamic", action='store_true', help="enable dygraph mode")
     parser.add_argument(
-        "--label_list", type=str, default=None,
+        "--label_list",
+        type=str,
+        default=None,
         help="path to category label list file")
     parser.add_argument(
-        "-t", "--draw_threshold", type=float, default=0.5,
+        "-t",
+        "--draw_threshold",
+        type=float,
+        default=0.5,
         help="threshold to reserve the result for visualization")
     parser.add_argument(
-        "-i", "--infer_image", type=str, default=None,
+        "-i",
+        "--infer_image",
+        type=str,
+        default=None,
         help="image path for inference")
     parser.add_argument(
-        "-o", "--output_dir", type=str, default='output',
+        "-o",
+        "--output_dir",
+        type=str,
+        default='output',
         help="directory to save inference result if --visualize is set")
     parser.add_argument(
-        "-w", "--weights", default=None, type=str,
+        "-w",
+        "--weights",
+        default=None,
+        type=str,
         help="path to weights for inference")
     FLAGS = parser.parse_args()
     print_arguments(FLAGS)
diff --git a/examples/yolov3/main.py b/examples/yolov3/main.py
index e3c773fbc40e8afd19c568d993d903f6a52240dd..f90ae4fe11a18fa040b7c0492eda930a242253aa 100644
--- a/examples/yolov3/main.py
+++ b/examples/yolov3/main.py
@@ -25,9 +25,9 @@ from paddle import fluid
 from paddle.fluid.optimizer import Momentum
 from paddle.io import DataLoader
 
-from hapi.model import Model, Input, set_device
-from hapi.distributed import DistributedBatchSampler
-from hapi.vision.transforms import Compose, BatchCompose
+from paddle.incubate.hapi.model import Model, Input, set_device
+from paddle.incubate.hapi.distributed import DistributedBatchSampler
+from paddle.incubate.hapi.vision.transforms import Compose, BatchCompose
 
 from modeling import yolov3_darknet53, YoloLoss
 from coco import COCODataset
diff --git a/examples/yolov3/modeling.py b/examples/yolov3/modeling.py
index 982c0beae8f215a0bc00441895e8c9bd883b83cb..3beb977849787d007418599a95e6ae5539212bae 100644
--- a/examples/yolov3/modeling.py
+++ b/examples/yolov3/modeling.py
@@ -23,7 +23,7 @@ from paddle.fluid.regularizer import L2Decay
 from hapi.model import Model
 from hapi.loss import Loss
 from hapi.download import get_weights_path_from_url
-from hapi.vision.models import darknet53
+from darknet import darknet53
 
 __all__ = ['YoloLoss', 'YOLOv3', 'yolov3_darknet53']