Commit fc9ad34e authored by Yu Yang

Merge branch 'feature/inferencer' into feature/recommendation_v2_api

......@@ -44,6 +44,19 @@ def main():
            batch_size=32),
        event_handler=event_handler)

    # The output of the network is a softmax layer, so infer() returns
    # class probabilities; with 100 samples the shape should be (100, 10).
    probs = paddle.infer(
        output=inference,
        parameters=parameters,
        reader=paddle.reader.batched(
            paddle.reader.limited(
                paddle.reader.map_readers(lambda item: (item[0], ),
                                          paddle.dataset.mnist.test()),
                limit=100),
            batch_size=32))
    print probs.shape


if __name__ == '__main__':
    main()
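Here `map_readers` strips the labels from the test samples (inference needs only the images), `limited` caps the stream at 100 samples, and `batched` groups them for the forward pass, so `probs` ends up as a (100, 10) numpy array of per-class probabilities. A minimal sketch of consuming it, assuming `numpy` is imported in the demo script:

    # each row of probs sums to 1; the argmax along axis 1 is the predicted digit
    predicted = numpy.argmax(probs, axis=1)  # shape (100,)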
......@@ -24,12 +24,13 @@ from . import dataset
from . import reader
import attr
import pooling
import inferencer
import py_paddle.swig_paddle as api

__all__ = [
    'optimizer', 'layer', 'activation', 'parameters', 'init', 'trainer',
    'event', 'data_type', 'attr', 'pooling', 'data_feeder', 'dataset', 'reader',
    'topology', 'inferencer', 'infer'
]
......@@ -39,3 +40,6 @@ def init(**kwargs):
        args.append('--%s=%s' % (key, str(kwargs[key])))
    api.initPaddle(*args)


infer = inferencer.infer
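With this alias, the function defined in inferencer.py below becomes reachable directly on the package, which is what the demo hunk above calls. A hedged usage sketch; the `paddle.v2` import path is an assumption based on the surrounding code, and `inference`, `parameters` and `test_reader` are assumed to exist as in the demo:

    import paddle.v2 as paddle

    paddle.init(use_gpu=False, trainer_count=1)  # kwargs become CLI flags for api.initPaddle
    probs = paddle.infer(output=inference, parameters=parameters, reader=test_reader)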
......@@ -35,6 +35,7 @@ def reader_creator(image_filename, label_filename, buffer_size):
        l = subprocess.Popen([zcat_cmd, label_filename], stdout=subprocess.PIPE)
        l.stdout.read(8)  # skip some magic bytes

        try:  # the reader may be interrupted before it is exhausted
            while True:
                labels = numpy.fromfile(
                    l.stdout, 'ubyte', count=buffer_size).astype("int")
......@@ -50,7 +51,7 @@ def reader_creator(image_filename, label_filename, buffer_size):
                for i in xrange(buffer_size):
                    yield images[i, :], int(labels[i])
        finally:
            # always terminate both zcat subprocesses, even on early exit
            m.terminate()
            l.terminate()
......
import py_paddle.swig_paddle as api
import topology
from data_feeder import DataFeeder
import itertools
import numpy

__all__ = ['InferenceEngine', 'infer']


class InferenceEngine(object):
    def __init__(self, output, parameters):
        # Build a test-mode gradient machine from the network topology and
        # copy the trained parameter values into it.
        topo = topology.Topology(output)
        gm = api.GradientMachine.createFromConfigProto(
            topo.proto(), api.CREATE_MODE_TESTING, [api.PARAMETER_VALUE])
        for param in gm.getParameters():
            val = param.getBuf(api.PARAMETER_VALUE)
            name = param.getName()
            assert isinstance(val, api.Vector)
            val.copyFromNumpyArray(parameters.get(name).flatten())
        self.__gradient_machine__ = gm
        self.__data_types__ = topo.data_type()

    def iter_infer(self, reader, reader_dict=None):
        if reader_dict is None:
            reader_dict = self.default_reader_dict()
        feeder = DataFeeder(self.__data_types__, reader_dict)
        self.__gradient_machine__.start()
        # Forward each batch through the network, yielding the raw result.
        for data_batch in reader():
            yield self.__gradient_machine__.forwardTest(feeder(data_batch))
        self.__gradient_machine__.finish()

    def iter_infer_field(self, field, **kwargs):
        # Each batch result is a list with one dict per output layer; keep
        # only the requested field (e.g. 'value') of each.
        for result in self.iter_infer(**kwargs):
            yield [each_result[field] for each_result in result]

    def infer(self, field='value', **kwargs):
        retv = None
        for result in self.iter_infer_field(field=field, **kwargs):
            if retv is None:
                # One accumulator per output layer. Note that [[]] * n would
                # alias the same list n times, so build them independently.
                retv = [[] for _ in xrange(len(result))]
            for i, item in enumerate(result):
                retv[i].append(item)
        # Concatenate the per-batch arrays of each output layer.
        retv = [numpy.concatenate(out) for out in retv]
        if len(retv) == 1:
            return retv[0]
        else:
            return retv

    def default_reader_dict(self):
        # By default, map each declared input to its position in the sample.
        reader_dict = dict()
        for i, tp in enumerate(self.__data_types__):
            reader_dict[tp[0]] = i
        return reader_dict


def infer(output, parameters, reader, reader_dict=None, field='value'):
    inferer = InferenceEngine(output=output, parameters=parameters)
    return inferer.infer(field=field, reader=reader, reader_dict=reader_dict)
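A minimal sketch of driving the engine directly instead of through the `infer` helper; `inference`, `parameters` and `batched_test_reader` are assumed to be set up as in the demo hunk above:

    engine = InferenceEngine(output=inference, parameters=parameters)

    # stream results batch by batch; each yielded list has one entry per output layer
    for batch_fields in engine.iter_infer_field(field='value', reader=batched_test_reader):
        for value in batch_fields:
            print value.shape  # e.g. (32, 10) for a batch of 32 softmax outputs

    # or collect everything at once, which is what paddle.infer(...) does
    probs = engine.infer(field='value', reader=batched_test_reader)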
......@@ -14,13 +14,13 @@
__all__ = [
    'map_readers', 'buffered', 'compose', 'chain', 'shuffle',
    'ComposeNotAligned', 'batched', 'limited'
]

import itertools
import random
from Queue import Queue
from threading import Thread


def map_readers(func, *readers):
......@@ -213,3 +213,17 @@ def batched(reader, batch_size):
            yield batch

    return batched_reader
def limited(reader, limit):
    """
    Limit the maximum number of samples that the reader can return.
    """

    def limited_reader():
        for i, item in enumerate(reader()):
            if i == limit:
                break
            yield item

    return limited_reader
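A small sketch of composing these reader decorators, mirroring the demo's usage; the counting reader is hypothetical, and the trailing partial batch assumes `batched` yields it (as the final `yield batch` above suggests):

    def counting_reader():
        for i in xrange(10):
            yield i

    # cap the stream at 5 samples, then group into batches of 2
    r = batched(limited(counting_reader, 5), 2)
    print list(r())  # [[0, 1], [2, 3], [4]]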