Commit 08ca7267 authored by 武毅, committed by dzhwinter

evaluator_accumulate (#4828)

Parent 1f11f773
import paddle.v2.framework.op as op
import numpy as np
import paddle.v2.framework.core as core


def avg_accumulate(accumulated_var, per_eval, num_batches, place):
    # Keep a running average of the per-batch evaluation:
    # new_avg = (old_avg * (num_batches - 1) + per_eval) / num_batches
    t = np.array(accumulated_var.get_tensor())
    avg = (t[0] * (num_batches - 1) + per_eval[0]) / float(num_batches)
    accumulated_var.get_tensor().set([avg], place)
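    # For example, with per-batch evals 0.8, 1.0, 0.9 the accumulated value
    # becomes 0.8, then (0.8 * 1 + 1.0) / 2 = 0.9, then (0.9 * 2 + 0.9) / 3 = 0.9.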


class Evaluator(object):
    def __init__(self,
                 scope,
                 operator='accuracy',
                 input='Inference',
                 label='Label',
                 output='Output',
                 place=core.CPUPlace()):
"""
create an evaluator for evaluating the inference.
NOTE: default run on CPUPlace(), running on GPUPlace doesn't improve performance much.
:param scope: the scope instance contains the input.
:type scope: paddle.v2.framework.core.scope
:param operator: operator name for caculating the evaluation for each mini-batch.
:type operator: string
:param input: output variable name of forward network.
:type input: string
:param label: variable name of label
:type label: string
"""
        self.scope = scope
        self.place = place
        self.output_name = output
        self.num_batches = 0
        # create a variable to store the accumulated evaluator output
        eval_name = ''.join([operator, "@Eval"])
        if scope.find_var(eval_name):
            raise Exception("evaluator already exists in scope: %s" % eval_name)
        self.accumulated_var = scope.var(eval_name)
        t = self.accumulated_var.get_tensor()
        t.set_dims((1, ))
        t.set([0.0], place)
        # self.accumulated_var = block.create_var(block, name=eval_name, shape=(1,))
        # self.accumulated_var.get_tensor().set([0.0])
        # create the operator that evaluates each mini-batch
        var_map = dict()  # operator parameter name -> list of variable names
        var_map[input] = [input]
        var_map[label] = [label]
        var_map[output] = [output]
        self.op = op.Operator(operator, **var_map)

    def evaluate(self, ctx, accumulator=avg_accumulate):
        # run the evaluation operator on the current mini-batch, then fold
        # its result into the accumulated value
        self.op.run(self.scope, ctx)
        per_eval = np.array(self.scope.find_var(self.output_name).get_tensor())
        self.num_batches += 1
        accumulator(self.accumulated_var, per_eval, self.num_batches,
                    self.place)
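
A minimal usage sketch, assuming the legacy paddle.v2.framework API and that the forward network has already created and filled the 'Inference', 'Label' and 'Accuracy' variables in the scope (the unit test below constructs them explicitly):

import numpy as np
import paddle.v2.framework.core as core
from paddle.v2.framework.evaluator import Evaluator

scope = core.Scope()
place = core.CPUPlace()
# 'Inference', 'Label' and 'Accuracy' are assumed to already exist in the scope
evaluator = Evaluator(
    scope,
    operator='accuracy',
    input='Inference',
    label='Label',
    output='Accuracy',
    place=place)
ctx = core.DeviceContext.create(place)
for batch in range(10):
    # ... run the forward pass for this mini-batch here ...
    evaluator.evaluate(ctx)
# the running average is kept in the 'accuracy@Eval' variable created by the Evaluator
print np.array(scope.find_var('accuracy@Eval').get_tensor())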
from paddle.v2.framework.evaluator import Evaluator
from paddle.v2.framework.op import Operator
import paddle.v2.framework.core as core
import unittest
import op_test
import numpy as np


class TestEvaluator(unittest.TestCase):
    def setup(self, scope, inputs, outputs):
        def __create_var__(var_name, arr):
            np_arr = np.array(arr)
            scope.var(var_name)
            # tensor = var.get_tensor()
            # tensor.set_dims(np_arr.shape)

        for var_name, arr in inputs.iteritems():
            __create_var__(var_name, arr)
        for var_name, arr in outputs.iteritems():
            __create_var__(var_name, arr)

    def test_evaluator(self):
        inputs = {
            'Inference': np.array([[1, 1, 1, 1, 1, 0, 0, 0, 0, 1]]).T,
            'Label': np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
        }
        outputs = {'Accuracy': np.array([0.9])}
        out_name = 'Accuracy'

        places = [core.CPUPlace()]
        if core.is_compile_gpu():
            places.append(core.GPUPlace(0))

        for place in places:
            scope = core.Scope()
            self.setup(scope, inputs, outputs)

            evaluator = Evaluator(
                scope,
                operator='accuracy',
                input='Inference',
                label='Label',
                output=out_name,
                place=place)
            op_test.set_input(scope, evaluator.op, inputs, place)
            ctx = core.DeviceContext.create(place)

            for i in range(10):  # simulate 10 mini-batches
                evaluator.evaluate(ctx)

            actual = np.array(scope.find_var(out_name).get_tensor())
            print actual
            self.assertTrue(
                np.allclose(
                    actual, outputs[out_name], atol=1e-5),
                "output name: " + out_name + " has diff.")


if __name__ == '__main__':
    unittest.main()