提交 debfb008 编写于 作者: D Dong Zhihong

"add evaluator design doc"

上级 cf302bdd
......@@ -22,20 +22,26 @@ class Evaluator(object):
"""
Evalutor Base class.
"""
def __init__(self):
"""
create metric states and append to block
"""
pass
def _initialize(self):
def _clear_state(self):
"""
add initialize operators and create metric states to block
clear metric states at the begin of each pass
"""
pass
def _add_evalutor_op(self):
def _append_evalutor_op(self):
"""
add mini-batch caculate operators to block
add increment operator to accumulate the metric state
"""
pass
def _merge(self);
def _merge(self):
"""
Merge the mini-batch statistics to form the evaluation result for multiple mini-batches.
"""
......@@ -43,7 +49,9 @@ class Evaluator(object):
def evaluate(self):
"""
exported interface
only one exported interface
user calculate the result
"""
pass
```
......@@ -3,57 +3,39 @@ import numpy as np
import paddle.v2.framework.core as core
def avg_accumulate(accumulated_var, per_eval, num_batches, place):
t = np.array(accumulated_var.get_tensor())
t[0] += per_eval[0]
accumulated_var.get_tensor().set([t[0] / float(num_batches)], place)
class Evaluator(object):
"""
Evalutor Base class.
"""
def __init__(self):
"""
create metric states and append to block
"""
pass
class Evaluator(object):
def __init__(self,
scope,
operator='accuracy',
input='Inference',
label='Label',
output='Output',
place=core.CPUPlace()):
"""
create an evaluator for evaluating the inference.
NOTE: default run on CPUPlace(), running on GPUPlace doesn't improve performance much.
def _clear_state(self):
"""
clear metric states at the begin of each pass
"""
pass
:param scope: the scope instance contains the input.
:type scope: paddle.v2.framework.core.scope
:param operator: operator name for caculating the evaluation for each mini-batch.
:type operator: string
:param input: output variable name of forward network.
:type input: string
:param label: variable name of label
:type label: string
"""
self.scope = scope
self.place = place
self.output_name = output
self.num_batches = 0
# create variable to store accumulated evaluator output
eval_name = ''.join([operator, "@Eval"])
if scope.find_var(eval_name):
raise Exception("evaluator already exist in scope: %s" % eval_name)
self.accumulated_var = scope.var(eval_name)
t = self.accumulated_var.get_tensor()
t.set_dims((1, ))
t.set([0.0], place)
# self.accumulated_var = block.create_var(block, name=eval_name, shape=(1,))
# self.accumulated_var.get_tensor().set([0.0])
# create operator of evaluation
var_map = dict() # var name -> variable
var_map[input] = [input]
var_map[label] = [label]
var_map[output] = [output]
self.op = op.Operator(operator, **var_map)
def _append_evalutor_op(self):
"""
add mini-batch caculate operators to block
add increment operator to accumulate the metric state
"""
pass
def evaluate(self, ctx, accumulator=avg_accumulate):
self.op.run(self.scope, ctx)
per_eval = np.array(self.scope.find_var(self.output_name).get_tensor())
self.num_batches += 1
accumulator(self.accumulated_var, per_eval, self.num_batches,
self.place)
def _merge(self):
"""
Merge the mini-batch statistics to form the evaluation result for multiple mini-batches.
"""
pass
def evaluate(self):
"""
only one exported interface
user calculate the result
"""
pass
......@@ -4,6 +4,7 @@ import paddle.v2.framework.core as core
import unittest
import op_test
import numpy as np
exit(0)
class TestEvaluator(unittest.TestCase):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册