Commit b8f557f2 authored by Dong Zhihong

"add elementwise_add more type"

Parent e34e1293
...
@@ -45,9 +45,9 @@ class AccuracyKernel : public framework::OpKernel<T> {
     auto* correct = ctx.Output<Tensor>("Correct");
     auto* total = ctx.Output<Tensor>("Total");
-    float* correct_data = correct->mutable_data<float>(ctx.GetPlace());
-    int* accuracy_data = accuracy->mutable_data<int>(ctx.GetPlace());
-    int* total_data = total->mutable_data<int>(ctx.GetPlace());
+    int* correct_data = correct->mutable_data<int>(ctx.GetPlace());
+    int* total_data = total->mutable_data<int>(ctx.GetPlace());
+    float* accuracy_data = accuracy->mutable_data<float>(ctx.GetPlace());
     const int64_t* indices_data = indices->data<int64_t>();
     const int64_t* label_data = label->data<int64_t>();
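The kernel change above fixes a type mix-up: the count outputs (Correct, Total) are integers, while Accuracy is their floating-point ratio. A minimal numpy sketch of the same contract (illustration only; the array values are made up):

import numpy as np

# indices: top-k predicted classes per sample; label: ground truth (both int64)
indices = np.array([[0], [2], [1]], dtype=np.int64)
label = np.array([[0], [1], [1]], dtype=np.int64)

correct = int((indices == label).any(axis=1).sum())  # int, like correct_data
total = indices.shape[0]                             # int, like total_data
accuracy = float(correct) / total                    # float, like accuracy_data
print correct, total, accuracy                       # prints: 2 3 0.666666666667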
...
@@ -34,7 +34,13 @@ REGISTER_OP(elementwise_add, ops::ElementwiseOp, ops::ElementwiseAddOpMaker,
             elementwise_add_grad, ops::ElementwiseOpGrad);
 REGISTER_OP_CPU_KERNEL(
     elementwise_add,
-    ops::ElementwiseAddKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseAddKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseAddKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseAddKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseAddKernel<paddle::platform::CPUPlace, int64_t>);
 REGISTER_OP_CPU_KERNEL(
     elementwise_add_grad,
-    ops::ElementwiseAddGradKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseAddGradKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseAddGradKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseAddGradKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseAddGradKernel<paddle::platform::CPUPlace, int64_t>);
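With these registrations the CPU elementwise_add kernel (and its gradient) dispatches on four element types instead of float alone. A small numpy sketch of the per-element contract being extended (numpy stands in for the kernel here; this is not the Paddle API):

import numpy as np

# the same add must now work for float, double, int, and int64_t inputs
for dtype in (np.float32, np.float64, np.int32, np.int64):
    x = np.array([1, 2, 3], dtype=dtype)
    y = np.array([10, 20, 30], dtype=dtype)
    out = x + y  # what ElementwiseAddKernel computes element by element
    assert out.dtype == dtype and (out == [11, 22, 33]).all()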
...
-from paddle.v2.framework.framework import Program, g_main_program, unique_name
+from paddle.v2.framework.framework import Program, g_main_program, unique_name, Variable
+from paddle.v2.framework.layer_helper import LayerHelper
 import paddle.v2.framework.core as core
+
+
+def _clone_var_in_block_(block, var):
+    assert isinstance(var, Variable)
+    return block.create_var(
+        name=var.name,
+        shape=var.shape,
+        dtype=var.data_type,
+        type=var.type,
+        lod_level=var.lod_level,
+        persistable=True)
+

 class Evaluator(object):
     """
     Evaluator Base class.
...
@@ -13,33 +23,49 @@ class Evaluator(object):
     """

     def __init__(self, name, **kwargs):
+        """
+        init the global states
+        """
         self._states = {}
-        if kwargs.has_key("program"):
-            self._program = kwargs.get("program")
+        if kwargs.has_key("main_program"):
+            self._main_program = kwargs.get("main_program")
+        else:
+            self._main_program = g_main_program
+        if kwargs.has_key("eval_program"):
+            self._eval_program = kwargs.get("eval_program")
         else:
-            self._program = g_main_program
+            self._eval_program = Program()
+
+    def _update_ops(self):
+        """
+        append update ops to the global states
+        """
+        raise NotImplementedError()

     def reset(self, executor, program=None):
         """
         Clear metric states at the begin of each pass/user specified batch
         """
         if program == None:
             reset_program = Program()
         else:
             reset_program = program
         block = reset_program.global_block()
         for k, var in self._states.iteritems():
-            zeros = block.create_var(dtype=var.data_type)
+            g_var = _clone_var_in_block_(block, var)
+            zeros = block.create_var(dtype="float32", persistable=True)
             block.append_op(
                 type="fill_constant",
                 outputs={"Out": [zeros]},
                 attrs={
-                    "shape": var.shape,
-                    "value": 0,
+                    "shape": g_var.shape,
+                    "value": .0,
+                    "data_type": 5,
                 })
             block.append_op(
-                type="scale", inputs={"X": zeros}, outputs={"Out": var})
-        executor.run(reset_program)
+                type="scale", inputs={"X": zeros}, outputs={"Out": g_var})
+        print reset_program
+        executor.run(reset_program, fetch_list=self._states.values())

     def eval(self, executor, program=None):
         """
...
@@ -53,15 +79,16 @@ class Accuracy(Evaluator):
     Accuracy need two state variable Total, Correct
     """

-    def __init__(self, input, label, k=1, **kwargs):
+    def __init__(self, *args, **kwargs):
         super(Accuracy, self).__init__("accuracy", **kwargs)
-        block = self._program.global_block()
+        # block = self._eval_program.global_block()
+        block = self._main_program.global_block()
         g_total = block.create_var(
             name=unique_name("Total"),
             persistable=True,
             dtype="int64",
             shape=[1])
-        g_correct = helper.create_global_variable(
+        g_correct = block.create_var(
             name=unique_name("Correct"),
             persistable=True,
             dtype="int64",
...
@@ -69,6 +96,8 @@ class Accuracy(Evaluator):
         self._states["Total"] = g_total
         self._states["Correct"] = g_correct

+    def _update_ops(self, input, label, k=1, **kwargs):
+        block = self._main_program.global_block()
         topk_out = block.create_var(dtype=input.data_type)
         topk_indices = block.create_var(dtype="int64")
         block.append_op(
...
@@ -77,8 +106,9 @@ class Accuracy(Evaluator):
             outputs={"Out": [topk_out],
                      "Indices": [topk_indices]},
             attrs={"k": k})
-        acc_out_dtype = kwargs.get("out_dtype", "float32")
-        acc_out = block.create_var(dtype=acc_out_dtype)
+        acc_out = block.create_var(dtype=kwargs.get("out_dtype", "float32"))
+        correct = block.create_var(dtype="int64", persistable=True)
+        total = block.create_var(dtype="int64", persistable=True)
         block.append_op(
             type="accuracy",
             inputs={
...
@@ -92,39 +122,121 @@ class Accuracy(Evaluator):
                 "Total": [total],
             })

+        # block = self._eval_program.global_block()
+        # e_correct = _clone_var_in_block_(block, correct)
+        # e_total = _clone_var_in_block_(block, total)
+        # block.append_op(
+        #     type="sum",
+        #     inputs={"X": [self._states["Total"], total]},
+        #     outputs={"Out": [self._states["Total"]]})
+        block.append_op(
+            type="cast",
+            inputs={"X": [self._states["Total"]]},
+            outputs={"Out": [self._states["Total"]]},
+            attrs={
+                "in_data_type": 5,
+                "out_data_type": 2,
+            })
+        block.append_op(
+            type="cast",
+            inputs={"X": [self._states["Correct"]]},
+            outputs={"Out": [self._states["Correct"]]},
+            attrs={
+                "in_data_type": 5,
+                "out_data_type": 2,
+            })
         block.append_op(
-            type="sum",
-            inputs={"X": [g_total, total]},
-            outputs={"Out": [g_total]})
+            type="elementwise_add",
+            inputs={"X": [self._states["Total"]],
+                    "Y": [total]},
+            outputs={"Out": [self._states["Total"]]})
         block.append_op(
-            type="sum",
-            inputs={"X": [g_correct, correct]},
-            outputs={"Out": [g_total]})
+            type="elementwise_add",
+            inputs={"X": [self._states["Correct"]],
+                    "Y": [correct]},
+            outputs={"Out": [self._states["Correct"]]})
+        # g_total = self._states["Total"]
+        # print g_total
+        # print total
+        # print "*" * 100
+        # print g_total.block.program == total.block.program
+        # g_total = _clone_var_in_block_(block, self._states["Total"])
+        # e_total = _clone_var_in_block_(block, total)
+        # block.append_op(
+        #     type="sum",
+        #     inputs={"X": [g_total, e_total]},
+        #     outputs={"Out": [g_total]})
+        # block.append_op(
+        #     type="sum",
+        #     inputs={"X": [self._states["Correct"], correct]},
+        #     outputs={"Out": [self._states["Correct"]]})
+        # print self._main_program
         return acc_out

-    def eval(self, executor, program=None):
-        if program == None:
-            eval_program = Program()
-        else:
-            eval_program = program
-        block = eval_program.global_block()
-        eval_out = block.create_var(dtype=self._helper.input_dtype())
+    def eval(self, executor):
+        block = self._eval_program.global_block()
+        eval_out = block.create_var(dtype=self._states["Total"].data_type)
+        e_correct = _clone_var_in_block_(block, correct)
+        e_total = _clone_var_in_block_(block, total)
+        # block.append_op(
+        #     type="elementwise_div",
+        #     inputs={"X": self._states["Total"],
+        #             "Y": self._states["Correct"]},
+        #     outputs={"Out": eval_out})
         block.append_op(
             type="elementwise_div",
-            inputs={"X": self._states["Total"],
-                    "Y": self._states["Correct"]},
+            inputs={"X": e_total,
+                    "Y": e_correct},
             outputs={"Out": eval_out})
-        return executor.run(eval_program, fetch_list=[eval_out])
+        return executor.run(self._eval_program, fetch_list=[eval_out])


-# Demo for composing low level op to compute the F1 metric
-class F1(Evaluator):
-    def __init__(self, input, label, **kwargs):
-        super(F1, self).__init__("F1", **kwargs)
-        g_tp = helper.create_global_variable(
+# Demo for composing low level ops to compute the F1 metric
+class FScore(Evaluator):
+    def __init__(self, input, label, beta=1.0, **kwargs):
+        super(FScore, self).__init__("FScore", **kwargs)
+        block = self._program.global_block()
+        g_tp = block.create_var(
             name=unique_name("Tp"), persistable=True, dtype="int64", shape=[1])
-        g_fp = helper.create_global_variable(
+        g_fn = block.create_var(
+            name=unique_name("Fn"), persistable=True, dtype="int64", shape=[1])
+        g_fp = block.create_var(
             name=unique_name("Fp"), persistable=True, dtype="int64", shape=[1])

         self._states["Tp"] = g_tp
         self._states["Fp"] = g_fp
+        self._states["Fn"] = g_fn
+
+    def _update_ops(self):
+        block = self._program.global_block()
+        equal_out = block.create_var()
+        block.append_op(
+            type="equal",
+            inputs={"X": [input],
+                    "Y": [label]},
+            outputs={"Out": equal_out})
+
+        positive = block.create_var()
+        block.append_op(
+            type="sequence_pool",
+            inputs={"X": [equal_out]},
+            outputs={"Out": positive},
+            attrs={"pooltype": "SUM"})
+        batch = block.create_var(
+            name=feed_var_name,
+            type=core.VarDesc.VarType.FEED_MINIBATCH,
+            persistable=True)
+
+
+# def register():
+accuracy = Accuracy
+# def accuracy(*args, **kwargs):
+#     acc = Accuracy(**kwargs)
+#     return acc._update_ops(*args, **kwargs)
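Since the module ends by aliasing `accuracy = Accuracy`, the intended two-step call pattern is roughly the following sketch (symbols such as `predict`, `label`, and `main_program` are assumed to come from the caller's network, as in the MNIST script further down):

import paddle.v2.framework.evaluator as evaluator

# step 1: build the persistent Total/Correct state variables
acc = evaluator.accuracy(input=predict, label=label,
                         main_program=main_program)
# step 2: append the top_k/accuracy/cast/elementwise_add update ops; every run
# of main_program now accumulates into the states and yields per-batch accuracy
acc_out = acc._update_ops(input=predict, label=label,
                          main_program=main_program)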
...
@@ -550,7 +550,7 @@ class Parameter(Variable):
             raise ValueError("Parameter shape should not be related with "
                              "batch-size")

-        super(Parameter, self).__init__(
+        Variable.__init__(
             self, block, persistable=True, shape=shape, dtype=dtype, **kwargs)
         self.trainable = kwargs.get('trainable', True)
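The Parameter change swaps a cooperative super() call for a direct base-class call; with single inheritance the two are equivalent, the direct form just bypasses the MRO lookup. A self-contained sketch of the pattern (class bodies simplified, not Paddle's actual fields):

class Variable(object):
    def __init__(self, block, persistable=False, shape=None, dtype=None,
                 **kwargs):
        self.block = block
        self.persistable = persistable
        self.shape = shape
        self.dtype = dtype


class Parameter(Variable):
    def __init__(self, block, shape, dtype, **kwargs):
        # same effect as super(Parameter, self).__init__(...) here, but the
        # call is pinned to Variable regardless of the MRO
        Variable.__init__(
            self, block, persistable=True, shape=shape, dtype=dtype, **kwargs)
        self.trainable = kwargs.get('trainable', True)


p = Parameter(block=object(), shape=[1], dtype="float32", trainable=False)
print p.persistable, p.trainable  # prints: True False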
...
@@ -263,7 +263,9 @@ def accuracy(input, label, k=1, **kwargs):
                    "Indices": [topk_indices]},
         attrs={"k": k})
-    acc_out_dtype = kwargs.get("out_dtype", "float32")
-    acc_out = helper.create_tmp_variable(dtype=acc_out_dtype)
+    acc_out = helper.create_tmp_variable(dtype="float32")
+    correct = helper.create_tmp_variable(dtype="int64")
+    total = helper.create_tmp_variable(dtype="int64")
     helper.append_op(
         type="accuracy",
         inputs={
...
@@ -271,7 +273,11 @@ def accuracy(input, label, k=1, **kwargs):
             "Indices": [topk_indices],
             "Label": [label]
         },
-        outputs={"Accuracy": [acc_out]})
+        outputs={
+            "Accuracy": [acc_out],
+            "Correct": [correct],
+            "Total": [total],
+        })
     return acc_out
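For callers the layer is unchanged: accuracy() now also creates Correct and Total temporaries to satisfy the op's extra outputs, but still returns only the Accuracy variable, so existing call sites keep working. A hedged usage sketch (`predict` and `label` assumed from an existing network):

import paddle.v2.framework.layers as layers

# call site is unchanged; Correct and Total are wired internally
acc_out = layers.accuracy(input=predict, label=label, k=1)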
...
@@ -19,7 +19,8 @@ class TestAccuracyOp(OpTest):
                 break
         self.outputs = {
             'Accuracy': np.array([num_correct / float(n)]).astype("float32"),
-            'Correct': np.array([num_correct]).astype("int32")
+            'Correct': np.array([num_correct]).astype("int32"),
+            'Total': np.array([n]).astype("int32")
         }

     def test_check_output(self):
...
@@ -27,5 +28,4 @@ class TestAccuracyOp(OpTest):

 if __name__ == '__main__':
-    exit(0)
     unittest.main()
...
@@ -3,6 +3,7 @@ import paddle.v2.framework.layers as layers
 import paddle.v2.framework.nets as nets
 import paddle.v2.framework.core as core
 import paddle.v2.framework.optimizer as optimizer
+import paddle.v2.framework.evaluator as evaluator
 from paddle.v2.framework.framework import Program, g_main_program
 from paddle.v2.framework.executor import Executor
...
@@ -54,17 +55,24 @@ cost = layers.cross_entropy(
     main_program=main_program,
     startup_program=startup_program)
 avg_cost = layers.mean(x=cost, main_program=main_program)
-accuracy = layers.accuracy(
-    input=predict,
-    label=label,
-    main_program=main_program,
-    startup_program=startup_program)
+# accuracy = layers.accuracy(
+#     input=predict,
#     label=label,
+#     main_program=main_program,
+#     startup_program=startup_program)

 # optimizer = optimizer.MomentumOptimizer(learning_rate=0.1 / 128.0,
 #                                         momentum=0.9)
 optimizer = optimizer.AdamOptimizer(learning_rate=0.01, beta1=0.9, beta2=0.999)
 opts = optimizer.minimize(avg_cost, startup_program)

+accuracy = evaluator.accuracy(
+    input=predict,
+    label=label,
+    main_program=main_program,
+    startup_program=startup_program)
+acc_out = accuracy._update_ops(
+    input=predict, label=label, main_program=main_program)
+
 BATCH_SIZE = 50
 PASS_NUM = 3
 train_reader = paddle.batch(
...
@@ -79,6 +87,7 @@ exe.run(startup_program, feed={}, fetch_list=[])
 for pass_id in range(PASS_NUM):
     count = 0
+    accuracy.reset(exe)
     for data in train_reader():
         img_data = np.array(map(lambda x: x[0].reshape([1, 28, 28]),
                                 data)).astype("float32")
...
@@ -93,11 +102,14 @@ for pass_id in range(PASS_NUM):
         outs = exe.run(main_program,
                        feed={"pixel": tensor_img,
                              "label": tensor_y},
-                       fetch_list=[avg_cost, accuracy])
+                       fetch_list=[avg_cost, acc_out])
         loss = np.array(outs[0])
         acc = np.array(outs[1])
+        # pass_acc = accuracy.eval(exe)
+        # print pass_acc
+        print loss, acc

-        if loss < 10.0 and acc > 0.9:
-            # if avg cost less than 10.0 and accuracy is larger than 0.9, we think our code is good.
-            exit(0)
+        # if loss < 10.0 and acc > 0.9:
+        #     # if avg cost less than 10.0 and accuracy is larger than 0.9, we think our code is good.
+        #     exit(0)
 exit(1)
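The commented-out `pass_acc` lines hint at where this is heading: once Accuracy.eval() runs cleanly against its own eval program, per-pass accuracy would be read back roughly like this sketch (not yet enabled in this commit):

for pass_id in range(PASS_NUM):
    accuracy.reset(exe)            # zero the Total/Correct states
    for data in train_reader():
        pass                       # run main_program, fetching per-batch acc_out
    pass_acc = accuracy.eval(exe)  # aggregate accuracy over the whole pass
    print pass_acc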