From 46c61b35f7c70cc0d0046b856432bd2d2a7b1701 Mon Sep 17 00:00:00 2001
From: Dong Zhihong
Date: Thu, 9 Nov 2017 14:58:23 -0800
Subject: [PATCH] "add elementwise op support"

---
 paddle/operators/elementwise_div_op.cc   | 10 +-
 paddle/operators/elementwise_mul_op.cc   |  8 +-
 paddle/operators/elementwise_sub_op.cc   | 10 +-
 python/paddle/v2/framework/evaluator.py  | 95 ++++++-----------
 .../tests/test_recognize_digits_conv.py  | 26 ++---
 5 files changed, 71 insertions(+), 78 deletions(-)

diff --git a/paddle/operators/elementwise_div_op.cc b/paddle/operators/elementwise_div_op.cc
index de75816a2..7a325199b 100644
--- a/paddle/operators/elementwise_div_op.cc
+++ b/paddle/operators/elementwise_div_op.cc
@@ -35,7 +35,13 @@ REGISTER_OP(elementwise_div, ops::ElementwiseOp, ops::ElementwiseDivOpMaker,
             elementwise_div_grad, ops::ElementwiseOpGrad);
 REGISTER_OP_CPU_KERNEL(
     elementwise_div,
-    ops::ElementwiseDivKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseDivKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseDivKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseDivKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseDivKernel<paddle::platform::CPUPlace, int64_t>);
 REGISTER_OP_CPU_KERNEL(
     elementwise_div_grad,
-    ops::ElementwiseDivGradKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseDivGradKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseDivGradKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseDivGradKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseDivGradKernel<paddle::platform::CPUPlace, int64_t>);
diff --git a/paddle/operators/elementwise_mul_op.cc b/paddle/operators/elementwise_mul_op.cc
index ffa10486f..8851267a5 100644
--- a/paddle/operators/elementwise_mul_op.cc
+++ b/paddle/operators/elementwise_mul_op.cc
@@ -37,8 +37,12 @@ REGISTER_OP(elementwise_mul, ops::ElementwiseOp, ops::ElementwiseMulOpMaker,
 REGISTER_OP_CPU_KERNEL(
     elementwise_mul,
     ops::ElementwiseMulKernel<paddle::platform::CPUPlace, float>,
-    ops::ElementwiseMulKernel<paddle::platform::CPUPlace, double>);
+    ops::ElementwiseMulKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseMulKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseMulKernel<paddle::platform::CPUPlace, int64_t>);
 REGISTER_OP_CPU_KERNEL(
     elementwise_mul_grad,
     ops::ElementwiseMulGradKernel<paddle::platform::CPUPlace, float>,
-    ops::ElementwiseMulGradKernel<paddle::platform::CPUPlace, double>);
+    ops::ElementwiseMulGradKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseMulGradKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseMulGradKernel<paddle::platform::CPUPlace, int64_t>);
diff --git a/paddle/operators/elementwise_sub_op.cc b/paddle/operators/elementwise_sub_op.cc
index 39702dad0..95d7979e3 100644
--- a/paddle/operators/elementwise_sub_op.cc
+++ b/paddle/operators/elementwise_sub_op.cc
@@ -34,7 +34,13 @@ REGISTER_OP(elementwise_sub, ops::ElementwiseOp, ops::ElementwiseSubOpMaker,
             elementwise_sub_grad, ops::ElementwiseOpGrad);
 REGISTER_OP_CPU_KERNEL(
     elementwise_sub,
-    ops::ElementwiseSubKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseSubKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseSubKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseSubKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseSubKernel<paddle::platform::CPUPlace, int64_t>);
 REGISTER_OP_CPU_KERNEL(
     elementwise_sub_grad,
-    ops::ElementwiseSubGradKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseSubGradKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseSubGradKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseSubGradKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseSubGradKernel<paddle::platform::CPUPlace, int64_t>);
diff --git a/python/paddle/v2/framework/evaluator.py b/python/paddle/v2/framework/evaluator.py
index eb06b7577..252370ffd 100644
--- a/python/paddle/v2/framework/evaluator.py
+++ b/python/paddle/v2/framework/evaluator.py
@@ -1,3 +1,4 @@
+import numpy as np
 from paddle.v2.framework.framework import Program, g_main_program, unique_name, Variable
 import paddle.v2.framework.core as core
 
@@ -31,12 +32,8 @@ class Evaluator(object):
             self._main_program = kwargs.get("main_program")
         else:
             self._main_program = g_main_program
 
-        if kwargs.has_key("eval_program"):
-            self._eval_program = kwargs.get("eval_program")
-        else:
-            self._eval_program = Program()
-    def _update_ops(self):
+    def _update_ops(self, *args, **kwargs):
         """
         append update ops to the global states
         """
@@ -64,13 +61,12 @@
                 })
             block.append_op(
                 type="scale", inputs={"X": zeros}, outputs={"Out": g_var})
-        print reset_program
         executor.run(reset_program, fetch_list=self._states.values())
 
     def eval(self, executor, program=None):
         """
-      Merge the mini-batch statistics to form the evaluation result for multiple mini-batches.
-      """
+        Merge the mini-batch statistics to form the evaluation result for multiple mini-batches.
+        """
 
         raise NotImplementedError()
 
@@ -81,7 +77,6 @@ class Accuracy(Evaluator):
 
     def __init__(self, *args, **kwargs):
         super(Accuracy, self).__init__("accuracy", **kwargs)
-        # block = self._eval_program.global_block()
         block = self._main_program.global_block()
         g_total = block.create_var(
             name=unique_name("Total"),
@@ -122,21 +117,13 @@
                 "Total": [total],
             })
-        # block = self._eval_program.global_block()
-        # e_correct = _clone_var_in_block_(block, correct)
-        # e_total = _clone_var_in_block_(block, total)
-
-        # block.append_op(
-        #     type="sum",
-        #     inputs={"X": [self._states["Total"], total]},
-        #     outputs={"Out": [self._states["Total"]]})
         block.append_op(
             type="cast",
             inputs={"X": [self._states["Total"]]},
             outputs={"Out": [self._states["Total"]]},
             attrs={
-                "in_data_type": 5,
-                "out_data_type": 2,
+                "in_data_type": 5,  # float32
+                "out_data_type": 2,  #int32
             })
         block.append_op(
             type="cast",
             inputs={"X": [self._states["Correct"]]},
@@ -158,44 +145,40 @@
                     "Y": [correct]},
             outputs={"Out": [self._states["Correct"]]})
 
-        # g_total = self._states["Total"]
-        # print g_total
-        # print total
-
-        # print "*" * 100
-        # print g_total.block.program == total.block.program
-
-        # g_total = _clone_var_in_block_(block, self._states["Total"])
-        # e_total = _clone_var_in_block_(block, total)
-
-        # block.append_op(
-        #     type="sum",
-        #     inputs={"X": [g_total, e_total]},
-        #     outputs={"Out": [g_total]})
-
-        # block.append_op(
-        #     type="sum",
-        #     inputs={"X": [self._states["Correct"], correct]},
-        #     outputs={"Out": [self._states["Correct"]]})
-        # print self._main_program
         return acc_out
 
-    def eval(self, executor):
-        block = self._eval_program.global_block()
+    def eval(self, executor, program=None):
+        if program != None:
+            eval_program = program
+        else:
+            eval_program = Program()
+        block = eval_program.global_block()
         eval_out = block.create_var(dtype=self._states["Total"].data_type)
-        e_correct = _clone_var_in_block_(block, correct)
-        e_total = _clone_var_in_block_(block, total)
-        # block.append_op(
-        #     type="elementwise_div",
-        #     inputs={"X": self._states["Total"],
-        #             "Y": self._states["Correct"]},
-        #     outputs={"Out": eval_out})
+        e_total = _clone_var_in_block_(block, self._states["Total"])
+        e_correct = _clone_var_in_block_(block, self._states["Correct"])
+        block.append_op(
+            type="cast",
+            inputs={"X": [e_total]},
+            outputs={"Out": [e_total]},
+            attrs={
+                "in_data_type": 2,  #int32
+                "out_data_type": 5,  #float32
+            })
+        block.append_op(
+            type="cast",
+            inputs={"X": [e_correct]},
+            outputs={"Out": [e_correct]},
+            attrs={
+                "in_data_type": 2,
+                "out_data_type": 5,
+            })
         block.append_op(
             type="elementwise_div",
-            inputs={"X": e_total,
-                    "Y": e_correct},
+            inputs={"X": e_correct,
+                    "Y": e_total},
            outputs={"Out": eval_out})
-        return executor.run(self._eval_program, fetch_list=[eval_out])
+        out = executor.run(eval_program, fetch_list=[eval_out])
+        return np.array(out[0])
 
 
 # Demo for composing low level ops to compute the F1 metric
@@ -235,8 +218,8 @@ class FScore(Evaluator):
             persistable=True)
 
 
-# def register():
-accuracy = Accuracy
-# def accuracy(*args, **kwargs):
-#     acc = Accuracy(**kwargs)
-#     return acc._update_ops(*args, **kwargs)
+# FIXME(dzh): add a decorator to call _update_ops automatically
+def accuracy(*args, **kwargs):
+    cls = Accuracy(*args, **kwargs)
+    out = cls._update_ops(*args, **kwargs)
+    return cls, out
diff --git a/python/paddle/v2/framework/tests/test_recognize_digits_conv.py b/python/paddle/v2/framework/tests/test_recognize_digits_conv.py
index a24eabf16..9ec45814a 100644
--- a/python/paddle/v2/framework/tests/test_recognize_digits_conv.py
+++ b/python/paddle/v2/framework/tests/test_recognize_digits_conv.py
@@ -55,23 +55,14 @@ cost = layers.cross_entropy(
     main_program=main_program,
     startup_program=startup_program)
 avg_cost = layers.mean(x=cost, main_program=main_program)
-# accuracy = layers.accuracy(
-#     input=predict,
-#     label=label,
-#     main_program=main_program,
-#     startup_program=startup_program)
-# optimizer = optimizer.MomentumOptimizer(learning_rate=0.1 / 128.0,
-#                                         momentum=0.9)
 optimizer = optimizer.AdamOptimizer(learning_rate=0.01, beta1=0.9, beta2=0.999)
 opts = optimizer.minimize(avg_cost, startup_program)
 
-accuracy = evaluator.accuracy(
+accuracy, acc_out = evaluator.accuracy(
     input=predict,
     label=label,
     main_program=main_program,
     startup_program=startup_program)
-acc_out = accuracy._update_ops(
-    input=predict, label=label, main_program=main_program)
 
 BATCH_SIZE = 50
 PASS_NUM = 3
@@ -105,11 +96,14 @@ for pass_id in range(PASS_NUM):
                        fetch_list=[avg_cost, acc_out])
         loss = np.array(outs[0])
         acc = np.array(outs[1])
-        # pass_acc = accuracy.eval(exe)
-        # print pass_acc
-        print loss, acc
+        pass_acc = accuracy.eval(exe)
+        print "pass id : ", pass_id, pass_acc
+        # print loss, acc
+        if loss < 10.0 and acc > 0.9:
+            # if avg cost less than 10.0 and accuracy is larger than 0.9, we think our code is good.
+            exit(0)
+
+    pass_acc = accuracy.eval(exe)
+    print "pass id : ", pass_id, pass_acc
 
-        # if loss < 10.0 and acc > 0.9:
-        #     # if avg cost less than 10.0 and accuracy is larger than 0.9, we think our code is good.
-        #     exit(0)
 exit(1)
-- 
GitLab
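
Note on the reworked evaluator: evaluator.accuracy(...) now returns the Accuracy instance together with the per-mini-batch accuracy variable, _update_ops accumulates the persistent "Total"/"Correct" state variables (kept as integers, see the cast ops with out_data_type 2), and eval() casts both back to float32 and emits elementwise_div(Correct, Total). The snippet below is a standalone numpy sketch of that bookkeeping, for illustration only; StreamingAccuracy and its method names are invented here and are not part of this patch or of Paddle.

import numpy as np

class StreamingAccuracy(object):
    """Plain-numpy stand-in for the state kept by the Accuracy evaluator."""

    def __init__(self):
        self.total = np.zeros(1, dtype="int32")    # state "Total"
        self.correct = np.zeros(1, dtype="int32")  # state "Correct"

    def update(self, pred_labels, true_labels):
        # per-mini-batch accuracy, plus accumulation into the pass-level states
        correct = int(np.sum(pred_labels == true_labels))
        self.total += len(true_labels)
        self.correct += correct
        return float(correct) / len(true_labels)

    def reset(self):
        # counterpart of Evaluator.reset(): zero the states at the start of a pass
        self.total[...] = 0
        self.correct[...] = 0

    def eval(self):
        # counterpart of Accuracy.eval(): cast the integer states to float32
        # and divide Correct by Total, as the cast + elementwise_div ops do
        return self.correct.astype("float32") / self.total.astype("float32")

acc = StreamingAccuracy()
acc.update(np.array([1, 0, 1]), np.array([1, 1, 1]))  # batch 1: 2 of 3 correct
acc.update(np.array([0, 0]), np.array([0, 0]))        # batch 2: 2 of 2 correct
print(acc.eval())                                     # -> [0.8], i.e. 4 / 5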