From 82a22d3258b7024e64cd4045c5bbf32aa99f070f Mon Sep 17 00:00:00 2001
From: Yang Yu
Date: Tue, 26 Dec 2017 17:06:23 +0800
Subject: [PATCH] Update code

---
 paddle/framework/executor.cc                       |  8 ------
 paddle/framework/tensor_impl.h                     |  8 ++++++
 paddle/operators/sum_op.h                          |  2 ++
 python/paddle/v2/fluid/executor.py                 | 25 ++++++++++++++++---
 .../tests/book/test_label_semantic_roles.py        |  2 +-
 python/paddle/v2/fluid/tests/decorators.py         |  6 +++--
 .../fluid/tests/test_dynrnn_gradient_check.py      | 20 +++++++--------
 7 files changed, 46 insertions(+), 25 deletions(-)

diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc
index a07e8e0b1b..997773c168 100644
--- a/paddle/framework/executor.cc
+++ b/paddle/framework/executor.cc
@@ -66,14 +66,6 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id,
   PADDLE_ENFORCE_LT(static_cast<size_t>(block_id), pdesc.Size());
   auto& block = pdesc.Block(block_id);
 
-  if (VLOG_IS_ON(100)) {
-    std::ostringstream sout;
-    for (auto& name : scope->GetAllNames(false)) {
-      sout << name << ", ";
-    }
-    VLOG(100) << "Scope has variable " << sout.str();
-  }
-
   Scope* local_scope = scope;
   if (create_vars) {
     if (create_local_scope) {
diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h
index 6c6f298edc..46ea3b881d 100644
--- a/paddle/framework/tensor_impl.h
+++ b/paddle/framework/tensor_impl.h
@@ -134,6 +134,14 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) {
 #endif
     offset_ = 0;
   }
+
+  if (typeid(float).hash_code() == type.hash_code()) {
+    auto buf = reinterpret_cast<float*>(
+        reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
+    for (int64_t i = 0; i < this->numel(); ++i) {
+      buf[i] = NAN;
+    }
+  }
   return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                                  offset_);
 }
diff --git a/paddle/operators/sum_op.h b/paddle/operators/sum_op.h
index eaa36aa1ae..cbde9976dc 100644
--- a/paddle/operators/sum_op.h
+++ b/paddle/operators/sum_op.h
@@ -107,10 +107,12 @@ class SumKernel : public framework::OpKernel<T> {
               out_array.resize(i + 1);
             }
             if (out_array[i].numel() == 0) {
+              VLOG(10) << context.op().Output("Out") << " just copy";
               framework::CopyFrom(in_array[i], in_array[i].place(),
                                   context.device_context(), &out_array[i]);
               out_array[i].set_lod(in_array[i].lod());
             } else {
+              VLOG(10) << context.op().Output("Out") << " merged";
               PADDLE_ENFORCE(out_array[i].lod() == in_array[i].lod());
               auto in = EigenVector<T>::Flatten(in_array[i]);
               auto result = EigenVector<T>::Flatten(out_array[i]);
diff --git a/python/paddle/v2/fluid/executor.py b/python/paddle/v2/fluid/executor.py
index 2c91afb363..1d6c594b41 100644
--- a/python/paddle/v2/fluid/executor.py
+++ b/python/paddle/v2/fluid/executor.py
@@ -1,12 +1,31 @@
 import numpy as np
+import contextlib
+from framework import Program, default_main_program
 from . import core
-from framework import Program, default_main_program, Parameter, Variable
 
-__all__ = ['Executor', 'g_scope']
+__all__ = ['Executor', 'global_scope', 'scope_guard', 'switch_scope']
 
 g_scope = core.Scope()
 
 
+def global_scope():
+    return g_scope
+
+
+def switch_scope(scope):
+    global g_scope
+    ex = g_scope
+    g_scope = scope
+    return ex
+
+
+@contextlib.contextmanager
+def scope_guard(scope):
+    ex = switch_scope(scope)
+    yield
+    switch_scope(ex)
+
+
 def as_numpy(tensor):
     if isinstance(tensor, list):
         return [as_numpy(t) for t in tensor]
@@ -117,7 +136,7 @@ class Executor(object):
             raise TypeError()
 
         if scope is None:
-            scope = g_scope
+            scope = global_scope()
 
         program = program.clone()
         global_block = program.global_block()
diff --git a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py
index c3591a613a..8acd470c5e 100644
--- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py
+++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py
@@ -170,7 +170,7 @@ def main():
 
     exe.run(fluid.default_startup_program())
 
-    embedding_param = fluid.g_scope.find_var(embedding_name).get_tensor()
+    embedding_param = fluid.global_scope().find_var(embedding_name).get_tensor()
     embedding_param.set(
         load_parameter(conll05.get_embedding(), word_dict_len, word_dim),
         place)
diff --git a/python/paddle/v2/fluid/tests/decorators.py b/python/paddle/v2/fluid/tests/decorators.py
index d3dcf3562d..154619b0e9 100644
--- a/python/paddle/v2/fluid/tests/decorators.py
+++ b/python/paddle/v2/fluid/tests/decorators.py
@@ -19,8 +19,10 @@ def prog_scope():
         def __fn__(*args, **kwargs):
             prog = fluid.Program()
             startup_prog = fluid.Program()
-            with fluid.program_guard(prog, startup_prog):
-                fn(*args, **kwargs)
+            scope = fluid.core.Scope()
+            with fluid.scope_guard(scope):
+                with fluid.program_guard(prog, startup_prog):
+                    fn(*args, **kwargs)
 
         return __fn__
 
diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py
index 22bb2b1cdf..7f61b966fd 100644
--- a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py
+++ b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py
@@ -298,7 +298,6 @@ class TestSimpleMulWithMemory(unittest.TestCase):
     @prog_scope()
     def test_forward_backward(self):
         py_rnn = TestSimpleMulWithMemory.SimpleMulWithMemory()
-
         data = fluid.layers.data(
             name=self.DATA_NAME, shape=[self.DATA_WIDTH], lod_level=1)
         data.stop_gradient = False
@@ -323,19 +322,18 @@ class TestSimpleMulWithMemory(unittest.TestCase):
         cpu = fluid.CPUPlace()
         exe = fluid.Executor(cpu)
         feed = py_rnn.to_feed(cpu)
-        for _ in xrange(2):
-            last_np, w_g, i_g = map(numpy.array,
-                                    exe.run(feed=feed,
-                                            fetch_list=[
-                                                last, self.PARAM_NAME + "@GRAD",
-                                                self.DATA_NAME + "@GRAD"
-                                            ],
-                                            return_numpy=False))
+        last_np, w_g, i_g = map(numpy.array,
+                                exe.run(feed=feed,
+                                        fetch_list=[
+                                            last, self.PARAM_NAME + "@GRAD",
+                                            self.DATA_NAME + "@GRAD"
+                                        ],
+                                        return_numpy=False))
 
         last_by_py, = py_rnn.exe().values()
-        self.assertTrue(numpy.allclose(last_np, last_by_py))
         w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME)
-        print w_g[0], w_g_num[0]
+        # print w_g_num[0], w_g[0]
+        self.assertTrue(numpy.allclose(w_g_num, w_g, rtol=0.1))
 
         i_g_num = py_rnn.get_numeric_gradient_of_input(self.DATA_NAME)
         i_g_num = i_g_num.reshape(i_g.shape)
--
GitLab
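
A minimal usage sketch (not part of the patch) for the scope_guard/global_scope
helpers added to python/paddle/v2/fluid/executor.py above, assuming they are
re-exported at the fluid package level the same way the updated tests use them
(fluid.scope_guard, fluid.global_scope, fluid.core.Scope):

    import paddle.v2.fluid as fluid

    # A private scope to hold the variables of one test or experiment.
    scope = fluid.core.Scope()

    with fluid.scope_guard(scope):
        # Inside the guard, global_scope() resolves to `scope`, so an
        # Executor.run(...) call that leaves its `scope` argument unset
        # creates its variables here instead of in the process-wide scope.
        assert fluid.global_scope() is scope

    # On exit, switch_scope() restores the previous scope.
    assert fluid.global_scope() is not scope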