From d852be7c4886678d1fe12a6f37148cf7437d0a82 Mon Sep 17 00:00:00 2001
From: Xin Pan
Date: Sat, 13 Oct 2018 19:25:23 +0800
Subject: [PATCH] Revert "Make variable::GetMutable robust"

---
 paddle/fluid/framework/executor.cc              |  2 +-
 paddle/fluid/framework/feed_fetch_method.cc     |  3 ++-
 paddle/fluid/framework/naive_executor.cc        |  2 +-
 paddle/fluid/framework/variable.h               |  6 +-----
 paddle/fluid/framework/variable_test.cc         | 11 +++++------
 python/paddle/fluid/tests/book/test_word2vec.py | 16 ++++++++++++++--
 6 files changed, 24 insertions(+), 16 deletions(-)

diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc
index a070b8efb8..70ec6e90a4 100644
--- a/paddle/fluid/framework/executor.cc
+++ b/paddle/fluid/framework/executor.cc
@@ -66,7 +66,7 @@ void InitializeVariable(Variable* var, proto::VarType::Type var_type) {
   } else if (var_type == proto::VarType::FETCH_LIST) {
     var->GetMutable<FeedFetchList>();
   } else if (var_type == proto::VarType::STEP_SCOPES) {
-    var->GetMutable<std::vector<framework::Scope *>>();
+    var->GetMutable<std::vector<framework::Scope>>();
   } else if (var_type == proto::VarType::LOD_RANK_TABLE) {
     var->GetMutable<LoDRankTable>();
   } else if (var_type == proto::VarType::LOD_TENSOR_ARRAY) {
diff --git a/paddle/fluid/framework/feed_fetch_method.cc b/paddle/fluid/framework/feed_fetch_method.cc
index 3e9353f5cf..8e1f93c5eb 100644
--- a/paddle/fluid/framework/feed_fetch_method.cc
+++ b/paddle/fluid/framework/feed_fetch_method.cc
@@ -27,7 +27,8 @@ void SetFeedVariable(Scope* scope, const LoDTensor& input,
   // be created.
   VLOG(3) << "SetFeedVariable name=" << var_name << " index=" << index;
   Variable* g_feed_value = scope->Var(var_name);
-  auto& feed_inputs = *(g_feed_value->GetMutable<FeedFetchList>());
+  auto& feed_inputs =
+      *(g_feed_value->GetMutable<std::vector<paddle::framework::LoDTensor>>());
   if (index >= feed_inputs.size()) {
     feed_inputs.resize(index + 1);
   }
diff --git a/paddle/fluid/framework/naive_executor.cc b/paddle/fluid/framework/naive_executor.cc
index 2840d503f1..ba10687d65 100644
--- a/paddle/fluid/framework/naive_executor.cc
+++ b/paddle/fluid/framework/naive_executor.cc
@@ -37,7 +37,7 @@ static void InitializeVariable(Variable *var, proto::VarType::Type var_type) {
   } else if (var_type == proto::VarType::FETCH_LIST) {
     var->GetMutable<FeedFetchList>();
   } else if (var_type == proto::VarType::STEP_SCOPES) {
-    var->GetMutable<std::vector<framework::Scope *>>();
+    var->GetMutable<std::vector<framework::Scope>>();
   } else if (var_type == proto::VarType::LOD_RANK_TABLE) {
     var->GetMutable<LoDRankTable>();
   } else if (var_type == proto::VarType::LOD_TENSOR_ARRAY) {
diff --git a/paddle/fluid/framework/variable.h b/paddle/fluid/framework/variable.h
index 873e1b20a5..067e0c2b83 100644
--- a/paddle/fluid/framework/variable.h
+++ b/paddle/fluid/framework/variable.h
@@ -38,12 +38,8 @@ class Variable {
 
   template <typename T>
   T* GetMutable() {
-    if (!holder_) {
+    if (!IsType<T>()) {
       holder_.reset(new PlaceholderImpl<T>(new T()));
-    } else {
-      PADDLE_ENFORCE(IsType<T>(),
-                     "Variable must be type %s, the holding type is %s",
-                     typeid(T).name(), holder_->Type().name());
     }
     return static_cast<T*>(holder_->Ptr());
   }
diff --git a/paddle/fluid/framework/variable_test.cc b/paddle/fluid/framework/variable_test.cc
index 003dcfd3df..c5c1d215f4 100644
--- a/paddle/fluid/framework/variable_test.cc
+++ b/paddle/fluid/framework/variable_test.cc
@@ -33,10 +33,9 @@ TEST(Variable, GetMutable) {
   const Tensor& tt = v->Get<Tensor>();
   EXPECT_EQ(1234, tt.content_);
 
-  try {
-    v->GetMutable<std::string>();
-  } catch (std::exception& e) {
-    return;
-  }
-  EXPECT_TRUE(false);
+  std::string* s = v->GetMutable<std::string>();
+  *s = "hello";
+
+  const std::string& ss = v->Get<std::string>();
+  EXPECT_EQ("hello", ss);
 }
diff --git a/python/paddle/fluid/tests/book/test_word2vec.py b/python/paddle/fluid/tests/book/test_word2vec.py
index 1f3a230048..9191f0fc20 100644
--- a/python/paddle/fluid/tests/book/test_word2vec.py
+++ b/python/paddle/fluid/tests/book/test_word2vec.py
@@ -17,6 +17,7 @@ from __future__ import print_function
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.layers.device import get_places
+from paddle.fluid.layers.control_flow import ParallelDo
 import unittest
 import os
 import numpy as np
@@ -83,7 +84,18 @@ def train(use_cuda, is_sparse, is_parallel, save_dirname, is_local=True):
         avg_cost, predict_word = __network__(
             [first_word, second_word, third_word, forth_word, next_word])
     else:
-        raise ValueError('is_parallel=True not implemented')
+        places = get_places()
+        pd = ParallelDo(places)
+        with pd.do():
+            avg_cost, predict_word = __network__(
+                list(
+                    map(pd.read_input, [
+                        first_word, second_word, third_word, forth_word,
+                        next_word
+                    ])))
+            pd.write_output(avg_cost)
+
+        avg_cost = fluid.layers.mean(pd())
 
     sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
     sgd_optimizer.minimize(avg_cost)
@@ -250,7 +262,7 @@ def inject_test_method(use_cuda, is_sparse, is_parallel):
 
 for use_cuda in (False, True):
     for is_sparse in (False, True):
-        for is_parallel in (False, ):  # TODO(paddle-dev): Add parallel test.
+        for is_parallel in (False, True):
             inject_test_method(use_cuda, is_sparse, is_parallel)
 
 if __name__ == '__main__':
--
GitLab