From 6217f42ab70a731034c7a8ca8ea885fc4807bcff Mon Sep 17 00:00:00 2001
From: Xin Pan
Date: Thu, 6 Dec 2018 23:28:45 +0800
Subject: [PATCH] Revert "Imperative"

---
 paddle/fluid/CMakeLists.txt                   |   1 -
 paddle/fluid/framework/feed_fetch_method.cc   |   9 -
 paddle/fluid/framework/feed_fetch_method.h    |   2 -
 paddle/fluid/framework/ir/graph.cc            |   5 +-
 paddle/fluid/imperative/CMakeLists.txt        |   3 -
 paddle/fluid/imperative/engine.cc             |  53 -----
 paddle/fluid/imperative/engine.h              |  39 ----
 paddle/fluid/imperative/layer.cc              | 221 ------------------
 paddle/fluid/imperative/layer.h               | 102 --------
 paddle/fluid/imperative/tracer.cc             |  19 --
 paddle/fluid/imperative/tracer.h              | 128 ----------
 paddle/fluid/pybind/CMakeLists.txt            |   5 +-
 paddle/fluid/pybind/imperative.cc             |  36 ---
 paddle/fluid/pybind/imperative.h              |  53 -----
 paddle/fluid/pybind/pybind.cc                 |  39 ----
 python/paddle/fluid/__init__.py               |   2 -
 python/paddle/fluid/framework.py              |  54 +----
 python/paddle/fluid/imperative/__init__.py    |  25 --
 python/paddle/fluid/imperative/base.py        |  56 -----
 python/paddle/fluid/imperative/layers.py      |  44 ----
 python/paddle/fluid/layer_helper.py           |  23 +-
 python/paddle/fluid/layers/nn.py              |   3 +-
 .../fluid/tests/unittests/test_imperative.py  |  52 -----
 python/setup.py.in                            |   1 -
 tools/print_signatures.py                     |   4 -
 25 files changed, 19 insertions(+), 960 deletions(-)
 delete mode 100644 paddle/fluid/imperative/CMakeLists.txt
 delete mode 100644 paddle/fluid/imperative/engine.cc
 delete mode 100644 paddle/fluid/imperative/engine.h
 delete mode 100644 paddle/fluid/imperative/layer.cc
 delete mode 100644 paddle/fluid/imperative/layer.h
 delete mode 100644 paddle/fluid/imperative/tracer.cc
 delete mode 100644 paddle/fluid/imperative/tracer.h
 delete mode 100644 paddle/fluid/pybind/imperative.cc
 delete mode 100644 paddle/fluid/pybind/imperative.h
 delete mode 100644 python/paddle/fluid/imperative/__init__.py
 delete mode 100644 python/paddle/fluid/imperative/base.py
 delete mode 100644 python/paddle/fluid/imperative/layers.py
 delete mode 100644 python/paddle/fluid/tests/unittests/test_imperative.py

diff --git a/paddle/fluid/CMakeLists.txt b/paddle/fluid/CMakeLists.txt
index 595454e90b9..6b526f0103a 100644
--- a/paddle/fluid/CMakeLists.txt
+++ b/paddle/fluid/CMakeLists.txt
@@ -1,7 +1,6 @@
 add_subdirectory(memory)
 add_subdirectory(platform)
 add_subdirectory(framework)
-add_subdirectory(imperative)
 add_subdirectory(operators)
 add_subdirectory(string)
 add_subdirectory(recordio)
diff --git a/paddle/fluid/framework/feed_fetch_method.cc b/paddle/fluid/framework/feed_fetch_method.cc
index 6338be75a4b..3e9353f5cf6 100644
--- a/paddle/fluid/framework/feed_fetch_method.cc
+++ b/paddle/fluid/framework/feed_fetch_method.cc
@@ -16,9 +16,7 @@ limitations under the License. */
 #include <string>
 #include <vector>
 #include "glog/logging.h"
-#include "paddle/fluid/framework/var_type.h"
 #include "paddle/fluid/framework/variable.h"
-#include "paddle/fluid/platform/place.h"
 
 namespace paddle {
 namespace framework {
@@ -55,12 +53,5 @@ LoDTensor& GetFetchVariable(const Scope& scope, const std::string& var_name,
   return tensor;
 }
 
-LoDTensor& GetVariableTensor(const Scope& scope, const std::string& var_name) {
-  Variable* var = scope.FindVar(var_name);
-  PADDLE_ENFORCE(var, "%s no in scope", var_name);
-  PADDLE_ENFORCE(var->IsType<LoDTensor>(), "Only support lod tensor now.");
-  return *var->GetMutable<LoDTensor>();
-}
-
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/fluid/framework/feed_fetch_method.h b/paddle/fluid/framework/feed_fetch_method.h
index 031f8e01aa6..7f504bfd232 100644
--- a/paddle/fluid/framework/feed_fetch_method.h
+++ b/paddle/fluid/framework/feed_fetch_method.h
@@ -27,7 +27,5 @@ void SetFeedVariable(Scope* scope, const LoDTensor& input,
 LoDTensor& GetFetchVariable(const Scope& scope, const std::string& var_name,
                             size_t index);
 
-LoDTensor& GetVariableTensor(const Scope& scope, const std::string& var_name);
-
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/fluid/framework/ir/graph.cc b/paddle/fluid/framework/ir/graph.cc
index 8679118fe28..fc91564bbae 100644
--- a/paddle/fluid/framework/ir/graph.cc
+++ b/paddle/fluid/framework/ir/graph.cc
@@ -38,8 +38,9 @@ void CheckProgram(const ProgramDesc &program) {
     switch (role_id) {
       case _INT(OpRole::kForward):
         if (visit.find(_INT(OpRole::kBackward)) != visit.end()) {
-          LOG(ERROR) << "Cannot add backward operator before forward operator "
-                     << op->Type();
+          LOG(ERROR)
+              << "Cannot add backward operator before forward operator %s."
+              << op->Type();
         }
         break;
       case _INT(OpRole::kBackward):
diff --git a/paddle/fluid/imperative/CMakeLists.txt b/paddle/fluid/imperative/CMakeLists.txt
deleted file mode 100644
index 373d292b443..00000000000
--- a/paddle/fluid/imperative/CMakeLists.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-cc_library(layer SRCS layer.cc DEPS proto_desc operator)
-cc_library(tracer SRCS tracer.cc DEPS proto_desc)
-cc_library(engine SRCS engine.cc)
diff --git a/paddle/fluid/imperative/engine.cc b/paddle/fluid/imperative/engine.cc
deleted file mode 100644
index de7ab0e5918..00000000000
--- a/paddle/fluid/imperative/engine.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -#include "paddle/fluid/imperative/engine.h" - -#include // NOLINT -#include - -#include "glog/logging.h" - -namespace paddle { -namespace imperative { - -static std::once_flag init_engine; -static Engine* engine; - -class DummyEngine : public Engine { - public: - void Enqueue(Runnable* runnable) override { - queued_runnables_.push_back(runnable); - } - - size_t Size() const override { return queued_runnables_.size(); } - - void Sync() override { - for (Runnable* l : queued_runnables_) { - LOG(INFO) << "running " << reinterpret_cast(l); - } - queued_runnables_.clear(); - } - - private: - std::vector queued_runnables_; -}; - -Engine* GetEngine() { - std::call_once(init_engine, []() { engine = new DummyEngine(); }); - return engine; -} - -} // namespace imperative -} // namespace paddle diff --git a/paddle/fluid/imperative/engine.h b/paddle/fluid/imperative/engine.h deleted file mode 100644 index a1dfa5bda38..00000000000 --- a/paddle/fluid/imperative/engine.h +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include -#include - -namespace paddle { -namespace imperative { - -struct Runnable {}; - -class Engine { - public: - virtual ~Engine() {} - - virtual void Enqueue(Runnable* runnable) = 0; - - virtual size_t Size() const = 0; - - virtual void Sync() = 0; -}; - -Engine* GetEngine(); - -} // namespace imperative -} // namespace paddle diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc deleted file mode 100644 index 61250376807..00000000000 --- a/paddle/fluid/imperative/layer.cc +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "paddle/fluid/imperative/layer.h" -#include -#include -#include -#include -#include - -#include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/string/printf.h" - -namespace paddle { -namespace imperative { - -using framework::Variable; - -void AddTo(Variable* src, Variable* dst) { - framework::LoDTensor* dst_tensor = dst->GetMutable(); - framework::LoDTensor* src_tensor = src->GetMutable(); - PADDLE_ENFORCE(dst_tensor->numel() == src_tensor->numel(), "%lld vs %lld", - dst_tensor->numel(), src_tensor->numel()); - float* dst_data = dst_tensor->mutable_data(platform::CPUPlace()); - const float* src_data = src_tensor->data(); - for (size_t i = 0; i < src_tensor->numel(); ++i) { - dst_data[i] += src_data[i]; - } -} - -class Autograd { - public: - explicit Autograd(framework::Scope* scope) : scope_(scope) {} - - void RunBackward(VarBase* var) { - PADDLE_ENFORCE(var->pre_op_->op_desc_); - // TODO(panyx0718): Only create for vars that "require_grad" - (*var->pre_op_->output_vars_)[var->pre_op_out_idx_]->grads_ = var->grads_; - - std::deque ready; - ready.push_back(var->pre_op_); - - std::map dep_counts = ComputeDepCounts(var->pre_op_); - - while (!ready.empty()) { - OpBase* ready_op = ready.front(); - ready.pop_front(); - std::vector input_grads = ready_op->ApplyGrad(scope_); - - for (size_t i = 0; i < input_grads.size(); ++i) { - if (!input_grads[i]) continue; - OpBase* pre_op = ready_op->pre_ops_->at(i); - if (!pre_op) continue; - - dep_counts[pre_op] -= 1; - PADDLE_ENFORCE(dep_counts[pre_op] >= 0); - bool pre_op_ready = dep_counts[pre_op] == 0; - if (pre_op_ready) { - ready.push_back(pre_op); - } - } - } - } - - private: - std::map ComputeDepCounts(OpBase* op) { - std::map ret; - - std::deque queue; - queue.push_back(op); - std::unordered_set visited; - visited.insert(op); - while (!queue.empty()) { - OpBase* candidate = queue.front(); - queue.pop_front(); - for (OpBase* pre_op : *(candidate->pre_ops_)) { - if (!pre_op) continue; - if (visited.find(pre_op) == visited.end()) { - visited.insert(pre_op); - queue.push_back(pre_op); - } - ret[pre_op] += 1; - } - } - - return ret; - } - - framework::Scope* scope_; -}; - -framework::Variable* CreateVariable(const std::string& name, - const framework::DDim& dim, float val, - framework::Scope* scope, - bool random_name = true) { - std::string varname = name; - if (random_name) { - std::mt19937 rng; - rng.seed(std::random_device()()); - std::uniform_int_distribution dist6( - 1, std::numeric_limits::max()); - int id = dist6(rng); - varname = string::Sprintf("%s@%d", varname, id); - } - - VLOG(3) << "creating var " << varname; - framework::Variable* var = scope->Var(varname); - framework::LoDTensor* tensor = var->GetMutable(); - - float* data = tensor->mutable_data(dim, platform::CPUPlace()); - std::fill(data, data + tensor->numel(), val); - return var; -} - -framework::LoDTensor& VarBase::Grad() { - VLOG(3) << "get var grad " << var_desc_->Name(); - return *grads_->GetMutable(); -} - -void VarBase::ApplyGrad(framework::Scope* scope, Variable* grad) { - VLOG(3) << "apply var grad " << var_desc_->Name() << " " - << grad->Get().data()[0]; - if (!grads_) { - grads_ = - CreateVariable(string::Sprintf("%s@IGrad", var_desc_->Name()), - var_->Get().dims(), 0.0, scope); - } - AddTo(grad, grads_); - VLOG(3) << "grad_ after apply var grad " << var_desc_->Name() << " " - << grads_->Get().data()[0]; -} - -std::vector OpBase::ApplyGrad(framework::Scope* scope) { - VLOG(3) << "op grad " << 
grad_op_desc_->Type(); - - for (const std::string& grad_invar : grad_op_desc_->InputArgumentNames()) { - if (grad_to_var_->find(grad_invar) == grad_to_var_->end()) { - // grad op inputs can be forward inputs, so not in grad_to_var. - continue; - } - VLOG(3) << "op grad in var " << grad_invar; - block_->FindRecursiveOrCreateVar(grad_invar); - framework::Variable* var = scope->Var(grad_invar); - const std::string& invar = grad_to_var_->at(grad_invar); - for (VarBase* varbase : *output_vars_) { - // Use the accumulated grads_ by sharing the input with grads_. - if (varbase->var_desc_->Name() == invar) { - var->GetMutable()->ShareDataWith( - varbase->grads_->Get()); - break; - } - } - } - - for (const std::string& outvar : grad_op_desc_->OutputArgumentNames()) { - VLOG(3) << "grad outvar " << outvar; - block_->FindRecursiveOrCreateVar(outvar); - framework::Variable* var = scope->Var(outvar); - if (!var->IsInitialized()) { - framework::VarDesc* var_desc = block_->FindVar(outvar); - if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) { - var->GetMutable(); - } else { - LOG(ERROR) << "tracer doesn't support yet"; - } - } - } - grad_op_desc_->InferShape(*block_); - grad_op_desc_->InferVarType(block_); - std::unique_ptr opbase = - framework::OpRegistry::CreateOp(*grad_op_desc_); - - opbase->Run(*scope, platform::CPUPlace()); - - // `ret` matches exactly with `input_vars_` of forward op. - std::vector ret; - for (size_t i = 0; i < input_vars_->size(); ++i) { - bool found = false; - for (const std::string& outvar : grad_op_desc_->OutputArgumentNames()) { - Variable* var = scope->FindVar(outvar); - VarBase* origin_var = (*input_vars_)[i]; - std::string orig_var = grad_to_var_->at(outvar); - PADDLE_ENFORCE(origin_var->var_desc_->Name() == orig_var); - VLOG(3) << "apply grad " << outvar << " with origin " << orig_var; - origin_var->ApplyGrad(scope, var); - found = true; - ret.push_back(var); - // TODO(panyx0718): There might be another outvar with the same name. - // In that case, it doesn't matter the first one or the second one is - // used. - break; - } - if (!found) { - ret.push_back(nullptr); - } - } - return ret; -} - -void VarBase::RunBackward(framework::Scope* scope) { - grads_ = CreateVariable(framework::GradVarName(var_desc_->Name()), - var_->Get().dims(), 1.0, scope, - false); - if (!pre_op_) return; - Autograd(scope).RunBackward(this); -} - -} // namespace imperative -} // namespace paddle diff --git a/paddle/fluid/imperative/layer.h b/paddle/fluid/imperative/layer.h deleted file mode 100644 index 85a71ca83d2..00000000000 --- a/paddle/fluid/imperative/layer.h +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-
-#pragma once
-
-#include <string>
-#include <vector>
-#include "paddle/fluid/framework/op_desc.h"
-#include "paddle/fluid/framework/operator.h"
-#include "paddle/fluid/framework/scope.h"
-#include "paddle/fluid/framework/var_desc.h"
-#include "paddle/fluid/platform/enforce.h"
-
-namespace paddle {
-namespace imperative {
-
-class OpBase;
-
-class VarBase {
- public:
-  VarBase()
-      : pre_op_(nullptr),
-        pre_op_out_idx_(-1),
-        var_desc_(nullptr),
-        var_(nullptr),
-        grads_(nullptr) {}
-
-  virtual ~VarBase() {}
-
-  void ApplyGrad(framework::Scope* scope, framework::Variable* grad);
-
-  void RunBackward(framework::Scope* scope);
-
-  framework::LoDTensor& Grad();
-
-  OpBase* pre_op_;
-  int pre_op_out_idx_;
-
-  framework::VarDesc* var_desc_;
-  framework::Variable* var_;
-  framework::Variable* grads_;
-};
-
-class OpBase {
- public:
-  OpBase()
-      : input_vars_(new std::vector<VarBase*>()),
-        output_vars_(new std::vector<VarBase*>()),
-        pre_ops_(new std::vector<OpBase*>()),
-        pre_ops_out_idx_(new std::vector<int>()),
-        op_desc_(nullptr),
-        grad_op_desc_(nullptr) {}
-
-  virtual ~OpBase() {
-    delete input_vars_;
-    delete output_vars_;
-
-    delete pre_ops_;
-    delete pre_ops_out_idx_;
-
-    if (grad_op_desc_) delete grad_op_desc_;
-    if (grad_to_var_) delete grad_to_var_;
-  }
-
-  std::vector<framework::Variable*> ApplyGrad(framework::Scope* scope);
-
-  std::vector<VarBase*>* input_vars_;
-  std::vector<VarBase*>* output_vars_;
-  std::vector<OpBase*>* pre_ops_;
-  std::vector<int>* pre_ops_out_idx_;
-  framework::OpDesc* op_desc_;
-
-  framework::OpDesc* grad_op_desc_;
-  std::unordered_map<std::string, std::string>* grad_to_var_;
-  framework::BlockDesc* block_;
-};
-
-class Layer {
- public:
-  virtual ~Layer() {}
-
-  virtual std::vector<VarBase> Forward(const std::vector<VarBase>& inputs) {
-    std::vector<VarBase> vars;
-    return vars;
-  }
-
-  virtual void Backward() { LOG(ERROR) << "To support customize"; }
-};
-
-}  // namespace imperative
-}  // namespace paddle
diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc
deleted file mode 100644
index f64f9e72c4a..00000000000
--- a/paddle/fluid/imperative/tracer.cc
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "paddle/fluid/imperative/tracer.h"
-
-namespace paddle {
-namespace imperative {}  // namespace imperative
-}  // namespace paddle
diff --git a/paddle/fluid/imperative/tracer.h b/paddle/fluid/imperative/tracer.h
deleted file mode 100644
index 433d07c0e5a..00000000000
--- a/paddle/fluid/imperative/tracer.h
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-
-#include <map>
-#include <string>
-#include <vector>
-
-#include "paddle/fluid/framework/op_desc.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/framework/scope.h"
-#include "paddle/fluid/imperative/engine.h"
-#include "paddle/fluid/imperative/layer.h"
-
-namespace paddle {
-namespace imperative {
-
-void CreateGradOp(const framework::OpDesc& op_desc,
-                  const std::unordered_set<std::string>& no_grad_set,
-                  const std::vector<framework::BlockDesc*>& grad_sub_block,
-                  framework::OpDesc** grad_op_desc,
-                  std::unordered_map<std::string, std::string>* grad_to_var) {
-  std::vector<std::unique_ptr<framework::OpDesc>> grad_op_descs =
-      framework::OpInfoMap::Instance()
-          .Get(op_desc.Type())
-          .GradOpMaker()(op_desc, no_grad_set, grad_to_var, grad_sub_block);
-  PADDLE_ENFORCE(grad_op_descs.size() == 1, "Only support 1 grad op now.");
-  // TODO(panyx0718): Leak?
-  *grad_op_desc = grad_op_descs[0].release();
-}
-
-class Tracer {
- public:
-  explicit Tracer(framework::BlockDesc* root_block) : root_block_(root_block) {
-    root_scope_ = new framework::Scope();
-    scopes_[root_block_] = root_scope_;
-  }
-
-  virtual ~Tracer() { delete root_scope_; }
-
-  void Trace(OpBase* op, const std::vector<VarBase*>& inputs,
-             const std::vector<VarBase*>& outputs,
-             framework::BlockDesc* block) {
-    framework::Scope* scope = GetScope(block);
-    framework::OpDesc* op_desc = op->op_desc_;
-    VLOG(3) << "tracer tracing " << op_desc->Type();
-    op_desc->InferShape(*block);
-    op_desc->InferVarType(block);
-    std::unique_ptr<framework::OperatorBase> op_base =
-        framework::OpRegistry::CreateOp(*op_desc);
-
-    *op->input_vars_ = inputs;
-    for (VarBase* input : inputs) {
-      const std::string vname = input->var_desc_->Name();
-      framework::Variable* var = scope->Var(vname);
-      input->var_ = var;
-      if (!var->IsInitialized()) {
-        framework::VarDesc* var_desc = block->FindVar(vname);
-        if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
-          var->GetMutable<framework::LoDTensor>();
-        } else {
-          LOG(ERROR) << "tracer doesn't support yet";
-        }
-      }
-      if (input->pre_op_) {
-        op->pre_ops_->push_back(input->pre_op_);
-        op->pre_ops_out_idx_->push_back(input->pre_op_out_idx_);
-      } else {
-        op->pre_ops_->push_back(nullptr);
-      }
-    }
-
-    *op->output_vars_ = outputs;
-    for (size_t i = 0; i < outputs.size(); ++i) {
-      const std::string vname = outputs[i]->var_desc_->Name();
-      framework::Variable* var = scope->Var(vname);
-      if (!var->IsInitialized()) {
-        framework::VarDesc* var_desc = block->FindVar(vname);
-        if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
-          var->GetMutable<framework::LoDTensor>();
-        } else {
-          LOG(ERROR) << "tracer doesn't support yet";
-        }
-      }
-      outputs[i]->var_ = var;
-      outputs[i]->pre_op_ = op;
-      outputs[i]->pre_op_out_idx_ = i;
-    }
-    op_base->Run(*scope, platform::CPUPlace());
-    framework::OpDesc* grad_op_desc;
-    auto grad_to_var = new std::unordered_map<std::string, std::string>();
-    CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var);
-    op->grad_op_desc_ = grad_op_desc;
-    op->grad_to_var_ = grad_to_var;
-    op->block_ = block;
-  }
-
-  framework::Scope* GetScope(framework::BlockDesc* block) {
-    if (scopes_.find(block) != scopes_.end()) {
-      return scopes_.at(block);
-    }
-    framework::BlockDesc* parent_block = block->ParentBlock();
-    PADDLE_ENFORCE(scopes_.find(parent_block) != scopes_.end());
-    framework::Scope* scope = &scopes_[parent_block]->NewScope();
-    scopes_[block] = scope;
-    return scope;
-  }
-
- private:
-  std::map<framework::BlockDesc*, framework::Scope*> scopes_;
-  framework::BlockDesc* root_block_;
-  framework::Scope* root_scope_;
-};
-
-}  // namespace imperative
-}  // namespace paddle
diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt
index b8954cb1262..d602613fc82 100644
--- a/paddle/fluid/pybind/CMakeLists.txt
+++ b/paddle/fluid/pybind/CMakeLists.txt
@@ -1,7 +1,6 @@
-set(PYBIND_DEPS pybind python proto_desc memory executor async_executor prune feed_fetch_method pass_builder parallel_executor profiler layer)
-set(PYBIND_SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc async_executor_py.cc imperative.cc)
-
+set(PYBIND_DEPS pybind python proto_desc memory executor async_executor prune feed_fetch_method pass_builder parallel_executor profiler)
+set(PYBIND_SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc async_executor_py.cc)
 if(WITH_PYTHON)
   if(WITH_AMD_GPU)
     hip_library(paddle_pybind SHARED
diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc
deleted file mode 100644
index 34e9c897d9e..00000000000
--- a/paddle/fluid/pybind/imperative.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/fluid/pybind/imperative.h"
-#include "paddle/fluid/framework/block_desc.h"
-#include "paddle/fluid/framework/scope.h"
-#include "paddle/fluid/imperative/tracer.h"
-
-namespace paddle {
-namespace pybind {
-
-// Bind Methods
-void BindTracer(pybind11::module *m) {
-  pybind11::class_<imperative::Tracer>(*m, "Tracer", "")
-      .def("__init__",
-           [](imperative::Tracer &self, framework::BlockDesc *root_block) {
-             new (&self) imperative::Tracer(root_block);
-           })
-      .def("trace", &imperative::Tracer::Trace)
-      .def("get_scope", &imperative::Tracer::GetScope,
-           pybind11::return_value_policy::reference);
-}
-
-}  // namespace pybind
-}  // namespace paddle
diff --git a/paddle/fluid/pybind/imperative.h b/paddle/fluid/pybind/imperative.h
deleted file mode 100644
index 7a9d3a01ea8..00000000000
--- a/paddle/fluid/pybind/imperative.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-#pragma once
-
-#include <string>
-#include <vector>
-#include "paddle/fluid/imperative/layer.h"
-#include "pybind11/pybind11.h"
-#include "pybind11/stl.h"
-
-namespace paddle {
-namespace pybind {
-
-class PyLayer : public imperative::Layer {
- public:
-  using imperative::Layer::Layer;  // Inherit constructors
-
-  std::vector<imperative::VarBase> Forward(
-      const std::vector<imperative::VarBase>& inputs) override {
-    PYBIND11_OVERLOAD(std::vector<imperative::VarBase>, Layer, Forward,
-                      inputs);  // NOLINT
-  }
-
-  void Backward() override {
-    PYBIND11_OVERLOAD(void, Layer, Backward, );  // NOLINT
-  }
-};
-
-class PyOpBase : public imperative::OpBase {
- public:
-  using imperative::OpBase::OpBase;  // Inherit constructors
-};
-
-class PyVarBase : public imperative::VarBase {
- public:
-  using imperative::VarBase::VarBase;  // Inherit constructors
-};
-
-void BindTracer(pybind11::module* m);
-
-}  // namespace pybind
-}  // namespace paddle
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index ea07372a289..fc7991d2974 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -34,7 +34,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/reader.h"
 #include "paddle/fluid/framework/selected_rows.h"
 #include "paddle/fluid/framework/version.h"
-#include "paddle/fluid/imperative/layer.h"
 #include "paddle/fluid/memory/allocation/allocator_strategy.h"
 #include "paddle/fluid/operators/activation_op.h"
 #include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h"
@@ -46,7 +45,6 @@ limitations under the License. */
 #include "paddle/fluid/pybind/async_executor_py.h"
 #include "paddle/fluid/pybind/const_value.h"
 #include "paddle/fluid/pybind/exception.h"
-#include "paddle/fluid/pybind/imperative.h"
 #include "paddle/fluid/pybind/protobuf.h"
 #include "paddle/fluid/pybind/pybind.h"  // NOLINT
 #include "paddle/fluid/pybind/recordio.h"
@@ -102,42 +100,6 @@ PYBIND11_MODULE(core, m) {
 
   BindException(&m);
 
-  py::class_<imperative::VarBase, PyVarBase>(m, "VarBase", R"DOC()DOC")
-      .def(py::init<>())
-      .def("_run_backward",
-           [](imperative::VarBase &self, framework::Scope *scope) {
-             self.RunBackward(scope);
-           })
-      .def("_grad", &imperative::VarBase::Grad)
-      .def_property(
-          "desc",
-          [](const imperative::VarBase &self) { return self.var_desc_; },
-          [](imperative::VarBase &self, framework::VarDesc *var_desc) {
-            self.var_desc_ = var_desc;
-          },
-          py::return_value_policy::reference);
-
-  py::class_<imperative::OpBase, PyOpBase>(m, "OpBase", R"DOC()DOC")
-      .def(py::init<>())
-      .def_property(
-          "desc", [](const imperative::OpBase &self) { return self.op_desc_; },
-          [](imperative::OpBase &self, framework::OpDesc *op_desc) {
-            if (op_desc) {
-              self.op_desc_ = op_desc;
-            }
-          },
-          py::return_value_policy::reference);
-
-  py::class_<imperative::Layer, PyLayer> layer(m, "Layer");
-  layer.def(py::init<>())
-      .def("forward",
-           [](imperative::Layer &self,
-              const std::vector<imperative::VarBase> &inputs) {
-             return self.Forward(inputs);
-           })
-      .def("backward", &imperative::Layer::Backward);
-  BindTracer(&m);
-
   py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
       .def_buffer(
           [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
@@ -639,7 +601,6 @@ All parameter, weight, gradient are variables in Paddle.
m.def("set_feed_variable", framework::SetFeedVariable); m.def("get_fetch_variable", framework::GetFetchVariable); - m.def("get_variable_tensor", framework::GetVariableTensor); m.def("_is_program_version_supported", IsProgramVersionSupported); diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index 52417a1eaf7..2a53519188e 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -34,7 +34,6 @@ from . import io from . import evaluator from . import initializer from . import layers -from . import imperative from . import contrib from . import nets from . import optimizer @@ -68,7 +67,6 @@ __all__ = framework.__all__ + executor.__all__ + \ 'initializer', 'layers', 'contrib', - 'imperative', 'transpiler', 'nets', 'optimizer', diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 9e6345f148c..b156db53d29 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -18,7 +18,6 @@ import collections import contextlib import re import six -import sys import numpy as np @@ -50,16 +49,6 @@ GRAD_VAR_SUFFIX = core.kGradVarSuffix() ZERO_VAR_SUFFIX = core.kZeroVarSuffix() CONTROL_DEP_VAR_PREFIX = core.kControlDepVarName() -_imperative_tracer_ = None - - -def _in_imperative_mode(): - return _imperative_tracer_ is not None - - -def _imperative_tracer(): - return _imperative_tracer_ - class NameScope(object): def __init__(self, name="", parent=None): @@ -213,7 +202,7 @@ def _debug_string_(proto, throw_on_error=True): return proto.__str__() -class Variable(core.VarBase): +class Variable(object): """ In Fluid, every input and output of an operator is a variable. In most cases, variables are used for holding different kinds of data or training @@ -277,7 +266,6 @@ class Variable(core.VarBase): stop_gradient=False, is_data=False, **kwargs): - core.VarBase.__init__(self) self.block = block self.error_clip = error_clip @@ -358,18 +346,6 @@ class Variable(core.VarBase): self.stop_gradient = stop_gradient self.is_data = is_data - def _numpy(self): - scope = _imperative_tracer().get_scope(self.block.desc) - tensor = core.get_variable_tensor(scope, self.desc.name()) - return np.array(tensor) - - def _backward(self): - scope = _imperative_tracer().get_scope(self.block.desc) - self._run_backward(scope) - - def _gradient(self): - return np.array(self._grad()) - def __str__(self): return self.to_string(True) @@ -516,7 +492,7 @@ class OpProtoHolder(object): } -class Operator(core.OpBase): +class Operator(object): """ In Fluid, all the operation are represented by Operator, and Operator is regarded as a build in an instruction of a Block. 
@@ -572,7 +548,6 @@ class Operator(core.OpBase):
                  inputs=None,
                  outputs=None,
                  attrs=None):
-        core.OpBase.__init__(self)
         self.block = block
         self.desc = desc
         # note: not add self.attrs here:
@@ -612,7 +587,6 @@
                     return True
             return False
 
-        self.inputs = []
         if inputs is not None:
             for in_proto in proto.inputs:
                 found = find_name(inputs, in_proto.name)
@@ -639,13 +613,6 @@
                 else:
                     self.desc.set_input(in_proto.name, [])
 
-            for inp in inputs.values():
-                if isinstance(inp, Variable):
-                    self.inputs.append(inp)
-                elif isinstance(inp, list) or isinstance(inp, tuple):
-                    self.inputs.extend(inp[:])
-
-        self.outputs = []
         if outputs is not None:
             given = set()
             need = set()
@@ -674,12 +641,6 @@
                     arg.op = self
             self.desc.set_output(out_proto.name, out_arg_names)
 
-            for out in outputs.values():
-                if isinstance(out, Variable):
-                    self.outputs.append(out)
-                elif isinstance(out, list) or isinstance(out, tuple):
-                    self.outputs.extend(out[:])
-
         if op_attrs is not None:
             if not isinstance(op_attrs, dict):
                 raise TypeError("'attrs' should be a dict.")
@@ -1245,8 +1206,6 @@
         """
         op_desc = self.desc.append_op()
         op = Operator(block=self, desc=op_desc, *args, **kwargs)
-        if _in_imperative_mode():
-            _imperative_tracer().trace(op, op.inputs, op.outputs, self.desc)
         self.ops.append(op)
         return op
 
@@ -2250,12 +2209,3 @@ def _get_var(name, program=None):
     assert isinstance(program, Program)
     return program.global_block().var(name)
 
-
-
-@contextlib.contextmanager
-def _imperative_guard(tracer):
-    global _imperative_tracer_
-    tmp_trace = _imperative_tracer_
-    _imperative_tracer_ = tracer
-    yield
-    _imperative_tracer_ = tmp_trace
diff --git a/python/paddle/fluid/imperative/__init__.py b/python/paddle/fluid/imperative/__init__.py
deleted file mode 100644
index 922308b6b18..00000000000
--- a/python/paddle/fluid/imperative/__init__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import print_function
-
-from . import base
-from .base import *
-
-from . import layers
-from .layers import *
-
-__all__ = []
-__all__ += layers.__all__
-__all__ += base.__all__
diff --git a/python/paddle/fluid/imperative/base.py b/python/paddle/fluid/imperative/base.py
deleted file mode 100644
index 15d38ddb56c..00000000000
--- a/python/paddle/fluid/imperative/base.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import contextlib
-import numpy as np
-
-from paddle.fluid import core
-from paddle.fluid import framework
-
-__all__ = ['enabled', 'guard', 'to_variable']
-
-
-def enabled():
-    return framework._in_imperative_mode()
-
-
-@contextlib.contextmanager
-def guard():
-    train = framework.Program()
-    startup = framework.Program()
-    tracer = core.Tracer(train.current_block().desc)
-    with framework.program_guard(train, startup):
-        with framework.unique_name.guard():
-            with framework._imperative_guard(tracer):
-                yield
-
-
-def to_variable(value, block=None):
-    if isinstance(value, np.ndarray):
-        if not block:
-            block = framework.default_main_program().current_block()
-        py_var = framework.Variable(
-            block,
-            type=core.VarDesc.VarType.LOD_TENSOR,
-            name=None,
-            shape=value.shape,
-            dtype=value.dtype)
-        scope = framework._imperative_tracer().get_scope(block.desc)
-        var = scope.var(py_var.name)
-        tensor = var.get_tensor()
-        tensor.set(value, core.CPUPlace())
-        return py_var
-    elif isinstance(value, framework.Variable):
-        return value
-    else:
-        raise ValueError("Unsupported type %s" % type(value))
diff --git a/python/paddle/fluid/imperative/layers.py b/python/paddle/fluid/imperative/layers.py
deleted file mode 100644
index 1a28f7f4ae3..00000000000
--- a/python/paddle/fluid/imperative/layers.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import contextlib
-import sys
-import numpy as np
-
-from paddle.fluid import core
-from paddle.fluid import framework
-from paddle.fluid.imperative import base
-
-__all__ = ['PyLayer']
-
-
-class PyLayer(core.Layer):
-    def __init__(self):
-        pass
-
-    def __call__(self, inputs):
-        # TODO(panyx0718): Support declarative mode as well.
-        assert base.enabled()
-        if not isinstance(inputs, list) and not isinstance(inputs, tuple):
-            inputs = [inputs]
-
-        var_inputs = []
-        for x in inputs:
-            py_var = base.to_variable(x)
-            var_inputs.append(py_var)
-        outputs = self.forward(var_inputs)
-        return outputs
-
-    def forward(self, inputs):
-        return []
diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py
index 74b4a977db6..dc317de9abb 100644
--- a/python/paddle/fluid/layer_helper.py
+++ b/python/paddle/fluid/layer_helper.py
@@ -17,13 +17,10 @@ from __future__ import print_function
 import copy
 import itertools
 import six
-import sys
-import numpy as np
 
 from .framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating
 from . import unique_name
 from paddle.fluid.initializer import Constant, Xavier
-from paddle.fluid.imperative import base
 from .param_attr import ParamAttr, WeightNormParamAttr
 from . import core
 from six.moves import zip
@@ -49,21 +46,23 @@ class LayerHelper(object):
     def startup_program(self):
         return default_startup_program()
 
-    def to_variable(self, x):
-        return base.to_variable(x, self.main_program.current_block())
-
     def append_op(self, *args, **kwargs):
         return self.main_program.current_block().append_op(*args, **kwargs)
 
     def multiple_input(self, input_param_name='input'):
         inputs = self.kwargs.get(input_param_name, [])
-        ret = []
-        if isinstance(inputs, list) or isinstance(inputs, tuple):
-            for inp in inputs:
-                ret.append(self.to_variable(inp))
+        type_error = TypeError(
+            "Input of {0} layer should be Variable or sequence of Variable".
+            format(self.layer_type))
+        if isinstance(inputs, Variable):
+            inputs = [inputs]
+        elif not isinstance(inputs, list) and not isinstance(inputs, tuple):
+            raise type_error
         else:
-            ret.append(self.to_variable(inputs))
-        return ret
+            for each in inputs:
+                if not isinstance(each, Variable):
+                    raise type_error
+        return inputs
 
     def input(self, input_param_name='input'):
         inputs = self.multiple_input(input_param_name)
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index fac7538a6ad..4833212d311 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -6623,8 +6623,7 @@ def relu(x, name=None):
     helper = LayerHelper('relu', **locals())
     dtype = helper.input_dtype(input_param_name='x')
     out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(
-        type="relu", inputs={"X": helper.input('x')}, outputs={"Out": out})
+    helper.append_op(type="relu", inputs={"X": x}, outputs={"Out": out})
     return out
 
 
diff --git a/python/paddle/fluid/tests/unittests/test_imperative.py b/python/paddle/fluid/tests/unittests/test_imperative.py
deleted file mode 100644
index b5b6305155d..00000000000
--- a/python/paddle/fluid/tests/unittests/test_imperative.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-import sys
-import numpy as np
-
-import paddle.fluid as fluid
-from paddle.fluid import core
-
-
-class MyLayer(fluid.imperative.PyLayer):
-    def __init__(self):
-        super(MyLayer, self).__init__()
-
-    def forward(self, inputs):
-        x = fluid.layers.relu(inputs[0])
-        self._x_for_debug = x
-        return [fluid.layers.elementwise_mul(x, x)]
-
-
-class TestImperative(unittest.TestCase):
-    def test_layer(self):
-        with fluid.imperative.guard():
-            cl = core.Layer()
-            cl.forward([])
-            l = fluid.imperative.PyLayer()
-            l.forward([])
-
-    def test_layer_in_out(self):
-        with fluid.imperative.guard():
-            l = MyLayer()
-            x = l(np.array([1.0, 2.0, -1.0], dtype=np.float32))[0]
-            self.assertIsNotNone(x)
-            sys.stderr.write("%s output: %s\n" % (x, x._numpy()))
-            x._backward()
-            sys.stderr.write("grad %s\n" % l._x_for_debug._gradient())
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/python/setup.py.in b/python/setup.py.in
index 0eb69cdb5c7..5aee26b6383 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -101,7 +101,6 @@ packages=['paddle',
           'paddle.dataset',
           'paddle.reader',
           'paddle.fluid',
-          'paddle.fluid.imperative',
           'paddle.fluid.proto',
           'paddle.fluid.proto.profiler',
           'paddle.fluid.layers',
diff --git a/tools/print_signatures.py b/tools/print_signatures.py
index 7e61dde0a44..5c5266f904f 100644
--- a/tools/print_signatures.py
+++ b/tools/print_signatures.py
@@ -27,8 +27,6 @@ import pydoc
 
 member_dict = collections.OrderedDict()
 
-experimental_namespace = {"paddle.fluid.imperative"}
-
 
 def visit_member(parent_name, member):
     cur_name = ".".join([parent_name, member.__name__])
@@ -53,8 +51,6 @@ def visit_member(parent_name, member):
 
 
 def visit_all_module(mod):
-    if (mod.__name__ in experimental_namespace):
-        return
     for member_name in (
            name
            for name in (mod.__all__ if hasattr(mod, "__all__") else dir(mod))
--
GitLab