From 79c502c80c1a5c2c790f0a88e477b9f3ee5cc61c Mon Sep 17 00:00:00 2001
From: wangliu
Date: Tue, 5 Jun 2018 19:08:45 +0800
Subject: [PATCH] remove unused code

---
 src/framework/attribute.cpp                 |  8 +-
 src/framework/attribute.h                   | 73 +++++++----------
 src/framework/data_layout.h                 |  1 -
 src/framework/data_transform.cpp            | 88 ---------------------
 src/framework/data_type.h                   | 37 ---------
 src/framework/ddim.cpp                      |  2 +-
 src/framework/ddim.h                        | 13 +--
 src/framework/lod_tensor.cpp                | 35 +++-----
 src/framework/op_info.h                     | 15 ++--
 src/framework/scope.cpp                     |  9 ---
 src/operators/conv_op.cpp                   |  1 -
 src/operators/depthwise_conv_op.cpp         |  1 -
 src/operators/kernel/arm/sigmoid_kernel.cpp | 14 ----
 13 files changed, 48 insertions(+), 249 deletions(-)
 delete mode 100644 src/framework/data_transform.cpp
 delete mode 100644 src/framework/data_type.h

diff --git a/src/framework/attribute.cpp b/src/framework/attribute.cpp
index 01b0ed523c..8b150f4e9e 100644
--- a/src/framework/attribute.cpp
+++ b/src/framework/attribute.cpp
@@ -17,14 +17,8 @@ limitations under the License. */
 namespace paddle_mobile {
 namespace framework {
 
-/*
- * Variant<int, float, std::string, std::vector<int>, std::vector<float>,
- std::vector<std::string>, bool, std::vector<bool>, BlockDesc *,
- int64_t>
- * */
-
 struct PrintVistor : Vistor<Print &> {
-  PrintVistor(Print &printer) : printer_(printer) {}
+  explicit PrintVistor(Print &printer) : printer_(printer) {}
   template <typename T>
   Print &operator()(const T &value) {
     printer_ << value;
diff --git a/src/framework/attribute.h b/src/framework/attribute.h
index b77d94521e..7db107ceef 100644
--- a/src/framework/attribute.h
+++ b/src/framework/attribute.h
@@ -14,7 +14,9 @@ limitations under the License. */
 
 #pragma once
 
+#include <string>
 #include <typeinfo>
+#include <vector>
 #include "common/enforce.h"
 #include "common/log.h"
 #include "common/variant.h"
@@ -22,28 +24,15 @@ limitations under the License. */
 
 namespace paddle_mobile {
 namespace framework {
+using std::string;
+using std::vector;
 class BlockDesc;
 
 class Attribute {
  public:
-  /*
-   * PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__INT = 0,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__FLOAT = 1,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__STRING = 2,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__INTS = 3,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__FLOATS = 4,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__STRINGS = 5,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEAN = 6,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEANS = 7,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BLOCK = 8,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__LONG = 9
-     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE)
-   *
-   * */
   static Attribute GetAttrValue(
       PaddleMobile__Framework__Proto__OpDesc__Attr *attr_desc) {
-    // std::cout << "begin get attr value" << std::endl;
     Attribute attr;
     switch (attr_desc->type) {
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEAN: {
@@ -63,35 +52,35 @@ class Attribute {
         break;
       }
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEANS: {
-        std::vector<bool> val(attr_desc->n_bools);
+        vector<bool> val(attr_desc->n_bools);
         for (int i = 0; i < attr_desc->n_bools; ++i) {
           val[i] = attr_desc->bools[i];
         }
-        attr.Set<std::vector<bool>>(val);
+        attr.Set<vector<bool>>(val);
         break;
       }
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__INTS: {
-        std::vector<int> val(attr_desc->n_ints);
+        vector<int> val(attr_desc->n_ints);
         for (int i = 0; i < attr_desc->n_ints; ++i) {
           val[i] = attr_desc->ints[i];
         }
-        attr.Set<std::vector<int>>(val);
+        attr.Set<vector<int>>(val);
         break;
       }
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__FLOATS: {
-        std::vector<float> val(attr_desc->n_floats);
+        vector<float> val(attr_desc->n_floats);
         for (int i = 0; i < attr_desc->n_floats; ++i) {
           val[i] = attr_desc->floats[i];
        }
-        attr.Set<std::vector<float>>(val);
+        attr.Set<vector<float>>(val);
         break;
       }
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__STRINGS: {
-        std::vector<std::string> val(attr_desc->n_strings);
+        vector<string> val(attr_desc->n_strings);
         for (int i = 0; i < attr_desc->n_strings; ++i) {
           val[i] = attr_desc->strings[i];
         }
-        attr.Set<std::vector<std::string>>(val);
+        attr.Set<vector<string>>(val);
         break;
       }
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__LONG: {
@@ -122,21 +111,18 @@ class Attribute {
       return vistor(attr.variant_.Get<int>());
     } else if (attr.variant_.TypeId() == typeid(float).hash_code()) {
       return vistor(attr.variant_.Get<float>());
-    } else if (attr.variant_.TypeId() == typeid(std::string).hash_code()) {
-      return vistor(attr.variant_.Get<std::string>());
-    } else if (attr.variant_.TypeId() == typeid(std::vector<int>).hash_code()) {
-      return vistor(attr.variant_.Get<std::vector<int>>());
-    } else if (attr.variant_.TypeId() ==
-               typeid(std::vector<float>).hash_code()) {
-      return vistor(attr.variant_.Get<std::vector<float>>());
-    } else if (attr.variant_.TypeId() ==
-               typeid(std::vector<std::string>).hash_code()) {
-      return vistor(attr.variant_.Get<std::vector<std::string>>());
+    } else if (attr.variant_.TypeId() == typeid(string).hash_code()) {
+      return vistor(attr.variant_.Get<string>());
+    } else if (attr.variant_.TypeId() == typeid(vector<int>).hash_code()) {
+      return vistor(attr.variant_.Get<vector<int>>());
+    } else if (attr.variant_.TypeId() == typeid(vector<float>).hash_code()) {
+      return vistor(attr.variant_.Get<vector<float>>());
+    } else if (attr.variant_.TypeId() == typeid(vector<string>).hash_code()) {
+      return vistor(attr.variant_.Get<vector<string>>());
     } else if (attr.variant_.TypeId() == typeid(bool).hash_code()) {
       return vistor(attr.variant_.Get<bool>());
-    } else if (attr.variant_.TypeId() ==
-               typeid(std::vector<bool>).hash_code()) {
-      return vistor(attr.variant_.Get<std::vector<bool>>());
+    } else if (attr.variant_.TypeId() == typeid(vector<bool>).hash_code()) {
+      return vistor(attr.variant_.Get<vector<bool>>());
     } else if (attr.variant_.TypeId() == typeid(int64_t).hash_code()) {
       return vistor(attr.variant_.Get<int64_t>());
     } else {
@@ -145,24 +131,21 @@ class Attribute {
   }
 
  private:
-  Variant<int, float, std::string, std::vector<int>, std::vector<float>,
-          std::vector<std::string>, bool, std::vector<bool>, BlockDesc *,
-          int64_t>
+  Variant<int, float, string, vector<int>, vector<float>, vector<string>, bool,
+          vector<bool>, BlockDesc *, int64_t>
       variant_;
 };
 
-using AttributeMap = std::unordered_map<std::string, Attribute>;
+using AttributeMap = std::unordered_map<string, Attribute>;
 
 class AttrReader {
  public:
   explicit AttrReader(const AttributeMap &attrs) : attrs_(attrs) {}
 
   template <typename T>
-  inline T Get(const std::string &name) const {
-    // PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should
-    // be in
-    // AttributeMap",
-    //                name);
+  inline T Get(const string &name) const {
+    PADDLE_MOBILE_ENFORCE(attrs_.count(name) != 0,
+                          "%s should be in AttributeMap", name);
     return ((Attribute)attrs_.at(name)).Get<T>();
   }
 
diff --git a/src/framework/data_layout.h b/src/framework/data_layout.h
index 72c16c3673..9944c88c8f 100644
--- a/src/framework/data_layout.h
+++ b/src/framework/data_layout.h
@@ -54,7 +54,6 @@ inline std::string DataLayoutToString(const DataLayout &data_layout) {
       return "ANY_LAYOUT";
     default:
       break;
-      // std::cout << "unknown DataLayou %d", data_layout;
   }
 }
 
diff --git a/src/framework/data_transform.cpp b/src/framework/data_transform.cpp
deleted file mode 100644
index a6be4d2fcb..0000000000
--- a/src/framework/data_transform.cpp
+++ /dev/null
@@ -1,88 +0,0 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#pragma once
-
-#include "framework/data_transform.h"
-
-namespace paddle_mobile {
-namespace framework {
-
-static void PassTensorData(Tensor *from, Tensor *to) {
-  to->ShareDataWith(*from);
-  *from = Tensor();
-}
-
-void DataTransform(const OpKernelType &expected_kernel_type,
-                   const OpKernelType &kernel_type_for_var,
-                   const Tensor &input_tensor, Tensor *output_tensor) {
-  bool transformed = false;
-  Tensor in;
-  in.ShareDataWith(input_tensor);
-  Tensor out;
-
-  //  // do layout transform
-  //  if (NeedTransformLayout(expected_kernel_type.data_layout_,
-  //                          kernel_type_for_var.data_layout_)) {
-  //    TransDataLayout(kernel_type_for_var, expected_kernel_type, in,
-  //    &out);
-  //    transformed = true;
-  //    PassTensorData(&out, &in);
-  //  }
-  //
-  //  // do data type transform
-  //  if (expected_kernel_type.data_type_ !=
-  //  kernel_type_for_var.data_type_) {
-  //    TransDataType(kernel_type_for_var, expected_kernel_type, in,
-  //    &out);
-  //    transformed = true;
-  //    PassTensorData(&out, &in);
-  //  }
-  //
-  //  // do device transform
-  //  if (!platform::is_same_place(kernel_type_for_var.place_,
-  //                               expected_kernel_type.place_)) {
-  //    TransDataDevice(in, expected_kernel_type.place_, &out);
-  //    transformed = true;
-  //    PassTensorData(&out, &in);
-  //  }
-  //
-  //  PADDLE_ENFORCE(transformed, "No transform is applied, please
-  //  check!");
-  // get output data
-  output_tensor->ShareDataWith(in);
-}
-
-void CopyVariableWithTensor(const Variable &in_var, const Tensor &tensor,
-                            Variable *out_var) {
-  //  if (in_var.IsType<LoDTensor>()) {
-  //    auto& in_lod_tensor = in_var.Get<LoDTensor>();
-  //    auto* tran_lod_tensor = out_var.GetMutable<LoDTensor>();
-  //    tran_lod_tensor->set_lod(in_lod_tensor.lod());
-  //    tran_lod_tensor->set_layout(in_lod_tensor.layout());
-  //    tran_lod_tensor->ShareDataWith(tensor);
-  //  } else if (in_var.IsType<SelectedRows>()) {
-  //    auto& in_selected_rows = in_var.Get<SelectedRows>();
-  //    auto* trans_selected_rows =
-  //    out_var.GetMutable<SelectedRows>();
-  //    trans_selected_rows->set_height(in_selected_rows.height());
-  //    trans_selected_rows->set_rows(in_selected_rows.rows());
-  //    trans_selected_rows->mutable_value()->ShareDataWith(tensor);
-  //  } else {
-  //    PADDLE_THROW("unknown var type");
-  //  }
-}
-
-}  // namespace framework
-}  // namespace paddle_mobile
diff --git a/src/framework/data_type.h b/src/framework/data_type.h
deleted file mode 100644
index ddfc0dcc4a..0000000000
--- a/src/framework/data_type.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#pragma once
-
-namespace paddle_mobile {
-namespace framework {
-
-// inline proto::VarType::Type ToDataType(std::type_index type) {
-//   using namespace paddle_mobile::framework::proto;
-//   if (typeid(float).hash_code() == type.hash_code()) {
-//     return proto::VarType::FP32;
-//   } else if (typeid(double).hash_code() == type.hash_code()) {
-//     return proto::VarType::FP64;
-//   } else if (typeid(int).hash_code() == type.hash_code()) {
-//     return proto::VarType::INT32;
-//   } else if (typeid(int64_t).hash_code() == type.hash_code()) {
-//     return proto::VarType::INT64;
-//   } else if (typeid(bool).hash_code() == type.hash_code()) {
-//     return proto::VarType::BOOL;
-//   } else {
-////    PADDLE_THROW("Not supported");
-//   }
-// }
-}  // namespace framework
-}  // namespace paddle_mobile
diff --git a/src/framework/ddim.cpp b/src/framework/ddim.cpp
index db6f2cd6ab..4fa01564d5 100644
--- a/src/framework/ddim.cpp
+++ b/src/framework/ddim.cpp
@@ -183,7 +183,7 @@ DDim DDim::operator*(DDim d) const {
 
 int64_t get(const DDim &ddim, int idx) { return ddim[idx]; }
 
-void set(DDim &ddim, int idx, int value) { ddim[idx] = value; }
+void set(DDim *ddim, int idx, int value) { (*ddim)[idx] = value; }
 
 /// @cond HIDDEN
 struct VectorizeVisitor : Vistor<void> {
diff --git a/src/framework/ddim.h b/src/framework/ddim.h
index 88039b2e0a..f0b9db9506 100644
--- a/src/framework/ddim.h
+++ b/src/framework/ddim.h
@@ -83,17 +83,6 @@ struct DDim {
 
   int64_t operator[](int idx) const;
 
-  //  template <typename Visitor>
-  //  typename Visitor::result_type apply_visitor(Visitor& visitor) {
-  //    return var.apply_visitor(visitor);
-  //  }
-  //
-  //  template <typename Visitor>
-  //  typename Visitor::result_type apply_visitor(Visitor& visitor)
-  //  const {
-  //    return var.apply_visitor(visitor);
-  //  }
-
   DDimVar getVar() { return var; }
 
   bool operator==(DDim d) const;
@@ -126,7 +115,7 @@ DDim make_ddim(std::initializer_list<int64_t> dims);
 
 int64_t get(const DDim &dim, int idx);
 
-void set(DDim &dim, int idx, int val);
+void set(DDim *dim, int idx, int val);
 
 std::vector<int64_t> vectorize(const DDim &ddim);
 
diff --git a/src/framework/lod_tensor.cpp b/src/framework/lod_tensor.cpp
index 48c2c46989..dc0b77f32e 100644
--- a/src/framework/lod_tensor.cpp
+++ b/src/framework/lod_tensor.cpp
@@ -42,23 +42,10 @@ std::ostream &operator<<(std::ostream &os, const LoD &lod) {
 }
 
 std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
-  //  PADDLE_ENFORCE(t.type().hash_code() ==
-  //  typeid(float).hash_code());
-
-  //  if (!platform::is_cpu_place(t.place())) {
-  //    LoDTensor tt;
-  //    framework::TensorCopy(t, platform::CPUPlace(), &tt);
-  //    platform::DeviceContextPool &pool =
-  //    platform::DeviceContextPool::Instance(); auto &dev_ctx =
-  //    *pool.Get(t.place()); dev_ctx.Wait();
-  //
-  //    os << tt;
-  //    return os;
-  //  }
-
+  PADDLE_MOBILE_ENFORCE(t.type().hash_code() == typeid(float).hash_code(),
+                        "t.type() is not float");
   os << "dim: " << t.dims() << "\n";
   os << "lod: " << t.lod() << "\n";
-  // only print first ten elements
   int64_t size = t.numel() < 10 ? t.numel() : 10;
   for (int64_t i = 0; i < size; ++i) {
@@ -76,9 +63,9 @@ std::string LoDToString(const LoD &lod) {
 
 LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin,
                  size_t elem_end) {
-  //  PADDLE_ENFORCE_LT(level, in.size());
-  //  PADDLE_ENFORCE_LT(elem_end, in[level].size());
-
+  PADDLE_MOBILE_ENFORCE(level < in.size(), "level should >= in.size()");
+  PADDLE_MOBILE_ENFORCE(elem_end < in[level].size(),
+                        "elem_end >= in[level].size()");
   LoD res;
   res.resize(in.size() - level);
   // copy the first level
@@ -211,8 +198,9 @@ LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx,
   LoD sub_lod;
 
   for (size_t level_idx = start_level; level_idx < lod.size(); ++level_idx) {
-    //    PADDLE_ENFORCE_LE(start_idx, end_idx);
-    //    PADDLE_ENFORCE_LT(end_idx, lod[level_idx].size());
+    PADDLE_MOBILE_ENFORCE(start_idx <= end_idx, "start_idx > end_idx");
+    PADDLE_MOBILE_ENFORCE(end_idx < lod[level_idx].size(),
+                          "end_idx >= lod[level_idx].size()");
     std::vector<size_t> level_lens;
     for (size_t i = start_idx; i < end_idx; ++i) {
       level_lens.push_back(lod[level_idx][i + 1] - lod[level_idx][i]);
@@ -226,10 +214,9 @@ LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx,
 }
 
 void AppendLoD(LoD *lod, const LoD &lod_length) {
-  //  PADDLE_ENFORCE(
-  //      lod->empty() || lod->size() == lod_length.size(),
-  //      "The lod_length should has the same size with the appended
-  //      lod.");
+  PADDLE_MOBILE_ENFORCE(
+      lod->empty() || lod->size() == lod_length.size(),
+      "The lod_length should has the same size with the appended lod.");
   if (lod->empty()) {
     for (size_t i = 0; i < lod_length.size(); ++i) {
       lod->emplace_back(1, 0);  // size = 1, value = 0;
diff --git a/src/framework/op_info.h b/src/framework/op_info.h
index 7475d15523..16b3487955 100644
--- a/src/framework/op_info.h
+++ b/src/framework/op_info.h
@@ -25,9 +25,8 @@ template <typename Dtype>
 struct OpInfo {
   OpCreator<Dtype> creator_;
   const OpCreator<Dtype> &Creator() const {
-    //    PADDLE_ENFORCE_NOT_NULL(creator_,
-    //                            "Operator Creator has not been
-    //                            registered");
+    PADDLE_MOBILE_ENFORCE(creator_ != nullptr,
+                          "Operator Creator has not been registered");
     return creator_;
   }
 };
@@ -48,17 +47,15 @@ class OpInfoMap {
   }
 
   void Insert(const std::string &type, const OpInfo<Dtype> &info) {
-    //    PADDLE_ENFORCE(!Has(type), "Operator %s has been
-    //    registered", type);
+    PADDLE_MOBILE_ENFORCE(!Has(type), "Operator %s has been registered",
+                          type.c_str());
     map_.insert({type, info});
   }
 
   const OpInfo<Dtype> &Get(const std::string &type) const {
     auto op_info_ptr = GetNullable(type);
-    //    PADDLE_ENFORCE_NOT_NULL(op_info_ptr, "Operator %s has not
-    //    been
-    //    registered",
-    //                            type);
+    PADDLE_MOBILE_ENFORCE(op_info_ptr != nullptr,
+                          "Operator %s has not been registered", type.c_str());
     return *op_info_ptr;
   }
 
diff --git a/src/framework/scope.cpp b/src/framework/scope.cpp
index c5ee2d39fa..664499d7e6 100644
--- a/src/framework/scope.cpp
+++ b/src/framework/scope.cpp
@@ -76,7 +76,6 @@ void Scope::DeleteScope(Scope *scope) const {
   auto it = std::find(kids_.begin(), kids_.end(), scope);
   kids_.erase(it);
   delete scope;
-  // deferent
 }
 
 void Scope::EraseVars(const std::vector<std::string> &var_names) {
@@ -104,14 +103,6 @@ void Scope::Rename(const std::string &origin_name,
   vars_[new_name] = origin_it->second;
   vars_.erase(origin_it);
 }
-//
-// std::string Scope::Rename(const std::string& origin_name)
-// const {
-//   auto var_name = string::Sprintf("%p.%d", this,
-//   vars_.size());
-//   Rename(origin_name, var_name);
-//   return var_name;
-// }
 
 Variable *Scope::FindVarLocally(const std::string &name) const {
   auto it = vars_.find(name);
diff --git a/src/operators/conv_op.cpp b/src/operators/conv_op.cpp
index ad31c296c9..f9576f2598 100644
--- a/src/operators/conv_op.cpp
+++ b/src/operators/conv_op.cpp
@@ -14,7 +14,6 @@ limitations under the License. */
 
 #include "operators/conv_op.h"
 #include <vector>
-#include "framework/data_type.h"
 #include "framework/op_proto_maker.h"
 #include "framework/op_registry.h"
 
diff --git a/src/operators/depthwise_conv_op.cpp b/src/operators/depthwise_conv_op.cpp
index 2538298175..3e3e510d76 100644
--- a/src/operators/depthwise_conv_op.cpp
+++ b/src/operators/depthwise_conv_op.cpp
@@ -14,7 +14,6 @@ limitations under the License. */
 
 #include "operators/depthwise_conv_op.h"
 #include <vector>
-#include "framework/data_type.h"
 #include "framework/op_proto_maker.h"
 #include "framework/op_registry.h"
 #include "operators/conv_op.h"
diff --git a/src/operators/kernel/arm/sigmoid_kernel.cpp b/src/operators/kernel/arm/sigmoid_kernel.cpp
index 74bc298780..9f0886d582 100644
--- a/src/operators/kernel/arm/sigmoid_kernel.cpp
+++ b/src/operators/kernel/arm/sigmoid_kernel.cpp
@@ -25,35 +25,21 @@ using framework::Tensor;
 
 void sigmoid(const Tensor *X, Tensor *Y) {
 #if __ARM_NEON
-  DLOG << "step1";
   const float *input = X->data<float>();
-  DLOG << "step11";
-
   float *output = Y->mutable_data<float>();
-  DLOG << "step2";
-
   const DDim &dDim = X->dims();
-  DLOG << "step3";
-
   int axis_index = 1;
   if (dDim.size() < 4) {
     axis_index = 0;
   }
-  DLOG << "step4";
-
   DDim outer_ddim =
       paddle_mobile::framework::slice_ddim(dDim, 0, axis_index + 1);
   DDim inner_ddim =
       paddle_mobile::framework::slice_ddim(dDim, axis_index + 1, dDim.size());
-  DLOG << "step5";
-
   int out_size = paddle_mobile::framework::product(outer_ddim);
   int inner_size = paddle_mobile::framework::product(inner_ddim);
-  DLOG << "step6";
 
 #pragma omp parallel for
-  DLOG << "outsize=" << out_size;
-  DLOG << "innersize=" << inner_size;
   for (int i = 0; i < out_size; ++i) {
     const float *input_outer_ptr = input + i * inner_size;
     float *output_outer_ptr = output + i * inner_size;
--
GitLab