Commit 79c502c8 authored by wangliu

remove unused code

Parent 9b53e2d6
......@@ -17,14 +17,8 @@ limitations under the License. */
 namespace paddle_mobile {
 namespace framework {
-/*
- * Variant<int, float, std::string, std::vector<int>, std::vector<float>,
-          std::vector<std::string>, bool, std::vector<bool>, BlockDesc *,
-          int64_t>
- * */
 struct PrintVistor : Vistor<Print &> {
-  PrintVistor(Print &printer) : printer_(printer) {}
+  explicit PrintVistor(Print &printer) : printer_(printer) {}
   template <typename T>
   Print &operator()(const T &value) {
     printer_ << value;
......
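
Note on the change above: marking the single-argument constructor `explicit` stops a `Print &` from silently converting into a `PrintVistor`. A minimal standalone illustration of the rule (hypothetical types, not from this repo):

struct Printer {};

struct Visitor {
  // Without 'explicit', `Visitor v = p;` would compile through an
  // unintended implicit conversion; with it, construction must be spelled out.
  explicit Visitor(Printer &p) : printer_(p) {}
  Printer &printer_;
};

int main() {
  Printer p;
  Visitor v(p);      // OK: direct initialization
  // Visitor w = p;  // error: chosen constructor is explicit
  (void)v;
  return 0;
}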
......@@ -14,7 +14,9 @@ limitations under the License. */
 #pragma once
 #include <string>
+#include <unordered_map>
+#include <vector>
 #include "common/enforce.h"
 #include "common/log.h"
 #include "common/variant.h"
......@@ -22,28 +24,15 @@ limitations under the License. */
 namespace paddle_mobile {
 namespace framework {
+using std::string;
+using std::vector;
 class BlockDesc;
 class Attribute {
  public:
-  /*
-   *  PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__INT = 0,
-  PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__FLOAT = 1,
-  PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__STRING = 2,
-  PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__INTS = 3,
-  PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__FLOATS = 4,
-  PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__STRINGS = 5,
-  PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEAN = 6,
-  PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEANS = 7,
-  PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BLOCK = 8,
-  PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__LONG = 9
-  PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE)
-   *
-   * */
   static Attribute GetAttrValue(
       PaddleMobile__Framework__Proto__OpDesc__Attr *attr_desc) {
-    //    std::cout << "begin get attr value" << std::endl;
     Attribute attr;
     switch (attr_desc->type) {
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEAN: {
......@@ -63,35 +52,35 @@ class Attribute {
         break;
       }
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEANS: {
-        std::vector<bool> val(attr_desc->n_bools);
+        vector<bool> val(attr_desc->n_bools);
         for (int i = 0; i < attr_desc->n_bools; ++i) {
           val[i] = attr_desc->bools[i];
         }
-        attr.Set<std::vector<bool>>(val);
+        attr.Set<vector<bool>>(val);
         break;
       }
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__INTS: {
-        std::vector<int> val(attr_desc->n_ints);
+        vector<int> val(attr_desc->n_ints);
         for (int i = 0; i < attr_desc->n_ints; ++i) {
           val[i] = attr_desc->ints[i];
         }
-        attr.Set<std::vector<int>>(val);
+        attr.Set<vector<int>>(val);
         break;
       }
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__FLOATS: {
-        std::vector<float> val(attr_desc->n_floats);
+        vector<float> val(attr_desc->n_floats);
         for (int i = 0; i < attr_desc->n_floats; ++i) {
           val[i] = attr_desc->floats[i];
         }
-        attr.Set<std::vector<float>>(val);
+        attr.Set<vector<float>>(val);
         break;
       }
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__STRINGS: {
-        std::vector<std::string> val(attr_desc->n_strings);
+        vector<string> val(attr_desc->n_strings);
         for (int i = 0; i < attr_desc->n_strings; ++i) {
           val[i] = attr_desc->strings[i];
         }
-        attr.Set<std::vector<std::string>>(val);
+        attr.Set<vector<string>>(val);
         break;
       }
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__LONG: {
......@@ -122,21 +111,18 @@ class Attribute {
       return vistor(attr.variant_.Get<int>());
     } else if (attr.variant_.TypeId() == typeid(float).hash_code()) {
       return vistor(attr.variant_.Get<float>());
-    } else if (attr.variant_.TypeId() == typeid(std::string).hash_code()) {
-      return vistor(attr.variant_.Get<std::string>());
-    } else if (attr.variant_.TypeId() == typeid(std::vector<int>).hash_code()) {
-      return vistor(attr.variant_.Get<std::vector<int>>());
-    } else if (attr.variant_.TypeId() ==
-               typeid(std::vector<float>).hash_code()) {
-      return vistor(attr.variant_.Get<std::vector<float>>());
-    } else if (attr.variant_.TypeId() ==
-               typeid(std::vector<std::string>).hash_code()) {
-      return vistor(attr.variant_.Get<std::vector<std::string>>());
+    } else if (attr.variant_.TypeId() == typeid(string).hash_code()) {
+      return vistor(attr.variant_.Get<string>());
+    } else if (attr.variant_.TypeId() == typeid(vector<int>).hash_code()) {
+      return vistor(attr.variant_.Get<vector<int>>());
+    } else if (attr.variant_.TypeId() == typeid(vector<float>).hash_code()) {
+      return vistor(attr.variant_.Get<vector<float>>());
+    } else if (attr.variant_.TypeId() == typeid(vector<string>).hash_code()) {
+      return vistor(attr.variant_.Get<vector<string>>());
     } else if (attr.variant_.TypeId() == typeid(bool).hash_code()) {
       return vistor(attr.variant_.Get<bool>());
-    } else if (attr.variant_.TypeId() ==
-               typeid(std::vector<bool>).hash_code()) {
-      return vistor(attr.variant_.Get<std::vector<bool>>());
+    } else if (attr.variant_.TypeId() == typeid(vector<bool>).hash_code()) {
+      return vistor(attr.variant_.Get<vector<bool>>());
     } else if (attr.variant_.TypeId() == typeid(int64_t).hash_code()) {
       return vistor(attr.variant_.Get<int64_t>());
     } else {
......@@ -145,24 +131,21 @@ class Attribute {
   }

  private:
-  Variant<int, float, std::string, std::vector<int>, std::vector<float>,
-          std::vector<std::string>, bool, std::vector<bool>, BlockDesc *,
-          int64_t>
+  Variant<int, float, string, vector<int>, vector<float>, vector<string>, bool,
+          vector<bool>, BlockDesc *, int64_t>
       variant_;
 };

-using AttributeMap = std::unordered_map<std::string, Attribute>;
+using AttributeMap = std::unordered_map<string, Attribute>;

 class AttrReader {
  public:
   explicit AttrReader(const AttributeMap &attrs) : attrs_(attrs) {}

   template <typename T>
-  inline T Get(const std::string &name) const {
-    //            PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should
-    //            be in
-    //            AttributeMap",
-    //                           name);
+  inline T Get(const string &name) const {
+    PADDLE_MOBILE_ENFORCE(attrs_.count(name) != 0,
+                          "%s should be in AttributeMap", name);
     return ((Attribute)attrs_.at(name)).Get<T>();
   }
......
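
Context for the `Variant` and visitor changes above: the hand-rolled `Variant` dispatches on `typeid(T).hash_code()`, presumably because the mobile toolchains this repo targets predate C++17. On a C++17 compiler the same dispatch collapses into `std::variant` plus `std::visit`; a self-contained sketch of that equivalent (illustrative names, not this repo's API):

#include <iostream>
#include <string>
#include <type_traits>
#include <variant>
#include <vector>

using Attr = std::variant<int, float, std::string, std::vector<int>>;

int main() {
  Attr a = std::vector<int>{1, 2, 3};
  // std::visit replaces the typeid(...).hash_code() if/else chain above.
  std::visit(
      [](const auto &v) {
        if constexpr (std::is_same_v<std::decay_t<decltype(v)>,
                                     std::vector<int>>) {
          std::cout << "ints, size " << v.size() << "\n";
        } else {
          std::cout << "scalar or string\n";
        }
      },
      a);
  return 0;
}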
......@@ -54,7 +54,6 @@ inline std::string DataLayoutToString(const DataLayout &data_layout) {
return "ANY_LAYOUT";
default:
break;
// std::cout << "unknown DataLayou %d", data_layout;
}
}
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "framework/data_transform.h"
namespace paddle_mobile {
namespace framework {
static void PassTensorData(Tensor *from, Tensor *to) {
  to->ShareDataWith(*from);
  *from = Tensor();
}

void DataTransform(const OpKernelType &expected_kernel_type,
                   const OpKernelType &kernel_type_for_var,
                   const Tensor &input_tensor, Tensor *output_tensor) {
  bool transformed = false;
  Tensor in;
  in.ShareDataWith(input_tensor);
  Tensor out;

  //  // do layout transform
  //  if (NeedTransformLayout(expected_kernel_type.data_layout_,
  //                          kernel_type_for_var.data_layout_)) {
  //    TransDataLayout(kernel_type_for_var, expected_kernel_type, in, &out);
  //    transformed = true;
  //    PassTensorData(&out, &in);
  //  }
  //
  //  // do data type transform
  //  if (expected_kernel_type.data_type_ != kernel_type_for_var.data_type_) {
  //    TransDataType(kernel_type_for_var, expected_kernel_type, in, &out);
  //    transformed = true;
  //    PassTensorData(&out, &in);
  //  }
  //
  //  // do device transform
  //  if (!platform::is_same_place(kernel_type_for_var.place_,
  //                               expected_kernel_type.place_)) {
  //    TransDataDevice(in, expected_kernel_type.place_, &out);
  //    transformed = true;
  //    PassTensorData(&out, &in);
  //  }
  //
  //  PADDLE_ENFORCE(transformed, "No transform is applied, please check!");

  // get output data
  output_tensor->ShareDataWith(in);
}

void CopyVariableWithTensor(const Variable &in_var, const Tensor &tensor,
                            Variable *out_var) {
  //  if (in_var.IsType<LoDTensor>()) {
  //    auto &in_lod_tensor = in_var.Get<LoDTensor>();
  //    auto *tran_lod_tensor = out_var->GetMutable<LoDTensor>();
  //    tran_lod_tensor->set_lod(in_lod_tensor.lod());
  //    tran_lod_tensor->set_layout(in_lod_tensor.layout());
  //    tran_lod_tensor->ShareDataWith(tensor);
  //  } else if (in_var.IsType<SelectedRows>()) {
  //    auto &in_selected_rows = in_var.Get<SelectedRows>();
  //    auto *trans_selected_rows = out_var->GetMutable<SelectedRows>();
  //    trans_selected_rows->set_height(in_selected_rows.height());
  //    trans_selected_rows->set_rows(in_selected_rows.rows());
  //    trans_selected_rows->mutable_value()->ShareDataWith(tensor);
  //  } else {
  //    PADDLE_THROW("unknown var type");
  //  }
}
} // namespace framework
} // namespace paddle_mobile
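
What survives in `DataTransform` is only the buffer-swap skeleton: each (currently disabled) stage would write its result into `out`, and `PassTensorData` hands `out` back to `in` for the next stage. A generic sketch of that pattern under those assumptions (toy `Buf` type, not the real Tensor API):

#include <utility>
#include <vector>

struct Buf {
  std::vector<float> data;
};

// Stand-in for PassTensorData: move 'from' into 'to', leave 'from' empty.
static void Pass(Buf *from, Buf *to) {
  *to = std::move(*from);
  *from = Buf();
}

// Each enabled stage writes into 'out'; Pass then makes that result the
// input of the next stage, like the disabled layout/type/device stages.
Buf RunPipeline(Buf in, const std::vector<bool> &stage_enabled) {
  Buf out;
  for (bool enabled : stage_enabled) {
    if (!enabled) continue;
    out = in;  // placeholder for a real transform writing into 'out'
    Pass(&out, &in);
  }
  return in;
}

int main() {
  Buf b{{1.0f, 2.0f}};
  return RunPipeline(std::move(b), {true, false}).data.size() == 2 ? 0 : 1;
}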
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
namespace paddle_mobile {
namespace framework {
// inline proto::VarType::Type ToDataType(std::type_index type) {
// using namespace paddle_mobile::framework::proto;
// if (typeid(float).hash_code() == type.hash_code()) {
// return proto::VarType::FP32;
// } else if (typeid(double).hash_code() == type.hash_code()) {
// return proto::VarType::FP64;
// } else if (typeid(int).hash_code() == type.hash_code()) {
// return proto::VarType::INT32;
// } else if (typeid(int64_t).hash_code() == type.hash_code()) {
// return proto::VarType::INT64;
// } else if (typeid(bool).hash_code() == type.hash_code()) {
// return proto::VarType::BOOL;
// } else {
//// PADDLE_THROW("Not supported");
// }
// }
} // namespace framework
} // namespace paddle_mobile
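
If the disabled `ToDataType` above were live, it would map a runtime `std::type_index` to a data-type enum. A compact sketch of that dispatch (hypothetical `VarType` enum, not the proto-generated one):

#include <cstdint>
#include <stdexcept>
#include <typeindex>
#include <typeinfo>

enum class VarType { FP32, FP64, INT32, INT64, BOOL };

// Map a runtime type to its enum tag, as the commented-out code would.
VarType ToDataType(std::type_index type) {
  if (type == std::type_index(typeid(float))) return VarType::FP32;
  if (type == std::type_index(typeid(double))) return VarType::FP64;
  if (type == std::type_index(typeid(int))) return VarType::INT32;
  if (type == std::type_index(typeid(int64_t))) return VarType::INT64;
  if (type == std::type_index(typeid(bool))) return VarType::BOOL;
  throw std::runtime_error("Not supported");
}

int main() { return ToDataType(typeid(float)) == VarType::FP32 ? 0 : 1; }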
......@@ -183,7 +183,7 @@ DDim DDim::operator*(DDim d) const {
 int64_t get(const DDim &ddim, int idx) { return ddim[idx]; }
-void set(DDim &ddim, int idx, int value) { ddim[idx] = value; }
+void set(DDim *ddim, int idx, int value) { (*ddim)[idx] = value; }
 /// @cond HIDDEN
 struct VectorizeVisitor : Vistor<void> {
......
......@@ -83,17 +83,6 @@ struct DDim {
   int64_t operator[](int idx) const;
-  //  template <typename Visitor>
-  //  typename Visitor::result_type apply_visitor(Visitor& visitor) {
-  //    return var.apply_visitor(visitor);
-  //  }
-  //
-  //  template <typename Visitor>
-  //  typename Visitor::result_type apply_visitor(Visitor& visitor)
-  //  const {
-  //    return var.apply_visitor(visitor);
-  //  }
   DDimVar getVar() { return var; }
   bool operator==(DDim d) const;
......@@ -126,7 +115,7 @@ DDim make_ddim(std::initializer_list<int64_t> dims);
 int64_t get(const DDim &dim, int idx);
-void set(DDim &dim, int idx, int val);
+void set(DDim *dim, int idx, int val);
 std::vector<int64_t> vectorize(const DDim &ddim);
......
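
The `set(DDim &, ...)` to `set(DDim *, ...)` change in both hunks follows the common style rule (e.g. the Google C++ style guide) that mutated arguments are passed by pointer so the mutation is visible at the call site. A minimal sketch with a hypothetical `Dim` type:

#include <array>
#include <cstdint>

struct Dim {
  std::array<int64_t, 4> d{};
};

// Pointer parameter signals "this argument is mutated".
void set(Dim *dim, int idx, int64_t val) { dim->d[idx] = val; }

int main() {
  Dim dim;
  set(&dim, 2, 7);  // the '&' at the call site makes the write obvious
  return dim.d[2] == 7 ? 0 : 1;
}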
......@@ -42,23 +42,10 @@ std::ostream &operator<<(std::ostream &os, const LoD &lod) {
 }

 std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
-  //  PADDLE_ENFORCE(t.type().hash_code() ==
-  //  typeid(float).hash_code());
-  //  if (!platform::is_cpu_place(t.place())) {
-  //    LoDTensor tt;
-  //    framework::TensorCopy(t, platform::CPUPlace(), &tt);
-  //    platform::DeviceContextPool &pool =
-  //    platform::DeviceContextPool::Instance(); auto &dev_ctx =
-  //    *pool.Get(t.place()); dev_ctx.Wait();
-  //
-  //    os << tt;
-  //    return os;
-  //  }
+  PADDLE_MOBILE_ENFORCE(t.type().hash_code() == typeid(float).hash_code(),
+                        "t.type() is not float");
   os << "dim: " << t.dims() << "\n";
   os << "lod: " << t.lod() << "\n";
   // only print first ten elements
   int64_t size = t.numel() < 10 ? t.numel() : 10;
   for (int64_t i = 0; i < size; ++i) {
......@@ -76,9 +63,9 @@ std::string LoDToString(const LoD &lod) {
 LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin,
                  size_t elem_end) {
-  //  PADDLE_ENFORCE_LT(level, in.size());
-  //  PADDLE_ENFORCE_LT(elem_end, in[level].size());
+  PADDLE_MOBILE_ENFORCE(level < in.size(), "level should >= in.size()");
+  PADDLE_MOBILE_ENFORCE(elem_end < in[level].size(),
+                        "elem_end >= in[level].size()");
   LoD res;
   res.resize(in.size() - level);
   // copy the first level
......@@ -211,8 +198,9 @@ LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx,
   LoD sub_lod;
   for (size_t level_idx = start_level; level_idx < lod.size(); ++level_idx) {
-    //    PADDLE_ENFORCE_LE(start_idx, end_idx);
-    //    PADDLE_ENFORCE_LT(end_idx, lod[level_idx].size());
+    PADDLE_MOBILE_ENFORCE(start_idx <= end_idx, "start_idx > end_idx");
+    PADDLE_MOBILE_ENFORCE(end_idx < lod[level_idx].size(),
+                          "end_idx >= lod[level_idx].size()");
     std::vector<size_t> level_lens;
     for (size_t i = start_idx; i < end_idx; ++i) {
       level_lens.push_back(lod[level_idx][i + 1] - lod[level_idx][i]);
......@@ -226,10 +214,9 @@ LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx,
 }

 void AppendLoD(LoD *lod, const LoD &lod_length) {
-  //  PADDLE_ENFORCE(
-  //      lod->empty() || lod->size() == lod_length.size(),
-  //      "The lod_length should has the same size with the appended
-  //      lod.");
+  PADDLE_MOBILE_ENFORCE(
+      lod->empty() || lod->size() == lod_length.size(),
+      "The lod_length should has the same size with the appended lod.");
   if (lod->empty()) {
     for (size_t i = 0; i < lod_length.size(); ++i) {
       lod->emplace_back(1, 0);  // size = 1, value = 0;
......
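
Across the lod_tensor.cc hunks the pattern is the same: commented-out `PADDLE_ENFORCE*` calls become live `PADDLE_MOBILE_ENFORCE` checks, so violated preconditions now fail loudly instead of being ignored. The real macro lives in common/enforce.h; a rough guess at its general shape (an assumption, not a copy):

#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for PADDLE_MOBILE_ENFORCE: printf-style message,
// abort on failure. Uses the GNU ##__VA_ARGS__ extension (GCC/Clang).
#define MY_ENFORCE(cond, fmt, ...)                                      \
  do {                                                                  \
    if (!(cond)) {                                                      \
      std::fprintf(stderr, "Enforce failed: " fmt "\n", ##__VA_ARGS__); \
      std::abort();                                                     \
    }                                                                   \
  } while (0)

int main() {
  int level = 1, size = 4;
  MY_ENFORCE(level < size, "level %d should be < size %d", level, size);
  return 0;
}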
......@@ -25,9 +25,8 @@ template <typename Dtype>
 struct OpInfo {
   OpCreator<Dtype> creator_;
   const OpCreator<Dtype> &Creator() const {
-    //    PADDLE_ENFORCE_NOT_NULL(creator_,
-    //                            "Operator Creator has not been
-    //                            registered");
+    PADDLE_MOBILE_ENFORCE(creator_ != nullptr,
+                          "Operator Creator has not been registered");
     return creator_;
   }
 };
......@@ -48,17 +47,15 @@ class OpInfoMap {
   }

   void Insert(const std::string &type, const OpInfo<Dtype> &info) {
-    //    PADDLE_ENFORCE(!Has(type), "Operator %s has been
-    //    registered", type);
+    PADDLE_MOBILE_ENFORCE(!Has(type), "Operator %s has been registered",
+                          type.c_str());
     map_.insert({type, info});
   }

   const OpInfo<Dtype> &Get(const std::string &type) const {
     auto op_info_ptr = GetNullable(type);
-    //    PADDLE_ENFORCE_NOT_NULL(op_info_ptr, "Operator %s has not
-    //    been
-    //    registered",
-    //                            type);
+    PADDLE_MOBILE_ENFORCE(op_info_ptr != nullptr,
+                          "Operator %s has not been registered", type.c_str());
     return *op_info_ptr;
   }
......
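
`OpInfoMap` is a name-to-creator registry: `Insert` rejects duplicate registrations and `Get` rejects unknown operators, both now enforced instead of commented out. A stripped-down sketch of the pattern (illustrative only, not the templated original):

#include <cassert>
#include <functional>
#include <string>
#include <unordered_map>

struct OpRegistry {
  using Creator = std::function<int()>;  // stand-in for OpCreator<Dtype>

  bool Has(const std::string &type) const { return map_.count(type) != 0; }

  void Insert(const std::string &type, Creator c) {
    assert(!Has(type) && "operator already registered");
    map_.emplace(type, std::move(c));
  }

  const Creator &Get(const std::string &type) const {
    auto it = map_.find(type);
    assert(it != map_.end() && "operator not registered");
    return it->second;
  }

  std::unordered_map<std::string, Creator> map_;
};

int main() {
  OpRegistry reg;
  reg.Insert("conv2d", [] { return 42; });
  return reg.Get("conv2d")() == 42 ? 0 : 1;
}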
......@@ -76,7 +76,6 @@ void Scope::DeleteScope(Scope *scope) const {
   auto it = std::find(kids_.begin(), kids_.end(), scope);
   kids_.erase(it);
   delete scope;
-  // deferent
 }

 void Scope::EraseVars(const std::vector<std::string> &var_names) {
......@@ -104,14 +103,6 @@ void Scope::Rename(const std::string &origin_name,
   vars_[new_name] = origin_it->second;
   vars_.erase(origin_it);
 }
-//
-//  std::string Scope::Rename(const std::string& origin_name)
-//  const {
-//    auto var_name = string::Sprintf("%p.%d", this,
-//    vars_.size());
-//    Rename(origin_name, var_name);
-//    return var_name;
-//  }
 Variable *Scope::FindVarLocally(const std::string &name) const {
   auto it = vars_.find(name);
......
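
For orientation on the scope.cc hunks: `Scope` forms a tree in which `FindVarLocally` checks only the current scope while the full lookup walks parent scopes, and `DeleteScope` first detaches the child from `kids_` before freeing it. A toy version of that lookup chain (simplified, not the real class):

#include <string>
#include <unordered_map>

struct Var {
  int value = 0;
};

class Scope {
 public:
  explicit Scope(Scope *parent = nullptr) : parent_(parent) {}

  Var *FindVarLocally(const std::string &name) {
    auto it = vars_.find(name);
    return it == vars_.end() ? nullptr : &it->second;
  }

  // Nearest enclosing scope wins: local map first, then the parent chain.
  Var *FindVar(const std::string &name) {
    if (Var *v = FindVarLocally(name)) return v;
    return parent_ ? parent_->FindVar(name) : nullptr;
  }

  std::unordered_map<std::string, Var> vars_;

 private:
  Scope *parent_;
};

int main() {
  Scope root;
  root.vars_["w"].value = 3;
  Scope child(&root);
  return child.FindVar("w")->value == 3 ? 0 : 1;
}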
......@@ -14,7 +14,6 @@ limitations under the License. */
#include "operators/conv_op.h"
#include <vector>
#include "framework/data_type.h"
#include "framework/op_proto_maker.h"
#include "framework/op_registry.h"
......
......@@ -14,7 +14,6 @@ limitations under the License. */
#include "operators/depthwise_conv_op.h"
#include <vector>
#include "framework/data_type.h"
#include "framework/op_proto_maker.h"
#include "framework/op_registry.h"
#include "operators/conv_op.h"
......
......@@ -25,35 +25,21 @@ using framework::Tensor;
 void sigmoid(const Tensor *X, Tensor *Y) {
 #if __ARM_NEON
-  DLOG << "step1";
   const float *input = X->data<float>();
-  DLOG << "step11";
   float *output = Y->mutable_data<float>();
-  DLOG << "step2";
   const DDim &dDim = X->dims();
-  DLOG << "step3";
   int axis_index = 1;
   if (dDim.size() < 4) {
     axis_index = 0;
   }
-  DLOG << "step4";
   DDim outer_ddim =
       paddle_mobile::framework::slice_ddim(dDim, 0, axis_index + 1);
   DDim inner_ddim =
       paddle_mobile::framework::slice_ddim(dDim, axis_index + 1, dDim.size());
-  DLOG << "step5";
   int out_size = paddle_mobile::framework::product(outer_ddim);
   int inner_size = paddle_mobile::framework::product(inner_ddim);
-  DLOG << "step6";
 #pragma omp parallel for
-  DLOG << "outsize=" << out_size;
-  DLOG << "innersize=" << inner_size;
   for (int i = 0; i < out_size; ++i) {
     const float *input_outer_ptr = input + i * inner_size;
     float *output_outer_ptr = output + i * inner_size;
......
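
With the step-by-step DLOGs gone, the kernel's structure is clearer: the dims are split at `axis_index` into an outer extent (`out_size`, the loop the OpenMP pragma parallelizes) and an inner extent (`inner_size`), and sigmoid(x) = 1 / (1 + e^-x) is applied to each inner slice. A scalar reference version of that loop structure (no NEON intrinsics, illustrative only):

#include <cmath>
#include <cstdint>
#include <vector>

// Scalar reference: out_size rows of inner_size elements each,
// sigmoid applied elementwise to every row.
void sigmoid_ref(const float *input, float *output, int64_t out_size,
                 int64_t inner_size) {
  for (int64_t i = 0; i < out_size; ++i) {
    const float *in_row = input + i * inner_size;
    float *out_row = output + i * inner_size;
    for (int64_t j = 0; j < inner_size; ++j) {
      out_row[j] = 1.0f / (1.0f + std::exp(-in_row[j]));
    }
  }
}

int main() {
  std::vector<float> x = {0.0f, 1.0f, -1.0f, 2.0f};
  std::vector<float> y(x.size());
  sigmoid_ref(x.data(), y.data(), /*out_size=*/2, /*inner_size=*/2);
  return 0;
}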