Unverified  Commit 230729e2  authored by WangLiu, committed by GitHub

Merge pull request #360 from cocodark/develop

remove unused code
...
@@ -17,14 +17,8 @@ limitations under the License. */
 namespace paddle_mobile {
 namespace framework {
 
-/*
- * Variant<int, float, std::string, std::vector<int>, std::vector<float>,
-   std::vector<std::string>, bool, std::vector<bool>, BlockDesc *,
-   int64_t>
- * */
 struct PrintVistor : Vistor<Print &> {
-  PrintVistor(Print &printer) : printer_(printer) {}
+  explicit PrintVistor(Print &printer) : printer_(printer) {}
   template <typename T>
   Print &operator()(const T &value) {
     printer_ << value;
...
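Review note: besides dropping the stale `Variant` comment, this hunk marks the single-argument constructor `explicit`, so a `Print` can no longer convert silently into a `PrintVistor`. A minimal standalone sketch of the same visitor shape, using `std::variant` instead of the project's own `Variant` purely for illustration:

```cpp
// Sketch only: shows why the visitor returns the printer by reference and
// why the constructor is explicit. Not paddle-mobile's actual Variant.
#include <iostream>
#include <string>
#include <variant>

struct Printer {
  template <typename T>
  Printer &operator<<(const T &v) {
    std::cout << v << ' ';
    return *this;  // returned by reference so calls can chain
  }
};

struct PrintVisitor {
  // explicit: forbids an accidental implicit Printer -> visitor conversion
  explicit PrintVisitor(Printer &p) : printer_(p) {}
  template <typename T>
  Printer &operator()(const T &value) {
    return printer_ << value;  // same return type for every alternative
  }
  Printer &printer_;
};

int main() {
  Printer p;
  std::variant<int, float, std::string> v = std::string("attr");
  std::visit(PrintVisitor{p}, v);  // prints "attr"
  v = 42;
  std::visit(PrintVisitor{p}, v);  // prints 42
}
```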
...
@@ -14,7 +14,9 @@ limitations under the License. */
 #pragma once
 
+#include <string>
 #include <unordered_map>
+#include <vector>
 
 #include "common/enforce.h"
 #include "common/log.h"
 #include "common/variant.h"
...
@@ -22,28 +24,15 @@ limitations under the License. */
 namespace paddle_mobile {
 namespace framework {
 
+using std::string;
+using std::vector;
 
 class BlockDesc;
 
 class Attribute {
  public:
-  /*
-   * PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__INT = 0,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__FLOAT = 1,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__STRING = 2,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__INTS = 3,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__FLOATS = 4,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__STRINGS = 5,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEAN = 6,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEANS = 7,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BLOCK = 8,
-     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__LONG = 9
-     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE)
-   * */
   static Attribute GetAttrValue(
       PaddleMobile__Framework__Proto__OpDesc__Attr *attr_desc) {
-    // std::cout << "begin get attr value" << std::endl;
     Attribute attr;
     switch (attr_desc->type) {
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEAN: {
@@ -63,35 +52,35 @@ class Attribute {
         break;
       }
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEANS: {
-        std::vector<bool> val(attr_desc->n_bools);
+        vector<bool> val(attr_desc->n_bools);
         for (int i = 0; i < attr_desc->n_bools; ++i) {
           val[i] = attr_desc->bools[i];
         }
-        attr.Set<std::vector<bool>>(val);
+        attr.Set<vector<bool>>(val);
         break;
       }
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__INTS: {
-        std::vector<int> val(attr_desc->n_ints);
+        vector<int> val(attr_desc->n_ints);
         for (int i = 0; i < attr_desc->n_ints; ++i) {
           val[i] = attr_desc->ints[i];
         }
-        attr.Set<std::vector<int>>(val);
+        attr.Set<vector<int>>(val);
         break;
       }
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__FLOATS: {
-        std::vector<float> val(attr_desc->n_floats);
+        vector<float> val(attr_desc->n_floats);
         for (int i = 0; i < attr_desc->n_floats; ++i) {
           val[i] = attr_desc->floats[i];
         }
-        attr.Set<std::vector<float>>(val);
+        attr.Set<vector<float>>(val);
         break;
       }
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__STRINGS: {
-        std::vector<std::string> val(attr_desc->n_strings);
+        vector<string> val(attr_desc->n_strings);
         for (int i = 0; i < attr_desc->n_strings; ++i) {
           val[i] = attr_desc->strings[i];
         }
-        attr.Set<std::vector<std::string>>(val);
+        attr.Set<vector<string>>(val);
         break;
       }
       case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__LONG: {
@@ -122,21 +111,18 @@ class Attribute {
       return vistor(attr.variant_.Get<int>());
     } else if (attr.variant_.TypeId() == typeid(float).hash_code()) {
       return vistor(attr.variant_.Get<float>());
-    } else if (attr.variant_.TypeId() == typeid(std::string).hash_code()) {
-      return vistor(attr.variant_.Get<std::string>());
-    } else if (attr.variant_.TypeId() == typeid(std::vector<int>).hash_code()) {
-      return vistor(attr.variant_.Get<std::vector<int>>());
-    } else if (attr.variant_.TypeId() ==
-               typeid(std::vector<float>).hash_code()) {
-      return vistor(attr.variant_.Get<std::vector<float>>());
-    } else if (attr.variant_.TypeId() ==
-               typeid(std::vector<std::string>).hash_code()) {
-      return vistor(attr.variant_.Get<std::vector<std::string>>());
+    } else if (attr.variant_.TypeId() == typeid(string).hash_code()) {
+      return vistor(attr.variant_.Get<string>());
+    } else if (attr.variant_.TypeId() == typeid(vector<int>).hash_code()) {
+      return vistor(attr.variant_.Get<vector<int>>());
+    } else if (attr.variant_.TypeId() == typeid(vector<float>).hash_code()) {
+      return vistor(attr.variant_.Get<vector<float>>());
+    } else if (attr.variant_.TypeId() == typeid(vector<string>).hash_code()) {
+      return vistor(attr.variant_.Get<vector<string>>());
     } else if (attr.variant_.TypeId() == typeid(bool).hash_code()) {
       return vistor(attr.variant_.Get<bool>());
-    } else if (attr.variant_.TypeId() ==
-               typeid(std::vector<bool>).hash_code()) {
-      return vistor(attr.variant_.Get<std::vector<bool>>());
+    } else if (attr.variant_.TypeId() == typeid(vector<bool>).hash_code()) {
+      return vistor(attr.variant_.Get<vector<bool>>());
     } else if (attr.variant_.TypeId() == typeid(int64_t).hash_code()) {
       return vistor(attr.variant_.Get<int64_t>());
     } else {
@@ -145,24 +131,21 @@ class Attribute {
   }
 
  private:
-  Variant<int, float, std::string, std::vector<int>, std::vector<float>,
-          std::vector<std::string>, bool, std::vector<bool>, BlockDesc *,
-          int64_t>
+  Variant<int, float, string, vector<int>, vector<float>, vector<string>, bool,
+          vector<bool>, BlockDesc *, int64_t>
       variant_;
 };
 
-using AttributeMap = std::unordered_map<std::string, Attribute>;
+using AttributeMap = std::unordered_map<string, Attribute>;
 
 class AttrReader {
  public:
   explicit AttrReader(const AttributeMap &attrs) : attrs_(attrs) {}
 
   template <typename T>
-  inline T Get(const std::string &name) const {
-    // PADDLE_ENFORCE(attrs_.count(name) != 0,
-    //                "%s should be in AttributeMap", name);
+  inline T Get(const string &name) const {
+    PADDLE_MOBILE_ENFORCE(attrs_.count(name) != 0,
+                          "%s should be in AttributeMap", name);
     return ((Attribute)attrs_.at(name)).Get<T>();
   }
...
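Review note: with `using std::string` / `using std::vector` hoisted to namespace scope, each branch of the `TypeId()` dispatch now fits on one line, and the previously commented-out presence check becomes a real `PADDLE_MOBILE_ENFORCE`. A hedged usage sketch of the reader side (it assumes `Attribute::Set<T>` stores a value the same way `GetAttrValue` does, and the include path and attribute names here are illustrative):

```cpp
#include <string>
#include <vector>
#include "framework/attribute.h"  // the header changed in this diff; path assumed

using paddle_mobile::framework::AttrReader;
using paddle_mobile::framework::AttributeMap;

void ReadAttrsExample() {
  AttributeMap attrs;
  attrs["axis"].Set<int>(1);                        // hypothetical attribute names
  attrs["paddings"].Set<std::vector<int>>({1, 1});

  AttrReader reader(attrs);
  int axis = reader.Get<int>("axis");               // enforce fires if the key is absent,
  std::vector<int> pads =                           // then Get<T> dispatches on
      reader.Get<std::vector<int>>("paddings");     // typeid(T).hash_code()
}
```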
...
@@ -54,7 +54,6 @@ inline std::string DataLayoutToString(const DataLayout &data_layout) {
       return "ANY_LAYOUT";
     default:
       break;
-      // std::cout << "unknown DataLayou %d", data_layout;
   }
 }
...
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "framework/data_transform.h"
namespace paddle_mobile {
namespace framework {
static void PassTensorData(Tensor *from, Tensor *to) {
to->ShareDataWith(*from);
*from = Tensor();
}
void DataTransform(const OpKernelType &expected_kernel_type,
const OpKernelType &kernel_type_for_var,
const Tensor &input_tensor, Tensor *output_tensor) {
bool transformed = false;
Tensor in;
in.ShareDataWith(input_tensor);
Tensor out;
// // do layout transform
// if (NeedTransformLayout(expected_kernel_type.data_layout_,
// kernel_type_for_var.data_layout_)) {
// TransDataLayout(kernel_type_for_var, expected_kernel_type, in,
// &out);
// transformed = true;
// PassTensorData(&out, &in);
// }
//
// // do data type transform
// if (expected_kernel_type.data_type_ !=
// kernel_type_for_var.data_type_) {
// TransDataType(kernel_type_for_var, expected_kernel_type, in,
// &out);
// transformed = true;
// PassTensorData(&out, &in);
// }
//
// // do device transform
// if (!platform::is_same_place(kernel_type_for_var.place_,
// expected_kernel_type.place_)) {
// TransDataDevice(in, expected_kernel_type.place_, &out);
// transformed = true;
// PassTensorData(&out, &in);
// }
//
// PADDLE_ENFORCE(transformed, "No transform is applied, please
// check!");
// get output data
output_tensor->ShareDataWith(in);
}
void CopyVariableWithTensor(const Variable &in_var, const Tensor &tensor,
Variable *out_var) {
// if (in_var.IsType<LoDTensor>()) {
// auto& in_lod_tensor = in_var.Get<LoDTensor>();
// auto* tran_lod_tensor = out_var.GetMutable<LoDTensor>();
// tran_lod_tensor->set_lod(in_lod_tensor.lod());
// tran_lod_tensor->set_layout(in_lod_tensor.layout());
// tran_lod_tensor->ShareDataWith(tensor);
// } else if (in_var.IsType<SelectedRows>()) {
// auto& in_selected_rows = in_var.Get<SelectedRows>();
// auto* trans_selected_rows =
// out_var.GetMutable<SelectedRows>();
// trans_selected_rows->set_height(in_selected_rows.height());
// trans_selected_rows->set_rows(in_selected_rows.rows());
// trans_selected_rows->mutable_value()->ShareDataWith(tensor);
// } else {
// PADDLE_THROW("unknown var type");
// }
}
} // namespace framework
} // namespace paddle_mobile
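Note that with every transform branch commented out, `DataTransform` is effectively a zero-copy pass-through: `transformed` is initialized to `false` and never consulted. The net behaviour, written out as a sketch using only `Tensor::ShareDataWith` as it appears above:

```cpp
// Effective behaviour of DataTransform while the layout/dtype/device
// branches stay commented out: the output simply aliases the input buffer.
void EffectiveDataTransform(const Tensor &input_tensor, Tensor *output_tensor) {
  Tensor in;
  in.ShareDataWith(input_tensor);    // no copy, shared storage
  output_tensor->ShareDataWith(in);  // output still points at the original data
}
```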
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
namespace paddle_mobile {
namespace framework {
// inline proto::VarType::Type ToDataType(std::type_index type) {
// using namespace paddle_mobile::framework::proto;
// if (typeid(float).hash_code() == type.hash_code()) {
// return proto::VarType::FP32;
// } else if (typeid(double).hash_code() == type.hash_code()) {
// return proto::VarType::FP64;
// } else if (typeid(int).hash_code() == type.hash_code()) {
// return proto::VarType::INT32;
// } else if (typeid(int64_t).hash_code() == type.hash_code()) {
// return proto::VarType::INT64;
// } else if (typeid(bool).hash_code() == type.hash_code()) {
// return proto::VarType::BOOL;
// } else {
//// PADDLE_THROW("Not supported");
// }
// }
} // namespace framework
} // namespace paddle_mobile
...
@@ -183,7 +183,7 @@ DDim DDim::operator*(DDim d) const {
 int64_t get(const DDim &ddim, int idx) { return ddim[idx]; }
 
-void set(DDim &ddim, int idx, int value) { ddim[idx] = value; }
+void set(DDim *ddim, int idx, int value) { (*ddim)[idx] = value; }
 
 /// @cond HIDDEN
 struct VectorizeVisitor : Vistor<void> {
...
...
@@ -83,17 +83,6 @@ struct DDim {
   int64_t operator[](int idx) const;
 
-  // template <typename Visitor>
-  // typename Visitor::result_type apply_visitor(Visitor& visitor) {
-  //   return var.apply_visitor(visitor);
-  // }
-  //
-  // template <typename Visitor>
-  // typename Visitor::result_type apply_visitor(Visitor& visitor) const {
-  //   return var.apply_visitor(visitor);
-  // }
 
   DDimVar getVar() { return var; }
 
   bool operator==(DDim d) const;
@@ -126,7 +115,7 @@ DDim make_ddim(std::initializer_list<int64_t> dims);
 int64_t get(const DDim &dim, int idx);
-void set(DDim &dim, int idx, int val);
+void set(DDim *dim, int idx, int val);
 
 std::vector<int64_t> vectorize(const DDim &ddim);
...
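Review note: switching `set` from `DDim &` to `DDim *` follows the Google C++ style used elsewhere in the repo: mutated parameters are passed by pointer so the mutation is visible at the call site. Impact on callers, sketched with `make_ddim` and `get` as declared above (include path assumed):

```cpp
#include <cassert>
#include "framework/ddim.h"  // header changed in this diff; path assumed

using paddle_mobile::framework::DDim;

int main() {
  DDim d = paddle_mobile::framework::make_ddim({2, 3, 4});
  paddle_mobile::framework::set(&d, 1, 5);  // was: set(d, 1, 5); &d now signals mutation
  assert(paddle_mobile::framework::get(d, 1) == 5);
  return 0;
}
```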
...
@@ -42,23 +42,10 @@ std::ostream &operator<<(std::ostream &os, const LoD &lod) {
 }
 
 std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
-  // PADDLE_ENFORCE(t.type().hash_code() == typeid(float).hash_code());
-
-  // if (!platform::is_cpu_place(t.place())) {
-  //   LoDTensor tt;
-  //   framework::TensorCopy(t, platform::CPUPlace(), &tt);
-  //   platform::DeviceContextPool &pool =
-  //       platform::DeviceContextPool::Instance();
-  //   auto &dev_ctx = *pool.Get(t.place());
-  //   dev_ctx.Wait();
-  //
-  //   os << tt;
-  //   return os;
-  // }
+  PADDLE_MOBILE_ENFORCE(t.type().hash_code() == typeid(float).hash_code(),
+                        "t.type() is not float");
 
   os << "dim: " << t.dims() << "\n";
   os << "lod: " << t.lod() << "\n";
 
   // only print first ten elements
   int64_t size = t.numel() < 10 ? t.numel() : 10;
   for (int64_t i = 0; i < size; ++i) {
@@ -76,9 +63,9 @@ std::string LoDToString(const LoD &lod) {
 LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin,
                  size_t elem_end) {
-  // PADDLE_ENFORCE_LT(level, in.size());
-  // PADDLE_ENFORCE_LT(elem_end, in[level].size());
+  PADDLE_MOBILE_ENFORCE(level < in.size(), "level should be < in.size()");
+  PADDLE_MOBILE_ENFORCE(elem_end < in[level].size(),
+                        "elem_end should be < in[level].size()");
 
   LoD res;
   res.resize(in.size() - level);
   // copy the first level
@@ -211,8 +198,9 @@ LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx,
   LoD sub_lod;
 
   for (size_t level_idx = start_level; level_idx < lod.size(); ++level_idx) {
-    // PADDLE_ENFORCE_LE(start_idx, end_idx);
-    // PADDLE_ENFORCE_LT(end_idx, lod[level_idx].size());
+    PADDLE_MOBILE_ENFORCE(start_idx <= end_idx, "start_idx > end_idx");
+    PADDLE_MOBILE_ENFORCE(end_idx < lod[level_idx].size(),
+                          "end_idx >= lod[level_idx].size()");
     std::vector<size_t> level_lens;
     for (size_t i = start_idx; i < end_idx; ++i) {
       level_lens.push_back(lod[level_idx][i + 1] - lod[level_idx][i]);
@@ -226,10 +214,9 @@ LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx,
 }
 
 void AppendLoD(LoD *lod, const LoD &lod_length) {
-  // PADDLE_ENFORCE(
-  //     lod->empty() || lod->size() == lod_length.size(),
-  //     "The lod_length should has the same size with the appended lod.");
+  PADDLE_MOBILE_ENFORCE(
+      lod->empty() || lod->size() == lod_length.size(),
+      "The lod_length should have the same size as the appended lod.");
   if (lod->empty()) {
     for (size_t i = 0; i < lod_length.size(); ++i) {
       lod->emplace_back(1, 0);  // size = 1, value = 0;
...
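Review note: this file swaps the commented-out upstream `PADDLE_ENFORCE_*` calls for the project's own `PADDLE_MOBILE_ENFORCE(cond, fmt, ...)` from common/enforce.h (the messages above have also been tightened to state the condition that must hold). For readers unfamiliar with the macro, a hedged sketch of the usual shape of such a check — not the project's exact definition:

```cpp
// Sketch of a typical ENFORCE-style macro: evaluate the condition, and on
// failure format a printf-style message and throw. The real macro in
// common/enforce.h may differ in buffer size, exception type, and details.
#include <cstdio>
#include <stdexcept>

#define MY_ENFORCE(cond, fmt, ...)                                      \
  do {                                                                  \
    if (!(cond)) {                                                      \
      char buf_[256];                                                   \
      /* ##__VA_ARGS__ is a GCC/Clang extension for empty arg lists */  \
      std::snprintf(buf_, sizeof(buf_), fmt, ##__VA_ARGS__);            \
      throw std::runtime_error(buf_);                                   \
    }                                                                   \
  } while (0)
```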
...
@@ -25,9 +25,8 @@ template <typename Dtype>
 struct OpInfo {
   OpCreator<Dtype> creator_;
   const OpCreator<Dtype> &Creator() const {
-    // PADDLE_ENFORCE_NOT_NULL(creator_,
-    //                         "Operator Creator has not been registered");
+    PADDLE_MOBILE_ENFORCE(creator_ != nullptr,
+                          "Operator Creator has not been registered");
     return creator_;
   }
 };
@@ -48,17 +47,15 @@ class OpInfoMap {
   }
 
   void Insert(const std::string &type, const OpInfo<Dtype> &info) {
-    // PADDLE_ENFORCE(!Has(type), "Operator %s has been registered", type);
+    PADDLE_MOBILE_ENFORCE(!Has(type), "Operator %s has been registered",
+                          type.c_str());
     map_.insert({type, info});
   }
 
   const OpInfo<Dtype> &Get(const std::string &type) const {
     auto op_info_ptr = GetNullable(type);
-    // PADDLE_ENFORCE_NOT_NULL(op_info_ptr,
-    //                         "Operator %s has not been registered", type);
+    PADDLE_MOBILE_ENFORCE(op_info_ptr != nullptr,
+                          "Operator %s has not been registered", type.c_str());
     return *op_info_ptr;
   }
...
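Review note: beyond un-commenting the checks, the diff adds `type.c_str()`. `PADDLE_MOBILE_ENFORCE` formats its message printf-style, and passing a `std::string` through a variadic `...` for a `%s` specifier is undefined behaviour. A two-line illustration with plain `printf`:

```cpp
#include <cstdio>
#include <string>

int main() {
  std::string type = "conv2d";
  std::printf("Operator %s has been registered\n", type.c_str());  // OK
  // std::printf("Operator %s has been registered\n", type);       // UB: %s needs a C string
  return 0;
}
```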
...
@@ -76,7 +76,6 @@ void Scope::DeleteScope(Scope *scope) const {
   auto it = std::find(kids_.begin(), kids_.end(), scope);
   kids_.erase(it);
   delete scope;
-  // deferent
 }
 
 void Scope::EraseVars(const std::vector<std::string> &var_names) {
@@ -104,14 +103,6 @@ void Scope::Rename(const std::string &origin_name,
   vars_[new_name] = origin_it->second;
   vars_.erase(origin_it);
 }
-
-// std::string Scope::Rename(const std::string& origin_name) const {
-//   auto var_name = string::Sprintf("%p.%d", this, vars_.size());
-//   Rename(origin_name, var_name);
-//   return var_name;
-// }
 
 Variable *Scope::FindVarLocally(const std::string &name) const {
   auto it = vars_.find(name);
...
...
@@ -14,7 +14,6 @@ limitations under the License. */
 #include "operators/conv_op.h"
 #include <vector>
-#include "framework/data_type.h"
 #include "framework/op_proto_maker.h"
 #include "framework/op_registry.h"
...
...
@@ -14,7 +14,6 @@ limitations under the License. */
 #include "operators/depthwise_conv_op.h"
 #include <vector>
-#include "framework/data_type.h"
 #include "framework/op_proto_maker.h"
 #include "framework/op_registry.h"
 #include "operators/conv_op.h"
...
...
@@ -25,35 +25,21 @@ using framework::Tensor;
 void sigmoid(const Tensor *X, Tensor *Y) {
 #if __ARM_NEON
-  DLOG << "step1";
   const float *input = X->data<float>();
-  DLOG << "step11";
   float *output = Y->mutable_data<float>();
-  DLOG << "step2";
   const DDim &dDim = X->dims();
-  DLOG << "step3";
   int axis_index = 1;
   if (dDim.size() < 4) {
     axis_index = 0;
   }
-  DLOG << "step4";
   DDim outer_ddim =
       paddle_mobile::framework::slice_ddim(dDim, 0, axis_index + 1);
   DDim inner_ddim =
       paddle_mobile::framework::slice_ddim(dDim, axis_index + 1, dDim.size());
-  DLOG << "step5";
   int out_size = paddle_mobile::framework::product(outer_ddim);
   int inner_size = paddle_mobile::framework::product(inner_ddim);
-  DLOG << "step6";
 
 #pragma omp parallel for
-  DLOG << "outsize=" << out_size;
-  DLOG << "innersize=" << inner_size;
   for (int i = 0; i < out_size; ++i) {
     const float *input_outer_ptr = input + i * inner_size;
     float *output_outer_ptr = output + i * inner_size;
...
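Review note: deleting the `DLOG` lines between `#pragma omp parallel for` and the loop is not just cleanup. OpenMP requires the pragma to be followed directly by the `for` statement, so the old code could not build with `-fopenmp`. A scalar reference of the kernel's loop structure (NEON intrinsics omitted; sigmoid(x) = 1 / (1 + e^-x)):

```cpp
// Scalar sketch of the sigmoid kernel's outer/inner loop split: out_size
// rows of inner_size contiguous elements, parallelized over the outer loop.
#include <cmath>

void sigmoid_ref(const float *input, float *output, int out_size,
                 int inner_size) {
#pragma omp parallel for  // must immediately precede the for statement
  for (int i = 0; i < out_size; ++i) {
    const float *in_ptr = input + i * inner_size;
    float *out_ptr = output + i * inner_size;
    for (int j = 0; j < inner_size; ++j) {
      out_ptr[j] = 1.f / (1.f + std::exp(-in_ptr[j]));  // sigmoid(x)
    }
  }
}
```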