提交 5220e87e 编写于 作者: W wangliu

Modify the operator interface to support printing tensor arrays

上级 c84ae255
...@@ -13,11 +13,32 @@ See the License for the specific language governing permissions and ...@@ -13,11 +13,32 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "framework/operator.h" #include "framework/operator.h"
#include "framework/op_info.h" #include "operators/op_param.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace framework { namespace framework {
template <typename Dtype>
vector<string> OperatorBase<Dtype>::GetOutKeys() const {
  // Look up the registered output-key list for this operator type.
  auto it = op_input_output_key.find(type_);
  if (it == op_input_output_key.end()) {
    DLOG << type_ << " has no outputs";
    // Bug fix: the original fell through and dereferenced the end()
    // iterator (`it->second.second`), which is undefined behavior.
    // Return an empty key list instead.
    return vector<string>();
  }
  return it->second.second;
}
// Resolve the first variable name mapped to `key` in `var_map` and return
// a mutable pointer to its payload of type T.
// Returns nullptr when `key` maps to an empty name list.
// NOTE(review): `var_map.at(key)` throws std::out_of_range if `key` itself
// is absent, and `var` is dereferenced unchecked -- assumes Scope::FindVar
// never returns nullptr for a registered name; verify against Scope.
template <typename T>
static T *GetVarValue(const string &key, const VariableNameMap &var_map,
const Scope &scope) {
auto var_vec = var_map.at(key);
if (!var_vec.empty()) {
// Only the first registered name is consulted.
auto var = scope.FindVar(var_vec[0]);
return var->GetMutable<T>();
} else {
return nullptr;
}
}
template <typename Dtype> template <typename Dtype>
OperatorBase<Dtype>::OperatorBase(const std::string &type, OperatorBase<Dtype>::OperatorBase(const std::string &type,
const VariableNameMap &inputs, const VariableNameMap &inputs,
...@@ -31,9 +52,22 @@ OperatorBase<Dtype>::OperatorBase(const std::string &type, ...@@ -31,9 +52,22 @@ OperatorBase<Dtype>::OperatorBase(const std::string &type,
scope_(scope) { scope_(scope) {
CheckAllInputOutputSet(); CheckAllInputOutputSet();
} }
template <typename Dtype> template <typename Dtype>
void OperatorBase<Dtype>::CheckAllInputOutputSet() const {} void OperatorBase<Dtype>::CheckAllInputOutputSet() const {}
template <typename Dtype>
void OperatorBase<Dtype>::Run() const {
  RunImpl();
#ifdef PADDLE_MOBILE_DEBUG
  // Debug builds: dump each output tensor of this op after execution.
  vector<string> output_keys = GetOutKeys();
  // By-reference loop variable: the original `const auto key` copied every
  // std::string on each iteration.
  for (const auto &key : output_keys) {
    Tensor *out_ = GetVarValue<framework::LoDTensor>(key, outputs_, *scope_);
    // GetVarValue can return nullptr (empty name list); the original
    // dereferenced `*out_` unconditionally and would crash here.
    if (out_ != nullptr) {
      DLOG << type_ << " output- " << key << "=" << *out_;
    } else {
      DLOG << type_ << " output- " << key << " has no variable bound";
    }
  }
#endif
}
template class OperatorBase<CPU>; template class OperatorBase<CPU>;
template class OperatorWithKernel<CPU>; template class OperatorWithKernel<CPU>;
......
...@@ -36,6 +36,8 @@ limitations under the License. */ ...@@ -36,6 +36,8 @@ limitations under the License. */
namespace paddle_mobile { namespace paddle_mobile {
namespace framework { namespace framework {
using std::string;
using std::vector;
static std::unordered_map< static std::unordered_map<
std::string, std::pair<std::vector<std::string>, std::vector<std::string>>> std::string, std::pair<std::vector<std::string>, std::vector<std::string>>>
op_input_output_key = {{"conv2d", {{"Input"}, {"Output"}}}, op_input_output_key = {{"conv2d", {{"Input"}, {"Output"}}},
...@@ -57,7 +59,9 @@ class OperatorBase : PaddleMobileObject { ...@@ -57,7 +59,9 @@ class OperatorBase : PaddleMobileObject {
const VariableNameMap &outputs, const AttributeMap &attrs, const VariableNameMap &outputs, const AttributeMap &attrs,
std::shared_ptr<Scope> scope); std::shared_ptr<Scope> scope);
virtual ~OperatorBase() {} virtual ~OperatorBase() {}
virtual void Run() const = 0; void Run() const;
vector<string> GetOutKeys() const;
virtual void RunImpl() const = 0;
virtual void InferShape() const = 0; virtual void InferShape() const = 0;
const VariableNameMap &Inputs() const { return inputs_; } const VariableNameMap &Inputs() const { return inputs_; }
...@@ -88,7 +92,8 @@ class OperatorWithKernel : public OperatorBase<Dtype> { ...@@ -88,7 +92,8 @@ class OperatorWithKernel : public OperatorBase<Dtype> {
const VariableNameMap &outputs, const AttributeMap &attrs, const VariableNameMap &outputs, const AttributeMap &attrs,
std::shared_ptr<Scope> scope) std::shared_ptr<Scope> scope)
: OperatorBase<Dtype>(type, inputs, outputs, attrs, scope) {} : OperatorBase<Dtype>(type, inputs, outputs, attrs, scope) {}
virtual void Run() const = 0;
virtual void RunImpl() const = 0;
virtual void InferShape() const = 0; virtual void InferShape() const = 0;
}; };
...@@ -113,7 +118,7 @@ class FusionOpMatcher : PaddleMobileObject { ...@@ -113,7 +118,7 @@ class FusionOpMatcher : PaddleMobileObject {
virtual std::string Type() = 0; virtual std::string Type() = 0;
virtual void FolderNodes(Node &node) { virtual void FolderNodes(const Node &node) {
node.Folder(node_.Depth(), Type(), {}); node.Folder(node_.Depth(), Type(), {});
} }
......
...@@ -18,11 +18,12 @@ limitations under the License. */ ...@@ -18,11 +18,12 @@ limitations under the License. */
#include <cstdint> #include <cstdint>
#include <cstring> #include <cstring>
#include <memory> #include <memory>
#include <type_traits>
#include <typeindex> #include <typeindex>
#include <vector> #include <vector>
#include "data_layout.h" #include "framework/data_layout.h"
#include "ddim.h" #include "framework/ddim.h"
#include "memory/t_malloc.h" #include "memory/t_malloc.h"
namespace paddle_mobile { namespace paddle_mobile {
...@@ -62,8 +63,8 @@ struct SizeOfTypeFunctor<HEAD, TAIL...> { ...@@ -62,8 +63,8 @@ struct SizeOfTypeFunctor<HEAD, TAIL...> {
static inline size_t SizeOfType(std::type_index type) { static inline size_t SizeOfType(std::type_index type) {
SizeOfTypeFunctor<int, float, double, int16_t, int64_t, bool, size_t> functor; SizeOfTypeFunctor<int, float, double, int16_t, int64_t, bool, size_t> functor;
size_t size = functor(type); size_t size = functor(type);
// PADDLE_ENFORCE(size != 0UL, "Cannot get size of type %s",
// type.name()); PADDLE_MOBILE_ENFORCE(size != 0UL, "Cannot get size of type %s", type.name());
return size; return size;
} }
...@@ -72,16 +73,27 @@ class LoDTensor; ...@@ -72,16 +73,27 @@ class LoDTensor;
class Tensor { class Tensor {
public: public:
Tensor() : offset_(0) {} Tensor() : offset_(0) {}
template <typename T>
// Construct a tensor of shape `ddim`, copying its contents from a flat
// host vector. The vector length must equal the tensor's element count.
// Interface note: the vector is now taken by const reference -- the
// original took it by value and copied it once per construction; callers
// are source-compatible.
Tensor(const std::vector<T> &input, DDim ddim) : offset_(0) {
  PADDLE_MOBILE_ENFORCE(
      input.size() == framework::product(ddim),
      "input vector'length should be equal to tensor's length");
  // Allocate (or reuse) the backing buffer for the requested shape.
  auto input_ptr = mutable_data<T>(ddim);
  // size_t index: the original used `int`, mixing signed/unsigned in the
  // comparison against input.size().
  for (size_t i = 0; i < input.size(); ++i) {
    input_ptr[i] = input[i];
  }
}
/*! Return a pointer to mutable memory block. */ /*! Return a pointer to mutable memory block. */
template <typename T> template <typename T>
inline T *data() { inline T *data() {
check_memory_size(); check_memory_size();
// PADDLE_ENFORCE(std::is_same<T, void>::value || PADDLE_MOBILE_ENFORCE(
// holder_->type().hash_code() == (std::is_same<T, void>::value ||
// typeid(T).hash_code(), holder_->type().hash_code() == typeid(T).hash_code()),
// "Tensor holds the wrong type, it holds %s", "Tensor holds the wrong type, it holds %s",
// this->holder_->type().name()); this->holder_->type().name());
return reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(holder_->ptr()) + return reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
offset_); offset_);
} }
...@@ -90,11 +102,11 @@ class Tensor { ...@@ -90,11 +102,11 @@ class Tensor {
template <typename T> template <typename T>
inline const T *data() const { inline const T *data() const {
check_memory_size(); check_memory_size();
// PADDLE_ENFORCE(std::is_same<T, void>::value || PADDLE_MOBILE_ENFORCE(
// holder_->type().hash_code() == (std::is_same<T, void>::value ||
// typeid(T).hash_code(), holder_->type().hash_code() == typeid(T).hash_code()),
// "Tensor holds the wrong type, it holds %s", "Tensor holds the wrong type, it holds %s",
// this->holder_->type().name()); this->holder_->type().name());
return reinterpret_cast<const T *>( return reinterpret_cast<const T *>(
reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_); reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
...@@ -116,17 +128,11 @@ class Tensor { ...@@ -116,17 +128,11 @@ class Tensor {
if (holder_ != nullptr) { if (holder_ != nullptr) {
holder_->set_type(type); holder_->set_type(type);
} }
// PADDLE_ENFORCE_GE(numel(), 0, PADDLE_MOBILE_ENFORCE(numel() >= 0, "the Tensor'snumel must >=0.")
// "When calling this method, the Tensor's
// numel must be
// " "equal or larger than zero. " "Please
// check
// Tensor::Resize has been called first.");
int64_t size = numel() * SizeOfType(type); int64_t size = numel() * SizeOfType(type);
/* some versions of boost::variant don't have operator!= */ /* some versions of boost::variant don't have operator!= */
if (holder_ == nullptr || holder_->size() < size + offset_) { if (holder_ == nullptr || holder_->size() < size + offset_) {
holder_.reset(new PlaceholderImpl(size, type)); holder_.reset(new PlaceholderImpl(size, type));
offset_ = 0; offset_ = 0;
} }
return reinterpret_cast<void *>( return reinterpret_cast<void *>(
...@@ -179,16 +185,13 @@ class Tensor { ...@@ -179,16 +185,13 @@ class Tensor {
*/ */
inline Tensor Slice(int begin_idx, int end_idx) const { inline Tensor Slice(int begin_idx, int end_idx) const {
check_memory_size(); check_memory_size();
// PADDLE_ENFORCE_GE(begin_idx, 0, PADDLE_MOBILE_ENFORCE(begin_idx >= 0,
// "The start row index must be greater than "The start row index must be greater than 0.")
// 0."); PADDLE_MOBILE_ENFORCE(end_idx <= dims_[0],
// PADDLE_ENFORCE_LE(end_idx, dims_[0], "The end row index is "The end row index is out of bound.")
// out of PADDLE_MOBILE_ENFORCE(
// bound."); PADDLE_ENFORCE_LT( begin_idx < end_idx,
// begin_idx, end_idx, "The start row index must be lesser than the end row index")
// "The start row index must be lesser than the end row
// index.");
if (dims_[0] == 1) { if (dims_[0] == 1) {
return *this; return *this;
} else { } else {
...@@ -205,10 +208,9 @@ class Tensor { ...@@ -205,10 +208,9 @@ class Tensor {
} }
std::type_index type() const { std::type_index type() const {
// PADDLE_ENFORCE_NOT_NULL( PADDLE_MOBILE_ENFORCE(
// holder_, "Tensor not initialized yet holder_ != nullptr,
// when "Tensor not initialized yet when Tensor::type() is called.")
// Tensor::type() is called.");
return holder_->type(); return holder_->type();
} }
...@@ -221,12 +223,8 @@ class Tensor { ...@@ -221,12 +223,8 @@ class Tensor {
PADDLE_MOBILE_ENFORCE( PADDLE_MOBILE_ENFORCE(
holder_ != nullptr, holder_ != nullptr,
"Tensor holds no memory. Call Tensor::mutable_data first."); "Tensor holds no memory. Call Tensor::mutable_data first.");
PADDLE_MOBILE_ENFORCE( PADDLE_MOBILE_ENFORCE(numel() * SizeOfType(type()) <= memory_size(),
numel() * SizeOfType(type()) <= memory_size(), "Tensor's dims_ is out of bound. ");
"Tensor's dims_ is out of bound. CallTensor::mutable_data "
"first to re-allocate memory.\n"
"or maybe the required data-type mismatches the data\
already stored.");
} }
inline DataLayout layout() const { return layout_; } inline DataLayout layout() const { return layout_; }
...@@ -257,13 +255,8 @@ class Tensor { ...@@ -257,13 +255,8 @@ class Tensor {
memory::PODDeleter<uint8_t>()), memory::PODDeleter<uint8_t>()),
size_(size), size_(size),
type_(type) { type_(type) {
// PADDLE_ENFORCE_NOT_NULL(ptr_, PADDLE_MOBILE_ENFORCE(ptr_ != nullptr,
// "Insufficient %s "Insufficient memory to allocation");
// memory to allocation.",
// (is_cpu_place(place_)
// ?
// "CPU" :
// "GPU"));
} }
virtual size_t size() const { return size_; } virtual size_t size() const { return size_; }
...@@ -321,6 +314,19 @@ class Tensor { ...@@ -321,6 +314,19 @@ class Tensor {
size_t offset_; size_t offset_;
}; };
#ifdef PADDLE_MOBILE_DEBUG
// Debug-only pretty printer: logs the tensor's dims, then samples roughly
// 20 elements (every `stride`-th value) so large tensors stay readable.
// NOTE(review): the buffer is always read as float via data<float>() --
// this is wrong (or will trip data<T>()'s type check) for non-float
// tensors; confirm only float tensors are printed through this path.
inline Print &operator<<(Print &printer, const Tensor &tensor) {
printer << " dims: " << tensor.dims() << "\n";
// stride >= 1; for tensors with fewer than 20 elements every value prints.
int stride = tensor.numel() / 20;
stride = stride > 0 ? stride : 1;
for (int i = 0; i < tensor.numel(); i += stride) {
printer << tensor.data<float>()[i] << " ";
}
return printer;
}
#endif
inline Tensor ReshapeToMatrix(const Tensor &src, int num_col_dims) { inline Tensor ReshapeToMatrix(const Tensor &src, int num_col_dims) {
Tensor res; Tensor res;
res.ShareDataWith(src); res.ShareDataWith(src);
......
...@@ -12,10 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,10 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "io.h" #include "/io.h"
#include <fstream> #include <fstream>
#include <vector> #include <vector>
#include "common/enforce.h" #include "common/enforce.h"
#include "common/log.h" #include "common/log.h"
#include "framework/framework.pb-c.h" #include "framework/framework.pb-c.h"
...@@ -53,7 +52,7 @@ static size_t ReadBuffer(const char *file_name, uint8_t **out) { ...@@ -53,7 +52,7 @@ static size_t ReadBuffer(const char *file_name, uint8_t **out) {
DLOG << "model size: " << size; DLOG << "model size: " << size;
*out = (uint8_t *)malloc(size); *out = reinterpret_cast<uint8_t *>(size);
size_t cur_len = 0; size_t cur_len = 0;
size_t nread; size_t nread;
...@@ -364,7 +363,7 @@ void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc, ...@@ -364,7 +363,7 @@ void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
is.read(static_cast<char *>(memory), memory_size * type_size); is.read(static_cast<char *>(memory), memory_size * type_size);
is.close(); is.close();
}; }
template <typename Dtype, Precision P> template <typename Dtype, Precision P>
void Executor<Dtype, P>::InitMemory() { void Executor<Dtype, P>::InitMemory() {
...@@ -381,6 +380,7 @@ void Executor<Dtype, P>::InitMemory() { ...@@ -381,6 +380,7 @@ void Executor<Dtype, P>::InitMemory() {
} else { } else {
if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) { if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
auto tensor = var->template GetMutable<framework::LoDTensor>(); auto tensor = var->template GetMutable<framework::LoDTensor>();
tensor->template mutable_data<Ptype>(); tensor->template mutable_data<Ptype>();
} }
} }
...@@ -406,15 +406,7 @@ void Executor<Dtype, P>::predict(const framework::Tensor &t, int block_id) { ...@@ -406,15 +406,7 @@ void Executor<Dtype, P>::predict(const framework::Tensor &t, int block_id) {
template <typename Dtype, Precision P> template <typename Dtype, Precision P>
std::vector<typename Executor<Dtype, P>::Ptype> Executor<Dtype, P>::predict( std::vector<typename Executor<Dtype, P>::Ptype> Executor<Dtype, P>::predict(
const std::vector<Ptype> &input, const std::vector<int64_t> &dims) { const std::vector<Ptype> &input, const std::vector<int64_t> &dims) {
DLOG << "start predict: "; framework::Tensor tensor(input, framework::make_ddim(dims));
framework::LoDTensor tensor;
auto ddim = framework::make_ddim(dims);
auto input_ptr = tensor.mutable_data<Ptype>(ddim);
for (int i = 0; i < input.size(); ++i) {
input_ptr[i] = input[i];
}
predict(tensor, 0); predict(tensor, 0);
......
...@@ -12,19 +12,20 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,19 +12,20 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#pragma once
#include <string>
#include "framework/operator.h" #include "framework/operator.h"
#include "operators/kernel/batchnorm_kernel.h" #include "operators/kernel/batchnorm_kernel.h"
#include "operators/op_param.h" #include "operators/op_param.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using std::string;
using namespace framework;
template <typename DeviceType, typename T> template <typename DeviceType, typename T>
class BatchNormOp : public framework::OperatorWithKernel<DeviceType> { class BatchNormOp : public framework::OperatorWithKernel<DeviceType> {
public: public:
BatchNormOp(const std::string &type, const VariableNameMap &inputs, BatchNormOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const VariableNameMap &outputs,
const framework::AttributeMap attrs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope) std::shared_ptr<framework::Scope> scope)
...@@ -32,7 +33,7 @@ class BatchNormOp : public framework::OperatorWithKernel<DeviceType> { ...@@ -32,7 +33,7 @@ class BatchNormOp : public framework::OperatorWithKernel<DeviceType> {
scope), scope),
param_(inputs, outputs, attrs, *scope) {} param_(inputs, outputs, attrs, *scope) {}
void Run() const { void RunImpl() const {
operators::BatchNormKernel<DeviceType, T> kernel; operators::BatchNormKernel<DeviceType, T> kernel;
kernel.Compute(param_); kernel.Compute(param_);
} }
......
...@@ -36,7 +36,7 @@ class BoxCoderOp : public framework::OperatorWithKernel<DeviceType> { ...@@ -36,7 +36,7 @@ class BoxCoderOp : public framework::OperatorWithKernel<DeviceType> {
scope), scope),
param_(inputs, outputs, attrs, *scope) {} param_(inputs, outputs, attrs, *scope) {}
void Run() const { void RunImpl() const {
operators::BoxCoderKernel<DeviceType, T> kernel; operators::BoxCoderKernel<DeviceType, T> kernel;
kernel.Compute(param_); kernel.Compute(param_);
} }
......
...@@ -13,25 +13,25 @@ See the License for the specific language governing permissions and ...@@ -13,25 +13,25 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#pragma once #pragma once
#include <string>
#include "framework/operator.h" #include "framework/operator.h"
#include "operators/kernel/concat_kernel.h" #include "operators/kernel/concat_kernel.h"
#include "operators/op_param.h" #include "operators/op_param.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using std::string;
using namespace framework;
template <typename DeviceType, typename T> template <typename DeviceType, typename T>
class ConcatOp : public framework::OperatorWithKernel<DeviceType> { class ConcatOp : public framework::OperatorWithKernel<DeviceType> {
public: public:
ConcatOp(const std::string &type, const VariableNameMap &inputs, ConcatOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap attrs, const VariableNameMap &outputs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope) std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs, : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
scope), scope),
param_(inputs, outputs, attrs, *scope) {} param_(inputs, outputs, attrs, *scope) {}
void Run() const { void RunImpl() const {
operators::ConcatKernel<DeviceType, T> kernel; operators::ConcatKernel<DeviceType, T> kernel;
kernel.Compute(param_); kernel.Compute(param_);
} }
......
...@@ -14,14 +14,13 @@ limitations under the License. */ ...@@ -14,14 +14,13 @@ limitations under the License. */
#pragma once #pragma once
#include <string>
#include "framework/operator.h" #include "framework/operator.h"
#include "operators/kernel/conv_kernel.h" #include "operators/kernel/conv_kernel.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using std::string;
using namespace framework;
template <typename DeviceType, typename T> template <typename DeviceType, typename T>
class ConvOp : public framework::OperatorWithKernel<DeviceType> { class ConvOp : public framework::OperatorWithKernel<DeviceType> {
public: public:
...@@ -35,7 +34,7 @@ class ConvOp : public framework::OperatorWithKernel<DeviceType> { ...@@ -35,7 +34,7 @@ class ConvOp : public framework::OperatorWithKernel<DeviceType> {
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel; using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
void InferShape() const override; void InferShape() const override;
void Run() const { void RunImpl() const {
operators::ConvKernel<DeviceType, T> kernel; operators::ConvKernel<DeviceType, T> kernel;
kernel.Compute(param_); kernel.Compute(param_);
this->ClearVariables({"Filter", "Input"}); this->ClearVariables({"Filter", "Input"});
......
...@@ -12,19 +12,20 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,19 +12,20 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#pragma once
#include <string>
#include "framework/operator.h" #include "framework/operator.h"
#include "kernel/elementwise_add_kernel.h" #include "kernel/elementwise_add_kernel.h"
#include "op_param.h" #include "operators/op_param.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using std::string;
using namespace framework;
template <typename DeviceType, typename T> template <typename DeviceType, typename T>
class ElementwiseAddOp : public framework::OperatorWithKernel<DeviceType> { class ElementwiseAddOp : public framework::OperatorWithKernel<DeviceType> {
public: public:
ElementwiseAddOp(const std::string &type, const VariableNameMap &inputs, ElementwiseAddOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const VariableNameMap &outputs,
const framework::AttributeMap attrs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope) std::shared_ptr<framework::Scope> scope)
...@@ -32,7 +33,7 @@ class ElementwiseAddOp : public framework::OperatorWithKernel<DeviceType> { ...@@ -32,7 +33,7 @@ class ElementwiseAddOp : public framework::OperatorWithKernel<DeviceType> {
scope), scope),
param_(inputs, outputs, attrs, *scope) {} param_(inputs, outputs, attrs, *scope) {}
void Run() const { void RunImpl() const {
operators::ElementwiseAddKernel<DeviceType, T> kernel; operators::ElementwiseAddKernel<DeviceType, T> kernel;
kernel.Compute(param_); kernel.Compute(param_);
} }
......
...@@ -14,22 +14,23 @@ limitations under the License. */ ...@@ -14,22 +14,23 @@ limitations under the License. */
#pragma once #pragma once
#include <string>
#include "framework/operator.h" #include "framework/operator.h"
#include "operators/op_param.h" #include "operators/op_param.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using std::string;
template <typename DeviceType, typename T> template <typename DeviceType, typename T>
class FeedOp : public framework::OperatorBase<DeviceType> { class FeedOp : public framework::OperatorBase<DeviceType> {
public: public:
FeedOp(const std::string &type, const VariableNameMap &inputs, FeedOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap attrs, const VariableNameMap &outputs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope) std::shared_ptr<framework::Scope> scope)
: framework::OperatorBase<DeviceType>(type, inputs, outputs, attrs, : framework::OperatorBase<DeviceType>(type, inputs, outputs, attrs,
scope), scope),
param_(inputs, outputs, attrs, *scope) {} param_(inputs, outputs, attrs, *scope) {}
void Run() const { param_.Out()->ShareDataWith(*param_.InputX()); } void RunImpl() const { param_.Out()->ShareDataWith(*param_.InputX()); }
void InferShape() const { void InferShape() const {
auto out_dims = param_.Out()->dims(); auto out_dims = param_.Out()->dims();
......
...@@ -14,27 +14,24 @@ limitations under the License. */ ...@@ -14,27 +14,24 @@ limitations under the License. */
#pragma once #pragma once
#include <string>
#include "framework/operator.h" #include "framework/operator.h"
#include "operators/op_param.h" #include "operators/op_param.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using std::string;
template <typename DeviceType, typename T> template <typename DeviceType, typename T>
class FetchOp : public framework::OperatorBase<DeviceType> { class FetchOp : public framework::OperatorBase<DeviceType> {
public: public:
FetchOp(const std::string &type, const VariableNameMap &inputs, FetchOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap attrs, const VariableNameMap &outputs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope) std::shared_ptr<framework::Scope> scope)
: framework::OperatorBase<DeviceType>(type, inputs, outputs, attrs, : framework::OperatorBase<DeviceType>(type, inputs, outputs, attrs,
scope), scope),
param_(inputs, outputs, attrs, *scope) {} param_(inputs, outputs, attrs, *scope) {}
void Run() const { void RunImpl() const { param_.Out()->ShareDataWith(*param_.InputX()); }
param_.Out()->ShareDataWith(*param_.InputX());
for (int i = 0; i < param_.Out()->numel(); ++i) {
DLOG << param_.Out()->template data<float>()[i];
}
}
void InferShape() const { void InferShape() const {
auto x_dims = param_.InputX()->dims(); auto x_dims = param_.InputX()->dims();
......
...@@ -15,6 +15,7 @@ limitations under the License. */ ...@@ -15,6 +15,7 @@ limitations under the License. */
#pragma once #pragma once
#include <string> #include <string>
#include <vector>
#include "framework/operator.h" #include "framework/operator.h"
#include "framework/program/program-optimize/fusion_op_register.h" #include "framework/program/program-optimize/fusion_op_register.h"
...@@ -22,7 +23,8 @@ limitations under the License. */ ...@@ -22,7 +23,8 @@ limitations under the License. */
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using std::string;
using std::vector;
class FusionFcMatcher : public framework::FusionOpMatcher { class FusionFcMatcher : public framework::FusionOpMatcher {
public: public:
FusionFcMatcher() { FusionFcMatcher() {
...@@ -30,8 +32,8 @@ class FusionFcMatcher : public framework::FusionOpMatcher { ...@@ -30,8 +32,8 @@ class FusionFcMatcher : public framework::FusionOpMatcher {
node_ > std::make_shared<framework::Node>("elementwise_add"); node_ > std::make_shared<framework::Node>("elementwise_add");
} }
void FolderNodes(framework::Node &node) { void FolderNodes(const framework::Node &node) {
std::vector<std::shared_ptr<framework::OpDesc>> origin_descs = vector<std::shared_ptr<framework::OpDesc>> origin_descs =
node.OpDescs(node_.Depth()); node.OpDescs(node_.Depth());
node.Folder(node_.Depth(), Type(), {{"elementwise_add", {"Y", "Z"}}}); node.Folder(node_.Depth(), Type(), {{"elementwise_add", {"Y", "Z"}}});
} }
...@@ -42,7 +44,7 @@ class FusionFcMatcher : public framework::FusionOpMatcher { ...@@ -42,7 +44,7 @@ class FusionFcMatcher : public framework::FusionOpMatcher {
template <typename DeviceType, typename T> template <typename DeviceType, typename T>
class FushionFcOp : public framework::OperatorWithKernel<DeviceType> { class FushionFcOp : public framework::OperatorWithKernel<DeviceType> {
public: public:
FushionFcOp(const std::string &type, const VariableNameMap &inputs, FushionFcOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const VariableNameMap &outputs,
const framework::AttributeMap attrs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope) std::shared_ptr<framework::Scope> scope)
...@@ -50,7 +52,7 @@ class FushionFcOp : public framework::OperatorWithKernel<DeviceType> { ...@@ -50,7 +52,7 @@ class FushionFcOp : public framework::OperatorWithKernel<DeviceType> {
scope), scope),
param_(inputs, outputs, attrs, *scope) {} param_(inputs, outputs, attrs, *scope) {}
void Run() const { void RunImpl() const {
operators::FushionFcKernel<DeviceType, T> kernel; operators::FushionFcKernel<DeviceType, T> kernel;
kernel.Compute(param_); kernel.Compute(param_);
} }
......
...@@ -11,27 +11,27 @@ distributed under the License is distributed on an "AS IS" BASIS, ...@@ -11,27 +11,27 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#pragma once
#include <string>
#include "framework/operator.h" #include "framework/operator.h"
#include "operators/kernel/lrn_kernel.h" #include "operators/kernel/lrn_kernel.h"
#include "operators/op_param.h" #include "operators/op_param.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using std::string;
using namespace framework;
template <typename DeviceType, typename T> template <typename DeviceType, typename T>
class LrnOp : public framework::OperatorWithKernel<DeviceType> { class LrnOp : public framework::OperatorWithKernel<DeviceType> {
public: public:
LrnOp(const std::string &type, const VariableNameMap &inputs, LrnOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap attrs, const VariableNameMap &outputs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope) std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs, : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
scope), scope),
param_(inputs, outputs, attrs, *scope) {} param_(inputs, outputs, attrs, *scope) {}
void Run() const { void RunImpl() const {
operators::LrnKernel<DeviceType, T> kernel; operators::LrnKernel<DeviceType, T> kernel;
kernel.Compute(param_); kernel.Compute(param_);
} }
......
...@@ -11,7 +11,9 @@ distributed under the License is distributed on an "AS IS" BASIS, ...@@ -11,7 +11,9 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#pragma once
#include <string>
#include "framework/operator.h" #include "framework/operator.h"
#include "operators/kernel/mul_kernel.h" #include "operators/kernel/mul_kernel.h"
#include "operators/op_param.h" #include "operators/op_param.h"
...@@ -19,8 +21,6 @@ limitations under the License. */ ...@@ -19,8 +21,6 @@ limitations under the License. */
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using namespace framework;
template <typename DeviceType, typename T> template <typename DeviceType, typename T>
class MulOp : public framework::OperatorWithKernel<DeviceType> { class MulOp : public framework::OperatorWithKernel<DeviceType> {
public: public:
...@@ -31,7 +31,7 @@ class MulOp : public framework::OperatorWithKernel<DeviceType> { ...@@ -31,7 +31,7 @@ class MulOp : public framework::OperatorWithKernel<DeviceType> {
scope), scope),
param_(inputs, outputs, attrs, *scope) {} param_(inputs, outputs, attrs, *scope) {}
void Run() const { void RunImpl() const {
operators::MulKernel<DeviceType, T> kernel; operators::MulKernel<DeviceType, T> kernel;
kernel.Compute(param_); kernel.Compute(param_);
} }
......
...@@ -36,7 +36,7 @@ class MultiClassNMSOp : public framework::OperatorWithKernel<DeviceType> { ...@@ -36,7 +36,7 @@ class MultiClassNMSOp : public framework::OperatorWithKernel<DeviceType> {
scope), scope),
param_(inputs, outputs, attrs, *scope) {} param_(inputs, outputs, attrs, *scope) {}
void Run() const { void RunImpl() const {
operators::MultiClassNMSKernel<DeviceType, T> kernel; operators::MultiClassNMSKernel<DeviceType, T> kernel;
kernel.Compute(param_); kernel.Compute(param_);
} }
......
...@@ -17,25 +17,25 @@ limitations under the License. */ ...@@ -17,25 +17,25 @@ limitations under the License. */
#include <framework/operator.h> #include <framework/operator.h>
#include <operators/kernel/pool_kernel.h> #include <operators/kernel/pool_kernel.h>
#include <operators/op_param.h> #include <operators/op_param.h>
#include <string>
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using namespace framework; using framework::AttributeMap;
using framework::Scope;
using std::string;
template <typename DeviceType, typename T> template <typename DeviceType, typename T>
class PoolOp : public framework::OperatorWithKernel<DeviceType> { class PoolOp : public OperatorWithKernel<DeviceType> {
public: public:
PoolOp(const std::string &type, const VariableNameMap &inputs, PoolOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap &attrs, const VariableNameMap &outputs, const AttributeMap &attrs,
std::shared_ptr<framework::Scope> scope) std::shared_ptr<Scope> scope)
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs, : OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs, scope),
scope),
param_(inputs, outputs, attrs, *scope) {} param_(inputs, outputs, attrs, *scope) {}
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel; using OperatorWithKernel<DeviceType>::OperatorWithKernel;
void InferShape() const override; void InferShape() const override;
void Run() const { void RunImpl() const {
// InferShape();
operators::PoolKernel<DeviceType, T> kernel; operators::PoolKernel<DeviceType, T> kernel;
kernel.Compute(param_); kernel.Compute(param_);
this->ClearVariables({"X"}); this->ClearVariables({"X"});
......
...@@ -36,7 +36,7 @@ class PriorBoxOp : public framework::OperatorWithKernel<DeviceType> { ...@@ -36,7 +36,7 @@ class PriorBoxOp : public framework::OperatorWithKernel<DeviceType> {
scope), scope),
param_(inputs, outputs, attrs, *scope) {} param_(inputs, outputs, attrs, *scope) {}
void Run() const { void RunImpl() const {
operators::PriorBoxKernel<DeviceType, T> kernel; operators::PriorBoxKernel<DeviceType, T> kernel;
kernel.Compute(param_); kernel.Compute(param_);
} }
......
...@@ -35,7 +35,7 @@ class ReluOp : public framework::OperatorWithKernel<DeviceType> { ...@@ -35,7 +35,7 @@ class ReluOp : public framework::OperatorWithKernel<DeviceType> {
scope), scope),
param_(inputs, outputs, attrs, *scope) {} param_(inputs, outputs, attrs, *scope) {}
void Run() const { void RunImpl() const {
operators::ReluKernel<DeviceType, T> kernel; operators::ReluKernel<DeviceType, T> kernel;
kernel.Compute(param_); kernel.Compute(param_);
} }
......
...@@ -35,7 +35,7 @@ class ReshapeOp : public framework::OperatorWithKernel<DeviceType> { ...@@ -35,7 +35,7 @@ class ReshapeOp : public framework::OperatorWithKernel<DeviceType> {
scope), scope),
param_(inputs, outputs, attrs, *scope) {} param_(inputs, outputs, attrs, *scope) {}
void Run() const { void RunImpl() const {
operators::ReshapeKernel<DeviceType, T> kernel; operators::ReshapeKernel<DeviceType, T> kernel;
kernel.Compute(param_); kernel.Compute(param_);
} }
......
...@@ -36,7 +36,7 @@ class SigmoidOp : public framework::OperatorWithKernel<DeviceType> { ...@@ -36,7 +36,7 @@ class SigmoidOp : public framework::OperatorWithKernel<DeviceType> {
void InferShape() const override; void InferShape() const override;
void Run() const { void RunImpl() const {
operators::SigmoidKernel<DeviceType, T> kernel; operators::SigmoidKernel<DeviceType, T> kernel;
kernel.Compute(param_); kernel.Compute(param_);
this->ClearVariables({"X"}); this->ClearVariables({"X"});
......
...@@ -36,7 +36,7 @@ class SoftmaxOp : public framework::OperatorWithKernel<DeviceType> { ...@@ -36,7 +36,7 @@ class SoftmaxOp : public framework::OperatorWithKernel<DeviceType> {
void InferShape() const override; void InferShape() const override;
void Run() const { void RunImpl() const {
operators::SoftmaxKernel<DeviceType, T> kernel; operators::SoftmaxKernel<DeviceType, T> kernel;
kernel.Compute(param_); kernel.Compute(param_);
this->ClearVariables({"X"}); this->ClearVariables({"X"});
......
...@@ -36,7 +36,7 @@ class TransposeOp : public framework::OperatorWithKernel<DeviceType> { ...@@ -36,7 +36,7 @@ class TransposeOp : public framework::OperatorWithKernel<DeviceType> {
scope), scope),
param_(inputs, outputs, attrs, *scope) {} param_(inputs, outputs, attrs, *scope) {}
void Run() const { void RunImpl() const {
operators::TransposeKernel<DeviceType, T> kernel; operators::TransposeKernel<DeviceType, T> kernel;
kernel.Compute(param_); kernel.Compute(param_);
} }
......
...@@ -17,9 +17,9 @@ limitations under the License. */ ...@@ -17,9 +17,9 @@ limitations under the License. */
#include <string> #include <string>
#include <vector> #include <vector>
#include "./io.h"
#include "common/log.h" #include "common/log.h"
#include "framework/op_registry.h" #include "framework/op_registry.h"
#include "io/io.h"
#include "operators/conv_op.h" #include "operators/conv_op.h"
#include "operators/elementwise_add_op.h" #include "operators/elementwise_add_op.h"
#include "operators/pool_op.h" #include "operators/pool_op.h"
......
...@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "io.h" #include "io/io.h"
int main() { int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader; paddle_mobile::Loader<paddle_mobile::CPU> loader;
......
...@@ -14,7 +14,7 @@ limitations under the License. */ ...@@ -14,7 +14,7 @@ limitations under the License. */
#include "framework/program/program-optimize/node.h" #include "framework/program/program-optimize/node.h"
#include "framework/program/program-optimize/program_optimize.h" #include "framework/program/program-optimize/program_optimize.h"
#include "io.h" #include "io/io.h"
int main() { int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader; paddle_mobile::Loader<paddle_mobile::CPU> loader;
......
...@@ -16,7 +16,7 @@ limitations under the License. */ ...@@ -16,7 +16,7 @@ limitations under the License. */
#include "../test_helper.h" #include "../test_helper.h"
#include "../test_include.h" #include "../test_include.h"
#include "io.h" #include "io/io.h"
int main() { int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader; paddle_mobile::Loader<paddle_mobile::CPU> loader;
......
...@@ -14,7 +14,7 @@ limitations under the License. */ ...@@ -14,7 +14,7 @@ limitations under the License. */
#include "../executor_for_test.h" #include "../executor_for_test.h"
#include "../test_helper.h" #include "../test_helper.h"
#include "io.h" #include "io/io.h"
int main() { int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader; paddle_mobile::Loader<paddle_mobile::CPU> loader;
......
...@@ -14,7 +14,7 @@ limitations under the License. */ ...@@ -14,7 +14,7 @@ limitations under the License. */
#include "../executor_for_test.h" #include "../executor_for_test.h"
#include "../test_helper.h" #include "../test_helper.h"
#include "./io.h" #include "io/io.h"
int main() { int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader; paddle_mobile::Loader<paddle_mobile::CPU> loader;
......
...@@ -14,7 +14,7 @@ limitations under the License. */ ...@@ -14,7 +14,7 @@ limitations under the License. */
#include "../../src/operators/kernel/sigmoid_kernel.h" #include "../../src/operators/kernel/sigmoid_kernel.h"
#include "../test_helper.h" #include "../test_helper.h"
#include "./io.h" #include "io/io.h"
int main() { int main() {
paddle_mobile::framework::Tensor input; paddle_mobile::framework::Tensor input;
......
...@@ -14,7 +14,7 @@ limitations under the License. */ ...@@ -14,7 +14,7 @@ limitations under the License. */
#include "../executor_for_test.h" #include "../executor_for_test.h"
#include "../test_helper.h" #include "../test_helper.h"
#include "./io.h" #include "io/io.h"
int main() { int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader; paddle_mobile::Loader<paddle_mobile::CPU> loader;
......
...@@ -14,7 +14,7 @@ limitations under the License. */ ...@@ -14,7 +14,7 @@ limitations under the License. */
#include "../executor_for_test.h" #include "../executor_for_test.h"
#include "../test_helper.h" #include "../test_helper.h"
#include "./io.h" #include "io/io.h"
int main() { int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader; paddle_mobile::Loader<paddle_mobile::CPU> loader;
......
...@@ -29,4 +29,4 @@ limitations under the License. */ ...@@ -29,4 +29,4 @@ limitations under the License. */
#include "framework/scope.h" #include "framework/scope.h"
#include "framework/tensor.h" #include "framework/tensor.h"
#include "framework/variable.h" #include "framework/variable.h"
#include "io.h" #include "io/io.h"
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册