Commit fcb16d5d authored by eclipsycn, committed by GitHub

Merge pull request #205 from Eclipsess/develop

fix #204 add lrn_op and lrn_op_test
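
Besides fixing #204 by adding the LRN op and its test, this PR drops the redundant parameter-type template argument from the existing op kernels: kernels declared as Kernel<DeviceType, T, Param> become Kernel<DeviceType, T>, since OpKernelBase already fixes the param type. Below is a minimal, self-contained sketch of that pattern; the OpKernelBase here is a hypothetical stand-in for the real one in framework/operator.h:

#include <iostream>

struct CPU {};
struct MulParam {
  float x, y;
};

// The base class names the parameter type, so kernels need not repeat it.
template <typename DeviceType, typename P>
class OpKernelBase {
 public:
  virtual void Compute(const P &param) const = 0;
  virtual ~OpKernelBase() = default;
};

// Before: MulKernel<DeviceType, T, MulParam>. After: the third argument is
// gone because OpKernelBase<DeviceType, MulParam> already carries MulParam.
template <typename DeviceType, typename T>
class MulKernel : public OpKernelBase<DeviceType, MulParam> {
 public:
  void Compute(const MulParam &param) const override {
    std::cout << param.x * param.y << std::endl;
  }
};

int main() {
  MulKernel<CPU, float> kernel;
  kernel.Compute({2.0f, 3.0f});  // prints 6
  return 0;
}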
@@ -37,8 +37,7 @@ class ElementwiseAddOp : public framework::OperatorWithKernel<DeviceType> {
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
-operators::ElementwiseAddKernel<DeviceType, T, ElementwiseAddParam>
-    kernel;
+operators::ElementwiseAddKernel<DeviceType, T> kernel;
kernel.Compute(param_);
}
......
@@ -24,7 +24,7 @@ template <typename T> struct AddFunctor {
};
template <>
-void ElementwiseAddKernel<CPU, float, ElementwiseAddParam>::Compute(
+void ElementwiseAddKernel<CPU, float>::Compute(
const ElementwiseAddParam &param) const {
const Tensor *input_x = param.InputX();
const Tensor *input_y = param.InputY();
@@ -35,7 +35,7 @@ void ElementwiseAddKernel<CPU, float, ElementwiseAddParam>::Compute(
AddFunctor<float>(), Out);
}
-template class ElementwiseAddKernel<CPU, float, ElementwiseAddParam>;
+template class ElementwiseAddKernel<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "operators/kernel/lrn_kernel.h"
namespace paddle_mobile {
namespace operators {
template <> void LrnKernel<CPU, float>::Compute(const LrnParam &param) const {
const Tensor *input_x = param.InputX();
auto x_dims = input_x->dims();
/// data_format = NCHW
const int N = x_dims[0];
const int C = x_dims[1];
const int H = x_dims[2];
const int W = x_dims[3];
Tensor *out = param.Out();
out->mutable_data<float>();
const int n = param.N();
const float alpha = param.Alpha();
const float beta = param.Beta();
const float k = param.K();
LRNFunctor<float> lrnFunctor;
lrnFunctor(*input_x, out, N, C, H, W, n, k, alpha, beta);
}
template class LrnKernel<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
@@ -23,8 +23,7 @@ SOFTWARE.
namespace paddle_mobile {
namespace operators {
-template <>
-void MulKernel<CPU, float, MulParam>::Compute(const MulParam &param) const {
+template <> void MulKernel<CPU, float>::Compute(const MulParam &param) const {
const Tensor *input_x = param.InputX();
const Tensor *input_y = param.InputY();
Tensor *out = param.Out();
@@ -48,7 +47,7 @@ void MulKernel<CPU, float, MulParam>::Compute(const MulParam &param) const {
}
}
-template class MulKernel<CPU, float, MulParam>;
+template class MulKernel<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
@@ -26,7 +26,7 @@ namespace operators {
using namespace framework;
-template <typename DeviceType, typename T, typename P>
+template <typename DeviceType, typename T>
class ElementwiseAddKernel
: public framework::OpKernelBase<DeviceType, ElementwiseAddParam> {
public:
......
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "framework/operator.h"
#include "operators/op_param.h"
#pragma once;
namespace paddle_mobile {
namespace operators {
using namespace framework;
template <typename T> struct LRNFunctor {
void operator()(const framework::Tensor &input, framework::Tensor *out,
int N, int C, int H, int W, int n, T k, T alpha, T beta) {
auto input_ptr = input.data<T>();
const int start = -(n - 1) / 2;
const int end = start + n;
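// The window covers channels [b + start, b + end) clipped to [0, C) by the
// bounds check below: for odd n, (n - 1) / 2 channels on either side of b.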
const int stride0 = C * H * W;
const int stride1 = H * W;
const int stride2 = W;
const int stride3 = 1;
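// sqr_buffer accumulates k + alpha * (sum of squared inputs) over the local
// cross-channel window; std::fill seeds every element with k.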
framework::Tensor sqr_buffer;
auto sqr_buffer_ptr = sqr_buffer.mutable_data<T>(input.dims());
std::fill(sqr_buffer_ptr, sqr_buffer_ptr + sqr_buffer.numel(), k);
for (int a = 0; a < N; a++) {
for (int b = 0; b < C; b++) {
for (int index = start; index < end; index++) {
int channel = b + index;
if (channel >= 0 && channel < C) {
for (int c = 0; c < H; c++) {
for (int d = 0; d < W; d++) {
int u = a * stride0 + b * stride1 + c * stride2 + d;
int i = a * stride0 + channel * stride1 + c * stride2 + d;
sqr_buffer_ptr[u] += alpha * input_ptr[i] * input_ptr[i];
}
}
}
}
}
}
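// Normalise: out = in / (k + alpha * sum_of_squares)^beta.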
auto out_ptr = out->data<T>();
for (int i = 0; i < input.numel(); i++) {
out_ptr[i] = input_ptr[i] / std::pow(sqr_buffer_ptr[i], beta);
}
}
};
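Written out, the functor above computes the usual cross-channel LRN for NCHW data; this matches start = -(n - 1) / 2 and end = start + n in the code:

\[
\mathrm{out}(a,b,h,w) = \frac{\mathrm{in}(a,b,h,w)}{\Bigl(k + \alpha \sum_{c=\max(0,\,b-\lfloor (n-1)/2 \rfloor)}^{\min(C-1,\,b+\lceil (n-1)/2 \rceil)} \mathrm{in}(a,c,h,w)^{2}\Bigr)^{\beta}}
\]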
template <typename DeviceType, typename T>
class LrnKernel : public framework::OpKernelBase<DeviceType, LrnParam> {
public:
void Compute(const LrnParam &param) const;
};
} // namespace operators
} // namespace paddle_mobile
@@ -26,7 +26,7 @@ namespace operators {
using namespace framework;
-template <typename DeviceType, typename T, typename P>
+template <typename DeviceType, typename T>
class MulKernel : public framework::OpKernelBase<DeviceType, MulParam> {
public:
void Compute(const MulParam &param) const;
......
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "lrn_op.h"
namespace paddle_mobile {
namespace operators {
template <typename Dtype, typename T> void LrnOp<Dtype, T>::InferShape() const {
auto x_dims = param_.InputX()->dims();
param_.Out()->Resize(x_dims);
}
template class LrnOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "framework/operator.h"
#include "operators/kernel/lrn_kernel.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
using namespace framework;
template <typename DeviceType, typename T>
class LrnOp : public framework::OperatorWithKernel<DeviceType> {
public:
LrnOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs,
attrs, scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
operators::LrnKernel<DeviceType, T> kernel;
kernel.Compute(param_);
}
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
void InferShape() const override;
protected:
LrnParam param_;
};
} // namespace operators
} // namespace paddle_mobile
@@ -36,7 +36,7 @@ class MulOp : public framework::OperatorWithKernel<DeviceType> {
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
-operators::MulKernel<DeviceType, T, MulParam> kernel;
+operators::MulKernel<DeviceType, T> kernel;
kernel.Compute(param_);
}
......
@@ -64,6 +64,11 @@ class OpParam : PaddleMobileObject {
return GetVarValue<T>("Out", outputs, scope);
}
template <typename T>
static T *MidOutFrom(const VariableNameMap &outputs, const Scope &scope) {
return GetVarValue<T>("MidOut", outputs, scope);
}
template <typename T>
static T *FilterFrom(const VariableNameMap &inputs, const Scope &scope) {
return GetVarValue<T>("Filter", inputs, scope);
@@ -222,5 +227,47 @@ class ConcatParam : public OpParam {
int axis_;
};
class LrnParam : public OpParam {
public:
LrnParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
const framework::Scope &scope) {
input_x_ = InputXFrom<framework::Tensor>(inputs, scope);
out_ = OutFrom<framework::Tensor>(outputs, scope);
mid_out_ = MidOutFrom<framework::Tensor>(outputs, scope);
n_ = GetAttr<int>("n", attrs);
alpha_ = GetAttr<float>("alpha", attrs);
beta_ = GetAttr<float>("beta", attrs);
k_ = GetAttr<float>("k", attrs);
data_format_ = GetAttr<std::string>("data_format", attrs);
}
const Tensor *InputX() const { return input_x_; }
Tensor *Out() const { return out_; }
Tensor *MidOut() const { return mid_out_; }
const int &N() const { return n_; }
const float &Alpha() const { return alpha_; }
const float &Beta() const { return beta_; }
const float &K() const { return k_; }
const std::string &DataFormat() const { return data_format_; }
private:
Tensor *input_x_;
Tensor *out_;
Tensor *mid_out_;
int n_;
float alpha_;
float beta_;
float k_;
std::string data_format_;
};
} // namespace operators
} // namespace paddle_mobile
@@ -15,6 +15,10 @@ target_link_libraries(test-elementwiseadd-op paddle-mobile)
ADD_EXECUTABLE(test-concat-op operators/test_concat_op.cpp test_helper.h test_include.h)
target_link_libraries(test-concat-op paddle-mobile)
# gen test
ADD_EXECUTABLE(test-lrn-op operators/test_lrn_op.cpp test_helper.h test_include.h)
target_link_libraries(test-lrn-op paddle-mobile)
# gen test log
ADD_EXECUTABLE(test-log common/test_log.cpp)
target_link_libraries(test-log paddle-mobile)
......
@@ -136,28 +136,26 @@ template class TestConcatOp<CPU>;
int main() {
DLOG << "----------**********----------";
DLOG << "begin to run MulOp Test";
DLOG << "begin to run ConcatOp Test";
paddle_mobile::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string("../../test/models/googlenet"));
-/// input x (3,2,1,1)
+/// input x (4,10,2,2)
paddle_mobile::framework::Tensor inputx1;
SetupTensor<float>(&inputx1, {4, 10, 2, 2}, static_cast<float>(0),
static_cast<float>(1));
auto *inputx1_ptr = inputx1.data<float>();
-/// input x (3,2,1,1)
+/// input x (4,20,2,2)
paddle_mobile::framework::Tensor inputx2;
SetupTensor<float>(&inputx2, {4, 20, 2, 2}, static_cast<float>(0),
static_cast<float>(1));
auto *inputx2_ptr = inputx2.data<float>();
-/// input x (3,2,1,1)
+/// input x (4,30,2,2)
paddle_mobile::framework::Tensor inputx3;
SetupTensor<float>(&inputx3, {4, 30, 2, 2}, static_cast<float>(0),
static_cast<float>(1));
auto *inputx3_ptr = inputx3.data<float>();
-/// input x (3,2,1,1)
+/// input x (4,40,2,2)
paddle_mobile::framework::Tensor inputx4;
SetupTensor<float>(&inputx4, {4, 40, 2, 2}, static_cast<float>(0),
static_cast<float>(1));
......
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "../test_include.h"
#include "operators/lrn_op.h"
namespace paddle_mobile {
namespace framework {
template <typename Dtype> class TestLrnOp {
public:
explicit TestLrnOp(const Program<Dtype> p) : program_(p) {
if (use_optimize_) {
to_predict_program_ = program_.optimizeProgram;
} else {
to_predict_program_ = program_.originProgram;
}
const std::vector<std::shared_ptr<BlockDesc>> blocks =
to_predict_program_->Blocks();
// DLOG << " **block size " << blocks.size();
for (int i = 0; i < blocks.size(); ++i) {
std::shared_ptr<BlockDesc> block_desc = blocks[i];
std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
// DLOG << " ops " << ops.size();
for (int j = 0; j < ops.size(); ++j) {
std::shared_ptr<OpDesc> op = ops[j];
// if (op->Type() == "mul") {
// DLOG << "x_num_col_dims :
// "
// << op->GetAttrMap()
// .at("x_num_col_dims")
// .Get<int>();
// DLOG << "y_num_col_dims :
// "
// << op->GetAttrMap()
// .at("y_num_col_dims")
// .Get<int>();
// DLOG << " Input X is : "
// << op->Input("X")[0];
// }
// DLOG << "op:" << op->Type();
if (op->Type() == "lrn" &&
op->Input("X")[0] == "pool2d_0.tmp_0") {
DLOG << " mul attr size: " << op->GetAttrMap().size();
DLOG << " inputs size: " << op->GetInputs().size();
DLOG << " outputs size: " << op->GetOutputs().size();
DLOG << " Input X is : " << op->Input("X")[0];
DLOG << " Output Out is : " << op->Output("Out")[0];
DLOG << " n : " << op->GetAttrMap().at("n").Get<int>();
DLOG << " alpha : "
<< op->GetAttrMap().at("alpha").Get<float>();
DLOG << " beta : "
<< op->GetAttrMap().at("beta").Get<float>();
DLOG << " k : " << op->GetAttrMap().at("k").Get<float>();
std::shared_ptr<operators::LrnOp<Dtype, float>> lrn =
std::make_shared<operators::LrnOp<Dtype, float>>(
op->Type(), op->GetInputs(), op->GetOutputs(),
op->GetAttrMap(), program_.scope);
ops_of_block_[*block_desc.get()].push_back(lrn);
}
}
}
}
std::shared_ptr<Tensor> predict_lrn(Tensor &t1) {
// feed
auto scope = program_.scope;
Variable *x1_feed_value = scope->Var("pool2d_0.tmp_0");
auto tensor_x1 = x1_feed_value->GetMutable<Tensor>();
tensor_x1->ShareDataWith(t1);
Variable *con_output = scope->Var("pool1_norm1.tmp_1");
auto *output_tensor = con_output->GetMutable<Tensor>();
output_tensor->mutable_data<float>({3, 4, 2, 2});
// DLOG << typeid(output_tensor).name();
// DLOG << "output_tensor dims: " << output_tensor->dims();
// Wrap the scope-owned output tensor without taking ownership; an owning
// shared_ptr here would double-free when the scope is destroyed.
std::shared_ptr<Tensor> out_tensor(output_tensor, [](Tensor *) {});
predict_lrn(t1, 0);
return out_tensor;
}
private:
const framework::Program<Dtype> program_;
std::shared_ptr<ProgramDesc> to_predict_program_;
std::map<framework::BlockDesc,
std::vector<std::shared_ptr<OperatorBase<Dtype>>>>
ops_of_block_;
bool use_optimize_ = false;
void predict_lrn(const Tensor &t1, int block_id) {
std::shared_ptr<BlockDesc> to_predict_block =
to_predict_program_->Block(block_id);
for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size();
++j) {
auto op = ops_of_block_[*to_predict_block.get()][j];
DLOG << "op -> run()";
op->Run();
}
}
};
template class TestLrnOp<CPU>;
} // namespace framework
} // namespace paddle_mobile
int main() {
DLOG << "----------**********----------";
DLOG << "begin to run LrnOp Test";
paddle_mobile::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string("../../test/models/googlenet"));
/// input x (3,4,2,2)
paddle_mobile::framework::Tensor inputx1;
SetupTensor<float>(&inputx1, {3, 4, 2, 2}, static_cast<float>(0),
static_cast<float>(1));
auto *inputx1_ptr = inputx1.data<float>();
paddle_mobile::framework::TestLrnOp<paddle_mobile::CPU> testLrnOp(program);
auto output_lrn = testLrnOp.predict_lrn(inputx1);
auto *output_lrn_ptr = output_lrn->data<float>();
DLOG << " LrnOp input: ";
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 4; j++) {
for (int c = 0; c < 2; c++) {
for (int d = 0; d < 2; d++) {
DLOGF("%f ", inputx1_ptr[i * 16 + j * 4 + c * 2 + d]);
}
DLOGF("\n");
}
DLOGF("\n");
}
DLOGF("\n");
}
DLOG << " LrnOp output: ";
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 4; j++) {
for (int c = 0; c < 2; c++) {
for (int d = 0; d < 2; d++) {
DLOGF("%f ", output_lrn_ptr[i * 16 + j * 4 + c * 2 + d]);
}
DLOGF("\n");
}
DLOGF("\n");
}
DLOGF("\n");
}
DLOG << inputx1_ptr[0] << " / ((1 + 0.00002 * ( " << inputx1_ptr[0]
<< "^2 + " << inputx1_ptr[4] << "^2 + " << inputx1_ptr[8]
<< "^2 ))^0.75) = ";
DLOG << output_lrn_ptr[0];
return 0;
}
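
For reference, the closing log lines spot-check output element (0, 0, 0, 0). With input dims (3, 4, 2, 2), H * W = 4, so flat offsets 0, 4 and 8 are channels 0, 1 and 2 at the same spatial location, i.e. the window around channel 0 clipped to valid channels. Using the constants baked into the log message (k = 1, alpha = 0.00002, beta = 0.75, assumed here to match the googlenet model's lrn attributes):

\[
\mathrm{out}_0 = \frac{x_0}{\bigl(1 + 0.00002\,(x_0^2 + x_4^2 + x_8^2)\bigr)^{0.75}}
\]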