Commit dc349ae9 authored by: eclipsycn, committed by: GitHub

Merge pull request #188 from Eclipsess/develop

fix #187 add mul_op and mul_op_test
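For context, mul follows the usual PaddlePaddle convention of flattening each input to a 2-D matrix before multiplying: X collapses to (product of its first x_num_col_dims dims, product of the rest), and Y likewise via y_num_col_dims. Below is a minimal standalone sketch of that shape logic; the helper flatten_to_2d_dims is hypothetical, standing in for the framework::flatten_to_2d call the real code uses.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical helper mirroring what framework::flatten_to_2d is assumed
// to do: dims before num_col_dims multiply into the row count, the
// remaining dims multiply into the column count.
std::vector<int64_t> flatten_to_2d_dims(const std::vector<int64_t> &dims,
                                        int num_col_dims) {
  int64_t rows = 1, cols = 1;
  for (int i = 0; i < num_col_dims; ++i) rows *= dims[i];
  for (int i = num_col_dims; i < static_cast<int>(dims.size()); ++i)
    cols *= dims[i];
  return {rows, cols};
}

int main() {
  // Shapes from testMul below: X is (3,2,1,1), Y is (2,3), assuming
  // x_num_col_dims = y_num_col_dims = 1, which is consistent with the
  // (3,3) output tensor the test allocates.
  auto x_mat = flatten_to_2d_dims({3, 2, 1, 1}, 1);  // -> (3, 2)
  auto y_mat = flatten_to_2d_dims({2, 3}, 1);        // -> (2, 3)
  assert(x_mat[1] == y_mat[0]);  // inner dims must match
  std::cout << "out: " << x_mat[0] << " x " << y_mat[1] << std::endl;  // 3 x 3
  return 0;
}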
@@ -47,5 +47,6 @@ target_link_libraries(paddle-mobile-static protobuf-lite openblas)
 # gen test
 ADD_EXECUTABLE(paddle-mobile-test test/main.cpp test/test_helper.h
-               test/elementwise_add_op_test.h test/test_include.h)
+               test/elementwise_add_op_test.h test/test_include.h
+               test/mul_op_test.h)
 target_link_libraries(paddle-mobile-test paddle-mobile)
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "operators/kernel/mul_kernel.h"
namespace paddle_mobile {
namespace operators {
template <>
void MulKernel<CPU, float, MulParam>::Compute(const MulParam &param) const {
const Tensor *input_x = param.InputX();
const Tensor *input_y = param.InputY();
Tensor *out = param.Out();
out->mutable_data<float>();
const Tensor x_matrix =
input_x->dims().size() > 2
? framework::ReshapeToMatrix(*input_x, param.XNumColDims())
: *input_x;
const Tensor y_matrix =
input_y->dims().size() > 2
? framework::ReshapeToMatrix(*input_y, param.YNumColDims())
: *input_y;
auto out_dim = out->dims();
if (out_dim.size() != 2) {
out->Resize({x_matrix.dims()[0], y_matrix.dims()[1]});
}
math::matmul<float>(x_matrix, false, y_matrix, false,
static_cast<float>(1), out,
static_cast<float>(0));
if (out_dim.size() != 2) {
out->Resize(out_dim);
}
}
template class MulKernel<CPU, float, MulParam>;
} // namespace operators
} // namespace paddle_mobile
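The math::matmul call above is read here as following BLAS GEMM conventions, out = alpha * X * Y + beta * out, with alpha = 1 and beta = 0 so the output is simply overwritten. A naive row-major reference under that assumption (a sketch only, not the optimized BLAS path the library links against):

#include <vector>

// Naive reference for out = alpha * X(m,k) * Y(k,n) + beta * out(m,n),
// matching the semantics assumed for math::matmul<float>(x, false, y,
// false, alpha, out, beta) with both transpose flags false.
void naive_matmul(const std::vector<float> &x, const std::vector<float> &y,
                  std::vector<float> *out, int m, int k, int n,
                  float alpha, float beta) {
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) {
      float acc = 0.f;
      for (int p = 0; p < k; ++p) acc += x[i * k + p] * y[p * n + j];
      (*out)[i * n + j] = alpha * acc + beta * (*out)[i * n + j];
    }
  }
}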
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "framework/operator.h"
#include "operators/math/math_function.h"
#include "operators/op_param.h"
#pragma once;
namespace paddle_mobile {
namespace operators {
using namespace framework;
template <typename DeviceType, typename T, typename P>
class MulKernel : public framework::OpKernelBase<DeviceType, MulParam> {
public:
void Compute(const MulParam &param) const;
};
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "mul_op.h"
namespace paddle_mobile {
namespace operators {
template <typename Dtype, typename T>
void MulOp<Dtype, T>::InferShape() const {
auto x_dims = param_.InputX()->dims();
auto y_dims = param_.InputY()->dims();
int x_num_col_dims = param_.XNumColDims();
int y_num_col_dims = param_.YNumColDims();
assert(x_dims.size() > x_num_col_dims);
assert(y_dims.size() > y_num_col_dims);
/// (1,2,3,4) , x_num_col_dims = 2 -> (2,12)
auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_col_dims);
auto y_mat_dims = framework::flatten_to_2d(y_dims, y_num_col_dims);
assert(x_mat_dims[1] == y_mat_dims[0]);
std::vector<int64_t> output_dims;
output_dims.reserve(static_cast<size_t>(
x_num_col_dims + y_dims.size() - y_num_col_dims));
for (int i = 0; i < x_num_col_dims; ++i) {
output_dims.push_back(x_dims[i]);
}
for (int i = y_num_col_dims; i < y_dims.size(); ++i) {
output_dims.push_back(y_dims[i]);
}
framework::DDim ddim = framework::make_ddim(output_dims);
param_.Out()->Resize(ddim);
}
template class MulOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
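To make the output-dims construction concrete: the result keeps the first x_num_col_dims dims of X plus the trailing dims of Y after y_num_col_dims, so a 3-D X against a 2-D Y can yield a 3-D output. A small self-contained check of that loop, under assumed example shapes (2,3,4) and (4,5):

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // Assumed example: x_dims = (2,3,4) with x_num_col_dims = 2 flattens to
  // (2*3, 4) = (6, 4); y_dims = (4,5) with y_num_col_dims = 1 stays (4, 5).
  // Inner dims match (4 == 4), so the shapes are compatible.
  std::vector<int64_t> x_dims{2, 3, 4}, y_dims{4, 5};
  int x_num_col_dims = 2, y_num_col_dims = 1;
  std::vector<int64_t> output_dims;
  // Same two loops as MulOp::InferShape above.
  for (int i = 0; i < x_num_col_dims; ++i) output_dims.push_back(x_dims[i]);
  for (int i = y_num_col_dims; i < static_cast<int>(y_dims.size()); ++i)
    output_dims.push_back(y_dims[i]);
  for (auto d : output_dims) std::cout << d << " ";  // prints: 2 3 5
  std::cout << std::endl;
  return 0;
}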
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "framework/operator.h"
#include "operators/kernel/mul_kernel.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
using namespace framework;
template <typename DeviceType, typename T>
class MulOp : public framework::OperatorWithKernel<DeviceType> {
public:
MulOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs,
const framework::AttributeMap attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType>(
type, inputs, outputs, attrs, scope),
param_(inputs, outputs, attrs, *scope) {}
void Run() const {
operators::MulKernel<DeviceType, T, MulParam> kernel;
kernel.Compute(param_);
}
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
void InferShape() const override;
protected:
MulParam param_;
};
} // namespace operators
} // namespace paddle_mobile
@@ -42,15 +42,19 @@ namespace paddle_mobile {
       // std::cout << " ops " << ops.size() << std::endl;
       for (int j = 0; j < ops.size(); ++j) {
         std::shared_ptr<OpDesc> op = ops[j];
-        if (op->Type() == "elementwise_add") {
-          if (op->GetAttrMap().at("axis").Get<int>() != -1) {
-            std::cout << "attr: axis = "
-                      << op->GetAttrMap().at("axis").Get<int>() << std::endl;
-          }
-        }
-        std::cout << "op:" << op->Type() << std::endl;
+        // if (op->Type() == "elementwise_add") {
+        //   if (op->GetAttrMap().at("axis").Get<int>() != -1) {
+        //     std::cout << "attr: axis = "
+        //               << op->GetAttrMap().at("axis").Get<int>() << std::endl;
+        //   }
+        // }
+        // std::cout << "op:" << op->Type() << std::endl;
         if (op->Type() == "elementwise_add" &&
             op->Input("X")[0] == "batch_norm_2.tmp_2") {
           std::cout << " elementwise_add attr size: "
@@ -138,6 +142,8 @@ namespace paddle_mobile {
 namespace test {
 void testElementwiseAdd() {
   std::cout << "----------**********----------" << std::endl;
+  std::cout << "begin to run ElementwiseAddOp Test" << std::endl;
   paddle_mobile::Loader<paddle_mobile::CPU> loader;
   auto program = loader.Load(
       std::string("../../test/models/"
......
@@ -17,6 +17,7 @@ SOFTWARE.
 ==============================================================================*/
 #include "elementwise_add_op_test.h"
+#include "mul_op_test.h"
 #include "framework/executor.h"
 #include "io.h"
 #include "test_helper.h"
@@ -76,5 +77,6 @@ int main() {
   // std::cout << " value of output: " << output_ptr[j] << std::endl;
   //
   paddle_mobile::test::testElementwiseAdd();
+  paddle_mobile::test::testMul();
   return 0;
 }
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "operators/mul_op.h"
#include "test_include.h"
namespace paddle_mobile {
namespace framework {
template <typename Dtype> class TestMulOp {
public:
TestMulOp(const Program<Dtype> &p) : program_(p) {
if (use_optimize_) {
to_predict_program_ = program_.optimizeProgram;
} else {
to_predict_program_ = program_.originProgram;
}
const std::vector<std::shared_ptr<BlockDesc>> blocks =
to_predict_program_->Blocks();
// std::cout << " **block size " << blocks.size() << std::endl;
for (int i = 0; i < blocks.size(); ++i) {
std::shared_ptr<BlockDesc> block_desc = blocks[i];
std::vector<std::shared_ptr<OpDesc>> ops =
block_desc->Ops();
// std::cout << " ops " << ops.size() << std::endl;
for (int j = 0; j < ops.size(); ++j) {
std::shared_ptr<OpDesc> op = ops[j];
if (op->Type() == "mul") {
std::cout << "x_num_col_dims : "
<< op->GetAttrMap()
.at("x_num_col_dims")
.Get<int>()
<< std::endl;
std::cout << "y_num_col_dims : "
<< op->GetAttrMap()
.at("y_num_col_dims")
.Get<int>()
<< std::endl;
std::cout << " Input X is : " << op->Input("X")[0]
<< std::endl;
}
// std::cout << "op:" << op->Type() << std::endl;
if (op->Type() == "mul" &&
op->Input("X")[0] == "pool2d_0.tmp_0") {
std::cout
<< " mul attr size: " << op->GetAttrMap().size()
<< std::endl;
std::cout
<< " inputs size: " << op->GetInputs().size()
<< std::endl;
std::cout
<< " outputs size: " << op->GetOutputs().size()
<< std::endl;
std::cout << " Input X is : " << op->Input("X")[0]
<< std::endl;
std::cout << " Input Y is : " << op->Input("Y")[0]
<< std::endl;
std::cout
<< " Output Out is : " << op->Output("Out")[0]
<< std::endl;
std::cout << "x_num_col_dims : "
<< op->GetAttrMap()
.at("x_num_col_dims")
.Get<int>()
<< std::endl;
std::cout << "y_num_col_dims : "
<< op->GetAttrMap()
.at("y_num_col_dims")
.Get<int>()
<< std::endl;
std::shared_ptr<operators::MulOp<Dtype, float>> mul =
    std::make_shared<operators::MulOp<Dtype, float>>(
        op->Type(), op->GetInputs(), op->GetOutputs(),
        op->GetAttrMap(), program_.scope);
ops_of_block_[*block_desc.get()].push_back(mul);
}
}
}
}
std::shared_ptr<Tensor> predict_mul(Tensor &t1, Tensor &t2) {
// feed
auto scope = program_.scope;
Variable *x_feed_value = scope->Var("pool2d_0.tmp_0");
auto tensor_x = x_feed_value->GetMutable<Tensor>();
tensor_x->ShareDataWith(t1);
Variable *y_feed_value = scope->Var("fc_0.w_0");
auto tensor_y = y_feed_value->GetMutable<Tensor>();
tensor_y->ShareDataWith(t2);
Variable *con_output = scope->Var("fc_0.tmp_0");
Tensor *output_tensor = con_output->GetMutable<Tensor>();
output_tensor->mutable_data<float>({3, 3});
// std::cout << typeid(output_tensor).name() << std::endl;
// std::cout << "output_tensor dims: " << output_tensor->dims()
// << std::endl;
// Wrap the scope-owned output tensor without taking ownership: the
// Scope, not this shared_ptr, remains responsible for deleting it.
std::shared_ptr<Tensor> out_tensor(output_tensor, [](Tensor *) {});
predict_mul(t1, t2, 0);
return out_tensor;
}
private:
const framework::Program<Dtype> program_;
std::shared_ptr<ProgramDesc> to_predict_program_;
std::map<framework::BlockDesc,
std::vector<std::shared_ptr<OperatorBase<Dtype>>>>
ops_of_block_;
bool use_optimize_ = false;
void predict_mul(const Tensor &t1, const Tensor &t2, int block_id) {
std::shared_ptr<BlockDesc> to_predict_block =
to_predict_program_->Block(block_id);
for (int j = 0;
j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
auto op = ops_of_block_[*to_predict_block.get()][j];
std::cout << "op -> run()" << std::endl;
op->Run();
}
}
};
template class TestMulOp<CPU>;
} // namespace framework
namespace test {
void testMul() {
std::cout << "----------**********----------" << std::endl;
std::cout << "begin to run MulOp Test" << std::endl;
paddle_mobile::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(
std::string("../../test/models/"
"image_classification_resnet.inference.model"));
/// input x (3,2,1,1)
paddle_mobile::framework::Tensor inputx;
SetupTensor<float>(&inputx, {3, 2, 1, 1}, static_cast<float>(0),
static_cast<float>(1));
float *inputx_ptr = inputx.data<float>();
/// input y (2,3)
paddle_mobile::framework::Tensor inputy;
SetupTensor<float>(&inputy, {2, 3}, static_cast<float>(0),
static_cast<float>(1));
float *inputy_ptr = inputy.data<float>();
paddle_mobile::framework::TestMulOp<paddle_mobile::CPU> testMulOp(
program);
auto output_mul = testMulOp.predict_mul(inputx, inputy);
float *output_mul_ptr = output_mul->data<float>();
auto dimx_1 = inputx.numel() / inputx.dims()[0];
std::cout << "inputx : " << std::endl;
for (int i = 0; i < inputx.dims()[0]; ++i) {
for (int j = 0; j < dimx_1; ++j) {
std::cout << inputx_ptr[i * dimx_1 + j] << " ";
}
std::cout << std::endl;
}
auto dimy_1 = inputy.numel() / inputy.dims()[0];
std::cout << "inputy : " << std::endl;
for (int i = 0; i < inputy.dims()[0]; ++i) {
for (int j = 0; j < dimy_1; ++j) {
std::cout << inputy_ptr[i * dimy_1 + j] << " ";
}
std::cout << std::endl;
}
auto dim_output_1 = output_mul->numel() / output_mul->dims()[0];
std::cout << "output : " << std::endl;
for (int i = 0; i < output_mul->dims()[0]; ++i) {
for (int j = 0; j < dim_output_1; ++j) {
std::cout << output_mul_ptr[i * dim_output_1 + j] << " ";
}
std::cout << std::endl;
}
/// output (3,3)
std::cout << "output memory size : " << output_mul->memory_size()
<< std::endl;
std::cout << "output numel : " << output_mul->numel() << std::endl;
std::cout << inputx_ptr[0] << " x " << inputy_ptr[0] << " + "
<< inputx_ptr[1] << " x " << inputy_ptr[0 + 3] << " = "
<< output_mul_ptr[0] << std::endl;
}
} // namespace test
} // namespace paddle_mobile
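The closing cout in testMul verifies only out[0][0]. A hedged sketch of the full check it implies, assuming row-major data and the flattened (3,2) x (2,3) = (3,3) multiplication this test performs; check_mul is a hypothetical helper, not part of the test:

#include <cmath>

// Hypothetical full check generalizing the single-element print above:
// out[i*n + j] should equal the sum over p of x[i*k + p] * y[p*n + j].
bool check_mul(const float *x, const float *y, const float *out,
               int m, int k, int n, float tol = 1e-5f) {
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) {
      float expect = 0.f;
      for (int p = 0; p < k; ++p) expect += x[i * k + p] * y[p * n + j];
      if (std::fabs(out[i * n + j] - expect) > tol) return false;
    }
  }
  return true;
}

// Usage inside testMul would be, e.g.:
//   check_mul(inputx_ptr, inputy_ptr, output_mul_ptr, 3, 2, 3);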