Commit 813cf38d authored by eclipsess

conflict

@@ -33,21 +33,15 @@ struct OpInfo {
   }
 };
 
-template <typename Dtype>
-class OpInfoMap;
-
-template <typename Dtype>
-static OpInfoMap<Dtype> *g_op_info_map = nullptr;
-
 template <typename Dtype>
 class OpInfoMap {
  public:
-  static OpInfoMap &Instance() {
-    LOG(paddle_mobile::kLOG_DEBUG1) << " TODO: fix bug";
-    if (g_op_info_map<Dtype> == nullptr) {
-      g_op_info_map<Dtype> = new OpInfoMap();
+  static OpInfoMap<Dtype> *Instance() {
+    static OpInfoMap<Dtype> *s_instance = nullptr;
+    if (s_instance == nullptr) {
+      s_instance = new OpInfoMap();
     }
-    return *g_op_info_map<Dtype>;
+    return s_instance;
   }
 
   bool Has(const std::string &op_type) const {
......
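The removed `g_op_info_map<Dtype>` was a `static` namespace-scope variable template: besides requiring C++14, its internal linkage gives every translation unit its own copy of the pointer, so ops registered in one TU would be invisible in another — presumably the bug the deleted "TODO: fix bug" log referred to. The function-local static has exactly one instance per Dtype across the whole program. A minimal standalone sketch of the pattern (hypothetical names, not paddle-mobile code):

#include <string>
#include <unordered_map>

// Hypothetical stand-in for OpInfoMap<Dtype>. The function-local static is
// created on first call and shared program-wide, unlike a `static`
// namespace-scope variable template, which has one copy per translation unit.
template <typename Dtype>
class Registry {
 public:
  static Registry<Dtype> *Instance() {
    static Registry<Dtype> *s_instance = nullptr;  // one per Dtype, program-wide
    if (s_instance == nullptr) {
      s_instance = new Registry();  // intentionally never freed
    }
    return s_instance;
  }
  bool Has(const std::string &key) const { return map_.count(key) != 0; }
  void Insert(const std::string &key, int value) { map_[key] = value; }

 private:
  Registry() = default;
  std::unordered_map<std::string, int> map_;
};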
@@ -35,7 +35,7 @@ class OperatorRegistrarRecursive;
 template <typename Dtype, typename... ARGS>
 struct OperatorRegistrar : public Registrar {
   explicit OperatorRegistrar(const std::string& op_type) {
-    if (OpInfoMap<Dtype>::Instance().Has(op_type)) {
+    if (OpInfoMap<Dtype>::Instance()->Has(op_type)) {
       LOG(paddle_mobile::kLOG_DEBUG1)
           << op_type << " is registered more than once.";
       return;
@@ -47,7 +47,7 @@ struct OperatorRegistrar : public Registrar {
     }
     OpInfo<Dtype> info;
     OperatorRegistrarRecursive<Dtype, 0, false, ARGS...>(op_type, &info);
-    OpInfoMap<Dtype>::Instance().Insert(op_type, info);
+    OpInfoMap<Dtype>::Instance()->Insert(op_type, info);
   }
 };
@@ -95,10 +95,10 @@ class OpRegistry {
     LOG(paddle_mobile::kLOG_DEBUG1) << " output size: " << outputs.size();
     LOG(paddle_mobile::kLOG_DEBUG1) << " attr size: " << attrs.size();
     LOG(paddle_mobile::kLOG_DEBUG1)
-        << " OpInfoMap size: " << OpInfoMap<Dtype>::Instance().map().size();
+        << " OpInfoMap size: " << OpInfoMap<Dtype>::Instance()->map().size();
     LOG(paddle_mobile::kLOG_DEBUG1) << " has type: " << type << " "
-                                    << OpInfoMap<Dtype>::Instance().Has(type);
-    auto& info = OpInfoMap<Dtype>::Instance().Get(type);
+                                    << OpInfoMap<Dtype>::Instance()->Has(type);
+    auto& info = OpInfoMap<Dtype>::Instance()->Get(type);
     auto op = info.Creator()(type, inputs, outputs, attrs, scope);
     return std::shared_ptr<OperatorBase<Dtype>>(op);
   }
......
@@ -132,13 +132,6 @@ class Tensor {
         reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
   }
 
-  inline void *mutable_data() {
-    //  PADDLE_ENFORCE(this->holder_ != nullptr,
-    //                 "Cannot invoke mutable data if current hold
-    //                 nothing.");
-    return mutable_data(holder_->type());
-  }
-
   /**
    * @brief   Return a pointer to mutable memory block.
    *
......
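With the untyped `mutable_data()` overload removed, callers must name the element type explicitly, which is exactly what the softmax change further down does (`Y->mutable_data<float>()` instead of `static_cast<float *>(Y->mutable_data())`). A hedged usage sketch, using only Tensor calls that appear elsewhere in this diff:

// Assumed usage of the remaining typed accessor (sketch, not from the diff):
// allocate or reuse a float buffer sized by the tensor's dims, then write it.
paddle_mobile::framework::Tensor y;
y.Resize(paddle_mobile::framework::make_ddim({1, 4, 60, 60}));
float *out_ptr = y.mutable_data<float>();  // replaces the untyped overload
out_ptr[0] = 0.5f;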
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "../sigmoid_kernel.h"
#if __ARM_NEON
#include "../../math/math_func_neon.h"
#endif
namespace paddle_mobile {
namespace operators {
using framework::DDim;
using framework::Tensor;
void sigmoid(const Tensor *X, Tensor *Y) {
#if __ARM_NEON
DLOG << "step1";
const float *input = X->data<float>();
DLOG << "step11";
float *output = Y->mutable_data<float>();
DLOG << "step2";
const DDim &dDim = X->dims();
DLOG << "step3";
int axis_index = 1;
if (dDim.size() < 4) {
axis_index = 0;
}
DLOG << "step4";
DDim outer_ddim =
paddle_mobile::framework::slice_ddim(dDim, 0, axis_index + 1);
DDim inner_ddim =
paddle_mobile::framework::slice_ddim(dDim, axis_index + 1, dDim.size());
DLOG << "step5";
int out_size = paddle_mobile::framework::product(outer_ddim);
int inner_size = paddle_mobile::framework::product(inner_ddim);
DLOG << "step6";
#pragma omp parallel for
DLOG << "outsize=" << out_size;
DLOG << "innersize=" << inner_size;
for (int i = 0; i < out_size; ++i) {
const float *input_outer_ptr = input + i * inner_size;
float *output_outer_ptr = output + i * inner_size;
int nn = inner_size >> 2;
int remain = inner_size - (nn << 2);
float32x4_t _one = vdupq_n_f32(1.f);
for (; nn > 0; nn--) {
float32x4_t data = vld1q_f32(input_outer_ptr);
data = vnegq_f32(data);
data = exp_ps(data);
data = vaddq_f32(data, _one);
float32x4_t out_data = vrecpeq_f32(data);
out_data = vmulq_f32(vrecpsq_f32(data, out_data), out_data);
vst1q_f32(output_outer_ptr, out_data);
input_outer_ptr += 4;
output_outer_ptr += 4;
}
for (; remain > 0; remain--) {
*output_outer_ptr = 1.f / (1.f + exp(-*input_outer_ptr));
output_outer_ptr++;
input_outer_ptr++;
}
}
#endif
}
template <>
void SigmoidKernel<CPU, float>::Compute(const SigmoidParam &param) const {
const Tensor *in_x = param.InputX();
Tensor *out = param.Out();
auto x_dims = in_x->dims();
out->Resize(x_dims);
sigmoid(in_x, out);
}
template class SigmoidKernel<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
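The division in the vectorized loop avoids a hardware divide: `vrecpeq_f32` returns a rough (roughly 8-bit accurate) estimate of 1/d, and `vrecpsq_f32(d, e)` computes 2 - d*e, so multiplying the two performs one Newton-Raphson refinement that roughly doubles the accurate bits. A scalar model of that step, for intuition only (names are illustrative):

// Scalar model of the NEON reciprocal refinement used above:
//     e' = e * (2 - d * e)
// where e is a rough estimate of 1/d (as produced by vrecpeq_f32) and
// (2 - d * e) is what vrecpsq_f32(d, e) returns.
float refine_reciprocal(float d, float e /* rough estimate of 1/d */) {
  return (2.0f - d * e) * e;  // one Newton-Raphson step toward 1/d
}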
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include "framework/operator.h"
#include "operators/op_param.h"

namespace paddle_mobile {
namespace operators {

using framework::OpKernelBase;

void sigmoid(const Tensor *X, Tensor *Y);

template <typename DeviceType, typename T>
class SigmoidKernel : public OpKernelBase<DeviceType, SigmoidParam> {
 public:
  void Compute(const SigmoidParam &param) const override;
};
}  // namespace operators
}  // namespace paddle_mobile
@@ -21,6 +21,8 @@ namespace paddle_mobile {
 namespace operators {
 
 using framework::OpKernelBase;
 
+void sigmoid(const Tensor *X, Tensor *Y);
+
 template <typename DeviceType, typename T>
 class SoftmaxKernel : public OpKernelBase<DeviceType, SoftmaxParam> {
  public:
......
@@ -11,11 +11,11 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 #include "operators/math/softmax.h"
 #include "common/types.h"
 #if __ARM_NEON
 #include <math.h>
+#include <algorithm>
 #include "operators/math/math_func_neon.h"
 #endif
@@ -108,7 +108,7 @@ class SoftmaxFuntor<CPU, T> {
       // sum exp
       sum(exp_sub_max, sumptr, inner_size, out_size);
       // div
-      auto *out_ptr = static_cast<float *>(Y->mutable_data());
+      auto *out_ptr = Y->mutable_data<float>();
       for (int l = 0; l < out_size; ++l) {
         const float *input_outer_ptr = exp_sub_max + l * inner_size;
         float *output_outer_ptr = out_ptr + l * inner_size;
......
@@ -542,6 +542,22 @@ class SoftmaxParam : public OpParam {
   Tensor *input_x_;
   Tensor *out_;
 };
 
+class SigmoidParam : public OpParam {
+ public:
+  SigmoidParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
+               const framework::AttributeMap &attrs,
+               const framework::Scope &scope) {
+    input_x_ = InputXFrom<framework::Tensor>(inputs, scope);
+    out_ = OutFrom<framework::Tensor>(outputs, scope);
+  }
+  const Tensor *InputX() const { return input_x_; }
+  Tensor *Out() const { return out_; }
+
+ private:
+  Tensor *input_x_;
+  Tensor *out_;
+};
+
 class MultiClassNMSParam : public OpParam {
  public:
   MultiClassNMSParam(const VariableNameMap &inputs,
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "operators/sigmoid_op.h"
namespace paddle_mobile {
namespace operators {
template <typename DeviceType, typename T>
void SigmoidOp<DeviceType, T>::InferShape() const {
param_.Out()->Resize(param_.InputX()->dims());
}
template class SigmoidOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
USE_OP(sigmoid);
REGISTER_OPERATOR(sigmoid, ops::SigmoidOp);
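REGISTER_OPERATOR presumably routes through the OperatorRegistrar shown in the op_registry hunk above, whose constructor builds an OpInfo and inserts it into OpInfoMap<Dtype> under the op type string. A hand-written sketch of what such an expansion might look like (hypothetical; the real macro and its namespace may differ):

// Hypothetical expansion sketch, not the actual macro body: a static
// registrar whose constructor runs at program start and registers "sigmoid".
static paddle_mobile::framework::OperatorRegistrar<
    paddle_mobile::CPU,
    paddle_mobile::operators::SigmoidOp<paddle_mobile::CPU, float>>
    g_sigmoid_registrar("sigmoid");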
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <framework/operator.h>
#include <operators/op_param.h>
#include <string>
#include "operators/kernel/sigmoid_kernel.h"

namespace paddle_mobile {
namespace operators {

template <typename DeviceType, typename T>
class SigmoidOp : public framework::OperatorWithKernel<DeviceType> {
 public:
  SigmoidOp(const std::string &type, const VariableNameMap &inputs,
            const VariableNameMap &outputs,
            const framework::AttributeMap &attrs,
            std::shared_ptr<framework::Scope> scope)
      : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
                                                  scope),
        param_(inputs, outputs, attrs, *scope) {}

  using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;

  void InferShape() const override;

  void Run() const {
    operators::SigmoidKernel<DeviceType, T> kernel;
    kernel.Compute(param_);
    this->ClearVariables({"X"});
  }

 private:
  SigmoidParam param_;
};
}  // namespace operators
}  // namespace paddle_mobile
@@ -82,3 +82,7 @@ target_link_libraries(test-enforce paddle-mobile)
 # gen test
 ADD_EXECUTABLE(test-googlenet net/test_googlenet.cpp test_helper.h test_include.h executor_for_test.h)
 target_link_libraries(test-googlenet paddle-mobile)
+
+# gen test
+ADD_EXECUTABLE(test-sigmoid operators/test_sigmoid_op.cpp test_include.h)
+target_link_libraries(test-sigmoid paddle-mobile)
@@ -17,12 +17,14 @@ limitations under the License. */
 #include <string>
 #include <vector>
+#include "./io.h"
 #include "common/log.h"
-#include "io.h"
+#include "framework/op_registry.h"
 #include "operators/conv_op.h"
 #include "operators/pool_op.h"
 #include "operators/relu_op.h"
 #include "operators/reshape_op.h"
+#include "operators/sigmoid_op.h"
 #include "operators/softmax_op.h"
 #include "operators/transpose_op.h"
@@ -58,9 +60,12 @@ class Executor4Test : public Executor<DeviceType> {
     for (std::shared_ptr<OpDesc> op : ops) {
       if (op->Type() == op_type) {
         /// test first meeting op in program
-        std::shared_ptr<OpType> op_ptr = std::make_shared<OpType>(
-            op->Type(), op->GetInputs(), op->GetOutputs(), op->GetAttrMap(),
-            this->program_.scope);
+        std::shared_ptr<paddle_mobile::framework::OperatorBase<DeviceType>>
+            op_ptr = paddle_mobile::framework::OpRegistry<
+                paddle_mobile::CPU>::CreateOp(op->Type(), op->GetInputs(),
+                                              op->GetOutputs(),
+                                              op->GetAttrMap(),
+                                              this->program_.scope);
         this->ops_of_block_[*block_desc.get()].push_back(op_ptr);
         break;
       }
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "../../src/operators/kernel/sigmoid_kernel.h"
#include "../test_helper.h"
#include "./io.h"
int main() {
paddle_mobile::framework::Tensor input;
paddle_mobile::framework::Tensor output;
DLOG << 1;
SetupTensor<float>(&input, {1, 4, 60, 60}, static_cast<float>(0),
static_cast<float>(1));
DLOG << 2;
auto out_ddim = paddle_mobile::framework::make_ddim({1, 4, 60, 60});
output.Resize(out_ddim);
DLOG << 3;
paddle_mobile::operators::sigmoid(&input, &output);
DLOG << 4;
auto *output_ptr = output.data<float>();
for (int j = 0; j < output.numel(); ++j) {
DLOG << " value of output: " << output_ptr[j];
}
DLOG << 5;
return 0;
}
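The test only prints values; a sanity check one could bolt on (not part of this commit) is that sigmoid maps every finite input into (0, 1), and for inputs drawn from [0, 1) the outputs should fall in [sigmoid(0), sigmoid(1)) = [0.5, ~0.7311):

// Possible extension of the test above (hypothetical helper, not in this
// commit): assert every output lies in sigmoid's range and is finite.
#include <cassert>
#include <cmath>

void check_sigmoid_range(const float *out, int n) {
  for (int i = 0; i < n; ++i) {
    assert(std::isfinite(out[i]));
    assert(out[i] > 0.0f && out[i] < 1.0f);  // global sigmoid range
  }
}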