Commit 8735c538 authored by: itminner

fix code style; reset build.sh change

Parent 0d3490e4
@@ -18,23 +18,21 @@ limitations under the License. */
#include <operators/math/transform.h>
namespace paddle_mobile {
namespace operators {
template <typename T>
struct PReluFunctor {
  explicit PReluFunctor(float slope) { this->slope_ = slope; }
  inline T operator()(T in) const { return in > 0 ? in : in * slope_; }
  float slope_ = 0.0f;
};
/*
 * @b Platform-specific implementation; param is passed in from the op layer.
 * */
template <>
void PReluKernel<CPU, float>::Compute(const PReluParam &param) const {
  const auto *input_x = param.InputX();
  auto *input_x_ptr = input_x->data<float>();
  auto *out = param.Out();
@@ -46,7 +44,7 @@ namespace paddle_mobile {
    trans(input_x_ptr, input_x_ptr + input_x->numel(), out_ptr, func_);
  } else if (param.Slopes().size() > 1) {
    const int dim_size = input_x->dims().size();
    switch (dim_size) {
      case 0:
        break;
      case 1: {
@@ -54,44 +52,40 @@ namespace paddle_mobile {
        math::Transform trans;
#pragma omp parallel for
        for (int w = 0; w < input_width; ++w) {
          out_ptr[w] = input_x_ptr[w] * param.Slopes()[w];
        }
      } break;
      case 2: {
        const int input_height = input_x->dims()[0];
        const int input_width = input_x->dims()[1];
        math::Transform trans;
#pragma omp parallel for
        for (int h = 0; h < input_height; ++h) {
          PReluFunctor<float> func_(param.Slopes()[h]);
          const float *ptr = input_x_ptr + h * input_width;
          float *optr = out_ptr + +h * input_width;
          trans(ptr, ptr + input_width, optr, func_);
        }
      } break;
      case 3: {
        const int chan_size = input_x->dims()[0];
        const int input_height = input_x->dims()[1];
        const int input_width = input_x->dims()[2];
        math::Transform trans;
#pragma omp parallel for
        for (int c = 0; c < chan_size; ++c) {
          PReluFunctor<float> func_(param.Slopes()[c]);
          int size = input_height * input_width;
          const float *ptr = input_x_ptr + c * size;
          float *optr = out_ptr + c * size;
          trans(ptr, ptr + size, optr, func_);
        }
      } break;
      case 4:
      default: {
        const int batch_size = input_x->dims()[0];
        const int chan_size = input_x->dims()[1];
        const int input_height = input_x->dims()[2];
@@ -99,21 +93,21 @@ namespace paddle_mobile {
        math::Transform trans;
#pragma omp parallel for
        for (int b = 0; b < batch_size; ++b) {
          for (int c = 0; c < chan_size; ++c) {
            PReluFunctor<float> func_(param.Slopes()[c]);
            int size = input_height * input_width;
            const float *ptr = input_x_ptr + b * c * size;
            float *optr = out_ptr + +b * c * size;
            trans(ptr, ptr + size, optr, func_);
          }
        }
      } // case 3,default
        break;
    }
  }
}
} // namespace operators
} // namespace paddle_mobile
#endif
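
For reference, the kernel above applies PReLU element-wise (out = in when in > 0, otherwise in * slope), using one slope per channel once Slopes() holds more than one value. The following standalone sketch is not part of this commit; the function name and the conventional (b * channels + c) NCHW offset are illustrative assumptions, not the kernel's own API:

#include <cstddef>
#include <vector>

// Per-channel PReLU over an NCHW buffer: out = in > 0 ? in : slope[c] * in.
// A minimal illustration of the per-channel computation; buffer and slope
// names are made up for this sketch.
void PReluNCHW(const float* in, float* out, const std::vector<float>& slopes,
               int batch, int channels, int height, int width) {
  const int plane = height * width;
  for (int b = 0; b < batch; ++b) {
    for (int c = 0; c < channels; ++c) {
      const float slope = slopes[c];
      const float* src = in + (b * channels + c) * plane;
      float* dst = out + (b * channels + c) * plane;
      for (int i = 0; i < plane; ++i) {
        dst[i] = src[i] > 0 ? src[i] : src[i] * slope;
      }
    }
  }
}
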
@@ -14,21 +14,20 @@ limitations under the License. */
#ifdef RESIZE_OP
#include <cmath>
#include "operators/kernel/resize_kernel.h"
namespace paddle_mobile {
namespace operators {
void BiLinearResizeTensor(const float* src, const int src_height,
                          const int src_width, float* dst, const int dst_height,
                          const int dst_width) {
  const float scale_w = src_width / (float)dst_width;
  const float scale_h = src_height / (float)dst_height;
  float* dst_data = dst;
  const float* src_data = src;
  for (int dst_h = 0; dst_h < dst_height; ++dst_h) {
    float fh = dst_h * scale_h;
    int src_h = std::floor(fh);
@@ -42,7 +41,7 @@ namespace paddle_mobile {
    float* dst_data_ptr = dst_data + dst_offset_1;
    for (int dst_w = 0; dst_w < dst_width; ++dst_w) {
      float fw = dst_w * scale_w;
      int src_w = std::floor(fw);
      fw -= src_w;
@@ -54,25 +53,25 @@ namespace paddle_mobile {
      const int src_idx = src_offset_1 + src_w;
      dst_value += (w_h0 * w_w0 * src_data[src_idx]);
      int flag = 0;
      if (src_w + 1 < src_width) {
        dst_value += (w_h0 * w_w1 * src_data[src_idx + 1]);
        ++flag;
      }
      if (src_h + 1 < src_height) {
        dst_value += (w_h1 * w_w0 * src_data[src_idx + src_width]);
        ++flag;
      }
      if (flag > 1) {
        dst_value += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
        // ++flag;
      }
      *(dst_data_ptr++) = dst_value;
    }
  }
}
void ResizeTensor(const Tensor* src, const int src_n, const int src_c,
                  Tensor* dst, const int dst_n, const int dst_c) {
  framework::DDim in_dims = src->dims();
  const int src_chans = in_dims[1];
@@ -86,42 +85,40 @@ namespace paddle_mobile {
  const int dst_width = out_dims[3];
  const int dst_offset = (dst_n * dst_chans + dst_c) * dst_height * dst_width;
  const auto* src_ptr = src->data<float>();
  auto* dst_ptr = dst->data<float>();
  const auto* src_data = &(src_ptr[src_offset]);
  auto* dst_data = &(dst_ptr[dst_offset]);
  BiLinearResizeTensor(src_data, src_height, src_width, dst_data, dst_height,
                       dst_width);
}
void ResizeTensor(const Tensor* src, Tensor* dst) {
  framework::DDim in_dims = src->dims();
  framework::DDim out_dims = dst->dims();
  PADDLE_MOBILE_ENFORCE(in_dims[0] == out_dims[0],
                        "src tensor batch num not equal to dst tensor");
  PADDLE_MOBILE_ENFORCE(in_dims[1] == out_dims[1],
                        "src tensor channel num not equal to dst tensor");
  for (int n = 0, batch_num = in_dims[0]; n < batch_num; ++n) {
    for (int c = 0, chan_num = in_dims[1]; c < chan_num; ++c) {
      ResizeTensor(src, n, c, dst, n, c);
    }
  }
}
template <>
void ResizeKernel<CPU, float>::Compute(const ResizeParam& param) const {
  const auto* input_x = param.InputX();
  const auto& input_x_dims = input_x->dims();
  auto* out = param.Out();
  framework::DDim out_dims = CalOutputShape(param);
  out->Resize(out_dims);
  ResizeTensor(input_x, out);
}
} // namespace operators
} // namespace paddle_mobile
#endif
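
The BiLinearResizeTensor routine above weights up to four source neighbours by the fractional source coordinates (the w_h0/w_h1/w_w0/w_w1 factors come from the elided hunk). Below is a minimal sketch of the per-pixel arithmetic; it clamps at the image border instead of the skip-if-out-of-range checks used above, and the name BilinearSample is made up for illustration:

// Bilinear interpolation of one output pixel at fractional source
// coordinates (fh, fw); src is a src_height x src_width plane.
// Border handling here is clamping, chosen only to keep the sketch short.
float BilinearSample(const float* src, int src_height, int src_width,
                     float fh, float fw) {
  int h0 = static_cast<int>(fh), w0 = static_cast<int>(fw);
  int h1 = (h0 + 1 < src_height) ? h0 + 1 : h0;
  int w1 = (w0 + 1 < src_width) ? w0 + 1 : w0;
  float dh = fh - h0, dw = fw - w0;  // fractional offsets in [0, 1)
  return (1 - dh) * (1 - dw) * src[h0 * src_width + w0] +
         (1 - dh) * dw * src[h0 * src_width + w1] +
         dh * (1 - dw) * src[h1 * src_width + w0] +
         dh * dw * src[h1 * src_width + w1];
}
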
@@ -17,13 +17,13 @@ limitations under the License. */
#include "operators/kernel/scale_kernel.h"
namespace paddle_mobile {
namespace operators {
/*
 * @b Platform-specific implementation; param is passed in from the op layer.
 * */
template <>
void ScaleKernel<CPU, float>::Compute(const ScaleParam &param) const {
  const auto *input_x = param.InputX();
  auto *input_x_ptr = input_x->data<float>();
  auto *out = param.Out();
@@ -33,7 +33,7 @@ namespace paddle_mobile {
  bool has_bias = param.HasBias();
  const int dim_size = input_x->dims().size();
  switch (dim_size) {
    case 1: {
      const int input_width = input_x->dims()[0];
      if (has_bias) {
@@ -48,8 +48,7 @@ namespace paddle_mobile {
          out_ptr[w] = input_x_ptr[w] * scales[w];
        }
      }
    } break;
    case 2: {
      const int input_height = input_x->dims()[0];
      const int input_width = input_x->dims()[1];
@@ -74,8 +73,7 @@ namespace paddle_mobile {
          }
        }
      }
    } break;
    case 3: {
      const int chan_size = input_x->dims()[0];
      const int input_height = input_x->dims()[1];
@@ -103,11 +101,9 @@ namespace paddle_mobile {
          }
        }
      }
    } break;
    case 4: {
      const int batch_size = input_x->dims()[0];
      const int chan_size = input_x->dims()[0];
      const int input_height = input_x->dims()[1];
@@ -139,13 +135,12 @@ namespace paddle_mobile {
          }
        }
      }
    } break;
    default:
      break;
  }
}
} // namespace operators
} // namespace paddle_mobile
#endif
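
As the 1-D case above shows, the Scale kernel multiplies each element by a per-index scale; when HasBias() is true the elided branch presumably adds a per-index bias along the same dimension. A minimal sketch under that assumption (Scale1D is an illustrative name, not part of the kernel API):

#include <vector>

// 1-D scale as in case 1 above: out[w] = in[w] * scales[w] (+ biases[w]).
// The bias form is an assumption based on the elided has_bias branch.
void Scale1D(const float* in, float* out, const std::vector<float>& scales,
             const std::vector<float>& biases, bool has_bias, int width) {
  for (int w = 0; w < width; ++w) {
    out[w] = in[w] * scales[w] + (has_bias ? biases[w] : 0.0f);
  }
}
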
@@ -17,7 +17,6 @@ limitations under the License. */
#include "operators/kernel/slice_kernel.h"
namespace paddle_mobile {
namespace operators {}
} // namespace paddle_mobile
#endif
@@ -18,12 +18,12 @@ limitations under the License. */
#pragma once;
namespace paddle_mobile {
namespace operators {
template <typename DeviceType, typename T>
class PReluKernel : public framework::OpKernelBase<DeviceType, PReluParam> {
 public:
  void Compute(const PReluParam& param) const;
};
} // namespace operators
} // namespace paddle_mobile
@@ -22,9 +22,9 @@ limitations under the License. */
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
inline framework::DDim CalOutputShape(const ResizeParam &param) {
  const auto *input_x = param.InputX();
  const auto &input_x_dims = input_x->dims();
  auto *out = param.Out();
@@ -34,7 +34,8 @@ namespace paddle_mobile {
  if (input_shape) {
    auto *shape_data = input_shape->data<int>();
    framework::Tensor cpu_shape_tensor;
    auto shape =
        std::vector<int>(shape_data, shape_data + input_shape->numel());
    const int in_batch_size = input_x->dims()[0];
    const int in_chan_size = input_x->dims()[1];
    const int in_height = input_x->dims()[2];
@@ -43,13 +44,11 @@ namespace paddle_mobile {
    int out_height = 0;
    int out_width = 0;
    bool is_pyramid_test = param.IsPyramidTest();
    if (is_pyramid_test == false) {
      out_height = param.Height();
      out_width = param.Width();
      PADDLE_MOBILE_ENFORCE(out_height > 0, "output height is required");
      PADDLE_MOBILE_ENFORCE(out_width > 0, "output width is required");
    } else {
      float out_height_scale = param.OutHeightScale();
@@ -59,23 +58,22 @@ namespace paddle_mobile {
      PADDLE_MOBILE_ENFORCE(out_width_scale > 0,
                            "output width scale is required");
      out_height = int(out_height_scale * in_height);
      out_width = int(out_width_scale * in_width);
    }
    out_dims = framework::make_ddim(
        {in_batch_size, in_chan_size, in_height, in_width});
  }
  return out_dims;
}
template <typename DeviceType, typename T>
class ResizeKernel : public framework::OpKernelBase<DeviceType, ResizeParam> {
 public:
  void Compute(const ResizeParam &param) const;
};
} // namespace operators
} // namespace paddle_mobile
#endif
@@ -18,12 +18,12 @@ limitations under the License. */
#pragma once;
namespace paddle_mobile {
namespace operators {
template <typename DeviceType, typename T>
class ScaleKernel : public framework::OpKernelBase<DeviceType, ScaleParam> {
 public:
  void Compute(const ScaleParam& param) const;
};
} // namespace operators
} // namespace paddle_mobile
@@ -18,12 +18,12 @@ limitations under the License. */
#pragma once;
namespace paddle_mobile {
namespace operators {
template <typename DeviceType, typename T>
class SliceKernel : public framework::OpKernelBase<DeviceType, SliceParam> {
 public:
  void Compute(const SliceParam& param) const {}
};
} // namespace operators
} // namespace paddle_mobile
@@ -730,7 +730,7 @@ class ReshapeParam : public OpParam {
#endif
#ifdef SCALE_OP
class ScaleParam : public OpParam {
 public:
  ScaleParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
             const AttributeMap &attrs, const Scope &scope) {
@@ -765,11 +765,11 @@ class ReshapeParam : public OpParam {
  bool has_bias_;
  vector<float> scales_;
  vector<float> biases_;
};
#endif
#ifdef SLICE_OP
class SliceParam : public OpParam {
 public:
  SliceParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
             const AttributeMap &attrs, const Scope &scope) {
@@ -800,12 +800,12 @@ class ReshapeParam : public OpParam {
  int axis_;
  vector<int> slice_points_;
  bool inplace_;
};
#endif
#ifdef RESIZE_OP
class ResizeParam : public OpParam {
 public:
  ResizeParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
              const AttributeMap &attrs, const Scope &scope) {
    input_x_ = InputXFrom<LoDTensor>(inputs, scope);
@@ -834,7 +834,7 @@ public:
  const float &OutWidthScale() const { return out_width_scale_; }
 private:
  Tensor *input_x_;
  Tensor *input_shape_;
  Tensor *out_;
@@ -846,7 +846,6 @@ private:
};
#endif
#ifdef RELU_OP
/*
 * @b The op layer instantiates this param and passes it to the kernel layer.
@@ -871,7 +870,7 @@ class ReluParam : public OpParam {
#ifdef PRELU_OP
class PReluParam : public OpParam {
 public:
  PReluParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
             const AttributeMap &attrs, const Scope &scope) {
    input_x_ = InputXFrom<LoDTensor>(inputs, scope);
@@ -883,7 +882,7 @@ public:
  Tensor *Out() const { return out_; }
  const vector<float> &Slopes() const { return slopes_; }
 private:
  Tensor *input_x_;
  Tensor *out_;
  vector<float> slopes_;
...
@@ -16,15 +16,15 @@ limitations under the License. */
#include "operators/prelu_op.h"
namespace paddle_mobile {
namespace operators {
template <typename Dtype, typename T>
void PReluOp<Dtype, T>::InferShape() const {
  auto input_dims = this->param_.InputX()->dims();
  this->param_.Out()->Resize(input_dims);
}
template class PReluOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
/*
...
@@ -23,18 +23,17 @@ limitations under the License. */
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
using paddle_mobile::framework::Tensor;
template <typename DeviceType, typename T>
class PReluOp
    : public framework::OperatorWithKernel<
          DeviceType, PReluParam, operators::PReluKernel<DeviceType, T>> {
 public:
  PReluOp(const std::string &type, const VariableNameMap &inputs,
          const VariableNameMap &outputs, const framework::AttributeMap &attrs,
          std::shared_ptr<framework::Scope> scope)
      : framework::OperatorWithKernel<DeviceType, PReluParam,
                                      operators::PReluKernel<DeviceType, T>>(
@@ -46,9 +45,9 @@ namespace paddle_mobile {
  void InferShape() const override;
 protected:
};
} // namespace operators
} // namespace paddle_mobile
#endif
@@ -17,15 +17,15 @@ limitations under the License. */
#include "operators/resize_op.h"
#include <vector>
namespace paddle_mobile {
namespace operators {
template <typename Dtype, typename T>
void ResizeOp<Dtype, T>::InferShape() const {
  auto out_dims = CalOutputShape(this->param_);
  this->param_.Out()->Resize(out_dims);
}
template class ResizeOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
...
@@ -23,12 +23,12 @@ limitations under the License. */
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
using paddle_mobile::framework::Tensor;
template <typename DeviceType, typename T>
class ResizeOp
    : public framework::OperatorWithKernel<
          DeviceType, ResizeParam, operators::ResizeKernel<DeviceType, T>> {
 public:
@@ -36,8 +36,8 @@ namespace paddle_mobile {
           const VariableNameMap &outputs, const framework::AttributeMap attrs,
           std::shared_ptr<framework::Scope> scope)
      : framework::OperatorWithKernel<DeviceType, ResizeParam,
                                      operators::ResizeKernel<DeviceType, T>>(
            type, inputs, outputs, attrs, scope) {}
  using framework::OperatorWithKernel<
      DeviceType, ResizeParam,
@@ -45,8 +45,8 @@ namespace paddle_mobile {
  void InferShape() const override;
 protected:
};
} // namespace operators
} // namespace paddle_mobile
#endif
@@ -17,15 +17,15 @@ limitations under the License. */
#include "operators/scale_op.h"
#include <vector>
namespace paddle_mobile {
namespace operators {
template <typename Dtype, typename T>
void ScaleOp<Dtype, T>::InferShape() const {
  auto input_dims = this->param_.InputX()->dims();
  this->param_.Out()->Resize(input_dims);
}
template class ScaleOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
...
@@ -23,18 +23,17 @@ limitations under the License. */
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
using paddle_mobile::framework::Tensor;
template <typename DeviceType, typename T>
class ScaleOp
    : public framework::OperatorWithKernel<
          DeviceType, ScaleParam, operators::ScaleKernel<DeviceType, T>> {
 public:
  ScaleOp(const std::string &type, const VariableNameMap &inputs,
          const VariableNameMap &outputs, const framework::AttributeMap &attrs,
          std::shared_ptr<framework::Scope> scope)
      : framework::OperatorWithKernel<DeviceType, ScaleParam,
                                      operators::ScaleKernel<DeviceType, T>>(
@@ -46,9 +45,9 @@ namespace paddle_mobile {
  void InferShape() const override;
 protected:
};
} // namespace operators
} // namespace paddle_mobile
#endif
@@ -17,15 +17,14 @@ limitations under the License. */
#include "operators/slice_op.h"
#include <vector>
namespace paddle_mobile {
namespace operators {
template <typename Dtype, typename T>
void SliceOp<Dtype, T>::InferShape() const {
  /// todo: add InputShape() detection.
}
template class SliceOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
...
@@ -23,18 +23,17 @@ limitations under the License. */
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
using paddle_mobile::framework::Tensor;
template <typename DeviceType, typename T>
class SliceOp
    : public framework::OperatorWithKernel<
          DeviceType, SliceParam, operators::SliceKernel<DeviceType, T>> {
 public:
  SliceOp(const std::string &type, const VariableNameMap &inputs,
          const VariableNameMap &outputs, const framework::AttributeMap &attrs,
          std::shared_ptr<framework::Scope> scope)
      : framework::OperatorWithKernel<DeviceType, SliceParam,
                                      operators::SliceKernel<DeviceType, T>>(
@@ -46,9 +45,9 @@ namespace paddle_mobile {
  void InferShape() const override;
 protected:
};
} // namespace operators
} // namespace paddle_mobile
#endif
@@ -15,4 +15,4 @@ limitations under the License. */
#include "../test_include.h"
#include "operators/slice_op.h"
int main() {}
#!/usr/bin/env bash
build_for_mac() {
    if [ ! `which brew` ]; then
@@ -14,9 +13,6 @@ build_for_mac() {
            return
        fi
    fi
    PLATFORM="x86"
    MODE="Release"
    BUILD_DIR=../build/release/"${PLATFORM}"
@@ -36,8 +32,8 @@ build_for_mac() {
build_for_android() {
    #rm -rf "../build"
    if [ -z "${NDK_ROOT}" ]; then
        echo "NDK_ROOT not found!"
        exit -1
    fi
@@ -60,12 +56,10 @@ build_for_android() {
    MODE="Release"
    ANDROID_PLATFORM_VERSION="android-22"
    TOOLCHAIN_FILE="./tools/android-cmake/android.toolchain.cmake"
    ANDROID_ARM_MODE="arm"
    if [ $# -eq 1 ]; then
        cmake .. \
            -B"../build/release/${PLATFORM}" \
            -DANDROID_ABI="${ABI}" \
@@ -75,7 +69,7 @@ build_for_android() {
            -DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
            -DANDROID_STL=c++_static \
            -DANDROID=true \
            -DNET=$1 \
            -D"${ARM_PLATFORM}"=true
    else
@@ -95,7 +89,7 @@ build_for_android() {
}
build_for_ios() {
    # rm -rf "../build"
    PLATFORM="ios"
    MODE="Release"
    BUILD_DIR=../build/release/"${PLATFORM}"
@@ -104,7 +98,6 @@ build_for_ios() {
    CXX_FLAGS="-fobjc-abi-version=2 -fobjc-arc -std=gnu++14 -stdlib=libc++ -isysroot ${CMAKE_OSX_SYSROOT}"
    mkdir -p "${BUILD_DIR}"
    if [ $# -eq 1 ]; then
        cmake .. \
            -B"${BUILD_DIR}" \
            -DCMAKE_BUILD_TYPE="${MODE}" \
@@ -112,7 +105,7 @@ build_for_ios() {
            -DIOS_PLATFORM=OS \
            -DCMAKE_C_FLAGS="${C_FLAGS}" \
            -DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
            -DNET=$1 \
            -DIS_IOS="true"
    else
        cmake .. \
@@ -126,6 +119,9 @@ build_for_ios() {
    fi
    cd "${BUILD_DIR}"
    make -j 8
    cd ./build
    # generate the symbol table
    ranlib *.a
}
build_error() {
@@ -134,16 +130,12 @@ build_error() {
if [ $# -lt 1 ]; then
    echo "error: target missing!"
    echo "available targets: ios|android"
    echo "sample usage: ./build.sh android"
else
    if [ $# -eq 2 ]; then
        if [ $2 != "googlenet" -a $2 != "mobilenet" -a $2 != "yolo" -a $2 != "squeezenet" -a $2 != "resnet" ]; then
            if [ $1 = "android" ]; then
                build_for_android
            elif [ $1 = "ios" ]; then
                build_for_ios
@@ -151,11 +143,7 @@ else
                build_error
            fi
        else
            if [ $1 = "android" ]; then
                build_for_android $2
            elif [ $1 = "ios" ]; then
                build_for_ios $2
@@ -164,11 +152,7 @@ else
            fi
        fi
    else
        if [ $1 = "android" ]; then
            build_for_android
        elif [ $1 = "ios" ]; then
            build_for_ios
...
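
After this commit the script recognises only the android and ios targets, with an optional network name as the second argument; for example, ./build.sh android googlenet dispatches to build_for_android with the network name, which the CMake invocation above passes through as -DNET=$1.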