Commit a7f97c6a authored by itminner

fix code style; reset build.sh change

Parent f3147f0a
@@ -18,102 +18,96 @@ limitations under the License. */

#include <operators/math/transform.h>

namespace paddle_mobile {
namespace operators {

template <typename T>
struct PReluFunctor {
  explicit PReluFunctor(float slope) { this->slope_ = slope; }
  inline T operator()(T in) const { return in > 0 ? in : in * slope_; }

  float slope_ = 0.0f;
};

/*
 * @b platform-specific implementation; param is passed in from the op layer
 * */
template <>
void PReluKernel<CPU, float>::Compute(const PReluParam &param) const {
  const auto *input_x = param.InputX();
  auto *input_x_ptr = input_x->data<float>();
  auto *out = param.Out();
  auto *out_ptr = out->mutable_data<float>();

  if (param.Slopes().size() == 1) {
    PReluFunctor<float> func_(param.Slopes()[0]);
    math::Transform trans;
    trans(input_x_ptr, input_x_ptr + input_x->numel(), out_ptr, func_);
  } else if (param.Slopes().size() > 1) {
    const int dim_size = input_x->dims().size();
    switch (dim_size) {
      case 0:
        break;
      case 1: {
        const int input_width = input_x->dims()[0];
        math::Transform trans;
#pragma omp parallel for
        for (int w = 0; w < input_width; ++w) {
          out_ptr[w] = input_x_ptr[w] * param.Slopes()[w];
        }
      } break;
      case 2: {
        const int input_height = input_x->dims()[0];
        const int input_width = input_x->dims()[1];
        math::Transform trans;
#pragma omp parallel for
        for (int h = 0; h < input_height; ++h) {
          PReluFunctor<float> func_(param.Slopes()[h]);
          const float *ptr = input_x_ptr + h * input_width;
          float *optr = out_ptr + h * input_width;
          trans(ptr, ptr + input_width, optr, func_);
        }
      } break;
      case 3: {
        const int chan_size = input_x->dims()[0];
        const int input_height = input_x->dims()[1];
        const int input_width = input_x->dims()[2];
        math::Transform trans;
#pragma omp parallel for
        for (int c = 0; c < chan_size; ++c) {
          PReluFunctor<float> func_(param.Slopes()[c]);
          int size = input_height * input_width;
          const float *ptr = input_x_ptr + c * size;
          float *optr = out_ptr + c * size;
          trans(ptr, ptr + size, optr, func_);
        }
      } break;
      case 4:
      default: {
        const int batch_size = input_x->dims()[0];
        const int chan_size = input_x->dims()[1];
        const int input_height = input_x->dims()[2];
        const int input_width = input_x->dims()[3];
        math::Transform trans;
#pragma omp parallel for
        for (int b = 0; b < batch_size; ++b) {
          for (int c = 0; c < chan_size; ++c) {
            PReluFunctor<float> func_(param.Slopes()[c]);
            int size = input_height * input_width;
            // NCHW plane offset is (b * chan_size + c) * size; the original
            // b * c * size collapses to 0 whenever b == 0 or c == 0.
            const float *ptr = input_x_ptr + (b * chan_size + c) * size;
            float *optr = out_ptr + (b * chan_size + c) * size;
            trans(ptr, ptr + size, optr, func_);
          }
        }
      } break;  // case 4, default
    }
  }
}
}  // namespace operators
}  // namespace paddle_mobile

#endif
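Note: PReluFunctor above is the standard PReLU activation, f(x) = x for x > 0 and slope * x otherwise. A minimal self-contained sketch of the same element-wise pass, using std::transform in place of paddle-mobile's math::Transform (assumed here to have a compatible call signature):

#include <algorithm>
#include <cstdio>

// Standalone PReLU functor, mirroring the one in the kernel above.
struct PReluFunctor {
  explicit PReluFunctor(float slope) : slope_(slope) {}
  float operator()(float in) const { return in > 0 ? in : in * slope_; }
  float slope_;
};

int main() {
  float in[4] = {-2.0f, -0.5f, 0.5f, 2.0f};
  float out[4];
  // One shared slope, as in the Slopes().size() == 1 branch above.
  std::transform(in, in + 4, out, PReluFunctor(0.25f));
  for (float v : out) std::printf("%g ", v);  // prints: -0.5 -0.125 0.5 2
  std::printf("\n");
  return 0;
}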
@@ -14,114 +14,111 @@ limitations under the License. */

#ifdef RESIZE_OP

#include <cmath>

#include "operators/kernel/resize_kernel.h"

namespace paddle_mobile {
namespace operators {

void BiLinearResizeTensor(const float* src, const int src_height,
                          const int src_width, float* dst, const int dst_height,
                          const int dst_width) {
  const float scale_w = src_width / (float)dst_width;
  const float scale_h = src_height / (float)dst_height;
  float* dst_data = dst;
  const float* src_data = src;

  for (int dst_h = 0; dst_h < dst_height; ++dst_h) {
    float fh = dst_h * scale_h;
    int src_h = std::floor(fh);

    fh -= src_h;
    const float w_h0 = std::abs((float)1.0 - fh);
    const float w_h1 = std::abs(fh);

    const int dst_offset_1 = dst_h * dst_width;
    const int src_offset_1 = src_h * src_width;
    float* dst_data_ptr = dst_data + dst_offset_1;

    for (int dst_w = 0; dst_w < dst_width; ++dst_w) {
      float fw = dst_w * scale_w;
      int src_w = std::floor(fw);
      fw -= src_w;
      const float w_w0 = std::abs((float)1.0 - fw);
      const float w_w1 = std::abs(fw);
      float dst_value = 0;

      const int src_idx = src_offset_1 + src_w;
      dst_value += (w_h0 * w_w0 * src_data[src_idx]);
      int flag = 0;
      if (src_w + 1 < src_width) {
        dst_value += (w_h0 * w_w1 * src_data[src_idx + 1]);
        ++flag;
      }
      if (src_h + 1 < src_height) {
        dst_value += (w_h1 * w_w0 * src_data[src_idx + src_width]);
        ++flag;
      }
      if (flag > 1) {
        dst_value += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
      }
      *(dst_data_ptr++) = dst_value;
    }
  }
}

void ResizeTensor(const Tensor* src, const int src_n, const int src_c,
                  Tensor* dst, const int dst_n, const int dst_c) {
  framework::DDim in_dims = src->dims();
  const int src_chans = in_dims[1];
  const int src_height = in_dims[2];
  const int src_width = in_dims[3];
  const int src_offset = (src_n * src_chans + src_c) * src_height * src_width;

  framework::DDim out_dims = dst->dims();
  const int dst_chans = out_dims[1];
  const int dst_height = out_dims[2];
  const int dst_width = out_dims[3];
  const int dst_offset = (dst_n * dst_chans + dst_c) * dst_height * dst_width;

  const auto* src_ptr = src->data<float>();
  // mutable_data allocates the output buffer if Resize changed its shape;
  // the original data<float>() call assumed it was already allocated.
  auto* dst_ptr = dst->mutable_data<float>();
  const auto* src_data = &(src_ptr[src_offset]);
  auto* dst_data = &(dst_ptr[dst_offset]);
  BiLinearResizeTensor(src_data, src_height, src_width, dst_data, dst_height,
                       dst_width);
}

void ResizeTensor(const Tensor* src, Tensor* dst) {
  framework::DDim in_dims = src->dims();
  framework::DDim out_dims = dst->dims();
  PADDLE_MOBILE_ENFORCE(in_dims[0] == out_dims[0],
                        "src tensor batch num not equal to dst tensor");
  PADDLE_MOBILE_ENFORCE(in_dims[1] == out_dims[1],
                        "src tensor channel num not equal to dst tensor");
  for (int n = 0, batch_num = in_dims[0]; n < batch_num; ++n) {
    for (int c = 0, chan_num = in_dims[1]; c < chan_num; ++c) {
      ResizeTensor(src, n, c, dst, n, c);
    }
  }
}

template <>
void ResizeKernel<CPU, float>::Compute(const ResizeParam& param) const {
  const auto* input_x = param.InputX();
  const auto& input_x_dims = input_x->dims();
  auto* out = param.Out();
  framework::DDim out_dims = CalOutputShape(param);

  out->Resize(out_dims);
  ResizeTensor(input_x, out);
}

}  // namespace operators
}  // namespace paddle_mobile

#endif
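Note: BiLinearResizeTensor weights the four neighbouring source pixels by the fractional offsets fh and fw; the boundary checks skip neighbours that fall outside the source image. A small standalone check of the weight computation (BilinearWeights is a hypothetical helper, not part of the kernel), confirming the interior weights form a convex combination:

#include <cassert>
#include <cmath>

// Hypothetical helper: the four bilinear weights for fractional offsets
// fh, fw in [0, 1), matching w_h0/w_h1 and w_w0/w_w1 in the kernel above.
void BilinearWeights(float fh, float fw, float w[4]) {
  w[0] = (1 - fh) * (1 - fw);  // top-left neighbour
  w[1] = (1 - fh) * fw;        // top-right neighbour
  w[2] = fh * (1 - fw);        // bottom-left neighbour
  w[3] = fh * fw;              // bottom-right neighbour
}

int main() {
  float w[4];
  BilinearWeights(0.25f, 0.5f, w);  // w = {0.375, 0.375, 0.125, 0.125}
  // In the interior all four neighbours exist and the weights sum to 1.
  assert(std::fabs(w[0] + w[1] + w[2] + w[3] - 1.0f) < 1e-6f);
  return 0;
}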
@@ -17,135 +17,130 @@ limitations under the License. */

#include "operators/kernel/scale_kernel.h"

namespace paddle_mobile {
namespace operators {

/*
 * @b platform-specific implementation; param is passed in from the op layer
 * */
template <>
void ScaleKernel<CPU, float>::Compute(const ScaleParam &param) const {
  const auto *input_x = param.InputX();
  auto *input_x_ptr = input_x->data<float>();
  auto *out = param.Out();
  auto *out_ptr = out->mutable_data<float>();

  const vector<float> scales = param.Scales();
  bool has_bias = param.HasBias();

  const int dim_size = input_x->dims().size();
  switch (dim_size) {
    case 1: {
      const int input_width = input_x->dims()[0];
      if (has_bias) {
        const vector<float> biases = param.Biases();
#pragma omp parallel for
        for (int w = 0; w < input_width; w++) {
          out_ptr[w] = input_x_ptr[w] * scales[w] + biases[w];
        }
      } else {
#pragma omp parallel for
        for (int w = 0; w < input_width; w++) {
          out_ptr[w] = input_x_ptr[w] * scales[w];
        }
      }
    } break;
    case 2: {
      const int input_height = input_x->dims()[0];
      const int input_width = input_x->dims()[1];

      if (has_bias) {
        const vector<float> biases = param.Biases();
#pragma omp parallel for
        for (int h = 0; h < input_height; ++h) {
          const float *iptr = input_x_ptr + h * input_width;
          float *optr = out_ptr + h * input_width;
          for (int w = 0; w < input_width; ++w) {
            optr[w] = iptr[w] * scales[w] + biases[w];
          }
        }
      } else {
#pragma omp parallel for
        for (int h = 0; h < input_height; ++h) {
          const float *iptr = input_x_ptr + h * input_width;
          float *optr = out_ptr + h * input_width;
          for (int w = 0; w < input_width; ++w) {
            optr[w] = iptr[w] * scales[w];
          }
        }
      }
    } break;
    case 3: {
      const int chan_size = input_x->dims()[0];
      const int input_height = input_x->dims()[1];
      const int input_width = input_x->dims()[2];
      int size = input_width * input_height;

      if (has_bias) {
        const vector<float> biases = param.Biases();
#pragma omp parallel for
        for (int c = 0; c < chan_size; ++c) {
          const float *iptr = input_x_ptr + c * size;
          float *optr = out_ptr + c * size;
          for (int i = 0; i < size; ++i) {
            optr[i] = iptr[i] * scales[c] + biases[c];
          }
        }
      } else {
#pragma omp parallel for
        for (int c = 0; c < chan_size; ++c) {
          const float *iptr = input_x_ptr + c * size;
          float *optr = out_ptr + c * size;
          for (int i = 0; i < size; ++i) {
            optr[i] = iptr[i] * scales[c];
          }
        }
      }
    } break;

    case 4: {
      const int batch_size = input_x->dims()[0];
      // NCHW: channel is dim 1, spatial dims are 2 and 3 (the original read
      // dims()[0]..dims()[2] here, which mixed up batch and channel sizes).
      const int chan_size = input_x->dims()[1];
      const int input_height = input_x->dims()[2];
      const int input_width = input_x->dims()[3];
      int size = input_width * input_height;

      if (has_bias) {
        const vector<float> biases = param.Biases();

#pragma omp parallel for
        for (int b = 0; b < batch_size; ++b) {
          for (int c = 0; c < chan_size; ++c) {
            const float *iptr = input_x_ptr + (b * chan_size + c) * size;
            float *optr = out_ptr + (b * chan_size + c) * size;
            for (int i = 0; i < size; ++i) {
              optr[i] = iptr[i] * scales[c] + biases[c];
            }
          }
        }
      } else {
#pragma omp parallel for
        for (int b = 0; b < batch_size; ++b) {
          for (int c = 0; c < chan_size; ++c) {
            const float *iptr = input_x_ptr + (b * chan_size + c) * size;
            float *optr = out_ptr + (b * chan_size + c) * size;
            for (int i = 0; i < size; ++i) {
              optr[i] = iptr[i] * scales[c];
            }
          }
        }
      }
    } break;
    default:
      break;
  }
}
}  // namespace operators
}  // namespace paddle_mobile

#endif
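Note: both the PRelu and Scale kernels walk an NCHW tensor plane by plane. The plane offset for batch b and channel c is (b * chan_size + c) * H * W; the original b * c * size indexing evaluates to 0 whenever b or c is 0, so the whole first batch and first channel would alias the same plane. A tiny standalone check of the corrected offset math:

#include <cassert>

// Offset of the (b, c) plane in a contiguous NCHW buffer.
int PlaneOffset(int b, int c, int chan_size, int height, int width) {
  return (b * chan_size + c) * height * width;
}

int main() {
  const int C = 3, H = 4, W = 5;
  // Planes are laid out back to back: (0,0), (0,1), (0,2), (1,0), ...
  assert(PlaneOffset(0, 1, C, H, W) == H * W);
  assert(PlaneOffset(1, 0, C, H, W) == C * H * W);
  // The old expression b * c * size gives 0 for (1, 0), aliasing planes.
  assert(1 * 0 * (H * W) == 0);
  return 0;
}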
@@ -17,7 +17,6 @@ limitations under the License. */

#include "operators/kernel/slice_kernel.h"

namespace paddle_mobile {
namespace operators {}
}  // namespace paddle_mobile
#endif
@@ -18,12 +18,12 @@ limitations under the License. */

#pragma once

namespace paddle_mobile {
namespace operators {

template <typename DeviceType, typename T>
class PReluKernel : public framework::OpKernelBase<DeviceType, PReluParam> {
 public:
  void Compute(const PReluParam& param) const;
};
}  // namespace operators
}  // namespace paddle_mobile
@@ -22,60 +22,58 @@ limitations under the License. */

#include "operators/op_param.h"

namespace paddle_mobile {
namespace operators {

inline framework::DDim CalOutputShape(const ResizeParam &param) {
  const auto *input_x = param.InputX();
  const auto &input_x_dims = input_x->dims();
  auto *out = param.Out();
  framework::DDim out_dims = out->dims();
  const auto *input_shape = param.InputShape();

  if (input_shape) {
    auto *shape_data = input_shape->data<int>();
    framework::Tensor cpu_shape_tensor;
    auto shape =
        std::vector<int>(shape_data, shape_data + input_shape->numel());
    const int in_batch_size = input_x->dims()[0];
    const int in_chan_size = input_x->dims()[1];
    const int in_height = input_x->dims()[2];
    const int in_width = input_x->dims()[3];

    int out_height = 0;
    int out_width = 0;
    bool is_pyramid_test = param.IsPyramidTest();
    if (is_pyramid_test == false) {
      out_height = param.Height();
      out_width = param.Width();
      PADDLE_MOBILE_ENFORCE(out_height > 0, "output height is required");
      PADDLE_MOBILE_ENFORCE(out_width > 0, "output width is required");
    } else {
      float out_height_scale = param.OutHeightScale();
      float out_width_scale = param.OutWidthScale();
      PADDLE_MOBILE_ENFORCE(out_height_scale > 0,
                            "output height scale is required");
      PADDLE_MOBILE_ENFORCE(out_width_scale > 0,
                            "output width scale is required");

      out_height = int(out_height_scale * in_height);
      out_width = int(out_width_scale * in_width);
    }

    // Build the shape from the computed output size; the original used
    // in_height and in_width here, leaving out_height/out_width unused.
    out_dims = framework::make_ddim(
        {in_batch_size, in_chan_size, out_height, out_width});
  }
  return out_dims;
}

template <typename DeviceType, typename T>
class ResizeKernel : public framework::OpKernelBase<DeviceType, ResizeParam> {
 public:
  void Compute(const ResizeParam &param) const;
};
}  // namespace operators
}  // namespace paddle_mobile
#endif
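With the corrected make_ddim call above, the pyramid path scales only the spatial dimensions: for example, a 1x3x224x224 input with out_height_scale = out_width_scale = 0.5 yields an output shape of {1, 3, 112, 112}.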
@@ -18,12 +18,12 @@ limitations under the License. */

#pragma once

namespace paddle_mobile {
namespace operators {

template <typename DeviceType, typename T>
class ScaleKernel : public framework::OpKernelBase<DeviceType, ScaleParam> {
 public:
  void Compute(const ScaleParam& param) const;
};
}  // namespace operators
}  // namespace paddle_mobile
@@ -18,12 +18,12 @@ limitations under the License. */

#pragma once

namespace paddle_mobile {
namespace operators {

template <typename DeviceType, typename T>
class SliceKernel : public framework::OpKernelBase<DeviceType, SliceParam> {
 public:
  void Compute(const SliceParam& param) const {}
};
}  // namespace operators
}  // namespace paddle_mobile
@@ -730,123 +730,122 @@ class ReshapeParam : public OpParam {

#endif

#ifdef SCALE_OP
class ScaleParam : public OpParam {
 public:
  ScaleParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
             const AttributeMap &attrs, const Scope &scope) {
    input_x_ = InputXFrom<LoDTensor>(inputs, scope);
    input_bias_ = InputBiasFrom<framework::LoDTensor>(inputs, scope);
    out_ = OutFrom<LoDTensor>(outputs, scope);
    inplace_ = GetAttr<bool>("inplace", attrs);
    has_bias_ = GetAttr<bool>("has_bias", attrs);
    scales_ = GetAttr<vector<float>>("scales", attrs);
    biases_ = GetAttr<vector<float>>("biases", attrs);
  }

  const Tensor *InputX() const { return input_x_; }
  const Tensor *InputBias() const { return input_bias_; }
  Tensor *Out() const { return out_; }
  const bool &Inplace() const { return inplace_; }
  const bool &HasBias() const { return has_bias_; }
  const vector<float> &Scales() const { return scales_; }
  const vector<float> &Biases() const { return biases_; }

 private:
  Tensor *input_x_;
  Tensor *input_bias_;
  Tensor *out_;
  bool inplace_;
  bool has_bias_;
  vector<float> scales_;
  vector<float> biases_;
};
#endif

#ifdef SLICE_OP
class SliceParam : public OpParam {
 public:
  SliceParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
             const AttributeMap &attrs, const Scope &scope) {
    input_x_ = InputXFrom<LoDTensor>(inputs, scope);
    input_shape_ = InputShapeFrom<LoDTensor>(inputs, scope);
    out_ = OutFrom<LoDTensor>(outputs, scope);
    axis_ = GetAttr<int>("axis", attrs);
    slice_points_ = GetAttr<vector<int>>("slice_points", attrs);
    inplace_ = GetAttr<bool>("inplace", attrs);
  }

  const Tensor *InputX() const { return input_x_; }
  const Tensor *InputShape() const { return input_shape_; }
  Tensor *Out() const { return out_; }
  const int &Axis() const { return axis_; }
  const vector<int> &SlicePoints() const { return slice_points_; }
  const bool &Inplace() const { return inplace_; }

 private:
  Tensor *input_x_;
  Tensor *input_shape_;
  Tensor *out_;
  int axis_;
  vector<int> slice_points_;
  bool inplace_;
};
#endif

#ifdef RESIZE_OP
class ResizeParam : public OpParam {
 public:
  ResizeParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
              const AttributeMap &attrs, const Scope &scope) {
    input_x_ = InputXFrom<LoDTensor>(inputs, scope);
    input_shape_ = InputShapeFrom<LoDTensor>(inputs, scope);
    out_ = OutFrom<LoDTensor>(outputs, scope);
    is_pyramid_test_ = GetAttr<bool>("is_pyramid_test", attrs);
    height_ = GetAttr<int>("height", attrs);
    width_ = GetAttr<int>("width", attrs);
    out_height_scale_ = GetAttr<float>("out_height_scale", attrs);
    out_width_scale_ = GetAttr<float>("out_width_scale", attrs);
  }

  const Tensor *InputX() const { return input_x_; }
  const Tensor *InputShape() const { return input_shape_; }
  Tensor *Out() const { return out_; }
  const bool &IsPyramidTest() const { return is_pyramid_test_; }
  const int &Height() const { return height_; }
  const int &Width() const { return width_; }
  const float &OutHeightScale() const { return out_height_scale_; }
  const float &OutWidthScale() const { return out_width_scale_; }

 private:
  Tensor *input_x_;
  Tensor *input_shape_;
  Tensor *out_;
  bool is_pyramid_test_;
  int height_;
  int width_;
  float out_height_scale_;
  float out_width_scale_;
};
#endif

#ifdef RELU_OP
/*
 * @b the op layer instantiates this param and passes it down to the kernel
@@ -871,22 +870,22 @@ class ReluParam : public OpParam {

#ifdef PRELU_OP
class PReluParam : public OpParam {
 public:
  PReluParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
             const AttributeMap &attrs, const Scope &scope) {
    input_x_ = InputXFrom<LoDTensor>(inputs, scope);
    out_ = OutFrom<LoDTensor>(outputs, scope);
    slopes_ = GetAttr<vector<float>>("slopes", attrs);
  }
  const Tensor *InputX() const { return input_x_; }
  Tensor *Out() const { return out_; }
  const vector<float> &Slopes() const { return slopes_; }

 private:
  Tensor *input_x_;
  Tensor *out_;
  vector<float> slopes_;
};
#endif

......
@@ -16,15 +16,15 @@ limitations under the License. */

#include "operators/prelu_op.h"

namespace paddle_mobile {
namespace operators {

template <typename Dtype, typename T>
void PReluOp<Dtype, T>::InferShape() const {
  auto input_dims = this->param_.InputX()->dims();
  this->param_.Out()->Resize(input_dims);
}
template class PReluOp<CPU, float>;
}  // namespace operators
}  // namespace paddle_mobile

/*
......
@@ -23,32 +23,31 @@ limitations under the License. */

#include "operators/op_param.h"

namespace paddle_mobile {
namespace operators {

using paddle_mobile::framework::Tensor;

template <typename DeviceType, typename T>
class PReluOp
    : public framework::OperatorWithKernel<
          DeviceType, PReluParam, operators::PReluKernel<DeviceType, T>> {
 public:
  PReluOp(const std::string &type, const VariableNameMap &inputs,
          const VariableNameMap &outputs, const framework::AttributeMap &attrs,
          std::shared_ptr<framework::Scope> scope)
      : framework::OperatorWithKernel<DeviceType, PReluParam,
                                      operators::PReluKernel<DeviceType, T>>(
            type, inputs, outputs, attrs, scope) {}

  using framework::OperatorWithKernel<
      DeviceType, PReluParam,
      operators::PReluKernel<DeviceType, T>>::OperatorWithKernel;
  void InferShape() const override;

 protected:
};

}  // namespace operators
}  // namespace paddle_mobile

#endif
@@ -17,15 +17,15 @@ limitations under the License. */

#include "operators/resize_op.h"

#include <vector>

namespace paddle_mobile {
namespace operators {

template <typename Dtype, typename T>
void ResizeOp<Dtype, T>::InferShape() const {
  auto out_dims = CalOutputShape(this->param_);
  this->param_.Out()->Resize(out_dims);
}
template class ResizeOp<CPU, float>;
}  // namespace operators
}  // namespace paddle_mobile

namespace ops = paddle_mobile::operators;
......
@@ -23,30 +23,30 @@ limitations under the License. */

#include "operators/op_param.h"

namespace paddle_mobile {
namespace operators {

using paddle_mobile::framework::Tensor;

template <typename DeviceType, typename T>
class ResizeOp
    : public framework::OperatorWithKernel<
          DeviceType, ResizeParam, operators::ResizeKernel<DeviceType, T>> {
 public:
  ResizeOp(const std::string &type, const VariableNameMap &inputs,
           const VariableNameMap &outputs, const framework::AttributeMap attrs,
           std::shared_ptr<framework::Scope> scope)
      : framework::OperatorWithKernel<DeviceType, ResizeParam,
                                      operators::ResizeKernel<DeviceType, T>>(
            type, inputs, outputs, attrs, scope) {}

  using framework::OperatorWithKernel<
      DeviceType, ResizeParam,
      operators::ResizeKernel<DeviceType, T>>::OperatorWithKernel;
  void InferShape() const override;

 protected:
};

}  // namespace operators
}  // namespace paddle_mobile

#endif
@@ -17,15 +17,15 @@ limitations under the License. */

#include "operators/scale_op.h"

#include <vector>

namespace paddle_mobile {
namespace operators {

template <typename Dtype, typename T>
void ScaleOp<Dtype, T>::InferShape() const {
  auto input_dims = this->param_.InputX()->dims();
  this->param_.Out()->Resize(input_dims);
}
template class ScaleOp<CPU, float>;
}  // namespace operators
}  // namespace paddle_mobile

namespace ops = paddle_mobile::operators;
......
@@ -23,32 +23,31 @@ limitations under the License. */

#include "operators/op_param.h"

namespace paddle_mobile {
namespace operators {

using paddle_mobile::framework::Tensor;

template <typename DeviceType, typename T>
class ScaleOp
    : public framework::OperatorWithKernel<
          DeviceType, ScaleParam, operators::ScaleKernel<DeviceType, T>> {
 public:
  ScaleOp(const std::string &type, const VariableNameMap &inputs,
          const VariableNameMap &outputs, const framework::AttributeMap &attrs,
          std::shared_ptr<framework::Scope> scope)
      : framework::OperatorWithKernel<DeviceType, ScaleParam,
                                      operators::ScaleKernel<DeviceType, T>>(
            type, inputs, outputs, attrs, scope) {}

  using framework::OperatorWithKernel<
      DeviceType, ScaleParam,
      operators::ScaleKernel<DeviceType, T>>::OperatorWithKernel;
  void InferShape() const override;

 protected:
};

}  // namespace operators
}  // namespace paddle_mobile

#endif
@@ -17,15 +17,14 @@ limitations under the License. */

#include "operators/slice_op.h"

#include <vector>

namespace paddle_mobile {
namespace operators {

template <typename Dtype, typename T>
void SliceOp<Dtype, T>::InferShape() const {
  /// todo: add InputShape() detection.
}
template class SliceOp<CPU, float>;
}  // namespace operators
}  // namespace paddle_mobile

namespace ops = paddle_mobile::operators;
......
@@ -23,32 +23,31 @@ limitations under the License. */

#include "operators/op_param.h"

namespace paddle_mobile {
namespace operators {

using paddle_mobile::framework::Tensor;

template <typename DeviceType, typename T>
class SliceOp
    : public framework::OperatorWithKernel<
          DeviceType, SliceParam, operators::SliceKernel<DeviceType, T>> {
 public:
  SliceOp(const std::string &type, const VariableNameMap &inputs,
          const VariableNameMap &outputs, const framework::AttributeMap &attrs,
          std::shared_ptr<framework::Scope> scope)
      : framework::OperatorWithKernel<DeviceType, SliceParam,
                                      operators::SliceKernel<DeviceType, T>>(
            type, inputs, outputs, attrs, scope) {}

  using framework::OperatorWithKernel<
      DeviceType, SliceParam,
      operators::SliceKernel<DeviceType, T>>::OperatorWithKernel;
  void InferShape() const override;

 protected:
};

}  // namespace operators
}  // namespace paddle_mobile

#endif
@@ -17,42 +17,42 @@ limitations under the License. */

#include "operators/prelu_op.h"

int main() {
  paddle_mobile::Loader<paddle_mobile::CPU> loader;
  auto program = loader.Load(g_resnet);
  PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                        "program file read fail");

  Executor4Test<paddle_mobile::CPU,
                paddle_mobile::operators::PReluOp<paddle_mobile::CPU, float>>
      executor(program, "prelu");

  // 1. input_tensors;
  vector<Tensor> input_tensors;

  Tensor input1;
  auto input1_data = CreateInput<float>(&input1, {1, 2, 3, 4}, -1, 1);
  input_tensors.push_back(input1);

  // 2. input_names
  vector<string> input_names({
      "batch_norm_0.tmp_2",
  });

  // 3. output_names
  vector<string> output_names({"batch_norm_0.tmp_3"});

  // 4. out_dims;
  vector<DDim> out_ddims;
  auto out_ddim = paddle_mobile::framework::make_ddim({1, 2, 3, 4});
  out_ddims.push_back(out_ddim);

  auto output = executor.Predict<LoDTensor>(input_tensors, input_names,
                                            output_names, out_ddims);

  auto output0_data = output[0]->data<float>();
  for (int j = 0; j < output[0]->numel(); ++j) {
    DLOG << " value of output: " << output0_data[j];
  }
  return 0;
}
@@ -16,32 +16,32 @@ limitations under the License. */

#include "operators/resize_op.h"

int main() {
  paddle_mobile::Loader<paddle_mobile::CPU> loader;
  auto program = loader.Load(std::string(g_mobilenet_ssd));
  if (program.originProgram == nullptr) {
    DLOG << "program read file";
  }

  Executor4Test<paddle_mobile::CPU,
                paddle_mobile::operators::ResizeOp<paddle_mobile::CPU, float>>
      executor(program, "resize");

  paddle_mobile::framework::Tensor input;
  SetupTensor<float>(&input, {2, 3, 3, 2}, static_cast<float>(0),
                     static_cast<float>(1));
  auto input_ptr = input.data<float>();
  auto out_ddim = paddle_mobile::framework::make_ddim({2, 9, 2});
  auto output =
      executor.Predict(input, "transpose_0.tmp_0", "reshape_0.tmp_0", out_ddim);
  auto *output_ptr = output->data<float>();

  DLOG << "input : ";
  for (int j = 0; j < input.numel(); ++j) {
    DLOG << " index " << j << " : " << input_ptr[j];
  }

  DLOG << "output : ";
  for (int j = 0; j < output->numel(); ++j) {
    DLOG << " index " << j << " : " << output_ptr[j];
  }
  return 0;
}
@@ -15,4 +15,4 @@ limitations under the License. */

#include "../test_include.h"
#include "operators/slice_op.h"

int main() {}
#!/usr/bin/env bash

build_for_mac() {
    if [ ! `which brew` ]; then
@@ -14,9 +13,6 @@ build_for_mac() {
        return
    fi
    fi
    PLATFORM="x86"
    MODE="Release"
    BUILD_DIR=../build/release/"${PLATFORM}"
@@ -36,8 +32,8 @@ build_for_mac() {

build_for_android() {
    #rm -rf "../build"
    if [ -z "${NDK_ROOT}" ]; then
        echo "NDK_ROOT not found!"
        exit -1
    fi
@@ -60,12 +56,10 @@ build_for_android() {
    MODE="Release"
    ANDROID_PLATFORM_VERSION="android-22"
    TOOLCHAIN_FILE="./tools/android-cmake/android.toolchain.cmake"
    ANDROID_ARM_MODE="arm"
    if [ $# -eq 1 ]; then
        cmake .. \
            -B"../build/release/${PLATFORM}" \
            -DANDROID_ABI="${ABI}" \
@@ -75,7 +69,7 @@ build_for_android() {
            -DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
            -DANDROID_STL=c++_static \
            -DANDROID=true \
            -DNET=$1 \
            -D"${ARM_PLATFORM}"=true
    else
@@ -95,7 +89,7 @@ build_for_android() {
}

build_for_ios() {
    # rm -rf "../build"
    PLATFORM="ios"
    MODE="Release"
    BUILD_DIR=../build/release/"${PLATFORM}"
@@ -104,7 +98,6 @@ build_for_ios() {
    CXX_FLAGS="-fobjc-abi-version=2 -fobjc-arc -std=gnu++14 -stdlib=libc++ -isysroot ${CMAKE_OSX_SYSROOT}"
    mkdir -p "${BUILD_DIR}"
    if [ $# -eq 1 ]; then
        cmake .. \
            -B"${BUILD_DIR}" \
            -DCMAKE_BUILD_TYPE="${MODE}" \
@@ -112,7 +105,7 @@ build_for_ios() {
            -DIOS_PLATFORM=OS \
            -DCMAKE_C_FLAGS="${C_FLAGS}" \
            -DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
            -DNET=$1 \
            -DIS_IOS="true"
    else
        cmake .. \
@@ -126,6 +119,9 @@ build_for_ios() {
    fi
    cd "${BUILD_DIR}"
    make -j 8
    cd ./build
    # generate the symbol table
    ranlib *.a
}

build_error() {
@@ -134,16 +130,12 @@ build_error() {

if [ $# -lt 1 ]; then
    echo "error: target missing!"
    echo "available targets: ios|android"
    echo "sample usage: ./build.sh android"
else
    if [ $# -eq 2 ]; then
        if [ $2 != "googlenet" -a $2 != "mobilenet" -a $2 != "yolo" -a $2 != "squeezenet" -a $2 != "resnet" ]; then
            if [ $1 = "android" ]; then
                build_for_android
            elif [ $1 = "ios" ]; then
                build_for_ios
@@ -151,11 +143,7 @@ else
                build_error
            fi
        else
            if [ $1 = "android" ]; then
                build_for_android $2
            elif [ $1 = "ios" ]; then
                build_for_ios $2
@@ -164,11 +152,7 @@ else
            fi
        fi
    else
        if [ $1 = "android" ]; then
            build_for_android
        elif [ $1 = "ios" ]; then
            build_for_ios
@@ -176,4 +160,4 @@ else
            build_error
        fi
    fi
fi
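Note on the dispatch above: inside build_for_android and build_for_ios, $1 refers to the function's own first positional argument, so a call such as ./build.sh android googlenet reaches build_for_android googlenet and configures CMake with -DNET=googlenet.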