Commit fdb477e2 authored by 刘托

Merge branch 'refactor-eltwise' into 'master'

Refactor eltwise op.

See merge request !428
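The heart of this MR is a renumbering of EltwiseType so that the C++ enum and both Python converters share one encoding (SUM=0, SUB=1, PROD=2, DIV=3, MIN=4, MAX=5, NEG=6, ABS=7, SQR_DIFF=8, NONE=9), together with a simpler functor interface: the scalar operand now arrives through the constructor as the "x" argument, and a missing second input is signaled with nullptr. A standalone sanity check of the numbering against the Caffe converter's remap, with values copied from the hunks below (a sketch for illustration, not part of the MR):

// Cross-check of the new shared type numbering. Caffe's eltwise enum is
// PROD=0, SUM=1, MAX=2, hence the converter's math_type_mode {0: 2, 1: 0, 2: 5}.
#include <cassert>

enum EltwiseType {
  SUM = 0, SUB = 1, PROD = 2, DIV = 3, MIN = 4,
  MAX = 5, NEG = 6, ABS = 7, SQR_DIFF = 8, NONE = 9,
};

int main() {
  assert(static_cast<int>(PROD) == 2);  // Caffe 0 -> MACE 2
  assert(static_cast<int>(SUM) == 0);   // Caffe 1 -> MACE 0
  assert(static_cast<int>(MAX) == 5);   // Caffe 2 -> MACE 5
  return 0;
}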
@@ -18,6 +18,7 @@
 #include <algorithm>
 #include <memory>
 #include <vector>
+#include <utility>
 
 #include "mace/core/future.h"
 #include "mace/core/tensor.h"
@@ -30,216 +31,331 @@ namespace mace {
 namespace kernels {
 
 enum EltwiseType {
-  PROD = 0,
-  SUM = 1,
-  MAX = 2,
-  MIN = 3,
-  SUB = 4,
-  DIV = 5,
+  SUM = 0,
+  SUB = 1,
+  PROD = 2,
+  DIV = 3,
+  MIN = 4,
+  MAX = 5,
   NEG = 6,
   ABS = 7,
   SQR_DIFF = 8,
+  NONE = 9,
 };
 
-struct EltwiseFunctorBase {
-  EltwiseFunctorBase(const EltwiseType type,
-                     const std::vector<float> &coeff)
-      : type_(type), coeff_(coeff) {}
-
-  EltwiseType type_;
-  std::vector<float> coeff_;
-};
-
-template <DeviceType D, typename T>
-struct EltwiseFunctor : EltwiseFunctorBase {
-  EltwiseFunctor(const EltwiseType type,
-                 const std::vector<float> &coeff)
-      : EltwiseFunctorBase(type, coeff) {}
-
-  void operator()(const Tensor *input0,
-                  const Tensor *input1,
-                  const index_t start_axis,
-                  const bool is_scaler,
-                  const float value,
-                  const bool swap,
-                  Tensor *output,
-                  StatsFuture *future) {
-    if (is_scaler) {
-      Tensor::MappingGuard input0_guard(input0);
-      Tensor::MappingGuard output_guard(output);
-      const T *input0_ptr = input0->data<T>();
-      T *output_ptr = output->mutable_data<T>();
-      const index_t num = input0->size();
-      switch (type_) {
-        case PROD:
-#pragma omp parallel for
-          for (index_t i = 0; i < num; ++i) {
-            output_ptr[i] = input0_ptr[i] * value;
-          }
-          break;
-        case SUM:
-          if (coeff_.empty()) {
-#pragma omp parallel for
-            for (index_t i = 0; i < num; ++i) {
-              output_ptr[i] = input0_ptr[i] + value;
-            }
-          } else {
-            const float coeff_0 = swap ? coeff_[1] : coeff_[0];
-            const float coeff_1 = swap ? coeff_[0] : coeff_[1];
-#pragma omp parallel for
-            for (index_t i = 0; i < num; ++i) {
-              output_ptr[i] = coeff_0 * input0_ptr[i] +
-                  coeff_1 * value;
-            }
-          }
-          break;
-        case MAX:
-#pragma omp parallel for
-          for (index_t i = 0; i < num; ++i) {
-            output_ptr[i] = std::max<T>(input0_ptr[i], value);
-          }
-          break;
-        case MIN:
-#pragma omp parallel for
-          for (index_t i = 0; i < num; ++i) {
-            output_ptr[i] = std::min<T>(input0_ptr[i], value);
-          }
-          break;
-        case SUB:
-#pragma omp parallel for
-          for (index_t i = 0; i < num; ++i) {
-            output_ptr[i] = swap ? value - input0_ptr[i] :
-                input0_ptr[i] - value;
-          }
-          break;
-        case DIV:
-          if (!swap) {
-            MACE_CHECK(fabs(value) > 1e-6, "cannot divided by 0.");
-#pragma omp parallel for
-            for (index_t i = 0; i < num; ++i) {
-              output_ptr[i] = input0_ptr[i] / value;
-            }
-          } else {
-#pragma omp parallel for
-            for (index_t i = 0; i < num; ++i) {
-              MACE_CHECK(fabs(input0_ptr[i]) > 1e-6, "cannot divided by 0.");
-              output_ptr[i] = value / input0_ptr[i];
-            }
-          }
-          break;
-        case SQR_DIFF:
-#pragma omp parallel for
-          for (index_t i = 0; i < num; ++i) {
-            const float tmp = input0_ptr[i] - value;
-            output_ptr[i] = tmp * tmp;
-          }
-          break;
-        default:
-          LOG(FATAL) << "Eltwise op not support type " << type_;
-      }
-    } else {
-      MACE_CHECK_NOTNULL(input0);
-      MACE_CHECK_NOTNULL(input1);
-      Tensor::MappingGuard input0_guard(input0);
-      Tensor::MappingGuard input1_guard(input1);
-      Tensor::MappingGuard output_guard(output);
-      const T *input0_ptr = input0->data<T>();
-      const T *input1_ptr = input1->data<T>();
-      T *output_ptr = output->mutable_data<T>();
-      const index_t size0 = input0->size();
-      const index_t size1 = input1->size();
-      const index_t num = size0 / size1;
-      switch (type_) {
-        case PROD:
-#pragma omp parallel for collapse(2)
-          for (index_t i = 0; i < num; ++i) {
-            for (index_t j= 0; j < size1; ++j) {
-              output_ptr[i * size1 + j] =
-                  input0_ptr[i * size1 + j] * input1_ptr[j];
-            }
-          }
-          break;
-        case SUM:
-          if (coeff_.empty()) {
-#pragma omp parallel for collapse(2)
-            for (index_t i = 0; i < num; ++i) {
-              for (index_t j = 0; j < size1; ++j) {
-                output_ptr[i * size1 + j] =
-                    input0_ptr[i * size1 + j] + input1_ptr[j];
-              }
-            }
-          } else {
-            const float coeff_0 = swap ? coeff_[1] : coeff_[0];
-            const float coeff_1 = swap ? coeff_[0] : coeff_[1];
-#pragma omp parallel for collapse(2)
-            for (index_t i = 0; i < num; ++i) {
-              for (index_t j = 0; j < size1; ++j) {
-                output_ptr[i * size1 + j] =
-                    coeff_0 * input0_ptr[i * size1 + j] +
-                    coeff_1 * input1_ptr[j];
-              }
-            }
-          }
-          break;
-        case MAX:
-#pragma omp parallel for collapse(2)
-          for (index_t i = 0; i < num; ++i) {
-            for (index_t j = 0; j < size1; ++j) {
-              output_ptr[i * size1 + j] =
-                  std::max<T>(input0_ptr[i * size1 + j], input1_ptr[j]);
-            }
-          }
-          break;
-        case MIN:
-#pragma omp parallel for collapse(2)
-          for (index_t i = 0; i < num; ++i) {
-            for (index_t j = 0; j < size1; ++j) {
-              output_ptr[i * size1 + j] =
-                  std::min<T>(input0_ptr[i * size1 + j], input1_ptr[j]);
-            }
-          }
-          break;
-        case SUB:
-#pragma omp parallel for collapse(2)
-          for (index_t i = 0; i < num; ++i) {
-            for (index_t j = 0; j < size1; ++j) {
-              output_ptr[i * size1 + j] = swap ?
-                  input0_ptr[i * size1 + j] - input1_ptr[j] :
-                  input1_ptr[j] - input0_ptr[i * size1 + j];
-            }
-          }
-          break;
-        case DIV:
-#pragma omp parallel for collapse(2)
-          for (index_t i = 0; i < num; ++i) {
-            for (index_t j = 0; j < size1; ++j) {
-              if (!swap) {
-                MACE_CHECK(fabs(input1_ptr[j]) > 1e-6, "cannot divided by 0.");
-                output_ptr[i * size1 + j] =
-                    input0_ptr[i * size1 + j] / input1_ptr[j];
-              } else {
-                MACE_CHECK(fabs(input0_ptr[i * size1 + j]) > 1e-6,
-                           "cannot divided by 0.");
-                output_ptr[i * size1 + j] =
-                    input1_ptr[j] / input0_ptr[i * size1 + j];
-              }
-            }
-          }
-          break;
-        case SQR_DIFF:
-#pragma omp parallel for collapse(2)
-          for (index_t i = 0; i < num; ++i) {
-            for (index_t j = 0; j < size1; ++j) {
-              const T tmp = input0_ptr[i * size1 + j] - input1_ptr[j];
-              output_ptr[i * size1 + j] = tmp * tmp;
-            }
-          }
-          break;
-        default:
-          LOG(FATAL) << "Eltwise op not support type " << type_;
-      }
-    }
-  }
-};
+inline void TensorScalar(const EltwiseType type,
+                         const float *input0,
+                         const float value,
+                         const index_t size,
+                         float *output) {
+  switch (type) {
+    case SUM:
+#pragma omp parallel for
+      for (index_t i = 0; i < size; ++i) {
+        output[i] = input0[i] + value;
+      }
+      break;
+    case SUB:
+#pragma omp parallel for
+      for (index_t i = 0; i < size; ++i) {
+        output[i] = input0[i] - value;
+      }
+      break;
+    case PROD:
+#pragma omp parallel for
+      for (index_t i = 0; i < size; ++i) {
+        output[i] = input0[i] * value;
+      }
+      break;
+    case DIV:
+#pragma omp parallel for
+      for (index_t i = 0; i < size; ++i) {
+        output[i] = input0[i] / value;
+      }
+      break;
+    case MIN:
+#pragma omp parallel for
+      for (index_t i = 0; i < size; ++i) {
+        output[i] = std::min<float>(input0[i], value);
+      }
+      break;
+    case MAX:
+#pragma omp parallel for
+      for (index_t i = 0; i < size; ++i) {
+        output[i] = std::max<float>(input0[i], value);
+      }
+      break;
+    case NEG:
+#pragma omp parallel for
+      for (index_t i = 0; i < size; ++i) {
+        output[i] = -input0[i];
+      }
+      break;
+    case ABS:
+#pragma omp parallel for
+      for (index_t i = 0; i < size; ++i) {
+        output[i] = std::abs(input0[i]);
+      }
+      break;
+    case SQR_DIFF:
+#pragma omp parallel for
+      for (index_t i = 0; i < size; ++i) {
+        output[i] = std::pow(input0[i] - value, 2.f);
+      }
+      break;
+    default:
+      LOG(FATAL) << "Eltwise op not support type " << type;
+  }
+}
+
+inline void TensorVector(const EltwiseType type,
+                         const float *input0,
+                         const float *input1,
+                         const index_t batch,
+                         const index_t channel,
+                         const index_t hw,
+                         const bool swapped,
+                         float *output) {
+  switch (type) {
+    case SUM:
+#pragma omp parallel for collapse(3)
+      for (index_t b = 0; b < batch; ++b) {
+        for (index_t c = 0; c < channel; ++c) {
+          for (index_t i = 0; i < hw; ++i) {
+            const index_t idx0 = (b * channel + c) * hw + i;
+            const index_t idx1 = b * channel + c;
+            output[idx0] = input0[idx0] + input1[idx1];
+          }
+        }
+      }
+      break;
+    case SUB:
+      if (swapped) {
+#pragma omp parallel for collapse(3)
+        for (index_t b = 0; b < batch; ++b) {
+          for (index_t c = 0; c < channel; ++c) {
+            for (index_t i = 0; i < hw; ++i) {
+              const index_t idx0 = (b * channel + c) * hw + i;
+              const index_t idx1 = b * channel + c;
+              output[idx0] = input1[idx1] - input0[idx0];
+            }
+          }
+        }
+      } else {
+#pragma omp parallel for collapse(3)
+        for (index_t b = 0; b < batch; ++b) {
+          for (index_t c = 0; c < channel; ++c) {
+            for (index_t i = 0; i < hw; ++i) {
+              const index_t idx0 = (b * channel + c) * hw + i;
+              const index_t idx1 = b * channel + c;
+              output[idx0] = input0[idx0] - input1[idx1];
+            }
+          }
+        }
+      }
+      break;
+    case PROD:
+#pragma omp parallel for collapse(3)
+      for (index_t b = 0; b < batch; ++b) {
+        for (index_t c = 0; c < channel; ++c) {
+          for (index_t i = 0; i < hw; ++i) {
+            const index_t idx0 = (b * channel + c) * hw + i;
+            const index_t idx1 = b * channel + c;
+            output[idx0] = input0[idx0] * input1[idx1];
+          }
+        }
+      }
+      break;
+    case DIV:
+      if (swapped) {
+#pragma omp parallel for collapse(3)
+        for (index_t b = 0; b < batch; ++b) {
+          for (index_t c = 0; c < channel; ++c) {
+            for (index_t i = 0; i < hw; ++i) {
+              const index_t idx0 = (b * channel + c) * hw + i;
+              const index_t idx1 = b * channel + c;
+              output[idx0] = input1[idx1] / input0[idx0];
+            }
+          }
+        }
+      } else {
+#pragma omp parallel for collapse(3)
+        for (index_t b = 0; b < batch; ++b) {
+          for (index_t c = 0; c < channel; ++c) {
+            for (index_t i = 0; i < hw; ++i) {
+              const index_t idx0 = (b * channel + c) * hw + i;
+              const index_t idx1 = b * channel + c;
+              output[idx0] = input0[idx0] / input1[idx1];
+            }
+          }
+        }
+      }
+      break;
+    case MIN:
+#pragma omp parallel for collapse(3)
+      for (index_t b = 0; b < batch; ++b) {
+        for (index_t c = 0; c < channel; ++c) {
+          for (index_t i = 0; i < hw; ++i) {
+            const index_t idx0 = (b * channel + c) * hw + i;
+            const index_t idx1 = b * channel + c;
+            output[idx0] = std::min<float>(input0[idx0], input1[idx1]);
+          }
+        }
+      }
+      break;
+    case MAX:
+#pragma omp parallel for collapse(3)
+      for (index_t b = 0; b < batch; ++b) {
+        for (index_t c = 0; c < channel; ++c) {
+          for (index_t i = 0; i < hw; ++i) {
+            const index_t idx0 = (b * channel + c) * hw + i;
+            const index_t idx1 = b * channel + c;
+            output[idx0] = std::max<float>(input0[idx0], input1[idx1]);
+          }
+        }
+      }
+      break;
+    case SQR_DIFF:
+#pragma omp parallel for collapse(3)
+      for (index_t b = 0; b < batch; ++b) {
+        for (index_t c = 0; c < channel; ++c) {
+          for (index_t i = 0; i < hw; ++i) {
+            const index_t idx0 = (b * channel + c) * hw + i;
+            const index_t idx1 = b * channel + c;
+            output[idx0] = std::pow(input0[idx0] - input1[idx1], 2.f);
+          }
+        }
+      }
+      break;
+    default:
+      LOG(FATAL) << "Eltwise op not support type " << type;
+  }
+}
+
+inline void TensorEltwise(const EltwiseType type,
+                          const float *input0,
+                          const float *input1,
+                          const index_t size,
+                          float *output) {
+  switch (type) {
+    case SUM:
+#pragma omp parallel for
+      for (index_t i = 0; i < size; ++i) {
+        output[i] = input0[i] + input1[i];
+      }
+      break;
+    case SUB:
+#pragma omp parallel for
+      for (index_t i = 0; i < size; ++i) {
+        output[i] = input0[i] - input1[i];
+      }
+      break;
+    case PROD:
+#pragma omp parallel for
+      for (index_t i = 0; i < size; ++i) {
+        output[i] = input0[i] * input1[i];
+      }
+      break;
+    case DIV:
+#pragma omp parallel for
+      for (index_t i = 0; i < size; ++i) {
+        output[i] = input0[i] / input1[i];
+      }
+      break;
+    case MIN:
+#pragma omp parallel for
+      for (index_t i = 0; i < size; ++i) {
+        output[i] = std::min<float>(input0[i], input1[i]);
+      }
+      break;
+    case MAX:
+#pragma omp parallel for
+      for (index_t i = 0; i < size; ++i) {
+        output[i] = std::max<float>(input0[i], input1[i]);
+      }
+      break;
+    case SQR_DIFF:
+#pragma omp parallel for
+      for (index_t i = 0; i < size; ++i) {
+        output[i] = std::pow(input0[i] - input1[i], 2.f);
+      }
+      break;
+    default:
+      LOG(FATAL) << "Eltwise op not support type " << type;
+  }
+}
+
+struct EltwiseFunctorBase {
+  EltwiseFunctorBase(const EltwiseType type,
+                     const std::vector<float> &coeff,
+                     const float value)
+      : type_(type), coeff_(coeff), value_(value) {}
+
+  EltwiseType type_;
+  std::vector<float> coeff_;
+  float value_;
+};
+
+template <DeviceType D, typename T>
+struct EltwiseFunctor;
+
+template <>
+struct EltwiseFunctor<DeviceType::CPU, float>: EltwiseFunctorBase {
+  EltwiseFunctor(const EltwiseType type,
+                 const std::vector<float> &coeff,
+                 const float value)
+      : EltwiseFunctorBase(type, coeff, value) {}
+
+  void operator()(const Tensor *input0,
+                  const Tensor *input1,
+                  Tensor *output,
+                  StatsFuture *future) {
+    bool swapped = false;
+    if (input1 != nullptr) {
+      MACE_CHECK(input0->dim_size() == input1->dim_size())
+          << "Inputs of Eltwise op must be same shape";
+      if (input0->size() != input1->size()) {
+        if (input0->size() < input1->size()) {
+          std::swap(input0, input1);
+          swapped = true;
+        }
+        MACE_CHECK(input0->dim(0) == input1->dim(0) &&
+            input0->dim(1) == input1->dim(1) &&
+            input1->dim(2) == 1 &&
+            input1->dim(3) == 1)
+            << "Element-Wise op only support channel dimension broadcast";
+      }
+    }
+    output->ResizeLike(input0);
+
+    Tensor::MappingGuard input0_guard(input0);
+    Tensor::MappingGuard output_guard(output);
+
+    const float *input0_ptr = input0->data<float>();
+    float *output_ptr = output->mutable_data<float>();
+    const index_t size = input0->size();
+    if (input1 == nullptr) {
+      TensorScalar(type_, input0_ptr, value_, size, output_ptr);
+    } else {
+      Tensor::MappingGuard input1_guard(input1);
+      const float *input1_ptr = input1->data<float>();
+      if (input1->size() != input0->size()) {
+        const index_t batch = input0->dim(0);
+        const index_t channel = input0->dim(1);
+        const index_t hw = input0->dim(2) * input0->dim(3);
+        TensorVector(type_, input0_ptr, input1_ptr,
+                     batch, channel, hw, swapped, output_ptr);
+      } else {
+        if (!coeff_.empty() && type_ == SUM) {
+#pragma omp parallel for
+          for (index_t i = 0; i < size; ++i) {
+            output_ptr[i] = coeff_[0] * input0_ptr[i] +
+                coeff_[1] * input1_ptr[i];
+          }
+        } else {
+          TensorEltwise(type_, input0_ptr, input1_ptr, size, output_ptr);
+        }
+      }
+    }
+  }
+};
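The new TensorVector helper implements the only broadcast the refactored CPU path accepts: input1 of shape {batch, channel, 1, 1} expanded over the hw plane of an NCHW input0 (the shape check in operator() above enforces exactly this). A standalone sketch of the indexing with toy sizes (plain C++ for illustration, not MACE code):

// Channel-broadcast indexing as used by TensorVector, SUM case.
#include <cstdio>
#include <vector>

int main() {
  const int batch = 1, channel = 2, hw = 3;
  std::vector<float> input0 = {0, 1, 2, 3, 4, 5};   // NCHW, flattened
  std::vector<float> input1 = {10, 100};            // one value per (b, c)
  std::vector<float> output(batch * channel * hw);
  for (int b = 0; b < batch; ++b)
    for (int c = 0; c < channel; ++c)
      for (int i = 0; i < hw; ++i) {
        const int idx0 = (b * channel + c) * hw + i;  // full-tensor index
        const int idx1 = b * channel + c;             // broadcast index
        output[idx0] = input0[idx0] + input1[idx1];
      }
  for (float v : output) std::printf("%g ", v);       // 10 11 12 103 104 105
  std::printf("\n");
  return 0;
}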
@@ -249,15 +365,12 @@ struct EltwiseFunctor : EltwiseFunctorBase {
 template <typename T>
 struct EltwiseFunctor<DeviceType::OPENCL, T> : EltwiseFunctorBase {
   EltwiseFunctor(const EltwiseType type,
-                 const std::vector<float> &coeff)
-      : EltwiseFunctorBase(type, coeff) {}
+                 const std::vector<float> &coeff,
+                 const float value)
+      : EltwiseFunctorBase(type, coeff, value) {}
 
   void operator()(const Tensor *input0,
                   const Tensor *input1,
-                  const index_t start_axis,
-                  const bool is_scaler,
-                  const float value,
-                  const bool swap,
                   Tensor *output,
                   StatsFuture *future);
......
@@ -3,8 +3,11 @@
 __kernel void eltwise(KERNEL_ERROR_PARAMS
                       GLOBAL_WORK_GROUP_SIZE_DIM3
                       __read_only image2d_t input0,
-                      __read_only image2d_t input1,
-                      __private const float value,
+#if INPUT_TYPE == 1
+                      __private const float value,
+#else
+                      __read_only image2d_t input1,
+#endif
                       __private const int height,
                       __private const int width,
                       __private const int channel,
@@ -13,101 +16,76 @@ __kernel void eltwise(KERNEL_ERROR_PARAMS
                       __private const float coeff1,
 #endif
                       __write_only image2d_t output) {
-  const int c = get_global_id(0);
-  const int w = get_global_id(1);
+  const int chan_idx = get_global_id(0);
+  const int width_idx = get_global_id(1);
   const int hb = get_global_id(2);
 
 #ifndef NON_UNIFORM_WORK_GROUP
-  if (c >= global_size_dim0 || w >= global_size_dim1 || hb >= global_size_dim2)
+  if (chan_idx >= global_size_dim0 ||
+      width_idx >= global_size_dim1 || hb >= global_size_dim2)
     return;
 #endif
 
-  int pos_w;
-  int pos_h;
-#if START_AXIS == 0
-  pos_w = mad24(c, width, w);
-  pos_h = hb;
-#elif START_AXIS == 1
-  pos_w = mad24(c, width, w);
-  pos_h = hb % height;
-#elif START_AXIS == 2
-  pos_w = mad24(c, width, w);
-  pos_h = 0;
-#elif START_AXIS == 3
-  pos_w = c;
-  pos_h = 0;
-#endif
-  const int pos = mad24(c, width, w);
-  const int remain_channel = channel - 4 * c;
+  const int pos = mad24(chan_idx, width, width_idx);
   DATA_TYPE4 in0 = READ_IMAGET(input0, SAMPLER, (int2)(pos, hb));
-  DATA_TYPE4 in1 ;
-#if IS_SCALER == 1
-  in1 = (DATA_TYPE4){value, value, value, value};
+#if INPUT_TYPE == 1
+  DATA_TYPE4 in1 = (DATA_TYPE4)(value, value, value, value);
+#elif INPUT_TYPE == 2
+  const int batch_idx = hb / height;
+  DATA_TYPE4 in1 = READ_IMAGET(input1, SAMPLER, (int2)(chan_idx, batch_idx));
 #else
-  in1 = READ_IMAGET(input1, SAMPLER, (int2)(pos_w, pos_h));
+  DATA_TYPE4 in1 = READ_IMAGET(input1, SAMPLER, (int2)(pos, hb));
 #endif
   DATA_TYPE4 out;
 #if ELTWISE_TYPE == 0
-  out = in0 * in1;
-#elif ELTWISE_TYPE == 1
 #ifdef COEFF_SUM
-#if NEEDSWAP == 0
-  out = mad(coeff0, in0, mad(coeff1, in1, 0));
-#else
   out = mad(coeff1, in0, mad(coeff0, in1, 0));
-#endif
 #else
   out = in0 + in1;
 #endif
+#elif ELTWISE_TYPE == 1
+#ifdef SWAPPED
+  out = in1 - in0;
+#else
+  out = in0 - in1;
+#endif
 #elif ELTWISE_TYPE == 2
-  out = fmax(in0, in1);
+  out = in0 * in1;
 #elif ELTWISE_TYPE == 3
-  out = fmin(in0, in1);
-#elif ELTWISE_TYPE == 4
-#if NEED_SWAP == 0
-  out = in0 - in1;
+#ifdef SWAPPED
+  out = in1 / in0;
 #else
-  out = in1 - in0;
+  out = in0 / in1;
 #endif
+#elif ELTWISE_TYPE == 4
+  out = fmin(in0, in1);
 #elif ELTWISE_TYPE == 5
-#if NEED_SWAP == 0
-  if (fabs(in1.x) > 0.000001f)
-    out.x = in0.x / in1.x;
-  if (fabs(in1.y) > 0.000001f)
-    out.y = in0.y / in1.y;
-  if (fabs(in1.z) > 0.000001f)
-    out.z = in0.z / in1.z;
-  if (fabs(in1.w) > 0.000001f)
-    out.w = in0.w / in1.w;
-#else
-  if (fabs(in1.x) > 0.000001f)
-    out.x = in1.x / in0.x;
-  if (fabs(in1.y) > 0.000001f)
-    out.y = in1.y / in0.y;
-  if (fabs(in1.z) > 0.000001f)
-    out.z = in1.z / in0.z;
-  if (fabs(in1.w) > 0.000001f)
-    out.w = in1.w / in0.w;
-#endif
+  out = fmax(in0, in1);
+#elif ELTWISE_TYPE == 6
+  in1 = (DATA_TYPE4)(0, 0, 0, 0);
+  out = in1 - in0;
+#elif ELTWISE_TYPE == 7
+  out = fabs(in0);
 #elif ELTWISE_TYPE == 8
   DATA_TYPE4 diff = in0 - in1;
   out = diff * diff;
 #endif
 
-#if ELTWISE_TYPE == 1 || ELTWISE_TYPE == 2 || ELTWISE_TYPE == 3 \
-    || ELTWISE_TYPE == 4 || ELTWISE_TYPE == 8
-  if (remain_channel < 4) {
-    switch (remain_channel) {
-      case 1:
-        out.y = 0;
-      case 2:
-        out.z = 0;
-      case 3:
-        out.w = 0;
+#if INPUT_TYPE == 1
+#if ELTWISE_TYPE == 0 || ELTWISE_TYPE == 1 || ELTWISE_TYPE == 4 || ELTWISE_TYPE == 5 || ELTWISE_TYPE == 8
+  const int remain_channel = channel - 4 * chan_idx;
+  if (remain_channel < 4) {
+    switch (remain_channel) {
+      case 1:
+        out.y = 0;
+      case 2:
+        out.z = 0;
+      case 3:
+        out.w = 0;
+    }
   }
-  }
+#endif
 #endif
 
   WRITE_IMAGET(output, (int2)(pos, hb), out);
......
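A note on the masking block at the end of the kernel: channels are packed four per pixel, and with a scalar operand (INPUT_TYPE == 1) under SUM, SUB, MIN, MAX, or SQR_DIFF the zero-padded lanes of the last channel block would otherwise take on nonzero values, so the deliberate switch fallthrough re-zeroes them. A plain C++ stand-in for the float4 logic (toy values and a hypothetical helper name, not MACE code):

// Simulates the remain-channel masking with intentional case fallthrough.
#include <cstdio>

struct Float4 { float x, y, z, w; };

void MaskRemainder(Float4 *out, int channel, int chan_idx) {
  const int remain_channel = channel - 4 * chan_idx;
  if (remain_channel < 4) {
    switch (remain_channel) {    // intentional fallthrough
      case 1: out->y = 0;        // only lane .x is a real channel
      case 2: out->z = 0;
      case 3: out->w = 0;
    }
  }
}

int main() {
  Float4 out = {1, 2, 3, 4};
  MaskRemainder(&out, 6, 1);     // last block holds channels 4..5 -> 2 lanes
  std::printf("%g %g %g %g\n", out.x, out.y, out.z, out.w);  // 1 2 0 0
  return 0;
}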
@@ -23,16 +23,29 @@ namespace kernels {
 template <typename T>
 void EltwiseFunctor<DeviceType::OPENCL, T>::operator()(const Tensor *input0,
                                                        const Tensor *input1,
-                                                       const index_t start_axis,
-                                                       const bool is_scaler,
-                                                       const float value,
-                                                       const bool swap,
                                                        Tensor *output,
                                                        StatsFuture *future) {
-  const index_t batch = input0->dim(0);
-  const index_t height = input0->dim(1);
-  const index_t width = input0->dim(2);
-  const index_t channels = input0->dim(3);
+  bool swapped = false;
+  if (input1 != nullptr) {
+    MACE_CHECK(input0->dim_size() == input1->dim_size())
+        << "Inputs of Eltwise op must be same shape";
+    if (input0->size() != input1->size()) {
+      if (input0->size() < input1->size()) {
+        std::swap(input0, input1);
+        swapped = true;
+      }
+      MACE_CHECK(input0->dim(0) == input1->dim(0) &&
+          input1->dim(1) == 1 &&
+          input1->dim(2) == 1 &&
+          input0->dim(3) == input1->dim(3))
+          << "Element-Wise op only support channel dimension broadcast";
+    }
+  }
+  output->ResizeLike(input0);
+
+  const index_t batch = output->dim(0);
+  const index_t height = output->dim(1);
+  const index_t width = output->dim(2);
+  const index_t channels = output->dim(3);
 
   const index_t channel_blocks = RoundUpDiv4(channels);
   const index_t batch_height_pixels = batch * height;
@@ -41,8 +54,6 @@ void EltwiseFunctor<DeviceType::OPENCL, T>::operator()(const Tensor *input0,
                   static_cast<uint32_t>(width),
                   static_cast<uint32_t>(batch_height_pixels)};
 
-  const int scaler = is_scaler ? 1 : 0;
-  const int need_swap = swap ? 1 : 0;
   auto runtime = OpenCLRuntime::Global();
 
   if (kernel_.get() == nullptr) {
     std::set<std::string> built_options;
@@ -52,9 +63,14 @@ void EltwiseFunctor<DeviceType::OPENCL, T>::operator()(const Tensor *input0,
     built_options.emplace("-DDATA_TYPE=" + DtToUpstreamCLDt(dt));
     built_options.emplace("-DCMD_DATA_TYPE=" + DtToUpstreamCLCMDDt(dt));
     built_options.emplace(MakeString("-DELTWISE_TYPE=", type_));
-    built_options.emplace(MakeString("-DSTART_AXIS=", start_axis));
-    built_options.emplace(MakeString("-DIS_SCALER=", scaler));
-    built_options.emplace(MakeString("-DNEEDSWAP=", need_swap));
+    if (input1 == nullptr) {
+      built_options.emplace("-DINPUT_TYPE=1");
+    } else if (input0->size() != input1->size()) {
+      built_options.emplace("-DINPUT_TYPE=2");
+      if (swapped) built_options.emplace("-DSWAPPED");
+    }
+    if (!coeff_.empty()) built_options.emplace("-DCOEFF_SUM");
     if (runtime->IsOutOfRangeCheckEnabled()) {
       built_options.emplace("-DOUT_OF_RANGE_CHECK");
       kernel_error_ = std::move(std::unique_ptr<Buffer>(
@@ -66,7 +82,6 @@ void EltwiseFunctor<DeviceType::OPENCL, T>::operator()(const Tensor *input0,
     if (runtime->IsNonUniformWorkgroupsSupported()) {
       built_options.emplace("-DNON_UNIFORM_WORK_GROUP");
     }
-    if (!coeff_.empty()) built_options.emplace("-DCOEFF_SUM");
     kernel_ = runtime->BuildKernel("eltwise", kernel_name, built_options);
 
     kwg_size_ =
@@ -84,8 +99,11 @@ void EltwiseFunctor<DeviceType::OPENCL, T>::operator()(const Tensor *input0,
       kernel_.setArg(idx++, gws[2]);
     }
     kernel_.setArg(idx++, *(input0->opencl_image()));
-    kernel_.setArg(idx++, *(input1->opencl_image()));
-    kernel_.setArg(idx++, value);
+    if (input1 == nullptr) {
+      kernel_.setArg(idx++, value_);
+    } else {
+      kernel_.setArg(idx++, *(input1->opencl_image()));
+    }
     kernel_.setArg(idx++, static_cast<int32_t>(height));
     kernel_.setArg(idx++, static_cast<int32_t>(width));
     kernel_.setArg(idx++, static_cast<int32_t>(channels));
......
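Host-side, the kernel variant is now chosen entirely at build time: no INPUT_TYPE macro means two same-shape tensors, INPUT_TYPE=1 means a scalar operand, and INPUT_TYPE=2 means {N,C,1,1} broadcast, with SWAPPED recording that the operands were reordered so the larger tensor comes first. A self-contained sketch of that decision logic (hypothetical function and sizes, mirroring the diff; not MACE code):

// Mirrors how built_options select the eltwise kernel variant.
#include <iostream>
#include <set>
#include <string>

std::set<std::string> BuildOptions(bool has_input1, long size0, long size1,
                                   bool swapped, bool has_coeff) {
  std::set<std::string> opts;
  if (!has_input1) {
    opts.insert("-DINPUT_TYPE=1");            // scalar operand
  } else if (size0 != size1) {
    opts.insert("-DINPUT_TYPE=2");            // channel broadcast
    if (swapped) opts.insert("-DSWAPPED");    // operands were reordered
  }
  if (has_coeff) opts.insert("-DCOEFF_SUM");  // coefficient sum variant
  return opts;
}

int main() {
  for (const auto &o : BuildOptions(true, 32 * 32 * 64, 64, true, false))
    std::cout << o << "\n";   // prints -DINPUT_TYPE=2 then -DSWAPPED
  return 0;
}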
@@ -28,57 +28,20 @@ class EltwiseOp : public Operator<D, T> {
       : Operator<D, T>(op_def, ws),
         functor_(static_cast<kernels::EltwiseType>(
                      OperatorBase::GetSingleArgument<int>(
-                         "type", static_cast<int>(kernels::EltwiseType::SUM))),
-                 OperatorBase::GetRepeatedArgument<float>("coeff")) {}
+                         "type", static_cast<int>(kernels::EltwiseType::NONE))),
+                 OperatorBase::GetRepeatedArgument<float>("coeff"),
+                 OperatorBase::GetSingleArgument<float>("x", 1.0)) {}
 
   bool Run(StatsFuture *future) override {
-    if (this->InputSize() == 1) {
-      const Tensor* input = this->Input(0);
-      Tensor *output = this->Output(OUTPUT);
-      start_axis_ = input->dim_size() - 1;
-      is_scaler_ = true;
-      output->ResizeLike(input);
-      const float x = OperatorBase::GetSingleArgument<float>("x", 1.0);
-      functor_(input, nullptr, start_axis_,
-               is_scaler_, x, false, output, future);
-    } else {
-      const index_t size0 = this->Input(0)->size();
-      const index_t size1 = this->Input(1)->size();
-      const bool swap = (size0 < size1);
-      const Tensor *input0 = swap ? this->Input(1) : this->Input(0);
-      const Tensor *input1 = swap ? this->Input(0) : this->Input(1);
-      Tensor *output = this->Output(OUTPUT);
-      MACE_CHECK(input0->dim_size() == input1->dim_size())
-          << "Inputs of Eltwise op must be same shape";
-      start_axis_ = input0->dim_size() - 1;
-      is_scaler_ = (input1->size() == 1);
-      uint32_t compared_size = 1;
-      if (!is_scaler_) {
-        while (start_axis_ >= 0) {
-          MACE_CHECK(input0->dim(start_axis_) == input1->dim(start_axis_),
-                     "Invalid inputs dimension at axis: ") << start_axis_
-              << "input 0: " << input0->dim(start_axis_)
-              << "input 1: " << input1->dim(start_axis_);
-          compared_size *= input1->dim(start_axis_);
-          if (compared_size == input1->size()) {
-            break;
-          }
-          start_axis_--;
-        }
-      }
-      output->ResizeLike(input0);
-      const float x = OperatorBase::GetSingleArgument<float>("x", 1.0);
-      functor_(input0, input1, start_axis_,
-               is_scaler_, x, swap, output, future);
-    }
+    const Tensor* input0 = this->Input(0);
+    const Tensor* input1 = this->InputSize() == 2 ? this->Input(1) : nullptr;
+    Tensor *output = this->Output(OUTPUT);
+    functor_(input0, input1, output, future);
     return true;
   }
 
  private:
   kernels::EltwiseFunctor<D, T> functor_;
-  index_t start_axis_;
-  bool is_scaler_;
 
  private:
   OP_OUTPUT_TAGS(OUTPUT);
......
@@ -35,10 +35,10 @@ void EltwiseBenchmark(
   net.AddRandomInput<D, T>("Input1", {n, h, w, c});
 
   if (D == DeviceType::OPENCL) {
-    BufferToImage<D, half>(&net, "Input0", "InputImg0",
-                           kernels::BufferType::IN_OUT_CHANNEL);
-    BufferToImage<D, half>(&net, "Input1", "InputImg1",
-                           kernels::BufferType::IN_OUT_CHANNEL);
+    BufferToImage<D, T>(&net, "Input0", "InputImg0",
+                        kernels::BufferType::IN_OUT_CHANNEL);
+    BufferToImage<D, T>(&net, "Input1", "InputImg1",
+                        kernels::BufferType::IN_OUT_CHANNEL);
     OpDefBuilder("Eltwise", "EltwiseTest")
         .Input("InputImg0")
         .Input("InputImg1")
@@ -48,9 +48,13 @@ void EltwiseBenchmark(
         .Output("OutputImg")
         .Finalize(net.NewOperatorDef());
   } else {
+    net.TransformDataFormat<D, float>("Input0", NHWC,
+                                      "TInput0", NCHW);
+    net.TransformDataFormat<D, float>("Input1", NHWC,
+                                      "TInput1", NCHW);
     OpDefBuilder("Eltwise", "EltwiseTest")
-        .Input("Input0")
-        .Input("Input1")
+        .Input("TInput0")
+        .Input("TInput1")
         .AddIntArg("type", static_cast<int>(type))
         .AddFloatsArg("coeff", {1.2, 2.1})
         .AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
@@ -89,13 +93,13 @@ void EltwiseBenchmark(
   BM_ELTWISE_MACRO(ELT_TYPE, N, H, W, C, float, OPENCL); \
   BM_ELTWISE_MACRO(ELT_TYPE, N, H, W, C, half, OPENCL);
 
-BM_ELTWISE(0, 1, 256, 256, 32);
-BM_ELTWISE(0, 1, 128, 128, 32);
-BM_ELTWISE(1, 1, 128, 128, 32);
 BM_ELTWISE(2, 1, 128, 128, 32);
-BM_ELTWISE(0, 1, 240, 240, 256);
-BM_ELTWISE(1, 1, 240, 240, 256);
 BM_ELTWISE(2, 1, 240, 240, 256);
+BM_ELTWISE(2, 1, 256, 256, 32);
+BM_ELTWISE(0, 1, 128, 128, 32);
+BM_ELTWISE(0, 1, 240, 240, 256);
+BM_ELTWISE(5, 1, 128, 128, 32);
+BM_ELTWISE(5, 1, 240, 240, 256);
 } // namespace test
 } // namespace ops
......
This diff is collapsed.
@@ -41,6 +41,12 @@ activation_name_map = {
     'TanH': 'TANH',
 }
 
+math_type_mode = {
+    0: 2,  # PROD
+    1: 0,  # SUM
+    2: 5,  # MAX
+}
+
 MACE_INPUT_NODE_NAME = "mace_input_node"
 MACE_OUTPUT_NODE_NAME = "mace_output_node"
@@ -921,11 +927,11 @@ class CaffeConverter(object):
         param = op.layer.eltwise_param
         type_arg = op_def.arg.add()
         type_arg.name = 'type'
-        type_arg.i = param.operation
+        type_arg.i = math_type_mode[param.operation]
         if len(param.coeff) > 0:
             coeff_arg = op_def.arg.add()
             coeff_arg.name = 'coeff'
-            coeff_arg.ints.extend(list(param.coeff))
+            coeff_arg.floats.extend(list(param.coeff))
         output_shape = op.parents[0].output_shape_map[op.layer.bottom[0]]
         op.output_shape_map[op.layer.top[0]] = output_shape
......
@@ -30,14 +30,14 @@ pooling_type_mode = {'AvgPool': 1, 'MaxPool': 2}
 # and also cwise type's in mace/kernels/cwise.h
 # cuz these math ops should have compatible with "EltWise" and "CWise"
 math_type_mode = {
-    'MUL': 0,
-    'ADD': 1,
-    'MAX': 2,
-    'MIN': 3,
-    'SUB': 4,
-    'DIV': 5,
+    'ADD': 0,
+    'SUB': 1,
+    'MUL': 2,
+    'DIV': 3,
+    'MIN': 4,
+    'MAX': 5,
     'NEG': 6,
-    'ABS': 7
+    'ABS': 7,
 }
 
 buffer_type_map = {
@@ -859,18 +859,26 @@ class TFConverter(object):
         arg.i = self.dt
         op_def.name = op.name
         op_def.type = "Eltwise"
-        op_def.input.extend([input.name for input in op.inputs])
-        x_value = op.get_attr('x')
-        if len(op.inputs) >= 2:
+        if len(op.inputs) == 2:
             input_tensor0 = get_input_tensor(op, 0)
             input_tensor1 = get_input_tensor(op, 1)
-            if len(input_tensor0) == 1:
-                x_value = input_tensor0.eval().astype(np.float32)
-            elif len(input_tensor1) == 1:
-                x_value = input_tensor1.eval().astype(np.float32)
-        x_arg = op_def.arg.add()
-        x_arg.name = 'x'
-        x_arg.f = x_value
+            x_value = None
+            if np.asarray(input_tensor1.shape).size == 0:
+                x_value = input_tensor1.eval()
+                op_def.input.extend([op.inputs[0].name])
+                self.unused_tensor.add(input_tensor1.name)
+            elif np.asarray(input_tensor0.shape).size == 0:
+                x_value = input_tensor0.eval()
+                op_def.input.extend([op.inputs[1].name])
+                self.unused_tensor.add(input_tensor0.name)
+            else:
+                op_def.input.extend([input.name for input in op.inputs])
+            if x_value is not None:
+                x_arg = op_def.arg.add()
+                x_arg.name = 'x'
+                x_arg.f = x_value
+        else:
+            op_def.input.extend([input.name for input in op.inputs])
         type_arg = op_def.arg.add()
         type_arg.name = 'type'
         type_arg.i = math_type_mode[math_type]
......