提交 5c47bbff 编写于 作者: L liuqi

Rename is_relu to fused_relu.

上级 8eb55b61
......@@ -9,22 +9,22 @@ namespace mace {
namespace kernels {
extern void Conv2dOpenclK1x1S1(const Tensor *input, const Tensor *filter,
const Tensor *bias, const bool is_relu,
const Tensor *bias, const bool fused_relu,
const int *padding, const DataType dt,
Tensor *output);
extern void Conv2dOpenclK1x1S2(const Tensor *input, const Tensor *filter,
const Tensor *bias, const bool is_relu,
const Tensor *bias, const bool fused_relu,
const int *padding, const DataType dt,
Tensor *output);
extern void Conv2dOpenclK3x3S1(const Tensor *input, const Tensor *filter,
const Tensor *bias, const bool is_relu,
const Tensor *bias, const bool fused_relu,
const int *padding, const DataType dt,
Tensor *output);
extern void Conv2dOpenclK3x3S2(const Tensor *input, const Tensor *filter,
const Tensor *bias, const bool is_relu,
const Tensor *bias, const bool fused_relu,
const int *padding, const DataType dt,
Tensor *output);
......@@ -34,7 +34,7 @@ void Conv2dFunctor<DeviceType::OPENCL, T>::operator()(const Tensor *input,
const Tensor *bias,
Tensor *output) {
typedef void (*Conv2dOpenclFunction)(const Tensor *input, const Tensor *filter,
const Tensor *bias, const bool is_relu,
const Tensor *bias, const bool fused_relu,
const int *padding, const DataType dt,
Tensor *output);
// Selection matrix: kernel_size x stride_size
......
......@@ -14,7 +14,7 @@ namespace kernels {
void Conv1x1(const Tensor *input,
const Tensor *filter,
const Tensor *bias,
const bool is_relu,
const bool fused_relu,
const int stride,
const DataType dt,
Tensor *output) {
......@@ -40,7 +40,7 @@ void Conv1x1(const Tensor *input,
if (bias != nullptr) {
built_options.emplace("-DBIAS");
}
if (is_relu) {
if (fused_relu) {
built_options.emplace("-DFUSED_RELU");
}
......@@ -78,21 +78,21 @@ void Conv1x1(const Tensor *input,
// 1x1 convolution, stride 1, OpenCL path. Thin wrapper that forwards to
// Conv1x1 with stride fixed to 1.
// Note: `padding` is accepted to match the common Conv2dOpenclFunction
// signature but is not forwarded — a 1x1 kernel needs no spatial padding.
// `fused_relu` (renamed from `is_relu` in this commit) requests a ReLU
// fused into the kernel; `bias` may be nullptr (no bias add).
extern void Conv2dOpenclK1x1S1(const Tensor *input,
                               const Tensor *filter,
                               const Tensor *bias,
                               const bool fused_relu,
                               const int *padding,
                               const DataType dt,
                               Tensor *output) {
  Conv1x1(input, filter, bias, fused_relu, 1, dt, output);
}
// 1x1 convolution, stride 2, OpenCL path. Thin wrapper that forwards to
// Conv1x1 with stride fixed to 2.
// Note: `padding` is accepted to match the common Conv2dOpenclFunction
// signature but is not forwarded — a 1x1 kernel needs no spatial padding.
// `fused_relu` (renamed from `is_relu` in this commit) requests a ReLU
// fused into the kernel; `bias` may be nullptr (no bias add).
extern void Conv2dOpenclK1x1S2(const Tensor *input,
                               const Tensor *filter,
                               const Tensor *bias,
                               const bool fused_relu,
                               const int *padding,
                               const DataType dt,
                               Tensor *output) {
  Conv1x1(input, filter, bias, fused_relu, 2, dt, output);
}
} // namespace kernels
......
......@@ -12,7 +12,7 @@ namespace mace {
namespace kernels {
static void Conv2d3x3S12(const Tensor *input, const Tensor *filter,
const Tensor *bias, const bool is_relu,
const Tensor *bias, const bool fused_relu,
const uint32_t stride, const int *padding,
const DataType dt, Tensor *output) {
const index_t batch = output->dim(0);
......@@ -30,7 +30,7 @@ static void Conv2d3x3S12(const Tensor *input, const Tensor *filter,
built_options.emplace("-DCMD_DATA_TYPE=" + DataTypeToOPENCLCMDDataType(dt));
built_options.emplace(bias != nullptr ? "-DBIAS" : "");
built_options.emplace("-DSTRIDE=" + ToString(stride));
if (is_relu) {
if (fused_relu) {
built_options.emplace("-DFUSED_RELU");
}
......@@ -69,21 +69,21 @@ static void Conv2d3x3S12(const Tensor *input, const Tensor *filter,
// 3x3 convolution, stride 1, OpenCL path. Thin wrapper that forwards to the
// shared Conv2d3x3S12 implementation with stride fixed to 1.
// `fused_relu` (renamed from `is_relu` in this commit) requests a ReLU
// fused into the kernel; `bias` may be nullptr (no bias add).
void Conv2dOpenclK3x3S1(const Tensor *input,
                        const Tensor *filter,
                        const Tensor *bias,
                        const bool fused_relu,
                        const int *padding,
                        const DataType dt,
                        Tensor *output) {
  Conv2d3x3S12(input, filter, bias, fused_relu, 1, padding, dt, output);
}
// 3x3 convolution, stride 2, OpenCL path. Thin wrapper that forwards to the
// shared Conv2d3x3S12 implementation with stride fixed to 2.
// `fused_relu` (renamed from `is_relu` in this commit) requests a ReLU
// fused into the kernel; `bias` may be nullptr (no bias add).
void Conv2dOpenclK3x3S2(const Tensor *input,
                        const Tensor *filter,
                        const Tensor *bias,
                        const bool fused_relu,
                        const int *padding,
                        const DataType dt,
                        Tensor *output) {
  Conv2d3x3S12(input, filter, bias, fused_relu, 2, padding, dt, output);
}
} // namespace kernels
......
......@@ -9,22 +9,22 @@ namespace mace {
namespace kernels {
extern void Conv2dOpenclK1x1S1(const Tensor *input, const Tensor *filter,
const Tensor *bias, const bool is_relu,
const Tensor *bias, const bool fused_relu,
const int *padding, const DataType dt,
Tensor *output);
extern void Conv2dOpenclK1x1S2(const Tensor *input, const Tensor *filter,
const Tensor *bias, const bool is_relu,
const Tensor *bias, const bool fused_relu,
const int *padding, const DataType dt,
Tensor *output);
extern void Conv2dOpenclK3x3S1(const Tensor *input, const Tensor *filter,
const Tensor *bias, const bool is_relu,
const Tensor *bias, const bool fused_relu,
const int *padding, const DataType dt,
Tensor *output);
extern void Conv2dOpenclK3x3S2(const Tensor *input, const Tensor *filter,
const Tensor *bias, const bool is_relu,
const Tensor *bias, const bool fused_relu,
const int *padding, const DataType dt,
Tensor *output);
......@@ -34,7 +34,7 @@ void FusedConv2dFunctor<DeviceType::OPENCL, T>::operator()(const Tensor *input,
const Tensor *bias,
Tensor *output) {
typedef void (*Conv2dOpenclFunction)(const Tensor *input, const Tensor *filter,
const Tensor *bias, const bool is_relu,
const Tensor *bias, const bool fused_relu,
const int *padding, const DataType dt,
Tensor *output);
// Selection matrix: kernel_size x stride_size
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册