Unverified commit 0c968b9d, authored by feng_shuai, committed by GitHub

add div plugin and add filter (#41243)

Parent 2de82224
@@ -1007,6 +1007,14 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
       auto* y_var_desc = block->FindVar(desc.Input("Y")[0]);
       const auto x_shape = x_var_desc->GetShape();
       const auto y_shape = y_var_desc->GetShape();
+      if (op_type == "elementwise_add" && y_var_desc->Persistable()) {
+        if (y_shape.size() != 1) {
+          return false;
+        }
+        if (y_shape[0] != x_shape[1]) {
+          return false;
+        }
+      }
       if (x_shape.size() == 1 && y_shape.size() == 1) {
         VLOG(3) << "Now trt may not support two 1d tensor elementwise op.";
         return false;
...
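
The new check above only concerns elementwise_add whose Y input is a persistable weight: such an op is offered to TensorRT only when Y is a 1-D tensor whose length equals x_shape[1]; otherwise it stays on native Paddle. A minimal standalone sketch of that rule (the helper name and the example shapes are illustrative, not part of the patch):

#include <cstdint>
#include <vector>

// Hypothetical helper mirroring the filter added in OpTeller above.
bool AcceptPersistableElementwiseAddY(const std::vector<int64_t>& x_shape,
                                      const std::vector<int64_t>& y_shape) {
  if (y_shape.size() != 1) {
    return false;  // the persistable Y must be 1-D
  }
  return y_shape[0] == x_shape[1];  // and its length must match dim 1 of X
}

// Example with X shaped [N, C, H, W] = [1, 64, 32, 32]:
//   y_shape = {64}          -> true, eligible for TensorRT conversion
//   y_shape = {1, 64, 1, 1} -> false, the op falls back to native Paddle
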
@@ -30,6 +30,11 @@ template <typename T>
 struct Mul {
   __device__ T operator()(const T &a, const T &b) const { return a * b; }
 };
+
+template <typename T>
+struct Div {
+  __device__ T operator()(const T &a, const T &b) const { return a / b; }
+};
 }  // namespace details
 
 template <typename T, typename Operator>
@@ -130,6 +135,10 @@ int ElementWisePlugin::enqueue(int batch_size, const void *const *inputs,
     elementwise_kernel<<<block, thread, 0, stream>>>(
         num, x, y, out, prev_size_, batch_size * midd_size_, post_size_,
         details::Mul<float>());
+  } else if (type_ == "div") {
+    elementwise_kernel<<<block, thread, 0, stream>>>(
+        num, x, y, out, prev_size_, batch_size * midd_size_, post_size_,
+        details::Div<float>());
   } else {
     PADDLE_THROW(platform::errors::Fatal(
         "The %s type elementwise is not implemented in trt plugin.", type_));
@@ -242,11 +251,15 @@ int ElementwisePluginDynamic::enqueue(
   } else if (type_ == "mul") {
     elementwise_kernel<<<block, thread, 0, stream>>>(
         num, x, y, out, prev_size, midd_size, post_size, details::Mul<float>());
+  } else if (type_ == "div") {
+    elementwise_kernel<<<block, thread, 0, stream>>>(
+        num, x, y, out, prev_size, midd_size, post_size, details::Div<float>());
   } else {
-    PADDLE_THROW(platform::errors::Unimplemented(
-        "Paddle-TRT only support elementwise operation: {add, mul} currently, "
-        "but got %s.",
-        type_));
+    PADDLE_THROW(
+        platform::errors::Unimplemented("Paddle-TRT only support elementwise "
+                                        "operation: {add, mul, div} currently, "
+                                        "but got %s.",
+                                        type_));
   }
 
   return cudaGetLastError() != cudaSuccess;
...
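
For reference, both enqueue paths above launch elementwise_kernel with a size triple (prev, midd, post) and a functor such as details::Div<float>. The following self-contained CUDA sketch uses a simplified kernel and made-up sizes to show how that functor pattern performs a broadcast divide; it is an assumption-based illustration, not the actual Paddle-TRT kernel:

#include <cstdio>
#include <cuda_runtime.h>

// Same shape as the Div functor added in the patch.
template <typename T>
struct Div {
  __device__ T operator()(const T &a, const T &b) const { return a / b; }
};

// Simplified stand-in for elementwise_kernel: x has prev * midd * post
// elements, y has midd elements and is broadcast over prev and post.
template <typename T, typename Operator>
__global__ void elementwise_kernel(int num, const T *x, const T *y, T *out,
                                   int prev, int midd, int post, Operator op) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < num) {
    int mid_idx = (idx / post) % midd;  // index into the broadcast y
    out[idx] = op(x[idx], y[mid_idx]);
  }
}

int main() {
  const int prev = 1, midd = 4, post = 2, num = prev * midd * post;
  float h_x[num], h_y[midd], h_out[num];
  for (int i = 0; i < num; ++i) h_x[i] = static_cast<float>(i + 1);
  for (int i = 0; i < midd; ++i) h_y[i] = 2.0f;

  float *d_x, *d_y, *d_out;
  cudaMalloc(&d_x, num * sizeof(float));
  cudaMalloc(&d_y, midd * sizeof(float));
  cudaMalloc(&d_out, num * sizeof(float));
  cudaMemcpy(d_x, h_x, num * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_y, h_y, midd * sizeof(float), cudaMemcpyHostToDevice);

  elementwise_kernel<<<1, 256>>>(num, d_x, d_y, d_out, prev, midd, post,
                                 Div<float>());
  cudaMemcpy(h_out, d_out, num * sizeof(float), cudaMemcpyDeviceToHost);

  for (int i = 0; i < num; ++i) printf("%.1f ", h_out[i]);  // 0.5 1.0 ... 4.0
  printf("\n");

  cudaFree(d_x);
  cudaFree(d_y);
  cudaFree(d_out);
  return 0;
}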